@@ -30,10 +30,28 @@
 
 #include <stdio.h>
 #include <string.h>
 #include <errno.h>
 
+#ifdef _WIN32
+#include <Windows.h>
+#include <sysinfoapi.h>
+static unsigned long system_page_size() {
+    static unsigned long ps = 0;
+    if (ps == 0) {
+        SYSTEM_INFO sysinfo;
+        GetSystemInfo(&sysinfo);
+        ps = sysinfo.dwPageSize;
+    }
+    return ps;
+}
+#define SYSTEM_PAGE_SIZE system_page_size()
+#else
+#include <unistd.h>
+#define SYSTEM_PAGE_SIZE sysconf(_SC_PAGESIZE)
+#endif
+
 static int buffer_copy_on_write(CxBuffer* buffer) {
     if (0 == (buffer->flags & CX_BUFFER_COPY_ON_WRITE)) return 0;
     void *newspace = cxMalloc(buffer->allocator, buffer->capacity);
     if (NULL == newspace) return -1;
     memcpy(newspace, buffer->space, buffer->size);
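
The SYSTEM_PAGE_SIZE macro introduced above resolves to GetSystemInfo()'s dwPageSize on Windows and to sysconf(_SC_PAGESIZE) everywhere else. A minimal, self-contained sketch of the same probing technique (the page_size() helper and the main() harness are illustrative only, not part of the patch):

#include <stdio.h>

#ifdef _WIN32
#include <Windows.h>
static unsigned long page_size(void) {
    SYSTEM_INFO sysinfo;
    GetSystemInfo(&sysinfo);   // fills dwPageSize among other fields
    return sysinfo.dwPageSize;
}
#else
#include <unistd.h>
static unsigned long page_size(void) {
    return (unsigned long) sysconf(_SC_PAGESIZE);   // commonly 4096
}
#endif

int main(void) {
    printf("system page size: %lu bytes\n", page_size());
    return 0;
}
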
@@ -78,21 +96,21 @@
 
 int cxBufferEnableFlushing(
         CxBuffer *buffer,
         CxBufferFlushConfig config
 ) {
-    buffer->flush = malloc(sizeof(CxBufferFlushConfig));
+    buffer->flush = cxMallocDefault(sizeof(CxBufferFlushConfig));
     if (buffer->flush == NULL) return -1; // LCOV_EXCL_LINE
     memcpy(buffer->flush, &config, sizeof(CxBufferFlushConfig));
     return 0;
 }
 
 void cxBufferDestroy(CxBuffer *buffer) {
     if (buffer->flags & CX_BUFFER_FREE_CONTENTS) {
         cxFree(buffer->allocator, buffer->bytes);
     }
-    free(buffer->flush);
+    cxFreeDefault(buffer->flush);
     memset(buffer, 0, sizeof(CxBuffer));
 }
 
 CxBuffer *cxBufferCreate(
         void *space,
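
With this change the flush configuration passed to cxBufferEnableFlushing() is copied into memory obtained from the library's default allocator (cxMallocDefault) and released again by cxBufferDestroy() via cxFreeDefault. A rough usage sketch - the cxBufferInit signature and the cxDefaultAllocator global are assumed from the rest of UCX, and the CxBufferFlushConfig fields are left zeroed because they are not shown in this excerpt:

CxBuffer buf;
// assumed signature: cxBufferInit(buffer, space, capacity, allocator, flags)
if (cxBufferInit(&buf, NULL, 256, cxDefaultAllocator, 0)) {
    // handle initialization failure
}

CxBufferFlushConfig cfg = {0};   // real code would fill in targets/thresholds
if (cxBufferEnableFlushing(&buf, cfg)) {
    // allocating the internal copy of the config failed
}

// ... use the buffer ...

cxBufferDestroy(&buf);   // releases the config copy with cxFreeDefault
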
@@ -181,10 +205,32 @@
         size_t newcap
 ) {
     if (newcap <= buffer->capacity) {
         return 0;
     }
+
+    unsigned long pagesize = SYSTEM_PAGE_SIZE;
+    // if the page size is larger than 64 KB - for some reason - truncate to 64 KB
+    if (pagesize > 65536) pagesize = 65536;
+    if (newcap < pagesize) {
+        // when smaller than one page, map to the next power of two
+        newcap--;
+        newcap |= newcap >> 1;
+        newcap |= newcap >> 2;
+        newcap |= newcap >> 4;
+        // the last operation is only needed for pages larger than 4096 bytes,
+        // but an if/else would be more expensive than just doing it
+        newcap |= newcap >> 8;
+        newcap++;
+    } else {
+        // otherwise, map to a multiple of the page size
+        newcap -= newcap % pagesize;
+        newcap += pagesize;
+        // note: if newcap is already page-aligned,
+        // this gives a full additional page (which is good)
+    }
+
 
     const int force_copy_flags = CX_BUFFER_COPY_ON_WRITE | CX_BUFFER_COPY_ON_EXTEND;
     if (buffer->flags & force_copy_flags) {
         void *newspace = cxMalloc(buffer->allocator, newcap);
         if (NULL == newspace) return -1;
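
The capacity rounding added above maps small requests to the next power of two and larger ones to the next multiple of the page size. A self-contained sketch that reproduces the arithmetic in isolation (round_capacity is a made-up name for illustration):

#include <stdio.h>

// small requests: round up to the next power of two
// requests of at least one page: round up to the next page-size multiple
static size_t round_capacity(size_t newcap, unsigned long pagesize) {
    if (pagesize > 65536) pagesize = 65536;
    if (newcap < pagesize) {
        newcap--;
        newcap |= newcap >> 1;
        newcap |= newcap >> 2;
        newcap |= newcap >> 4;
        newcap |= newcap >> 8;
        newcap++;
    } else {
        newcap -= newcap % pagesize;
        newcap += pagesize;
    }
    return newcap;
}

int main(void) {
    // with a 4096-byte page:
    printf("%zu\n", round_capacity(100, 4096));    // 128
    printf("%zu\n", round_capacity(4095, 4096));   // 4096
    printf("%zu\n", round_capacity(4096, 4096));   // 8192 (already aligned: a full extra page)
    printf("%zu\n", round_capacity(5000, 4096));   // 8192
    return 0;
}
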
@@ -198,10 +244,32 @@
             (void **) &buffer->bytes, newcap) == 0) {
         buffer->capacity = newcap;
         return 0;
     } else {
         return -1; // LCOV_EXCL_LINE
+    }
+}
+
+void cxBufferShrink(
+        CxBuffer *buffer,
+        size_t reserve
+) {
+    // Ensure buffer is in a reallocatable state
+    const int force_copy_flags = CX_BUFFER_COPY_ON_WRITE | CX_BUFFER_COPY_ON_EXTEND;
+    if (buffer->flags & force_copy_flags) {
+        // do nothing when we are not allowed to reallocate
+        return;
+    }
+
+    // calculate new capacity
+    size_t newCapacity = buffer->size + reserve;
+
+    // If new capacity is smaller than current capacity, resize the buffer
+    if (newCapacity < buffer->capacity) {
+        if (0 == cxReallocate(buffer->allocator, &buffer->bytes, newCapacity)) {
+            buffer->capacity = newCapacity;
+        }
     }
 }
 
 static size_t cx_buffer_flush_helper(
         const CxBuffer *buffer,