@@ -33,29 +33,40 @@
 #include <errno.h>
 
 #ifdef _WIN32
 #include <Windows.h>
 #include <sysinfoapi.h>
-static unsigned long system_page_size() {
+static unsigned long system_page_size(void) {
     static unsigned long ps = 0;
     if (ps == 0) {
         SYSTEM_INFO sysinfo;
         GetSystemInfo(&sysinfo);
         ps = sysinfo.dwPageSize;
     }
     return ps;
 }
-#define SYSTEM_PAGE_SIZE system_page_size()
 #else
 #include <unistd.h>
-#define SYSTEM_PAGE_SIZE sysconf(_SC_PAGESIZE)
+static unsigned long system_page_size(void) {
+    static unsigned long ps = 0;
+    if (ps == 0) {
+        long sc = sysconf(_SC_PAGESIZE);
+        if (sc < 0) {
+            // fallback for systems which do not report a value here
+            ps = 4096; // LCOV_EXCL_LINE
+        } else {
+            ps = (unsigned long) sc;
+        }
+    }
+    return ps;
+}
 #endif
 
 static int buffer_copy_on_write(CxBuffer* buffer) {
     if (0 == (buffer->flags & CX_BUFFER_COPY_ON_WRITE)) return 0;
     void *newspace = cxMalloc(buffer->allocator, buffer->capacity);
-    if (NULL == newspace) return -1;
+    if (NULL == newspace) return -1; // LCOV_EXCL_LINE
     memcpy(newspace, buffer->space, buffer->size);
     buffer->space = newspace;
     buffer->flags &= ~CX_BUFFER_COPY_ON_WRITE;
     buffer->flags |= CX_BUFFER_FREE_CONTENTS;
     return 0;
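For orientation, here is a minimal, hypothetical sketch of how the copy-on-write path above is typically triggered. It relies on the cxBufferInit(buffer, space, capacity, allocator, flags) signature that appears further down in this diff and assumes an fwrite-style cxBufferWrite(ptr, size, nitems, buffer) plus cxBufferDestroy(); none of these calls are part of the patch itself.

#include <cx/buffer.h>  // assumed header path

static void cow_sketch(void) {
    char backing[16] = "hello";
    CxBuffer buf;
    // borrow 'backing': with COPY_ON_WRITE set it must not be modified in place
    cxBufferInit(&buf, backing, sizeof(backing), cxDefaultAllocator,
            CX_BUFFER_COPY_ON_WRITE);
    buf.size = 5;
    buf.pos = 5;
    // the first write goes through buffer_copy_on_write(): the contents move
    // into a fresh allocation and CX_BUFFER_FREE_CONTENTS replaces the COW flag
    cxBufferWrite(", world", 1, 7, &buf);
    // 'backing' still reads "hello"; buf now owns its own memory
    cxBufferDestroy(&buf);
}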
@@ -76,13 +87,11 @@
     }
     buffer->allocator = allocator;
     buffer->flags = flags;
     if (!space) {
         buffer->bytes = cxMalloc(allocator, capacity);
-        if (buffer->bytes == NULL) {
-            return -1; // LCOV_EXCL_LINE
-        }
+        if (buffer->bytes == NULL) return -1; // LCOV_EXCL_LINE
         buffer->flags |= CX_BUFFER_FREE_CONTENTS;
     } else {
         buffer->bytes = space;
     }
     buffer->capacity = capacity;
@@ -120,11 +129,11 @@
 ) {
     if (allocator == NULL) {
         allocator = cxDefaultAllocator;
     }
     CxBuffer *buf = cxMalloc(allocator, sizeof(CxBuffer));
-    if (buf == NULL) return NULL;
+    if (buf == NULL) return NULL; // LCOV_EXCL_LINE
     if (0 == cxBufferInit(buf, space, capacity, allocator, flags)) {
         return buf;
     } else {
         // LCOV_EXCL_START
         cxFree(allocator, buf);
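A similarly hedged sketch of the heap-allocating variant, assuming cxBufferCreate takes the same (space, capacity, allocator, flags) arguments it forwards to cxBufferInit and that cxBufferFree is the matching cleanup call:

#include <cx/buffer.h>  // assumed header path

static int create_sketch(void) {
    // NULL space: the buffer allocates its own 64 bytes via the given allocator
    CxBuffer *buf = cxBufferCreate(NULL, 64, cxDefaultAllocator, CX_BUFFER_AUTO_EXTEND);
    if (buf == NULL) return -1;  // structure or space allocation failed
    // ... fill the buffer ...
    cxBufferFree(buf);           // assumed counterpart releasing contents and structure
    return 0;
}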
@@ -181,10 +190,39 @@
         return 0;
     }
 
 }
 
+size_t cxBufferPop(CxBuffer *buffer, size_t size, size_t nitems) {
+    size_t len;
+    if (cx_szmul(size, nitems, &len)) {
+        // LCOV_EXCL_START
+        errno = EOVERFLOW;
+        return 0;
+        // LCOV_EXCL_STOP
+    }
+    if (len == 0) return 0;
+    if (len > buffer->size) {
+        if (size == 1) {
+            // simple case: everything can be discarded
+            len = buffer->size;
+        } else {
+            // complicated case: misaligned bytes must stay
+            size_t misalignment = buffer->size % size;
+            len = buffer->size - misalignment;
+        }
+    }
+    buffer->size -= len;
+
+    // adjust position, if required
+    if (buffer->pos > buffer->size) {
+        buffer->pos = buffer->size;
+    }
+
+    return len / size;
+}
+
 void cxBufferClear(CxBuffer *buffer) {
     if (0 == (buffer->flags & CX_BUFFER_COPY_ON_WRITE)) {
         memset(buffer->bytes, 0, buffer->size);
     }
     buffer->size = 0;
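The following sketch walks through the newly added cxBufferPop() for a request that exceeds the stored data; apart from the assumed header path, it only uses fields and functions visible in this diff (plus cxBufferDestroy for cleanup).

#include <cx/buffer.h>  // assumed header path
#include <string.h>

static void pop_sketch(void) {
    CxBuffer buf;
    cxBufferInit(&buf, NULL, 16, cxDefaultAllocator, 0);
    memcpy(buf.bytes, "abcdefg", 7);
    buf.size = 7;
    // requested: 4 items of 2 bytes = 8 bytes, but only 7 are stored;
    // the misaligned trailing byte stays: 7 - (7 % 2) = 6 bytes are dropped
    size_t popped = cxBufferPop(&buf, 2, 4);
    // popped == 3 complete items, buf.size == 1, buf.pos clamped if needed
    (void) popped;
    cxBufferDestroy(&buf);
}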
@@ -198,40 +236,14 @@
 
 bool cxBufferEof(const CxBuffer *buffer) {
     return buffer->pos >= buffer->size;
 }
 
-int cxBufferMinimumCapacity(
-        CxBuffer *buffer,
-        size_t newcap
-) {
+int cxBufferReserve(CxBuffer *buffer, size_t newcap) {
     if (newcap <= buffer->capacity) {
         return 0;
     }
-
-    unsigned long pagesize = SYSTEM_PAGE_SIZE;
-    // if page size is larger than 64 KB - for some reason - truncate to 64 KB
-    if (pagesize > 65536) pagesize = 65536;
-    if (newcap < pagesize) {
-        // when smaller as one page, map to the next power of two
-        newcap--;
-        newcap |= newcap >> 1;
-        newcap |= newcap >> 2;
-        newcap |= newcap >> 4;
-        // last operation only needed for pages larger 4096 bytes
-        // but if/else would be more expensive than just doing this
-        newcap |= newcap >> 8;
-        newcap++;
-    } else {
-        // otherwise, map to a multiple of the page size
-        newcap -= newcap % pagesize;
-        newcap += pagesize;
-        // note: if newcap is already page aligned,
-        // this gives a full additional page (which is good)
-    }
-
-
     const int force_copy_flags = CX_BUFFER_COPY_ON_WRITE | CX_BUFFER_COPY_ON_EXTEND;
     if (buffer->flags & force_copy_flags) {
         void *newspace = cxMalloc(buffer->allocator, newcap);
         if (NULL == newspace) return -1;
         memcpy(newspace, buffer->space, buffer->size);
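Because the renamed cxBufferReserve() no longer rounds, a caller that already knows the final size can reserve exactly that much; a hypothetical sketch (the rounding variant reappears in the next hunk):

#include <cx/buffer.h>  // assumed header path

static int reserve_sketch(CxBuffer *buf, size_t total) {
    // 'total' is the exact number of bytes that will be written, e.g. a file size
    if (cxBufferReserve(buf, total)) {
        return -1;  // allocation failed, capacity unchanged
    }
    // capacity is now exactly 'total' bytes: no page or power-of-two rounding
    return 0;
}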
@@ -245,10 +257,42 @@
         buffer->capacity = newcap;
         return 0;
     } else {
         return -1; // LCOV_EXCL_LINE
     }
+}
+
+static size_t cx_buffer_calculate_minimum_capacity(size_t mincap) {
+    unsigned long pagesize = system_page_size();
+    // if page size is larger than 64 KB - for some reason - truncate to 64 KB
+    if (pagesize > 65536) pagesize = 65536;
+    if (mincap < pagesize) {
+        // when smaller as one page, map to the next power of two
+        mincap--;
+        mincap |= mincap >> 1;
+        mincap |= mincap >> 2;
+        mincap |= mincap >> 4;
+        // last operation only needed for pages larger 4096 bytes
+        // but if/else would be more expensive than just doing this
+        mincap |= mincap >> 8;
+        mincap++;
+    } else {
+        // otherwise, map to a multiple of the page size
+        mincap -= mincap % pagesize;
+        mincap += pagesize;
+        // note: if newcap is already page aligned,
+        // this gives a full additional page (which is good)
+    }
+    return mincap;
+}
+
+int cxBufferMinimumCapacity(CxBuffer *buffer, size_t newcap) {
+    if (newcap <= buffer->capacity) {
+        return 0;
+    }
+    newcap = cx_buffer_calculate_minimum_capacity(newcap);
+    return cxBufferReserve(buffer, newcap);
 }
 
 void cxBufferShrink(
         CxBuffer *buffer,
         size_t reserve
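A worked example of the extracted capacity calculation, assuming system_page_size() reports 4096; the numbers follow directly from the code above:

#include <cx/buffer.h>  // assumed header path

// cx_buffer_calculate_minimum_capacity(100)  == 128   (next power of two below one page)
// cx_buffer_calculate_minimum_capacity(4095) == 4096
// cx_buffer_calculate_minimum_capacity(4096) == 8192  (already page aligned: one extra page)
// cx_buffer_calculate_minimum_capacity(5000) == 8192  (next multiple of the page size)
static void capacity_sketch(void) {
    CxBuffer buf;
    cxBufferInit(&buf, NULL, 16, cxDefaultAllocator, 0);
    cxBufferMinimumCapacity(&buf, 5000);  // capacity grows to 8192, not 5000
    cxBufferDestroy(&buf);
}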
@@ -349,12 +393,21 @@
 
     size_t required = buffer->pos + len;
     bool perform_flush = false;
     if (required > buffer->capacity) {
         if (buffer->flags & CX_BUFFER_AUTO_EXTEND) {
-            if (buffer->flush != NULL && required > buffer->flush->threshold) {
-                perform_flush = true;
+            if (buffer->flush != NULL) {
+                size_t newcap = cx_buffer_calculate_minimum_capacity(required);
+                if (newcap > buffer->flush->threshold) {
+                    newcap = buffer->flush->threshold;
+                }
+                if (cxBufferReserve(buffer, newcap)) {
+                    return total_flushed; // LCOV_EXCL_LINE
+                }
+                if (required > newcap) {
+                    perform_flush = true;
+                }
             } else {
                 if (cxBufferMinimumCapacity(buffer, required)) {
                     return total_flushed; // LCOV_EXCL_LINE
                 }
             }
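To make the new growth-versus-flush decision concrete, a hypothetical walkthrough with made-up numbers, again assuming 4096-byte pages:

// capacity = 4096, buffer->flush->threshold = 16384, required = 20000
//   newcap = cx_buffer_calculate_minimum_capacity(20000) = 20480
//   newcap > threshold             -> clamp newcap to 16384
//   cxBufferReserve(buffer, 16384) -> the buffer grows only up to the threshold
//   required (20000) > newcap      -> perform_flush = true, so the write path
//                                     flushes data out instead of growing the
//                                     buffer past the configured threshold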