| 29 #include "cx/mempool.h" |
29 #include "cx/mempool.h" |
| 30 |
30 |
| 31 #include <string.h> |
31 #include <string.h> |
| 32 #include <errno.h> |
32 #include <errno.h> |
| 33 |
33 |
| 34 struct cx_mempool_memory_s { |
/**
 * Grows the pool's object pointer array to hold at least
 * needed_capacity entries.
 *
 * Growth policy: double while capacity is below 1000, then grow
 * linearly in steps of 1000.
 *
 * Returns zero on success; non-zero on arithmetic overflow
 * (errno = EOVERFLOW) or allocation failure.
 */
static int cx_mempool_ensure_capacity(
        struct cx_mempool_s *pool,
        size_t needed_capacity
) {
    if (needed_capacity <= pool->capacity) return 0;
    size_t newcap = pool->capacity >= 1000 ?
        pool->capacity + 1000 : pool->capacity * 2;
    size_t newmsize;
    // LCOV_EXCL_START
    if (pool->capacity > newcap
            || cx_szmul(newcap, sizeof(void*), &newmsize)) {
        errno = EOVERFLOW;
        return 1;
    } // LCOV_EXCL_STOP
    void **newdata = cxRealloc(pool->base_allocator, pool->data, newmsize);
    if (newdata == NULL) return 1;
    pool->data = newdata;
    pool->capacity = newcap;
    return 0;
}
| |
54 |
| |
/**
 * Grows the array of registered foreign-memory entries to hold at
 * least needed_capacity entries.
 *
 * Grows linearly in small steps of 8, because only few registrations
 * are expected.
 *
 * Returns zero on success; non-zero on arithmetic overflow
 * (errno = EOVERFLOW) or allocation failure.
 */
static int cx_mempool_ensure_registered_capacity(
        struct cx_mempool_s *pool,
        size_t needed_capacity
) {
    if (needed_capacity <= pool->registered_capacity) return 0;
    // we do not expect so many registrations
    size_t newcap = pool->registered_capacity + 8;
    size_t newmsize;
    // LCOV_EXCL_START
    if (pool->registered_capacity > newcap || cx_szmul(newcap,
            sizeof(struct cx_mempool_foreign_memory_s), &newmsize)) {
        errno = EOVERFLOW;
        return 1;
    } // LCOV_EXCL_STOP
    void *newdata = cxRealloc(pool->base_allocator, pool->registered, newmsize);
    if (newdata == NULL) return 1;
    pool->registered = newdata;
    pool->registered_capacity = newcap;
    return 0;
}
| |
75 |
| |
/**
 * malloc() implementation for a "simple" pool.
 *
 * Each object is stored with a cx_mempool_memory_s header carrying an
 * optional per-object destructor (initialized to NULL here; it can be
 * set later via cxMempoolSetDestructor). Returns a pointer to the
 * usable memory (the header's flexible array member), or NULL on
 * failure.
 */
static void *cx_mempool_malloc_simple(
        void *p,
        size_t n
) {
    struct cx_mempool_s *pool = p;

    if (cx_mempool_ensure_capacity(pool, pool->size + 1)) {
        return NULL; // LCOV_EXCL_LINE
    }

    struct cx_mempool_memory_s *mem =
        cxMalloc(pool->base_allocator, sizeof(struct cx_mempool_memory_s) + n);
    if (mem == NULL) return NULL;
    mem->destructor = NULL;
    pool->data[pool->size] = mem;
    pool->size++;

    return mem->c;
}
| 70 |
95 |
| 71 static void *cx_mempool_calloc( |
/**
 * calloc() implementation for a "simple" pool.
 *
 * Computes the total size with overflow checking, allocates via
 * cx_mempool_malloc_simple(), and zeroes the memory. Returns NULL on
 * overflow (errno = EOVERFLOW) or allocation failure.
 */
static void *cx_mempool_calloc_simple(
        void *p,
        size_t nelem,
        size_t elsize
) {
    size_t msz;
    if (cx_szmul(nelem, elsize, &msz)) {
        errno = EOVERFLOW;
        return NULL;
    }
    void *ptr = cx_mempool_malloc_simple(p, msz);
    if (ptr == NULL) return NULL;
    // reuse the overflow-checked product instead of recomputing it
    memset(ptr, 0, msz);
    return ptr;
}
| 86 |
111 |
| 87 static void *cx_mempool_realloc( |
/**
 * free() implementation for a "simple" pool.
 *
 * Looks up the object in the pool, invokes the per-object destructor
 * (if any) followed by the pool-wide destructors, releases the memory,
 * and compacts the pointer array by moving the last entry into the
 * freed slot. Calling this with a pointer that is not managed by the
 * pool aborts the program.
 */
static void cx_mempool_free_simple(
        void *p,
        void *ptr
) {
    if (!ptr) return;
    struct cx_mempool_s *pool = p;

    cx_destructor_func destr = pool->destr;
    cx_destructor_func2 destr2 = pool->destr2;

    // recover the header that precedes the user pointer
    struct cx_mempool_memory_s *mem =
        (void*) ((char *) ptr - sizeof(struct cx_mempool_memory_s));

    for (size_t i = 0; i < pool->size; i++) {
        if (mem == pool->data[i]) {
            if (mem->destructor) {
                mem->destructor(mem->c);
            }
            if (destr != NULL) {
                destr(mem->c);
            }
            if (destr2 != NULL) {
                destr2(pool->destr2_data, mem->c);
            }
            cxFree(pool->base_allocator, mem);
            // compact: move the last element into the freed slot
            size_t last_index = pool->size - 1;
            if (i != last_index) {
                pool->data[i] = pool->data[last_index];
                pool->data[last_index] = NULL;
            }
            pool->size--;
            return;
        }
    }
    abort(); // LCOV_EXCL_LINE
}
| |
148 |
| |
/**
 * realloc() implementation for a "simple" pool.
 *
 * NULL pointer behaves like malloc; size zero behaves like free.
 * Otherwise reallocates header + payload and, if the block moved,
 * patches the pool's pointer array. A pointer not managed by the pool
 * aborts the program.
 */
static void *cx_mempool_realloc_simple(
        void *p,
        void *ptr,
        size_t n
) {
    if (ptr == NULL) {
        return cx_mempool_malloc_simple(p, n);
    }
    if (n == 0) {
        cx_mempool_free_simple(p, ptr);
        return NULL;
    }
    struct cx_mempool_s *pool = p;

    const unsigned overhead = sizeof(struct cx_mempool_memory_s);
    struct cx_mempool_memory_s *mem =
        (void *) (((char *) ptr) - overhead);
    struct cx_mempool_memory_s *newm =
        cxRealloc(pool->base_allocator, mem, n + overhead);

    if (newm == NULL) return NULL;
    if (mem != newm) {
        for (size_t i = 0; i < pool->size; i++) {
            if (pool->data[i] == mem) {
                pool->data[i] = newm;
                return ((char*)newm) + overhead;
            }
        }
        abort(); // LCOV_EXCL_LINE
    } else {
        // unfortunately glibc() realloc seems to always move
        return ptr; // LCOV_EXCL_LINE
    }
}
| 112 static void cx_mempool_free( |
183 |
| |
/**
 * Destroys all objects of a "simple" pool.
 *
 * For every object: runs the per-object destructor (if set), then the
 * pool-wide single-argument and two-argument destructors, then frees
 * the memory. Does not reset pool->size; only called during pool
 * destruction.
 */
static void cx_mempool_free_all_simple(const struct cx_mempool_s *pool) {
    cx_destructor_func destr = pool->destr;
    cx_destructor_func2 destr2 = pool->destr2;
    for (size_t i = 0; i < pool->size; i++) {
        struct cx_mempool_memory_s *mem = pool->data[i];
        if (mem->destructor) {
            mem->destructor(mem->c);
        }
        if (destr != NULL) {
            destr(mem->c);
        }
        if (destr2 != NULL) {
            destr2(pool->destr2_data, mem->c);
        }
        cxFree(pool->base_allocator, mem);
    }
}
| |
201 |
| |
/** Allocator class (vtable) for "simple" pools. */
static cx_allocator_class cx_mempool_simple_allocator_class = {
    cx_mempool_malloc_simple,
    cx_mempool_realloc_simple,
    cx_mempool_calloc_simple,
    cx_mempool_free_simple
};
| |
208 |
| |
/**
 * malloc() implementation for an "advanced" pool.
 *
 * Like the simple variant, but each object carries a
 * cx_mempool_memory2_s header with a two-argument destructor plus its
 * user data pointer (both initialized to NULL). Returns a pointer to
 * the usable memory, or NULL on failure.
 */
static void *cx_mempool_malloc_advanced(
        void *p,
        size_t n
) {
    struct cx_mempool_s *pool = p;

    if (cx_mempool_ensure_capacity(pool, pool->size + 1)) {
        return NULL; // LCOV_EXCL_LINE
    }

    struct cx_mempool_memory2_s *mem =
        cxMalloc(pool->base_allocator, sizeof(struct cx_mempool_memory2_s) + n);
    if (mem == NULL) return NULL;
    mem->destructor = NULL;
    mem->data = NULL;
    pool->data[pool->size] = mem;
    pool->size++;

    return mem->c;
}
| |
229 |
| |
/**
 * calloc() implementation for an "advanced" pool.
 *
 * Computes the total size with overflow checking, allocates via
 * cx_mempool_malloc_advanced(), and zeroes the memory. Returns NULL on
 * overflow (errno = EOVERFLOW) or allocation failure.
 */
static void *cx_mempool_calloc_advanced(
        void *p,
        size_t nelem,
        size_t elsize
) {
    size_t msz;
    if (cx_szmul(nelem, elsize, &msz)) {
        errno = EOVERFLOW;
        return NULL;
    }
    void *ptr = cx_mempool_malloc_advanced(p, msz);
    if (ptr == NULL) return NULL;
    // reuse the overflow-checked product instead of recomputing it
    memset(ptr, 0, msz);
    return ptr;
}
| |
245 |
| |
/**
 * free() implementation for an "advanced" pool.
 *
 * Looks up the object, invokes the per-object two-argument destructor
 * (with its stored data pointer) and the pool-wide destructors, frees
 * the memory, and compacts the pointer array. A pointer not managed by
 * the pool aborts the program.
 */
static void cx_mempool_free_advanced(
        void *p,
        void *ptr
) {
    if (!ptr) return;
    struct cx_mempool_s *pool = p;

    cx_destructor_func destr = pool->destr;
    cx_destructor_func2 destr2 = pool->destr2;

    // recover the header that precedes the user pointer
    struct cx_mempool_memory2_s *mem =
        (void*) ((char *) ptr - sizeof(struct cx_mempool_memory2_s));

    for (size_t i = 0; i < pool->size; i++) {
        if (mem == pool->data[i]) {
            if (mem->destructor) {
                mem->destructor(mem->data, mem->c);
            }
            if (destr != NULL) {
                destr(mem->c);
            }
            if (destr2 != NULL) {
                destr2(pool->destr2_data, mem->c);
            }
            cxFree(pool->base_allocator, mem);
            // compact: move the last element into the freed slot
            size_t last_index = pool->size - 1;
            if (i != last_index) {
                pool->data[i] = pool->data[last_index];
                pool->data[last_index] = NULL;
            }
            pool->size--;
            return;
        }
    }
    abort(); // LCOV_EXCL_LINE
}
| 139 |
282 |
| |
/**
 * realloc() implementation for an "advanced" pool.
 *
 * NULL pointer behaves like malloc; size zero behaves like free.
 * Otherwise reallocates header + payload and, if the block moved,
 * patches the pool's pointer array. A pointer not managed by the pool
 * aborts the program.
 */
static void *cx_mempool_realloc_advanced(
        void *p,
        void *ptr,
        size_t n
) {
    if (ptr == NULL) {
        return cx_mempool_malloc_advanced(p, n);
    }
    if (n == 0) {
        cx_mempool_free_advanced(p, ptr);
        return NULL;
    }
    struct cx_mempool_s *pool = p;

    const unsigned overhead = sizeof(struct cx_mempool_memory2_s);
    struct cx_mempool_memory2_s *mem =
        (void *) (((char *) ptr) - overhead);
    struct cx_mempool_memory2_s *newm =
        cxRealloc(pool->base_allocator, mem, n + overhead);

    if (newm == NULL) return NULL;
    if (mem != newm) {
        for (size_t i = 0; i < pool->size; i++) {
            if (pool->data[i] == mem) {
                pool->data[i] = newm;
                return ((char*)newm) + overhead;
            }
        }
        abort(); // LCOV_EXCL_LINE
    } else {
        // unfortunately glibc() realloc seems to always move
        return ptr; // LCOV_EXCL_LINE
    }
}
| |
317 |
| |
/**
 * Destroys all objects of an "advanced" pool.
 *
 * For every object: runs the per-object two-argument destructor (if
 * set), then the pool-wide destructors, then frees the memory. Only
 * called during pool destruction.
 */
static void cx_mempool_free_all_advanced(const struct cx_mempool_s *pool) {
    cx_destructor_func destr = pool->destr;
    cx_destructor_func2 destr2 = pool->destr2;
    for (size_t i = 0; i < pool->size; i++) {
        struct cx_mempool_memory2_s *mem = pool->data[i];
        if (mem->destructor) {
            mem->destructor(mem->data, mem->c);
        }
        if (destr != NULL) {
            destr(mem->c);
        }
        if (destr2 != NULL) {
            destr2(pool->destr2_data, mem->c);
        }
        cxFree(pool->base_allocator, mem);
    }
}
| |
335 |
| |
/** Allocator class (vtable) for "advanced" pools. */
static cx_allocator_class cx_mempool_advanced_allocator_class = {
    cx_mempool_malloc_advanced,
    cx_mempool_realloc_advanced,
    cx_mempool_calloc_advanced,
    cx_mempool_free_advanced
};
| |
342 |
| |
343 |
| |
/**
 * malloc() implementation for a "pure" pool.
 *
 * Objects carry no header at all; the raw allocation is tracked
 * directly, so per-object destructors are not supported (only the
 * pool-wide destructors apply on free).
 */
static void *cx_mempool_malloc_pure(
        void *p,
        size_t n
) {
    struct cx_mempool_s *pool = p;

    if (cx_mempool_ensure_capacity(pool, pool->size + 1)) {
        return NULL; // LCOV_EXCL_LINE
    }

    void *mem = cxMalloc(pool->base_allocator, n);
    if (mem == NULL) return NULL;
    pool->data[pool->size] = mem;
    pool->size++;

    return mem;
}
| |
361 |
| |
/**
 * calloc() implementation for a "pure" pool.
 *
 * Computes the total size with overflow checking, allocates via
 * cx_mempool_malloc_pure(), and zeroes the memory. Returns NULL on
 * overflow (errno = EOVERFLOW) or allocation failure.
 */
static void *cx_mempool_calloc_pure(
        void *p,
        size_t nelem,
        size_t elsize
) {
    size_t msz;
    if (cx_szmul(nelem, elsize, &msz)) {
        errno = EOVERFLOW;
        return NULL;
    }
    void *ptr = cx_mempool_malloc_pure(p, msz);
    if (ptr == NULL) return NULL;
    // reuse the overflow-checked product instead of recomputing it
    memset(ptr, 0, msz);
    return ptr;
}
| |
377 |
| |
/**
 * free() implementation for a "pure" pool.
 *
 * Looks up the raw pointer in the pool, runs the pool-wide destructors
 * (there is no per-object destructor in pure pools), frees the memory,
 * and compacts the pointer array. A pointer not managed by the pool
 * aborts the program.
 */
static void cx_mempool_free_pure(
        void *p,
        void *ptr
) {
    if (!ptr) return;
    struct cx_mempool_s *pool = p;

    cx_destructor_func destr = pool->destr;
    cx_destructor_func2 destr2 = pool->destr2;

    for (size_t i = 0; i < pool->size; i++) {
        if (ptr == pool->data[i]) {
            if (destr != NULL) {
                destr(ptr);
            }
            if (destr2 != NULL) {
                destr2(pool->destr2_data, ptr);
            }
            cxFree(pool->base_allocator, ptr);
            // compact: move the last element into the freed slot
            size_t last_index = pool->size - 1;
            if (i != last_index) {
                pool->data[i] = pool->data[last_index];
                pool->data[last_index] = NULL;
            }
            pool->size--;
            return;
        }
    }
    abort(); // LCOV_EXCL_LINE
}
| |
408 |
| |
/**
 * realloc() implementation for a "pure" pool.
 *
 * NULL pointer behaves like malloc; size zero behaves like free.
 * Otherwise reallocates the raw block and, if it moved, patches the
 * pool's pointer array. A pointer not managed by the pool aborts the
 * program.
 */
static void *cx_mempool_realloc_pure(
        void *p,
        void *ptr,
        size_t n
) {
    if (ptr == NULL) {
        return cx_mempool_malloc_pure(p, n);
    }
    if (n == 0) {
        cx_mempool_free_pure(p, ptr);
        return NULL;
    }
    struct cx_mempool_s *pool = p;
    void *newm = cxRealloc(pool->base_allocator, ptr, n);
    if (newm == NULL) return NULL;
    if (ptr != newm) {
        for (size_t i = 0; i < pool->size; i++) {
            if (pool->data[i] == ptr) {
                pool->data[i] = newm;
                return newm;
            }
        }
        abort(); // LCOV_EXCL_LINE
    } else {
        // unfortunately glibc() realloc seems to always move
        return ptr; // LCOV_EXCL_LINE
    }
}
| |
437 |
| |
/**
 * Destroys all objects of a "pure" pool.
 *
 * Runs the pool-wide destructors on every tracked pointer and frees
 * it. Only called during pool destruction.
 */
static void cx_mempool_free_all_pure(const struct cx_mempool_s *pool) {
    cx_destructor_func destr = pool->destr;
    cx_destructor_func2 destr2 = pool->destr2;
    for (size_t i = 0; i < pool->size; i++) {
        void *mem = pool->data[i];
        if (destr != NULL) {
            destr(mem);
        }
        if (destr2 != NULL) {
            destr2(pool->destr2_data, mem);
        }
        cxFree(pool->base_allocator, mem);
    }
}
| |
452 |
| |
/** Allocator class (vtable) for "pure" pools. */
static cx_allocator_class cx_mempool_pure_allocator_class = {
    cx_mempool_malloc_pure,
    cx_mempool_realloc_pure,
    cx_mempool_calloc_pure,
    cx_mempool_free_pure
};
| |
459 |
| |
/**
 * Invokes the destructors of all registered foreign memory.
 *
 * A NULL destr2_data selects the single-argument destructor;
 * otherwise the two-argument destructor is invoked with its data
 * pointer. The foreign memory itself is not freed by the pool.
 */
static void cx_mempool_free_foreign(const struct cx_mempool_s *pool) {
    for (size_t i = 0; i < pool->registered_size; i++) {
        struct cx_mempool_foreign_memory_s info = pool->registered[i];
        if (info.destr2_data == NULL) {
            if (info.destr) {
                info.destr(info.mem);
            }
        } else {
            info.destr2(info.destr2_data, info.mem);
        }
    }
}
| |
472 |
| 140 void cxMempoolFree(CxMempool *pool) { |
/**
 * Destroys a pool and frees everything it manages.
 *
 * Dispatches on the pool's allocator class to run the matching
 * free-all routine, then invokes destructors of registered foreign
 * memory, and finally releases the pool's internal arrays, the
 * provided allocator, and the pool itself via the base allocator.
 * A NULL pool is a no-op.
 */
void cxMempoolFree(CxMempool *pool) {
    if (pool == NULL) return;
    if (pool->allocator->cl == &cx_mempool_simple_allocator_class) {
        cx_mempool_free_all_simple(pool);
    } else if (pool->allocator->cl == &cx_mempool_advanced_allocator_class) {
        cx_mempool_free_all_advanced(pool);
    } else {
        cx_mempool_free_all_pure(pool);
    }
    cx_mempool_free_foreign(pool);
    cxFree(pool->base_allocator, pool->data);
    cxFree(pool->base_allocator, pool->registered);
    cxFree(pool->base_allocator, (void*) pool->allocator);
    cxFree(pool->base_allocator, pool);
}
| 154 |
488 |
| 155 void cxMempoolSetDestructor( |
/**
 * Sets the per-object destructor of a simple-pool object.
 *
 * The destructor slot lives immediately before the user pointer in
 * the cx_mempool_memory_s header.
 * NOTE(review): only valid for memory from a simple pool — for
 * advanced pools use cxMempoolSetDestructor2.
 */
void cxMempoolSetDestructor(
        void *ptr,
        cx_destructor_func func
) {
    *(cx_destructor_func *) ((char *) ptr - sizeof(cx_destructor_func)) = func;
}
| 161 |
495 |
| |
/**
 * Sets the per-object two-argument destructor and its data pointer
 * for an advanced-pool object.
 *
 * The cx_mempool_memory2_s header precedes the user pointer.
 * NOTE(review): only valid for memory from an advanced pool.
 */
void cxMempoolSetDestructor2(
        void *ptr,
        cx_destructor_func2 func,
        void *data
) {
    struct cx_mempool_memory2_s *info =
        (void*)((char *) ptr - sizeof(struct cx_mempool_memory2_s));
    info->destructor = func;
    info->data = data;
}
| |
506 |
| 162 void cxMempoolRemoveDestructor(void *ptr) { |
/** Clears the per-object destructor of a simple-pool object. */
void cxMempoolRemoveDestructor(void *ptr) {
    *(cx_destructor_func *) ((char *) ptr - sizeof(cx_destructor_func)) = NULL;
}
| 165 |
510 |
| 166 struct cx_mempool_foreign_mem_s { |
/**
 * Clears the per-object two-argument destructor and its data pointer
 * of an advanced-pool object.
 */
void cxMempoolRemoveDestructor2(void *ptr) {
    struct cx_mempool_memory2_s *info =
        (void*)((char *) ptr - sizeof(struct cx_mempool_memory2_s));
    info->destructor = NULL;
    info->data = NULL;
}
| 175 |
517 |
| 176 int cxMempoolRegister( |
/**
 * Registers foreign memory with the pool.
 *
 * The pool will invoke destr on the memory when it is destroyed, but
 * will not free the memory itself. Returns zero on success, non-zero
 * when the registration array could not be grown.
 */
int cxMempoolRegister(
        CxMempool *pool,
        void *memory,
        cx_destructor_func destr
) {
    if (cx_mempool_ensure_registered_capacity(pool, pool->registered_size + 1)) {
        return 1; // LCOV_EXCL_LINE
    }

    pool->registered[pool->registered_size++] =
        (struct cx_mempool_foreign_memory_s) {
            .mem = memory,
            .destr = destr,
            .destr2_data = NULL
        };

    return 0;
}
| 193 |
536 |
| 194 static cx_allocator_class cx_mempool_allocator_class = { |
/**
 * Registers foreign memory with the pool using a two-argument
 * destructor and an additional data pointer.
 *
 * NOTE(review): cx_mempool_free_foreign() dispatches on destr2_data
 * being non-NULL, so registering with data == NULL selects the
 * single-argument path — confirm callers never pass NULL data here.
 * Returns zero on success, non-zero when the registration array could
 * not be grown.
 */
int cxMempoolRegister2(
        CxMempool *pool,
        void *memory,
        cx_destructor_func2 destr,
        void *data
) {
    if (cx_mempool_ensure_registered_capacity(pool, pool->registered_size + 1)) {
        return 1; // LCOV_EXCL_LINE
    }

    pool->registered[pool->registered_size++] =
        (struct cx_mempool_foreign_memory_s) {
            .mem = memory,
            .destr2 = destr,
            .destr2_data = data
        };

    return 0;
}
| 200 |
556 |
| 201 CxMempool *cxMempoolCreate( |
/**
 * Creates a memory pool of the requested type.
 *
 * A capacity of zero selects the default capacity of 16. Allocates
 * the provided allocator and the pool structure with the default
 * allocator, wires the allocator class according to the pool type
 * (simple / advanced / pure), and pre-allocates the object pointer
 * array. Returns NULL on overflow (errno = EOVERFLOW) or allocation
 * failure.
 */
CxMempool *cxMempoolCreate(
        size_t capacity,
        enum cx_mempool_type type
) {
    if (capacity == 0) capacity = 16;
    size_t poolsize;
    if (cx_szmul(capacity, sizeof(void*), &poolsize)) {
        // LCOV_EXCL_START
        errno = EOVERFLOW;
        return NULL;
    } // LCOV_EXCL_STOP

    CxAllocator *provided_allocator = cxMallocDefault(sizeof(CxAllocator));
    if (provided_allocator == NULL) { // LCOV_EXCL_START
        return NULL;
    } // LCOV_EXCL_STOP

    // calloc zero-initializes all pool members not set below
    CxMempool *pool = cxCallocDefault(1, sizeof(CxMempool));
    if (pool == NULL) { // LCOV_EXCL_START
        cxFreeDefault(provided_allocator);
        return NULL;
    } // LCOV_EXCL_STOP

    provided_allocator->data = pool;
    // cast away const to initialize the base_allocator member
    *((const CxAllocator**)&pool->base_allocator) = cxDefaultAllocator;
    pool->allocator = provided_allocator;
    if (type == CX_MEMPOOL_TYPE_SIMPLE) {
        provided_allocator->cl = &cx_mempool_simple_allocator_class;
    } else if (type == CX_MEMPOOL_TYPE_ADVANCED) {
        provided_allocator->cl = &cx_mempool_advanced_allocator_class;
    } else {
        provided_allocator->cl = &cx_mempool_pure_allocator_class;
    }

    pool->data = cxMallocDefault(poolsize);
    if (pool->data == NULL) { // LCOV_EXCL_START
        cxFreeDefault(provided_allocator);
        cxFreeDefault(pool);
        return NULL;
    } // LCOV_EXCL_STOP

    pool->size = 0;
    pool->capacity = capacity;

    return pool;
}
| |
603 |
| |
/** Sets the pool-wide single-argument destructor, run for every object. */
void cxMempoolGlobalDestructor(CxMempool *pool, cx_destructor_func fnc) {
    pool->destr = fnc;
}
| |
607 |
| |
/** Sets the pool-wide two-argument destructor and its data pointer. */
void cxMempoolGlobalDestructor2(CxMempool *pool, cx_destructor_func2 fnc, void *data) {
    pool->destr2 = fnc;
    pool->destr2_data = data;
}
| |
612 |
| |
/**
 * Destructor used by cxMempoolTransfer() to release a transferred
 * allocator with the base allocator it was created from.
 */
static void cx_mempool_free_transferred_allocator(void *base_al, void *al) {
    cxFree(base_al, al);
}
| |
616 |
| |
/**
 * Moves the entire contents of the source pool into the destination
 * pool.
 *
 * Both pools must use the same allocator class and the same base
 * allocator class. After a successful transfer the source pool is
 * empty and ready for re-use with a freshly allocated allocator, while
 * the old source allocator is re-targeted at the destination pool and
 * registered there for later disposal (so pointers handed out with the
 * old allocator remain valid).
 *
 * Returns zero on success, non-zero on any safety-check or allocation
 * failure.
 */
int cxMempoolTransfer(
        CxMempool *source,
        CxMempool *dest
) {
    // safety checks
    if (source == dest) return 1;
    if (source->allocator->cl != dest->allocator->cl) return 1;
    if (source->base_allocator->cl != dest->base_allocator->cl) return 1;

    // ensure enough capacity in the destination pool
    if (cx_mempool_ensure_capacity(dest, dest->size + source->size)) {
        return 1; // LCOV_EXCL_LINE
    }
    if (cx_mempool_ensure_registered_capacity(dest,
            dest->registered_size + source->registered_size)) {
        return 1; // LCOV_EXCL_LINE
    }

    // allocate a replacement allocator for the source pool
    CxAllocator *new_source_allocator =
        cxMalloc(source->base_allocator, sizeof(CxAllocator));
    if (new_source_allocator == NULL) { // LCOV_EXCL_START
        return 1;
    } // LCOV_EXCL_STOP
    new_source_allocator->cl = source->allocator->cl;
    new_source_allocator->data = source;

    // transfer all the data
    if (source->size > 0) {
        memcpy(&dest->data[dest->size], source->data,
            sizeof(void*)*source->size);
        dest->size += source->size;
    }

    // transfer all registered memory
    if (source->registered_size > 0) {
        memcpy(&dest->registered[dest->registered_size], source->registered,
            sizeof(struct cx_mempool_foreign_memory_s)
                * source->registered_size);
        dest->registered_size += source->registered_size;
    }

    // register the old allocator with the new pool
    // we have to remove const-ness for this, but that's okay here
    // also register the base allocator, s.t. the pool knows how to free it
    CxAllocator *transferred_allocator = (CxAllocator*) source->allocator;
    transferred_allocator->data = dest;
    cxMempoolRegister2(dest, transferred_allocator,
        cx_mempool_free_transferred_allocator, (void*)source->base_allocator);

    // prepare the source pool for re-use
    source->allocator = new_source_allocator;
    memset(source->data, 0, source->size * sizeof(void*));
    memset(source->registered, 0,
        source->registered_size * sizeof(struct cx_mempool_foreign_memory_s));
    source->size = 0;
    source->registered_size = 0;

    return 0;
}
| |
677 |
| |
/**
 * Moves a single object from the source pool to the destination pool.
 *
 * Both pools must use the same allocator class and the same base
 * allocator class. The object is searched first among pool-managed
 * memory, then among registered foreign memory; in either case it is
 * removed from the source (compacting the array) and appended to the
 * destination.
 *
 * Returns zero on success, non-zero on safety-check failure, capacity
 * failure, or when the object is not found in the source pool.
 */
int cxMempoolTransferObject(
        CxMempool *source,
        CxMempool *dest,
        const void *obj
) {
    // safety checks
    if (source == dest) return 1;
    if (source->allocator->cl != dest->allocator->cl) return 1;
    if (source->base_allocator->cl != dest->base_allocator->cl) return 1;

    // search for the object
    for (size_t i = 0; i < source->size; i++) {
        struct cx_mempool_memory_s *mem = source->data[i];
        if (mem->c == obj) {
            // first, make sure that the dest pool can take the object
            if (cx_mempool_ensure_capacity(dest, dest->size + 1)) {
                return 1; // LCOV_EXCL_LINE
            }
            // remove from the source pool
            size_t last_index = source->size - 1;
            if (i != last_index) {
                source->data[i] = source->data[last_index];
                source->data[last_index] = NULL;
            }
            source->size--;
            // add to the target pool
            dest->data[dest->size++] = mem;
            return 0;
        }
    }
    // search in the registered objects
    for (size_t i = 0; i < source->registered_size; i++) {
        struct cx_mempool_foreign_memory_s *mem = &source->registered[i];
        if (mem->mem == obj) {
            // first, make sure that the dest pool can take the object
            if (cx_mempool_ensure_registered_capacity(dest,
                    dest->registered_size + 1)) {
                return 1; // LCOV_EXCL_LINE
            }
            dest->registered[dest->registered_size++] = *mem;
            // remove from the source pool
            size_t last_index = source->registered_size - 1;
            if (i != last_index) {
                source->registered[i] = source->registered[last_index];
                memset(&source->registered[last_index], 0,
                    sizeof(struct cx_mempool_foreign_memory_s));
            }
            source->registered_size--;
            return 0;
        }
    }
    // not found
    return 1;
}