#include "cx/mempool.h"

#include <stdlib.h>
#include <string.h>
#include <errno.h>

static int cx_mempool_ensure_capacity(
        struct cx_mempool_s *pool,
        size_t needed_capacity
) {
    if (needed_capacity <= pool->capacity) return 0;
    size_t newcap = pool->capacity >= 1000 ?
            pool->capacity + 1000 : pool->capacity * 2;
    size_t newmsize;
    if (pool->capacity > newcap
            || cx_szmul(newcap, sizeof(void*), &newmsize)) {
        errno = EOVERFLOW;
        return 1;
    }
    void **newdata = cxRealloc(pool->base_allocator, pool->data, newmsize);
    if (newdata == NULL) return 1;
    pool->data = newdata;
    pool->capacity = newcap;
    return 0;
}

static int cx_mempool_ensure_registered_capacity(
        struct cx_mempool_s *pool,
        size_t needed_capacity
) {
    if (needed_capacity <= pool->registered_capacity) return 0;
    size_t newcap = pool->registered_capacity + 8;
    size_t newmsize;
    if (pool->registered_capacity > newcap || cx_szmul(newcap,
            sizeof(struct cx_mempool_foreign_memory_s), &newmsize)) {
        errno = EOVERFLOW;
        return 1;
    }
    void *newdata = cxRealloc(pool->base_allocator, pool->registered, newmsize);
    if (newdata == NULL) return 1;
    pool->registered = newdata;
    pool->registered_capacity = newcap;
    return 0;
}

static void *cx_mempool_malloc_simple(
        void *p,
        size_t n
) {
    struct cx_mempool_s *pool = p;

    if (cx_mempool_ensure_capacity(pool, pool->size + 1)) {
        return NULL;
    }

    struct cx_mempool_memory_s *mem =
            cxMalloc(pool->base_allocator, sizeof(struct cx_mempool_memory_s) + n);
    if (mem == NULL) return NULL;
    mem->destructor = NULL;
    pool->data[pool->size] = mem;
    pool->size++;

    return mem->c;
}

static void *cx_mempool_calloc_simple(
        void *p,
        size_t nelem,
        size_t elsize
) {
    size_t msz;
    if (cx_szmul(nelem, elsize, &msz)) {
        errno = EOVERFLOW;
        return NULL;
    }
    void *ptr = cx_mempool_malloc_simple(p, msz);
    if (ptr == NULL) return NULL;
    memset(ptr, 0, nelem * elsize);
    return ptr;
}

static void cx_mempool_free_simple(
        void *p,
        void *ptr
) {
    if (!ptr) return;
    struct cx_mempool_s *pool = p;

    cx_destructor_func destr = pool->destr;
    cx_destructor_func2 destr2 = pool->destr2;

    struct cx_mempool_memory_s *mem =
            (void*) ((char *) ptr - sizeof(struct cx_mempool_memory_s));

    for (size_t i = 0; i < pool->size; i++) {
        if (mem == pool->data[i]) {
            if (mem->destructor) {
                mem->destructor(mem->c);
            }
            if (destr != NULL) {
                destr(mem->c);
            }
            if (destr2 != NULL) {
                destr2(pool->destr2_data, mem->c);
            }
            cxFree(pool->base_allocator, mem);
            size_t last_index = pool->size - 1;
            if (i != last_index) {
                pool->data[i] = pool->data[last_index];
                pool->data[last_index] = NULL;
            }
            pool->size--;
            return;
        }
    }
    abort();
}

static void *cx_mempool_realloc_simple(
        void *p,
        void *ptr,
        size_t n
) {
    if (ptr == NULL) {
        return cx_mempool_malloc_simple(p, n);
    }
    if (n == 0) {
        cx_mempool_free_simple(p, ptr);
        return NULL;
    }
    struct cx_mempool_s *pool = p;

    const unsigned overhead = sizeof(struct cx_mempool_memory_s);
    struct cx_mempool_memory_s *mem =
            (void *) (((char *) ptr) - overhead);
    struct cx_mempool_memory_s *newm =
            cxRealloc(pool->base_allocator, mem, n + overhead);

    if (newm == NULL) return NULL;
    if (mem != newm) {
        for (size_t i = 0; i < pool->size; i++) {
            if (pool->data[i] == mem) {
                pool->data[i] = newm;
                return ((char*) newm) + overhead;
            }
        }
        abort();
    } else {
        return ptr;
    }
}

static void cx_mempool_free_all_simple(const struct cx_mempool_s *pool) {
    cx_destructor_func destr = pool->destr;
    cx_destructor_func2 destr2 = pool->destr2;
    for (size_t i = 0; i < pool->size; i++) {
        struct cx_mempool_memory_s *mem = pool->data[i];
        if (mem->destructor) {
            mem->destructor(mem->c);
        }
        if (destr != NULL) {
            destr(mem->c);
        }
        if (destr2 != NULL) {
            destr2(pool->destr2_data, mem->c);
        }
        cxFree(pool->base_allocator, mem);
    }
}

static cx_allocator_class cx_mempool_simple_allocator_class = {
        cx_mempool_malloc_simple,
        cx_mempool_realloc_simple,
        cx_mempool_calloc_simple,
        cx_mempool_free_simple
};

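/*
 * Note on the "simple" pool layout (derived from the code above, not from
 * separate documentation): every allocation is prefixed with a
 * struct cx_mempool_memory_s header holding an optional per-allocation
 * destructor, and the caller receives the address of the member c. Freeing
 * and destructor registration therefore recover the header by subtracting
 * its size from the user pointer.
 *
 * A minimal usage sketch, assuming the public API declared in cx/mempool.h:
 *
 *     CxMempool *pool = cxMempoolCreate(16, CX_MEMPOOL_TYPE_SIMPLE);
 *     char *buf = cxMalloc(pool->allocator, 64);   // tracked by the pool
 *     cxMempoolFree(pool);                         // releases buf as well
 */
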
static void *cx_mempool_malloc_advanced(
        void *p,
        size_t n
) {
    struct cx_mempool_s *pool = p;

    if (cx_mempool_ensure_capacity(pool, pool->size + 1)) {
        return NULL;
    }

    struct cx_mempool_memory2_s *mem =
            cxMalloc(pool->base_allocator, sizeof(struct cx_mempool_memory2_s) + n);
    if (mem == NULL) return NULL;
    mem->destructor = NULL;
    mem->data = NULL;
    pool->data[pool->size] = mem;
    pool->size++;

    return mem->c;
}

static void *cx_mempool_calloc_advanced(
        void *p,
        size_t nelem,
        size_t elsize
) {
    size_t msz;
    if (cx_szmul(nelem, elsize, &msz)) {
        errno = EOVERFLOW;
        return NULL;
    }
    void *ptr = cx_mempool_malloc_advanced(p, msz);
    if (ptr == NULL) return NULL;
    memset(ptr, 0, nelem * elsize);
    return ptr;
}

static void cx_mempool_free_advanced(
        void *p,
        void *ptr
) {
    if (!ptr) return;
    struct cx_mempool_s *pool = p;

    cx_destructor_func destr = pool->destr;
    cx_destructor_func2 destr2 = pool->destr2;

    struct cx_mempool_memory2_s *mem =
            (void*) ((char *) ptr - sizeof(struct cx_mempool_memory2_s));

    for (size_t i = 0; i < pool->size; i++) {
        if (mem == pool->data[i]) {
            if (mem->destructor) {
                mem->destructor(mem->data, mem->c);
            }
            if (destr != NULL) {
                destr(mem->c);
            }
            if (destr2 != NULL) {
                destr2(pool->destr2_data, mem->c);
            }
            cxFree(pool->base_allocator, mem);
            size_t last_index = pool->size - 1;
            if (i != last_index) {
                pool->data[i] = pool->data[last_index];
                pool->data[last_index] = NULL;
            }
            pool->size--;
            return;
        }
    }
    abort();
}

static void *cx_mempool_realloc_advanced(
        void *p,
        void *ptr,
        size_t n
) {
    if (ptr == NULL) {
        return cx_mempool_malloc_advanced(p, n);
    }
    if (n == 0) {
        cx_mempool_free_advanced(p, ptr);
        return NULL;
    }
    struct cx_mempool_s *pool = p;

    const unsigned overhead = sizeof(struct cx_mempool_memory2_s);
    struct cx_mempool_memory2_s *mem =
            (void *) (((char *) ptr) - overhead);
    struct cx_mempool_memory2_s *newm =
            cxRealloc(pool->base_allocator, mem, n + overhead);

    if (newm == NULL) return NULL;
    if (mem != newm) {
        for (size_t i = 0; i < pool->size; i++) {
            if (pool->data[i] == mem) {
                pool->data[i] = newm;
                return ((char*) newm) + overhead;
            }
        }
        abort();
    } else {
        return ptr;
    }
}

static void cx_mempool_free_all_advanced(const struct cx_mempool_s *pool) {
    cx_destructor_func destr = pool->destr;
    cx_destructor_func2 destr2 = pool->destr2;
    for (size_t i = 0; i < pool->size; i++) {
        struct cx_mempool_memory2_s *mem = pool->data[i];
        if (mem->destructor) {
            mem->destructor(mem->data, mem->c);
        }
        if (destr != NULL) {
            destr(mem->c);
        }
        if (destr2 != NULL) {
            destr2(pool->destr2_data, mem->c);
        }
        cxFree(pool->base_allocator, mem);
    }
}

static cx_allocator_class cx_mempool_advanced_allocator_class = {
        cx_mempool_malloc_advanced,
        cx_mempool_realloc_advanced,
        cx_mempool_calloc_advanced,
        cx_mempool_free_advanced
};


static void *cx_mempool_malloc_pure(
        void *p,
        size_t n
) {
    struct cx_mempool_s *pool = p;

    if (cx_mempool_ensure_capacity(pool, pool->size + 1)) {
        return NULL;
    }

    void *mem = cxMalloc(pool->base_allocator, n);
    if (mem == NULL) return NULL;
    pool->data[pool->size] = mem;
    pool->size++;

    return mem;
}

static void *cx_mempool_calloc_pure(
        void *p,
        size_t nelem,
        size_t elsize
) {
    size_t msz;
    if (cx_szmul(nelem, elsize, &msz)) {
        errno = EOVERFLOW;
        return NULL;
    }
    void *ptr = cx_mempool_malloc_pure(p, msz);
    if (ptr == NULL) return NULL;
    memset(ptr, 0, nelem * elsize);
    return ptr;
}

static void cx_mempool_free_pure(
        void *p,
        void *ptr
) {
    if (!ptr) return;
    struct cx_mempool_s *pool = p;

    cx_destructor_func destr = pool->destr;
    cx_destructor_func2 destr2 = pool->destr2;

    for (size_t i = 0; i < pool->size; i++) {
        if (ptr == pool->data[i]) {
            if (destr != NULL) {
                destr(ptr);
            }
            if (destr2 != NULL) {
                destr2(pool->destr2_data, ptr);
            }
            cxFree(pool->base_allocator, ptr);
            size_t last_index = pool->size - 1;
            if (i != last_index) {
                pool->data[i] = pool->data[last_index];
                pool->data[last_index] = NULL;
            }
            pool->size--;
            return;
        }
    }
    abort();
}

static void *cx_mempool_realloc_pure(
        void *p,
        void *ptr,
        size_t n
) {
    if (ptr == NULL) {
        return cx_mempool_malloc_pure(p, n);
    }
    if (n == 0) {
        cx_mempool_free_pure(p, ptr);
        return NULL;
    }
    struct cx_mempool_s *pool = p;
    void *newm = cxRealloc(pool->base_allocator, ptr, n);
    if (newm == NULL) return NULL;
    if (ptr != newm) {
        for (size_t i = 0; i < pool->size; i++) {
            if (pool->data[i] == ptr) {
                pool->data[i] = newm;
                return newm;
            }
        }
        abort();
    } else {
        return ptr;
    }
}

static void cx_mempool_free_all_pure(const struct cx_mempool_s *pool) {
    cx_destructor_func destr = pool->destr;
    cx_destructor_func2 destr2 = pool->destr2;
    for (size_t i = 0; i < pool->size; i++) {
        void *mem = pool->data[i];
        if (destr != NULL) {
            destr(mem);
        }
        if (destr2 != NULL) {
            destr2(pool->destr2_data, mem);
        }
        cxFree(pool->base_allocator, mem);
    }
}

static cx_allocator_class cx_mempool_pure_allocator_class = {
        cx_mempool_malloc_pure,
        cx_mempool_realloc_pure,
        cx_mempool_calloc_pure,
        cx_mempool_free_pure
};

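/*
 * Note on the "pure" pool flavor (as implemented above): allocations carry no
 * per-allocation header, so only the pool-wide destructors installed via
 * cxMempoolGlobalDestructor() / cxMempoolGlobalDestructor2() are invoked when
 * memory is freed. cxMempoolSetDestructor() writes immediately before the
 * user pointer and is therefore only meaningful for the simple and advanced
 * flavors.
 */
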
static void cx_mempool_free_foreign(const struct cx_mempool_s *pool) {
    for (size_t i = 0; i < pool->registered_size; i++) {
        struct cx_mempool_foreign_memory_s info = pool->registered[i];
        if (info.destr2_data == NULL) {
            if (info.destr) {
                info.destr(info.mem);
            }
        } else {
            info.destr2(info.destr2_data, info.mem);
        }
    }
}

void cxMempoolFree(CxMempool *pool) {
    if (pool == NULL) return;
    if (pool->allocator->cl == &cx_mempool_simple_allocator_class) {
        cx_mempool_free_all_simple(pool);
    } else if (pool->allocator->cl == &cx_mempool_advanced_allocator_class) {
        cx_mempool_free_all_advanced(pool);
    } else {
        cx_mempool_free_all_pure(pool);
    }
    cx_mempool_free_foreign(pool);
    cxFree(pool->base_allocator, pool->data);
    cxFree(pool->base_allocator, pool->registered);
    cxFree(pool->base_allocator, (void*) pool->allocator);
    cxFree(pool->base_allocator, pool);
}

void cxMempoolSetDestructor(
        void *ptr,
        cx_destructor_func func
) {
    *(cx_destructor_func *) ((char *) ptr - sizeof(cx_destructor_func)) = func;
}

void cxMempoolSetDestructor2(
        void *ptr,
        cx_destructor_func2 func,
        void *data
) {
    struct cx_mempool_memory2_s *info =
            (void*)((char *) ptr - sizeof(struct cx_mempool_memory2_s));
    info->destructor = func;
    info->data = data;
}

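/*
 * A minimal sketch of per-allocation destructors, assuming a hypothetical
 * record type my_record and cleanup callback my_record_cleanup (neither is
 * part of this file):
 *
 *     CxMempool *pool = cxMempoolCreate(0, CX_MEMPOOL_TYPE_SIMPLE);
 *     my_record *rec = cxMalloc(pool->allocator, sizeof(my_record));
 *     cxMempoolSetDestructor(rec, my_record_cleanup);
 *     // ...
 *     cxMempoolFree(pool);   // my_record_cleanup(rec) runs before release
 *
 * cxMempoolSetDestructor2() works analogously for advanced pools and passes
 * an additional user data pointer to the callback.
 */
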
void cxMempoolRemoveDestructor(void *ptr) {
    *(cx_destructor_func *) ((char *) ptr - sizeof(cx_destructor_func)) = NULL;
}

void cxMempoolRemoveDestructor2(void *ptr) {
    struct cx_mempool_memory2_s *info =
            (void*)((char *) ptr - sizeof(struct cx_mempool_memory2_s));
    info->destructor = NULL;
    info->data = NULL;
}

int cxMempoolRegister(
        CxMempool *pool,
        void *memory,
        cx_destructor_func destr
) {
    if (cx_mempool_ensure_registered_capacity(pool, pool->registered_size + 1)) {
        return 1;
    }

    pool->registered[pool->registered_size++] =
            (struct cx_mempool_foreign_memory_s) {
                .mem = memory,
                .destr = destr,
                .destr2_data = NULL
            };

    return 0;
}

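/*
 * A minimal sketch of registering memory that was not allocated through the
 * pool, assuming a hypothetical destructor close_file() with the signature
 * void close_file(void*) that closes the stream:
 *
 *     FILE *f = fopen("data.txt", "r");
 *     if (f != NULL) {
 *         cxMempoolRegister(pool, f, close_file);
 *     }
 *     // cxMempoolFree(pool) later calls close_file(f); registered pointers
 *     // are never passed to cxFree() by the pool
 */
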
int cxMempoolRegister2(
        CxMempool *pool,
        void *memory,
        cx_destructor_func2 destr,
        void *data
) {
    if (cx_mempool_ensure_registered_capacity(pool, pool->registered_size + 1)) {
        return 1;
    }

    pool->registered[pool->registered_size++] =
            (struct cx_mempool_foreign_memory_s) {
                .mem = memory,
                .destr2 = destr,
                .destr2_data = data
            };

    return 0;
}

CxMempool *cxMempoolCreate(
        size_t capacity,
        enum cx_mempool_type type
) {
    if (capacity == 0) capacity = 16;
    size_t poolsize;
    if (cx_szmul(capacity, sizeof(void*), &poolsize)) {
        errno = EOVERFLOW;
        return NULL;
    }

    CxAllocator *provided_allocator = cxMallocDefault(sizeof(CxAllocator));
    if (provided_allocator == NULL) {
        return NULL;
    }

    CxMempool *pool = cxCallocDefault(1, sizeof(CxMempool));
    if (pool == NULL) {
        cxFreeDefault(provided_allocator);
        return NULL;
    }

    provided_allocator->data = pool;
    *((const CxAllocator**) &pool->base_allocator) = cxDefaultAllocator;
    pool->allocator = provided_allocator;
    if (type == CX_MEMPOOL_TYPE_SIMPLE) {
        provided_allocator->cl = &cx_mempool_simple_allocator_class;
    } else if (type == CX_MEMPOOL_TYPE_ADVANCED) {
        provided_allocator->cl = &cx_mempool_advanced_allocator_class;
    } else {
        provided_allocator->cl = &cx_mempool_pure_allocator_class;
    }

    pool->data = cxMallocDefault(poolsize);
    if (pool->data == NULL) {
        cxFreeDefault(provided_allocator);
        cxFreeDefault(pool);
        return NULL;
    }

    pool->size = 0;
    pool->capacity = capacity;

    return pool;
}

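/*
 * A minimal end-to-end sketch of the pool lifecycle, using only functions
 * that appear in this file (cxMalloc/cxRealloc/cxFree come from the
 * allocator API):
 *
 *     CxMempool *pool = cxMempoolCreate(64, CX_MEMPOOL_TYPE_ADVANCED);
 *     if (pool == NULL) return 1;                 // allocation failed
 *     void *a = cxMalloc(pool->allocator, 128);
 *     a = cxRealloc(pool->allocator, a, 256);     // pool tracking is updated
 *     cxFree(pool->allocator, a);                 // individual free is optional
 *     cxMempoolFree(pool);                        // releases everything else
 */
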
void cxMempoolGlobalDestructor(CxMempool *pool, cx_destructor_func fnc) {
    pool->destr = fnc;
}

void cxMempoolGlobalDestructor2(CxMempool *pool, cx_destructor_func2 fnc, void *data) {
    pool->destr2 = fnc;
    pool->destr2_data = data;
}

static void cx_mempool_free_transferred_allocator(void *base_al, void *al) {
    cxFree(base_al, al);
}

int cxMempoolTransfer(
        CxMempool *source,
        CxMempool *dest
) {
    // safety checks: transfer only between distinct, compatible pools
    if (source == dest) return 1;
    if (source->allocator->cl != dest->allocator->cl) return 1;
    if (source->base_allocator->cl != dest->base_allocator->cl) return 1;

    // ensure sufficient capacity in the destination pool
    if (cx_mempool_ensure_capacity(dest, dest->size + source->size)) {
        return 1;
    }
    if (cx_mempool_ensure_registered_capacity(dest,
            dest->registered_size + source->registered_size)) {
        return 1;
    }

    // allocate a replacement allocator for the source pool
    CxAllocator *new_source_allocator =
            cxMalloc(source->base_allocator, sizeof(CxAllocator));
    if (new_source_allocator == NULL) {
        return 1;
    }
    new_source_allocator->cl = source->allocator->cl;
    new_source_allocator->data = source;

    // move the pooled memory
    if (source->size > 0) {
        memcpy(&dest->data[dest->size], source->data,
                sizeof(void*) * source->size);
        dest->size += source->size;
    }

    // move the registered foreign memory
    if (source->registered_size > 0) {
        memcpy(&dest->registered[dest->registered_size], source->registered,
                sizeof(struct cx_mempool_foreign_memory_s)
                        * source->registered_size);
        dest->registered_size += source->registered_size;
    }

    // hand the old allocator over to the destination pool, so that existing
    // references to it stay valid, and register it for later deallocation
    CxAllocator *transferred_allocator = (CxAllocator*) source->allocator;
    transferred_allocator->data = dest;
    cxMempoolRegister2(dest, transferred_allocator,
            cx_mempool_free_transferred_allocator, (void*) source->base_allocator);

    // reset the source pool for re-use with the replacement allocator
    source->allocator = new_source_allocator;
    memset(source->data, 0, source->size * sizeof(void*));
    memset(source->registered, 0,
            source->registered_size * sizeof(struct cx_mempool_foreign_memory_s));
    source->size = 0;
    source->registered_size = 0;

    return 0;
}

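/*
 * A minimal sketch of merging one pool into another, assuming two
 * hypothetical pools of the same type created with the default base
 * allocator:
 *
 *     CxMempool *request = cxMempoolCreate(0, CX_MEMPOOL_TYPE_SIMPLE);
 *     CxMempool *session = cxMempoolCreate(0, CX_MEMPOOL_TYPE_SIMPLE);
 *     void *obj = cxMalloc(request->allocator, 32);
 *     if (cxMempoolTransfer(request, session)) {
 *         // handle the error
 *     }
 *     cxMempoolFree(request);   // obj stays valid, now owned by session
 *     cxMempoolFree(session);   // releases obj
 */
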
int cxMempoolTransferObject(
        CxMempool *source,
        CxMempool *dest,
        const void *obj
) {
    // safety checks: transfer only between distinct, compatible pools
    if (source == dest) return 1;
    if (source->allocator->cl != dest->allocator->cl) return 1;
    if (source->base_allocator->cl != dest->base_allocator->cl) return 1;

    // search the pooled memory
    for (size_t i = 0; i < source->size; i++) {
        struct cx_mempool_memory_s *mem = source->data[i];
        if (mem->c == obj) {
            // ensure sufficient capacity in the destination pool
            if (cx_mempool_ensure_capacity(dest, dest->size + 1)) {
                return 1;
            }
            // remove the entry from the source pool
            size_t last_index = source->size - 1;
            if (i != last_index) {
                source->data[i] = source->data[last_index];
                source->data[last_index] = NULL;
            }
            source->size--;
            // add it to the destination pool
            dest->data[dest->size++] = mem;
            return 0;
        }
    }

    // search the registered foreign memory
    for (size_t i = 0; i < source->registered_size; i++) {
        struct cx_mempool_foreign_memory_s *mem = &source->registered[i];
        if (mem->mem == obj) {
            // ensure sufficient capacity in the destination pool
            if (cx_mempool_ensure_registered_capacity(dest,
                    dest->registered_size + 1)) {
                return 1;
            }
            dest->registered[dest->registered_size++] = *mem;
            // remove the entry from the source pool
            size_t last_index = source->registered_size - 1;
            if (i != last_index) {
                source->registered[i] = source->registered[last_index];
                memset(&source->registered[last_index], 0,
                        sizeof(struct cx_mempool_foreign_memory_s));
            }
            source->registered_size--;
            return 0;
        }
    }

    // the object was not found in the source pool
    return 1;
}

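/*
 * A minimal sketch of moving a single allocation between pools, assuming two
 * hypothetical pools tmp_pool and long_lived_pool of the same type and base
 * allocator:
 *
 *     void *msg = cxMalloc(tmp_pool->allocator, 256);
 *     // ... build the message ...
 *     if (cxMempoolTransferObject(tmp_pool, long_lived_pool, msg) == 0) {
 *         cxMempoolFree(tmp_pool);   // msg survives in long_lived_pool
 *     }
 */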