@@ -34,20 +34,21 @@
 
 // Default array reallocator
 
 static void *cx_array_default_realloc(
         void *array,
-        size_t capacity,
+        cx_attr_unused size_t old_capacity,
+        size_t new_capacity,
         size_t elem_size,
         cx_attr_unused CxArrayReallocator *alloc
 ) {
     size_t n;
-    if (cx_szmul(capacity, elem_size, &n)) {
+    if (cx_szmul(new_capacity, elem_size, &n)) {
         errno = EOVERFLOW;
         return NULL;
     }
-    return realloc(array, n);
+    return cxReallocDefault(array, n);
 }
 
 CxArrayReallocator cx_array_default_reallocator_impl = {
         cx_array_default_realloc, NULL, NULL, 0, 0
 };
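
The reallocator callback now receives the old and the new capacity separately; the default implementation ignores old_capacity and simply delegates to cxReallocDefault. A minimal sketch of a custom reallocator that conforms to the new callback shape is shown below; it assumes CxArrayReallocator is declared by the library header and that the field order matches the aggregate initializer above, and the tracing_* names are made up for illustration.

#include <stdio.h>
#include <stdlib.h>
#include <cx/array_list.h>   // assumed location of the CxArrayReallocator declaration

static void *tracing_realloc(
        void *array,
        size_t old_capacity,
        size_t new_capacity,
        size_t elem_size,
        CxArrayReallocator *alloc
) {
    (void) alloc; // no allocator state needed in this sketch
    fprintf(stderr, "array grows from %zu to %zu elements\n",
            old_capacity, new_capacity);
    // overflow check omitted for brevity (the library uses cx_szmul here)
    return realloc(array, new_capacity * elem_size);
}

static CxArrayReallocator tracing_reallocator = {
        tracing_realloc, NULL, NULL, 0, 0
};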
@@ -56,17 +57,18 @@
 
 // Stack-aware array reallocator
 
 static void *cx_array_advanced_realloc(
         void *array,
-        size_t capacity,
+        size_t old_capacity,
+        size_t new_capacity,
         size_t elem_size,
         cx_attr_unused CxArrayReallocator *alloc
 ) {
     // check for overflow
     size_t n;
-    if (cx_szmul(capacity, elem_size, &n)) {
+    if (cx_szmul(new_capacity, elem_size, &n)) {
         errno = EOVERFLOW;
         return NULL;
     }
 
     // retrieve the pointer to the actual allocator
@@ -75,11 +77,11 @@
     // check if the array is still located on the stack
     void *newmem;
     if (array == alloc->ptr2) {
         newmem = cxMalloc(al, n);
         if (newmem != NULL && array != NULL) {
-            memcpy(newmem, array, n);
+            memcpy(newmem, array, old_capacity*elem_size);
         }
     } else {
         newmem = cxRealloc(al, array, n);
     }
     return newmem;
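
This hunk is what the extra old_capacity parameter exists for: when the contents still live in the stack buffer (array == alloc->ptr2), the function allocates heap memory for the new capacity and must copy only the bytes that were valid before the growth; copying n bytes (the new size, as the old code did) would read past the end of the smaller stack buffer. A standalone sketch of the same pattern, with made-up names and plain malloc/realloc standing in for the cxMalloc/cxRealloc allocator calls:

#include <stdlib.h>
#include <string.h>

// Sketch only: grow a buffer that may still live in a caller-provided stack area.
static void *grow_possibly_stack_buffer(
        void *buf,             // current buffer (may equal stack_buf)
        const void *stack_buf, // the original stack storage
        size_t old_capacity,
        size_t new_capacity,
        size_t elem_size
) {
    size_t n = new_capacity * elem_size; // overflow check omitted for brevity
    if (buf == stack_buf) {
        // still on the stack: move to the heap and copy only the old contents
        void *heap = malloc(n);
        if (heap != NULL && buf != NULL) {
            memcpy(heap, buf, old_capacity * elem_size);
        }
        return heap;
    }
    // already on the heap: an ordinary realloc suffices
    return realloc(buf, n);
}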
@@ -178,11 +180,11 @@
     // calculate new capacity (next number divisible by 16)
     newcap = cx_array_align_capacity(newcap, 16, max_size);
 
     // perform reallocation
     void *newmem = reallocator->realloc(
-            *array, newcap, elem_size, reallocator
+            *array, oldcap, newcap, elem_size, reallocator
     );
     if (newmem == NULL) {
         return 1; // LCOV_EXCL_LINE
     }
 
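
Every call site now passes the capacity before and after the growth. The rounding itself is unchanged; judging by the comment above, the assert(newcap > newsize) further down, and the open-coded variant new_capacity - (new_capacity % 16) + 16 later in this changeset, it goes to the next multiple of 16 even when the input is already divisible by 16. A tiny illustration follows; the helper name is made up, and cx_array_align_capacity additionally takes a maximum size that is not modelled here.

// Round up to the NEXT multiple of 16: 1 -> 16, 16 -> 32, 21 -> 32, 42 -> 48.
// Callers must guard against overflow near SIZE_MAX (the library passes a
// max_size limit for that purpose).
static size_t align_to_next_16(size_t cap) {
    return cap - (cap % 16) + 16;
}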
@@ -284,11 +286,11 @@
     newcap = cx_array_align_capacity(newsize, 16, max_size);
     assert(newcap > newsize);
 
     // perform reallocation
     void *newmem = reallocator->realloc(
-            *target, newcap, elem_size, reallocator
+            *target, oldcap, newcap, elem_size, reallocator
     );
     if (newmem == NULL) {
         return 1;
     }
 
@@ -364,17 +366,18 @@
         return 1;
     }
 
     // store some counts
     size_t old_size = *size;
+    size_t old_capacity = *capacity;
     size_t needed_capacity = old_size + elem_count;
 
     // if we need more than we have, try a reallocation
-    if (needed_capacity > *capacity) {
+    if (needed_capacity > old_capacity) {
         size_t new_capacity = cx_array_align_capacity(needed_capacity, 16, SIZE_MAX);
         void *new_mem = reallocator->realloc(
-                *target, new_capacity, elem_size, reallocator
+                *target, old_capacity, new_capacity, elem_size, reallocator
         );
         if (new_mem == NULL) {
             // give it up right away, there is no contract
             // that requires us to insert as much as we can
             return 1; // LCOV_EXCL_LINE
@@ -570,11 +573,11 @@
     char sbo_mem[CX_ARRAY_SWAP_SBO_SIZE];
     void *tmp;
 
     // decide if we can use the local buffer
     if (elem_size > CX_ARRAY_SWAP_SBO_SIZE) {
-        tmp = malloc(elem_size);
+        tmp = cxMallocDefault(elem_size);
         // we don't want to enforce error handling
         if (tmp == NULL) abort();
     } else {
         tmp = sbo_mem;
     }
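
Heap fallbacks in the swap helper now go through cxMallocDefault/cxFreeDefault rather than raw malloc/free, while the small-buffer-optimization logic is unchanged: a local stack buffer is used unless a single element exceeds CX_ARRAY_SWAP_SBO_SIZE. For reference, the whole pattern condensed into a standalone sketch, with plain malloc/free and a made-up buffer size standing in for the library's wrappers and constant:

#include <stdlib.h>
#include <string.h>

#define SWAP_SBO_SIZE 128  // placeholder; the real constant is CX_ARRAY_SWAP_SBO_SIZE

static void swap_elements(void *left, void *right, size_t elem_size) {
    char sbo_mem[SWAP_SBO_SIZE];
    // use the stack buffer when the element fits, heap memory otherwise
    void *tmp = elem_size > SWAP_SBO_SIZE ? malloc(elem_size) : sbo_mem;
    if (tmp == NULL) abort();   // the original deliberately avoids error handling here
    memcpy(tmp, left, elem_size);
    memcpy(left, right, elem_size);
    memcpy(right, tmp, elem_size);
    if (tmp != sbo_mem) {
        free(tmp);
    }
}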
@@ -589,11 +592,11 @@
     memcpy(left, right, elem_size);
     memcpy(right, tmp, elem_size);
 
     // free dynamic memory, if it was needed
     if (tmp != sbo_mem) {
-        free(tmp);
+        cxFreeDefault(tmp);
     }
 }
 
 // HIGH LEVEL ARRAY LIST FUNCTIONS
 
@@ -636,54 +639,42 @@
     if (index > list->collection.size || n == 0) return 0;
 
     // get a correctly typed pointer to the list
     cx_array_list *arl = (cx_array_list *) list;
 
+    // guarantee enough capacity
+    if (arl->capacity < list->collection.size + n) {
+        size_t new_capacity = list->collection.size + n;
+        new_capacity = new_capacity - (new_capacity % 16) + 16;
+        if (cxReallocateArray(
+                list->collection.allocator,
+                &arl->data, new_capacity,
+                list->collection.elem_size)
+        ) {
+            return 0;
+        }
+        arl->capacity = new_capacity;
+    }
+
+    // determine insert position
+    char *arl_data = arl->data;
+    char *insert_pos = arl_data + index * list->collection.elem_size;
+
     // do we need to move some elements?
     if (index < list->collection.size) {
-        const char *first_to_move = (const char *) arl->data;
-        first_to_move += index * list->collection.elem_size;
         size_t elems_to_move = list->collection.size - index;
-        size_t start_of_moved = index + n;
-
-        if (cx_array_copy(
-                &arl->data,
-                &list->collection.size,
-                &arl->capacity,
-                0,
-                start_of_moved,
-                first_to_move,
-                list->collection.elem_size,
-                elems_to_move,
-                &arl->reallocator
-        )) {
-            // if moving existing elems is unsuccessful, abort
-            return 0;
-        }
-    }
-
-    // note that if we had to move the elements, the following operation
-    // is guaranteed to succeed, because we have the memory already allocated
-    // therefore, it is impossible to leave this function with an invalid array
-
-    // place the new elements
-    if (cx_array_copy(
-            &arl->data,
-            &list->collection.size,
-            &arl->capacity,
-            0,
-            index,
-            array,
-            list->collection.elem_size,
-            n,
-            &arl->reallocator
-    )) {
-        // array list implementation is "all or nothing"
-        return 0;
-    } else {
-        return n;
-    }
+        char *target = insert_pos + n * list->collection.elem_size;
+        memmove(target, insert_pos, elems_to_move * list->collection.elem_size);
+    }
+
+    // place the new elements, if any
+    if (array != NULL) {
+        memcpy(insert_pos, array, n * list->collection.elem_size);
+    }
+    list->collection.size += n;
+
+    return n;
 }
 
 static size_t cx_arl_insert_sorted(
         struct cx_list_s *list,
         const void *sorted_data,
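
The insert routine no longer funnels everything through two cx_array_copy calls. It now reserves capacity up front via cxReallocateArray, shifts the tail with a single memmove, and copies the new elements with memcpy; because of the new `if (array != NULL)` guard, passing a NULL source merely opens an uninitialized gap of n elements. The core of that approach as a standalone sketch with illustrative names, where the caller is responsible for already having capacity for size + n elements:

#include <stddef.h>
#include <string.h>

// Open a gap of n elements at 'index' in a buffer holding 'size' elements
// and optionally fill it from 'src'. Capacity must already be sufficient.
static void insert_gap(char *data, size_t size, size_t elem_size,
        size_t index, const void *src, size_t n) {
    char *insert_pos = data + index * elem_size;
    if (index < size) {
        // the tail overlaps its destination, so memmove (not memcpy) is required
        memmove(insert_pos + n * elem_size, insert_pos,
                (size - index) * elem_size);
    }
    if (src != NULL) {
        memcpy(insert_pos, src, n * elem_size);
    }
}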
@@ -707,45 +698,46 @@
     } else {
         return n;
     }
 }
 
-static int cx_arl_insert_element(
+static void *cx_arl_insert_element(
         struct cx_list_s *list,
         size_t index,
         const void *element
 ) {
-    return 1 != cx_arl_insert_array(list, index, element, 1);
+    if (cx_arl_insert_array(list, index, element, 1) == 1) {
+        return ((char*)((cx_array_list *) list)->data) + index * list->collection.elem_size;
+    } else {
+        return NULL;
+    }
 }
 
 static int cx_arl_insert_iter(
         struct cx_iterator_s *iter,
         const void *elem,
         int prepend
 ) {
     struct cx_list_s *list = iter->src_handle.m;
     if (iter->index < list->collection.size) {
-        int result = cx_arl_insert_element(
-                list,
-                iter->index + 1 - prepend,
-                elem
-        );
-        if (result == 0) {
-            iter->elem_count++;
-            if (prepend != 0) {
-                iter->index++;
-                iter->elem_handle = ((char *) iter->elem_handle) + list->collection.elem_size;
-            }
-        }
-        return result;
-    } else {
-        int result = cx_arl_insert_element(list, list->collection.size, elem);
-        if (result == 0) {
-            iter->elem_count++;
-            iter->index = list->collection.size;
-        }
-        return result;
+        if (cx_arl_insert_element(list,
+                iter->index + 1 - prepend, elem) == NULL) {
+            return 1;
+        }
+        iter->elem_count++;
+        if (prepend != 0) {
+            iter->index++;
+            iter->elem_handle = ((char *) iter->elem_handle) + list->collection.elem_size;
+        }
+        return 0;
+    } else {
+        if (cx_arl_insert_element(list, list->collection.size, elem) == NULL) {
+            return 1;
+        }
+        iter->elem_count++;
+        iter->index = list->collection.size;
+        return 0;
     }
 }
 
 static size_t cx_arl_remove(
         struct cx_list_s *list,