| 40 size_t new_capacity, |
40 size_t new_capacity, |
| 41 size_t elem_size, |
41 size_t elem_size, |
| 42 cx_attr_unused CxArrayReallocator *alloc |
42 cx_attr_unused CxArrayReallocator *alloc |
| 43 ) { |
43 ) { |
| 44 size_t n; |
44 size_t n; |
| |
45 // LCOV_EXCL_START |
| 45 if (cx_szmul(new_capacity, elem_size, &n)) { |
46 if (cx_szmul(new_capacity, elem_size, &n)) { |
| 46 errno = EOVERFLOW; |
47 errno = EOVERFLOW; |
| 47 return NULL; |
48 return NULL; |
| 48 } |
49 } // LCOV_EXCL_STOP |
| 49 return cxReallocDefault(array, n); |
50 return cxReallocDefault(array, n); |
| 50 } |
51 } |
| 51 |
52 |
| 52 CxArrayReallocator cx_array_default_reallocator_impl = { |
53 CxArrayReallocator cx_array_default_reallocator_impl = { |
| 53 cx_array_default_realloc, NULL, NULL |
54 cx_array_default_realloc, NULL, NULL |
| 64 size_t elem_size, |
65 size_t elem_size, |
| 65 cx_attr_unused CxArrayReallocator *alloc |
66 cx_attr_unused CxArrayReallocator *alloc |
| 66 ) { |
67 ) { |
| 67 // check for overflow |
68 // check for overflow |
| 68 size_t n; |
69 size_t n; |
| |
70 // LCOV_EXCL_START |
| 69 if (cx_szmul(new_capacity, elem_size, &n)) { |
71 if (cx_szmul(new_capacity, elem_size, &n)) { |
| 70 errno = EOVERFLOW; |
72 errno = EOVERFLOW; |
| 71 return NULL; |
73 return NULL; |
| 72 } |
74 } // LCOV_EXCL_STOP |
| 73 |
75 |
| 74 // retrieve the pointer to the actual allocator |
76 // retrieve the pointer to the actual allocator |
| 75 const CxAllocator *al = alloc->allocator; |
77 const CxAllocator *al = alloc->allocator; |
| 76 |
78 |
| 77 // check if the array is still located on the stack |
79 // check if the array is still located on the stack |
| 101 } |
103 } |
| 102 |
104 |
| 103 // LOW LEVEL ARRAY LIST FUNCTIONS |
105 // LOW LEVEL ARRAY LIST FUNCTIONS |
| 104 |
106 |
/**
 * Intelligently calculates a new capacity, reserving some more
 * elements than required to prevent too many allocations.
 *
 * When the current capacity already suffices, it is returned unchanged.
 * Otherwise the needed capacity is rounded up to the next multiple of a
 * granularity that grows with the array size.
 *
 * @param current_capacity the current capacity of the array
 * @param needed_capacity the required capacity of the array
 * @return the new capacity (at least @p needed_capacity)
 */
static size_t cx_array_grow_capacity(
        size_t current_capacity,
        size_t needed_capacity
) {
    // nothing to do when the array is already large enough
    if (current_capacity >= needed_capacity) {
        return current_capacity;
    }
    size_t cap = needed_capacity;
    // choose a growth granularity proportional to the requested size:
    // small arrays grow in small steps, large arrays in larger steps
    size_t alignment;
    if (cap < 128) alignment = 16;
    else if (cap < 1024) alignment = 64;
    else if (cap < 8192) alignment = 512;
    else alignment = 1024;
    // overflow guard: rounding up would wrap around for capacities
    // close to SIZE_MAX - return the exact needed capacity instead
    if (cap > SIZE_MAX - alignment) {
        return cap;
    }
    // round up to the next multiple of the granularity
    return cap - (cap % alignment) + alignment;
}
| 124 |
130 |
| 125 int cx_array_reserve( |
131 int cx_array_reserve( |
| 126 void **array, |
132 void **array, |
| 127 void *size, |
133 void *size, |
| 182 // determine new capacity |
188 // determine new capacity |
| 183 size_t newcap = oldsize + elem_count; |
189 size_t newcap = oldsize + elem_count; |
| 184 |
190 |
| 185 // reallocate if possible |
191 // reallocate if possible |
| 186 if (newcap > oldcap) { |
192 if (newcap > oldcap) { |
| 187 // calculate new capacity (next number divisible by 16) |
|
| 188 newcap = cx_array_align_capacity(newcap, 16, max_size); |
|
| 189 |
|
| 190 // perform reallocation |
|
| 191 void *newmem = reallocator->realloc( |
193 void *newmem = reallocator->realloc( |
| 192 *array, oldcap, newcap, elem_size, reallocator |
194 *array, oldcap, newcap, elem_size, reallocator |
| 193 ); |
195 ); |
| 194 if (newmem == NULL) { |
196 if (newmem == NULL) { |
| 195 return 1; // LCOV_EXCL_LINE |
197 return 1; // LCOV_EXCL_LINE |
| 275 errno = EOVERFLOW; |
277 errno = EOVERFLOW; |
| 276 return 1; |
278 return 1; |
| 277 } |
279 } |
| 278 |
280 |
| 279 // check if resize is required |
281 // check if resize is required |
| 280 size_t minsize = index + elem_count; |
282 const size_t minsize = index + elem_count; |
| 281 size_t newsize = oldsize < minsize ? minsize : oldsize; |
283 const size_t newsize = oldsize < minsize ? minsize : oldsize; |
| 282 |
284 |
| 283 // reallocate if possible |
285 // reallocate if necessary |
| 284 size_t newcap = oldcap; |
286 const size_t newcap = cx_array_grow_capacity(oldcap, newsize); |
| 285 if (newsize > oldcap) { |
287 if (newcap > oldcap) { |
| 286 // check, if we need to repair the src pointer |
288 // check if we need to repair the src pointer |
| 287 uintptr_t targetaddr = (uintptr_t) *target; |
289 uintptr_t targetaddr = (uintptr_t) *target; |
| 288 uintptr_t srcaddr = (uintptr_t) src; |
290 uintptr_t srcaddr = (uintptr_t) src; |
| 289 bool repairsrc = targetaddr <= srcaddr |
291 bool repairsrc = targetaddr <= srcaddr |
| 290 && srcaddr < targetaddr + oldcap * elem_size; |
292 && srcaddr < targetaddr + oldcap * elem_size; |
| 291 |
|
| 292 // calculate new capacity (next number divisible by 16) |
|
| 293 newcap = cx_array_align_capacity(newsize, 16, max_size); |
|
| 294 assert(newcap > newsize); |
|
| 295 |
293 |
| 296 // perform reallocation |
294 // perform reallocation |
| 297 void *newmem = reallocator->realloc( |
295 void *newmem = reallocator->realloc( |
| 298 *target, oldcap, newcap, elem_size, reallocator |
296 *target, oldcap, newcap, elem_size, reallocator |
| 299 ); |
297 ); |
| 367 |
365 |
| 368 // corner case |
366 // corner case |
| 369 if (elem_count == 0) return 0; |
367 if (elem_count == 0) return 0; |
| 370 |
368 |
| 371 // overflow check |
369 // overflow check |
| |
370 // LCOV_EXCL_START |
| 372 if (elem_count > SIZE_MAX - *size) { |
371 if (elem_count > SIZE_MAX - *size) { |
| 373 errno = EOVERFLOW; |
372 errno = EOVERFLOW; |
| 374 return 1; |
373 return 1; |
| 375 } |
374 } |
| |
375 // LCOV_EXCL_STOP |
| 376 |
376 |
| 377 // store some counts |
377 // store some counts |
| 378 size_t old_size = *size; |
378 const size_t old_size = *size; |
| 379 size_t old_capacity = *capacity; |
379 const size_t old_capacity = *capacity; |
| 380 // the necessary capacity is the worst case assumption, including duplicates |
380 // the necessary capacity is the worst case assumption, including duplicates |
| 381 size_t needed_capacity = old_size + elem_count; |
381 const size_t needed_capacity = cx_array_grow_capacity(old_capacity, old_size + elem_count); |
| 382 |
382 |
| 383 // if we need more than we have, try a reallocation |
383 // if we need more than we have, try a reallocation |
| 384 if (needed_capacity > old_capacity) { |
384 if (needed_capacity > old_capacity) { |
| 385 size_t new_capacity = cx_array_align_capacity(needed_capacity, 16, SIZE_MAX); |
|
| 386 void *new_mem = reallocator->realloc( |
385 void *new_mem = reallocator->realloc( |
| 387 *target, old_capacity, new_capacity, elem_size, reallocator |
386 *target, old_capacity, needed_capacity, elem_size, reallocator |
| 388 ); |
387 ); |
| 389 if (new_mem == NULL) { |
388 if (new_mem == NULL) { |
| 390 // give it up right away, there is no contract |
389 // give it up right away, there is no contract |
| 391 // that requires us to insert as much as we can |
390 // that requires us to insert as much as we can |
| 392 return 1; // LCOV_EXCL_LINE |
391 return 1; // LCOV_EXCL_LINE |
| 393 } |
392 } |
| 394 *target = new_mem; |
393 *target = new_mem; |
| 395 *capacity = new_capacity; |
394 *capacity = needed_capacity; |
| 396 } |
395 } |
| 397 |
396 |
| 398 // now we have guaranteed that we can insert everything |
397 // now we have guaranteed that we can insert everything |
| 399 size_t new_size = old_size + elem_count; |
398 size_t new_size = old_size + elem_count; |
| 400 *size = new_size; |
399 *size = new_size; |
| 416 memmove(bptr, dest, buf_size * elem_size); |
415 memmove(bptr, dest, buf_size * elem_size); |
| 417 |
416 |
| 418 // while there are both source and buffered elements left, |
417 // while there are both source and buffered elements left, |
| 419 // copy them interleaving |
418 // copy them interleaving |
| 420 while (si < elem_count && bi < new_size) { |
419 while (si < elem_count && bi < new_size) { |
| 421 // determine how many source elements can be inserted |
420 // determine how many source elements can be inserted. |
| |
421 // the first element that shall not be inserted is the smallest element |
| |
422 // that is strictly larger than the first buffered element |
| |
423 // (located at the index of the infimum plus one). |
| |
424 // the infimum is guaranteed to exist: |
| |
425 // - if all src elements are larger, |
| |
426 // there is no buffer, and this loop is skipped |
| |
427 // - if any src element is smaller or equal, the infimum exists |
| |
428 // - when all src elements that are smaller are copied, the second part |
| |
429 // of this loop body will copy the remaining buffer (emptying it) |
| |
430 // Therefore, the buffer can never contain an element that is smaller |
| |
431 // than any element in the source and the infimum exists. |
| 422 size_t copy_len, bytes_copied; |
432 size_t copy_len, bytes_copied; |
| 423 copy_len = cx_array_binary_search_sup( |
433 copy_len = cx_array_binary_search_inf( |
| 424 src, |
434 src, elem_count - si, elem_size, bptr, cmp_func |
| 425 elem_count - si, |
|
| 426 elem_size, |
|
| 427 bptr, |
|
| 428 cmp_func |
|
| 429 ); |
435 ); |
| 430 // binary search gives us the smallest index; |
436 copy_len++; |
| 431 // we also want to include equal elements here |
|
| 432 while (si + copy_len < elem_count |
|
| 433 && cmp_func(bptr, src+copy_len*elem_size) == 0) { |
|
| 434 copy_len++; |
|
| 435 } |
|
| 436 |
437 |
| 437 // copy the source elements |
438 // copy the source elements |
| 438 if (copy_len > 0) { |
439 if (copy_len > 0) { |
| 439 if (allow_duplicates) { |
440 if (allow_duplicates) { |
| 440 // we can copy the entire chunk |
441 // we can copy the entire chunk |
| 507 if (si < elem_count) { |
508 if (si < elem_count) { |
| 508 if (allow_duplicates) { |
509 if (allow_duplicates) { |
| 509 // duplicates allowed or nothing inserted yet: simply copy everything |
510 // duplicates allowed or nothing inserted yet: simply copy everything |
| 510 memcpy(dest, src, elem_size * (elem_count - si)); |
511 memcpy(dest, src, elem_size * (elem_count - si)); |
| 511 } else { |
512 } else { |
| 512 if (dest != *target) { |
513 // we must check the remaining source elements one by one |
| 513 // skip all source elements that equal the last element |
514 // to skip the duplicates. |
| 514 char *last = dest - elem_size; |
515 // Note that no source element can equal the last element in the |
| 515 while (si < elem_count) { |
516 // destination, because that would have created an insertion point |
| 516 if (last != NULL && cmp_func(last, src) == 0) { |
517 // and a buffer, s.t. the above loop already handled the duplicates |
| 517 src += elem_size; |
|
| 518 si++; |
|
| 519 (*size)--; |
|
| 520 } else { |
|
| 521 break; |
|
| 522 } |
|
| 523 } |
|
| 524 } |
|
| 525 // we must check the elements in the chunk one by one |
|
| 526 while (si < elem_count) { |
518 while (si < elem_count) { |
| 527 // find a chain of elements that can be copied |
519 // find a chain of elements that can be copied |
| 528 size_t copy_len = 1, skip_len = 0; |
520 size_t copy_len = 1, skip_len = 0; |
| 529 { |
521 { |
| 530 const char *left_src = src; |
522 const char *left_src = src; |
| 531 while (si + copy_len < elem_count) { |
523 while (si + copy_len + skip_len < elem_count) { |
| 532 const char *right_src = left_src + elem_size; |
524 const char *right_src = left_src + elem_size; |
| 533 int d = cmp_func(left_src, right_src); |
525 int d = cmp_func(left_src, right_src); |
| 534 if (d < 0) { |
526 if (d < 0) { |
| 535 if (skip_len > 0) { |
527 if (skip_len > 0) { |
| 536 // new larger element found; |
528 // new larger element found; |
| 594 ) { |
586 ) { |
| 595 return cx_array_insert_sorted_impl(target, size, capacity, |
587 return cx_array_insert_sorted_impl(target, size, capacity, |
| 596 cmp_func, sorted_data, elem_size, elem_count, reallocator, false); |
588 cmp_func, sorted_data, elem_size, elem_count, reallocator, false); |
| 597 } |
589 } |
| 598 |
590 |
| 599 size_t cx_array_binary_search_inf( |
591 // implementation that finds ANY index |
| |
592 static size_t cx_array_binary_search_inf_impl( |
| 600 const void *arr, |
593 const void *arr, |
| 601 size_t size, |
594 size_t size, |
| 602 size_t elem_size, |
595 size_t elem_size, |
| 603 const void *elem, |
596 const void *elem, |
| 604 cx_compare_func cmp_func |
597 cx_compare_func cmp_func |
| 639 pivot_index = left_index + (right_index - left_index) / 2; |
632 pivot_index = left_index + (right_index - left_index) / 2; |
| 640 const char *arr_elem = array + pivot_index * elem_size; |
633 const char *arr_elem = array + pivot_index * elem_size; |
| 641 result = cmp_func(elem, arr_elem); |
634 result = cmp_func(elem, arr_elem); |
| 642 if (result == 0) { |
635 if (result == 0) { |
| 643 // found it! |
636 // found it! |
| 644 // check previous elements; |
|
| 645 // when they are equal, report the smallest index |
|
| 646 arr_elem -= elem_size; |
|
| 647 while (pivot_index > 0 && cmp_func(elem, arr_elem) == 0) { |
|
| 648 pivot_index--; |
|
| 649 arr_elem -= elem_size; |
|
| 650 } |
|
| 651 return pivot_index; |
637 return pivot_index; |
| 652 } else if (result < 0) { |
638 } else if (result < 0) { |
| 653 // element is smaller than pivot, continue search left |
639 // element is smaller than pivot, continue search left |
| 654 right_index = pivot_index - 1; |
640 right_index = pivot_index - 1; |
| 655 } else { |
641 } else { |
| 658 } |
644 } |
| 659 } |
645 } |
| 660 |
646 |
| 661 // report the largest upper bound |
647 // report the largest upper bound |
| 662 return result < 0 ? (pivot_index - 1) : pivot_index; |
648 return result < 0 ? (pivot_index - 1) : pivot_index; |
| |
649 } |
| |
650 |
| |
651 size_t cx_array_binary_search_inf( |
| |
652 const void *arr, |
| |
653 size_t size, |
| |
654 size_t elem_size, |
| |
655 const void *elem, |
| |
656 cx_compare_func cmp_func |
| |
657 ) { |
| |
658 size_t index = cx_array_binary_search_inf_impl( |
| |
659 arr, size, elem_size, elem, cmp_func); |
| |
660 // in case of equality, report the largest index |
| |
661 const char *e = ((const char *) arr) + (index + 1) * elem_size; |
| |
662 while (index + 1 < size && cmp_func(e, elem) == 0) { |
| |
663 e += elem_size; |
| |
664 index++; |
| |
665 } |
| |
666 return index; |
| 663 } |
667 } |
| 664 |
668 |
| 665 size_t cx_array_binary_search( |
669 size_t cx_array_binary_search( |
| 666 const void *arr, |
670 const void *arr, |
| 667 size_t size, |
671 size_t size, |
| 685 size_t size, |
689 size_t size, |
| 686 size_t elem_size, |
690 size_t elem_size, |
| 687 const void *elem, |
691 const void *elem, |
| 688 cx_compare_func cmp_func |
692 cx_compare_func cmp_func |
| 689 ) { |
693 ) { |
| 690 size_t inf = cx_array_binary_search_inf( |
694 size_t index = cx_array_binary_search_inf_impl( |
| 691 arr, size, elem_size, elem, cmp_func |
695 arr, size, elem_size, elem, cmp_func |
| 692 ); |
696 ); |
| 693 if (inf == size) { |
697 const char *e = ((const char *) arr) + index * elem_size; |
| 694 // no infimum means, first element is supremum |
698 if (index == size) { |
| |
699 // no infimum means the first element is supremum |
| 695 return 0; |
700 return 0; |
| 696 } else if (cmp_func(((const char *) arr) + inf * elem_size, elem) == 0) { |
701 } else if (cmp_func(e, elem) == 0) { |
| 697 return inf; |
702 // found an equal element, search the smallest index |
| |
703 e -= elem_size; // e now contains the element at index-1 |
| |
704 while (index > 0 && cmp_func(e, elem) == 0) { |
| |
705 e -= elem_size; |
| |
706 index--; |
| |
707 } |
| |
708 return index; |
| 698 } else { |
709 } else { |
| 699 return inf + 1; |
710 // we already have the largest index of the infimum (by design) |
| |
711 // the next element is the supremum (or there is no supremum) |
| |
712 return index + 1; |
| 700 } |
713 } |
| 701 } |
714 } |
| 702 |
715 |
| 703 #ifndef CX_ARRAY_SWAP_SBO_SIZE |
716 #ifndef CX_ARRAY_SWAP_SBO_SIZE |
| 704 #define CX_ARRAY_SWAP_SBO_SIZE 128 |
717 #define CX_ARRAY_SWAP_SBO_SIZE 128 |
| 787 // get a correctly typed pointer to the list |
800 // get a correctly typed pointer to the list |
| 788 cx_array_list *arl = (cx_array_list *) list; |
801 cx_array_list *arl = (cx_array_list *) list; |
| 789 |
802 |
| 790 // guarantee enough capacity |
803 // guarantee enough capacity |
| 791 if (arl->capacity < list->collection.size + n) { |
804 if (arl->capacity < list->collection.size + n) { |
| 792 size_t new_capacity = list->collection.size + n; |
805 const size_t new_capacity = cx_array_grow_capacity(arl->capacity,list->collection.size + n); |
| 793 new_capacity = cx_array_align_capacity(new_capacity, 16, SIZE_MAX); |
|
| 794 if (cxReallocateArray( |
806 if (cxReallocateArray( |
| 795 list->collection.allocator, |
807 list->collection.allocator, |
| 796 &arl->data, new_capacity, |
808 &arl->data, new_capacity, |
| 797 list->collection.elem_size) |
809 list->collection.elem_size) |
| 798 ) { |
810 ) { |
| 799 return 0; |
811 return 0; // LCOV_EXCL_LINE |
| 800 } |
812 } |
| 801 arl->capacity = new_capacity; |
813 arl->capacity = new_capacity; |
| 802 } |
814 } |
| 803 |
815 |
| 804 // determine insert position |
816 // determine insert position |
| 890 ) { |
902 ) { |
| 891 struct cx_list_s *list = iter->src_handle; |
903 struct cx_list_s *list = iter->src_handle; |
| 892 if (iter->index < list->collection.size) { |
904 if (iter->index < list->collection.size) { |
| 893 if (cx_arl_insert_element(list, |
905 if (cx_arl_insert_element(list, |
| 894 iter->index + 1 - prepend, elem) == NULL) { |
906 iter->index + 1 - prepend, elem) == NULL) { |
| 895 return 1; |
907 return 1; // LCOV_EXCL_LINE |
| 896 } |
908 } |
| 897 iter->elem_count++; |
909 iter->elem_count++; |
| 898 if (prepend != 0) { |
910 if (prepend != 0) { |
| 899 iter->index++; |
911 iter->index++; |
| 900 iter->elem_handle = ((char *) iter->elem_handle) + list->collection.elem_size; |
912 iter->elem_handle = ((char *) iter->elem_handle) + list->collection.elem_size; |
| 901 } |
913 } |
| 902 return 0; |
914 return 0; |
| 903 } else { |
915 } else { |
| 904 if (cx_arl_insert_element(list, list->collection.size, elem) == NULL) { |
916 if (cx_arl_insert_element(list, list->collection.size, elem) == NULL) { |
| 905 return 1; |
917 return 1; // LCOV_EXCL_LINE |
| 906 } |
918 } |
| 907 iter->elem_count++; |
919 iter->elem_count++; |
| 908 iter->index = list->collection.size; |
920 iter->index = list->collection.size; |
| 909 return 0; |
921 return 0; |
| 910 } |
922 } |
| 1135 iter->elem_handle = ((char *) list->data) |
1147 iter->elem_handle = ((char *) list->data) |
| 1136 + iter->index * list->base.collection.elem_size; |
1148 + iter->index * list->base.collection.elem_size; |
| 1137 } |
1149 } |
| 1138 } |
1150 } |
| 1139 |
1151 |
| |
1152 static int cx_arl_change_capacity( |
| |
1153 struct cx_list_s *list, |
| |
1154 size_t new_capacity |
| |
1155 ) { |
| |
1156 cx_array_list *arl = (cx_array_list *)list; |
| |
1157 return cxReallocateArray(list->collection.allocator, |
| |
1158 &arl->data, new_capacity, list->collection.elem_size); |
| |
1159 } |
| 1140 |
1160 |
| 1141 static struct cx_iterator_s cx_arl_iterator( |
1161 static struct cx_iterator_s cx_arl_iterator( |
| 1142 const struct cx_list_s *list, |
1162 const struct cx_list_s *list, |
| 1143 size_t index, |
1163 size_t index, |
| 1144 bool backwards |
1164 bool backwards |