| 70 errno = EOVERFLOW; |
70 errno = EOVERFLOW; |
| 71 return NULL; |
71 return NULL; |
| 72 } |
72 } |
| 73 |
73 |
| 74 // retrieve the pointer to the actual allocator |
74 // retrieve the pointer to the actual allocator |
| 75 const CxAllocator *al = alloc->ptr1; |
75 const CxAllocator *al = alloc->allocator; |
| 76 |
76 |
| 77 // check if the array is still located on the stack |
77 // check if the array is still located on the stack |
| 78 void *newmem; |
78 void *newmem; |
| 79 if (array == alloc->ptr2) { |
79 if (array == alloc->stack_ptr) { |
| 80 newmem = cxMalloc(al, n); |
80 newmem = cxMalloc(al, n); |
| 81 if (newmem != NULL && array != NULL) { |
81 if (newmem != NULL && array != NULL) { |
| 82 memcpy(newmem, array, old_capacity*elem_size); |
82 memcpy(newmem, array, old_capacity*elem_size); |
| 83 } |
83 } |
| 84 } else { |
84 } else { |
| 87 return newmem; |
87 return newmem; |
| 88 } |
88 } |
| 89 |
89 |
| 90 struct cx_array_reallocator_s cx_array_reallocator( |
90 struct cx_array_reallocator_s cx_array_reallocator( |
| 91 const struct cx_allocator_s *allocator, |
91 const struct cx_allocator_s *allocator, |
| 92 const void *stackmem |
92 const void *stack_ptr |
| 93 ) { |
93 ) { |
| 94 if (allocator == NULL) { |
94 if (allocator == NULL) { |
| 95 allocator = cxDefaultAllocator; |
95 allocator = cxDefaultAllocator; |
| 96 } |
96 } |
| 97 return (struct cx_array_reallocator_s) { |
97 return (struct cx_array_reallocator_s) { |
| 98 cx_array_advanced_realloc, |
98 cx_array_advanced_realloc, |
| 99 (void*) allocator, (void*) stackmem, |
99 allocator, stack_ptr, |
| 100 0, 0 |
|
| 101 }; |
100 }; |
| 102 } |
101 } |
| 103 |
102 |
| 104 // LOW LEVEL ARRAY LIST FUNCTIONS |
103 // LOW LEVEL ARRAY LIST FUNCTIONS |
| 105 |
104 |
| |
105 /** |
| |
106  * Increases the capacity until it is a multiple of some alignment or reaches the maximum. |
| |
107 * |
| |
108 * @param cap the required capacity (must not be larger than @p max) |
| |
109 * @param alignment the alignment |
| |
110 * @param max the maximum capacity |
| |
111 * @return the aligned capacity |
| |
112 */ |
| 106 static size_t cx_array_align_capacity( |
113 static size_t cx_array_align_capacity( |
| 107 size_t cap, |
114 size_t cap, |
| 108 size_t alignment, |
115 size_t alignment, |
| 109 size_t max |
116 size_t max |
| 110 ) { |
117 ) { |
| 289 // perform reallocation |
296 // perform reallocation |
| 290 void *newmem = reallocator->realloc( |
297 void *newmem = reallocator->realloc( |
| 291 *target, oldcap, newcap, elem_size, reallocator |
298 *target, oldcap, newcap, elem_size, reallocator |
| 292 ); |
299 ); |
| 293 if (newmem == NULL) { |
300 if (newmem == NULL) { |
| 294 return 1; |
301 return 1; // LCOV_EXCL_LINE |
| 295 } |
302 } |
| 296 |
303 |
| 297 // repair src pointer, if necessary |
304 // repair src pointer, if necessary |
| 298 if (repairsrc) { |
305 if (repairsrc) { |
| 299 src = ((char *) newmem) + (srcaddr - targetaddr); |
306 src = ((char *) newmem) + (srcaddr - targetaddr); |
| 333 |
340 |
| 334 // return successfully |
341 // return successfully |
| 335 return 0; |
342 return 0; |
| 336 } |
343 } |
| 337 |
344 |
| 338 int cx_array_insert_sorted( |
345 static int cx_array_insert_sorted_impl( |
| 339 void **target, |
346 void **target, |
| 340 size_t *size, |
347 size_t *size, |
| 341 size_t *capacity, |
348 size_t *capacity, |
| 342 cx_compare_func cmp_func, |
349 cx_compare_func cmp_func, |
| 343 const void *sorted_data, |
350 const void *sorted_data, |
| 344 size_t elem_size, |
351 size_t elem_size, |
| 345 size_t elem_count, |
352 size_t elem_count, |
| 346 CxArrayReallocator *reallocator |
353 CxArrayReallocator *reallocator, |
| |
354 bool allow_duplicates |
| 347 ) { |
355 ) { |
| 348 // assert pointers |
356 // assert pointers |
| 349 assert(target != NULL); |
357 assert(target != NULL); |
| 350 assert(size != NULL); |
358 assert(size != NULL); |
| 351 assert(capacity != NULL); |
359 assert(capacity != NULL); |
| 367 } |
375 } |
| 368 |
376 |
| 369 // store some counts |
377 // store some counts |
| 370 size_t old_size = *size; |
378 size_t old_size = *size; |
| 371 size_t old_capacity = *capacity; |
379 size_t old_capacity = *capacity; |
| |
380 // the necessary capacity is the worst case assumption, including duplicates |
| 372 size_t needed_capacity = old_size + elem_count; |
381 size_t needed_capacity = old_size + elem_count; |
| 373 |
382 |
| 374 // if we need more than we have, try a reallocation |
383 // if we need more than we have, try a reallocation |
| 375 if (needed_capacity > old_capacity) { |
384 if (needed_capacity > old_capacity) { |
| 376 size_t new_capacity = cx_array_align_capacity(needed_capacity, 16, SIZE_MAX); |
385 size_t new_capacity = cx_array_align_capacity(needed_capacity, 16, SIZE_MAX); |
| 416 elem_count - si, |
425 elem_count - si, |
| 417 elem_size, |
426 elem_size, |
| 418 bptr, |
427 bptr, |
| 419 cmp_func |
428 cmp_func |
| 420 ); |
429 ); |
| |
430 // binary search gives us the smallest index; |
| |
431 // we also want to include equal elements here |
| |
432 while (si + copy_len < elem_count |
| |
433 && cmp_func(bptr, src+copy_len*elem_size) == 0) { |
| |
434 copy_len++; |
| |
435 } |
| 421 |
436 |
| 422 // copy the source elements |
437 // copy the source elements |
| 423 bytes_copied = copy_len * elem_size; |
438 if (copy_len > 0) { |
| 424 memcpy(dest, src, bytes_copied); |
439 if (allow_duplicates) { |
| 425 dest += bytes_copied; |
440 // we can copy the entire chunk |
| 426 src += bytes_copied; |
441 bytes_copied = copy_len * elem_size; |
| 427 si += copy_len; |
442 memcpy(dest, src, bytes_copied); |
| |
443 dest += bytes_copied; |
| |
444 src += bytes_copied; |
| |
445 si += copy_len; |
| |
446 di += copy_len; |
| |
447 } else { |
| |
448 // first, check the end of the source chunk |
| |
449 // for being a duplicate of the bptr |
| |
450 const char *end_of_src = src + (copy_len - 1) * elem_size; |
| |
451 size_t skip_len = 0; |
| |
452 while (copy_len > 0 && cmp_func(bptr, end_of_src) == 0) { |
| |
453 end_of_src -= elem_size; |
| |
454 skip_len++; |
| |
455 copy_len--; |
| |
456 } |
| |
457 char *last = dest == *target ? NULL : dest - elem_size; |
| |
458 // then iterate through the source chunk |
| |
459 // and skip all duplicates with the last element in the array |
| |
460 size_t more_skipped = 0; |
| |
461 for (unsigned j = 0; j < copy_len; j++) { |
| |
462 if (last != NULL && cmp_func(last, src) == 0) { |
| |
463 // duplicate - skip |
| |
464 src += elem_size; |
| |
465 si++; |
| |
466 more_skipped++; |
| |
467 } else { |
| |
468 memcpy(dest, src, elem_size); |
| |
469 src += elem_size; |
| |
470 last = dest; |
| |
471 dest += elem_size; |
| |
472 si++; |
| |
473 di++; |
| |
474 } |
| |
475 } |
| |
476 // skip the previously identified elements as well |
| |
477 src += skip_len * elem_size; |
| |
478 si += skip_len; |
| |
479 skip_len += more_skipped; |
| |
480 // reduce the actual size by the number of skipped elements |
| |
481 *size -= skip_len; |
| |
482 } |
| |
483 } |
| 428 |
484 |
| 429 // when all source elements are in place, we are done |
485 // when all source elements are in place, we are done |
| 430 if (si >= elem_count) break; |
486 if (si >= elem_count) break; |
| 431 |
487 |
| 432 // determine how many buffered elements need to be restored |
488 // determine how many buffered elements need to be restored |
| 441 // restore the buffered elements |
497 // restore the buffered elements |
| 442 bytes_copied = copy_len * elem_size; |
498 bytes_copied = copy_len * elem_size; |
| 443 memmove(dest, bptr, bytes_copied); |
499 memmove(dest, bptr, bytes_copied); |
| 444 dest += bytes_copied; |
500 dest += bytes_copied; |
| 445 bptr += bytes_copied; |
501 bptr += bytes_copied; |
| |
502 di += copy_len; |
| 446 bi += copy_len; |
503 bi += copy_len; |
| 447 } |
504 } |
| 448 |
505 |
| 449 // still source elements left? simply append them |
506 // still source elements left? |
| 450 if (si < elem_count) { |
507 if (si < elem_count) { |
| 451 memcpy(dest, src, elem_size * (elem_count - si)); |
508 if (allow_duplicates) { |
| 452 } |
509 // duplicates allowed or nothing inserted yet: simply copy everything |
| 453 |
510 memcpy(dest, src, elem_size * (elem_count - si)); |
| 454 // still buffer elements left? |
511 } else { |
| 455 // don't worry, we already moved them to the correct place |
512 if (dest != *target) { |
| |
513 // skip all source elements that equal the last element |
| |
514 char *last = dest - elem_size; |
| |
515 while (si < elem_count) { |
| |
516 if (last != NULL && cmp_func(last, src) == 0) { |
| |
517 src += elem_size; |
| |
518 si++; |
| |
519 (*size)--; |
| |
520 } else { |
| |
521 break; |
| |
522 } |
| |
523 } |
| |
524 } |
| |
525 // we must check the elements in the chunk one by one |
| |
526 while (si < elem_count) { |
| |
527 // find a chain of elements that can be copied |
| |
528 size_t copy_len = 1, skip_len = 0; |
| |
529 { |
| |
530 const char *left_src = src; |
| |
531 while (si + copy_len < elem_count) { |
| |
532 const char *right_src = left_src + elem_size; |
| |
533 int d = cmp_func(left_src, right_src); |
| |
534 if (d < 0) { |
| |
535 if (skip_len > 0) { |
| |
536 // new larger element found; |
| |
537 // handle it in the next cycle |
| |
538 break; |
| |
539 } |
| |
540 left_src += elem_size; |
| |
541 copy_len++; |
| |
542 } else if (d == 0) { |
| |
543 left_src += elem_size; |
| |
544 skip_len++; |
| |
545 } else { |
| |
546 break; |
| |
547 } |
| |
548 } |
| |
549 } |
| |
550 size_t bytes_copied = copy_len * elem_size; |
| |
551 memcpy(dest, src, bytes_copied); |
| |
552 dest += bytes_copied; |
| |
553 src += bytes_copied + skip_len * elem_size; |
| |
554 si += copy_len + skip_len; |
| |
555 di += copy_len; |
| |
556 *size -= skip_len; |
| |
557 } |
| |
558 } |
| |
559 } |
| |
560 |
| |
561 // buffered elements need to be moved when we skipped duplicates |
| |
562 size_t total_skipped = new_size - *size; |
| |
563 if (bi < new_size && total_skipped > 0) { |
| |
564 // move the remaining buffer to the end of the array |
| |
565 memmove(dest, bptr, elem_size * (new_size - bi)); |
| |
566 } |
| 456 |
567 |
| 457 return 0; |
568 return 0; |
| |
569 } |
| |
570 |
| |
571 int cx_array_insert_sorted( |
| |
572 void **target, |
| |
573 size_t *size, |
| |
574 size_t *capacity, |
| |
575 cx_compare_func cmp_func, |
| |
576 const void *sorted_data, |
| |
577 size_t elem_size, |
| |
578 size_t elem_count, |
| |
579 CxArrayReallocator *reallocator |
| |
580 ) { |
| |
581 return cx_array_insert_sorted_impl(target, size, capacity, |
| |
582 cmp_func, sorted_data, elem_size, elem_count, reallocator, true); |
| |
583 } |
| |
584 |
| |
585 int cx_array_insert_unique( |
| |
586 void **target, |
| |
587 size_t *size, |
| |
588 size_t *capacity, |
| |
589 cx_compare_func cmp_func, |
| |
590 const void *sorted_data, |
| |
591 size_t elem_size, |
| |
592 size_t elem_count, |
| |
593 CxArrayReallocator *reallocator |
| |
594 ) { |
| |
595 return cx_array_insert_sorted_impl(target, size, capacity, |
| |
596 cmp_func, sorted_data, elem_size, elem_count, reallocator, false); |
| 458 } |
597 } |
| 459 |
598 |
| 460 size_t cx_array_binary_search_inf( |
599 size_t cx_array_binary_search_inf( |
| 461 const void *arr, |
600 const void *arr, |
| 462 size_t size, |
601 size_t size, |
| 500 pivot_index = left_index + (right_index - left_index) / 2; |
639 pivot_index = left_index + (right_index - left_index) / 2; |
| 501 const char *arr_elem = array + pivot_index * elem_size; |
640 const char *arr_elem = array + pivot_index * elem_size; |
| 502 result = cmp_func(elem, arr_elem); |
641 result = cmp_func(elem, arr_elem); |
| 503 if (result == 0) { |
642 if (result == 0) { |
| 504 // found it! |
643 // found it! |
| |
644 // check previous elements; |
| |
645 // when they are equal, report the smallest index |
| |
646 arr_elem -= elem_size; |
| |
647 while (pivot_index > 0 && cmp_func(elem, arr_elem) == 0) { |
| |
648 pivot_index--; |
| |
649 arr_elem -= elem_size; |
| |
650 } |
| 505 return pivot_index; |
651 return pivot_index; |
| 506 } else if (result < 0) { |
652 } else if (result < 0) { |
| 507 // element is smaller than pivot, continue search left |
653 // element is smaller than pivot, continue search left |
| 508 right_index = pivot_index - 1; |
654 right_index = pivot_index - 1; |
| 509 } else { |
655 } else { |
| 642 cx_array_list *arl = (cx_array_list *) list; |
788 cx_array_list *arl = (cx_array_list *) list; |
| 643 |
789 |
| 644 // guarantee enough capacity |
790 // guarantee enough capacity |
| 645 if (arl->capacity < list->collection.size + n) { |
791 if (arl->capacity < list->collection.size + n) { |
| 646 size_t new_capacity = list->collection.size + n; |
792 size_t new_capacity = list->collection.size + n; |
| 647 new_capacity = new_capacity - (new_capacity % 16) + 16; |
793 new_capacity = cx_array_align_capacity(new_capacity, 16, SIZE_MAX); |
| 648 if (cxReallocateArray( |
794 if (cxReallocateArray( |
| 649 list->collection.allocator, |
795 list->collection.allocator, |
| 650 &arl->data, new_capacity, |
796 &arl->data, new_capacity, |
| 651 list->collection.elem_size) |
797 list->collection.elem_size) |
| 652 ) { |
798 ) { |
| 698 } else { |
844 } else { |
| 699 return n; |
845 return n; |
| 700 } |
846 } |
| 701 } |
847 } |
| 702 |
848 |
| |
849 static size_t cx_arl_insert_unique( |
| |
850 struct cx_list_s *list, |
| |
851 const void *sorted_data, |
| |
852 size_t n |
| |
853 ) { |
| |
854 // get a correctly typed pointer to the list |
| |
855 cx_array_list *arl = (cx_array_list *) list; |
| |
856 |
| |
857 if (cx_array_insert_unique( |
| |
858 &arl->data, |
| |
859 &list->collection.size, |
| |
860 &arl->capacity, |
| |
861 list->collection.cmpfunc, |
| |
862 sorted_data, |
| |
863 list->collection.elem_size, |
| |
864 n, |
| |
865 &arl->reallocator |
| |
866 )) { |
| |
867 // array list implementation is "all or nothing" |
| |
868 return 0; |
| |
869 } else { |
| |
870 return n; |
| |
871 } |
| |
872 } |
| |
873 |
| 703 static void *cx_arl_insert_element( |
874 static void *cx_arl_insert_element( |
| 704 struct cx_list_s *list, |
875 struct cx_list_s *list, |
| 705 size_t index, |
876 size_t index, |
| 706 const void *element |
877 const void *element |
| 707 ) { |
878 ) { |
| 939 |
1110 |
| 940 static void cx_arl_iter_next(void *it) { |
1111 static void cx_arl_iter_next(void *it) { |
| 941 struct cx_iterator_s *iter = it; |
1112 struct cx_iterator_s *iter = it; |
| 942 if (iter->base.remove) { |
1113 if (iter->base.remove) { |
| 943 iter->base.remove = false; |
1114 iter->base.remove = false; |
| 944 cx_arl_remove(iter->src_handle.m, iter->index, 1, NULL); |
1115 cx_arl_remove(iter->src_handle, iter->index, 1, NULL); |
| |
1116 iter->elem_count--; |
| 945 } else { |
1117 } else { |
| 946 iter->index++; |
1118 iter->index++; |
| 947 iter->elem_handle = |
1119 iter->elem_handle = |
| 948 ((char *) iter->elem_handle) |
1120 ((char *) iter->elem_handle) |
| 949 + ((const struct cx_list_s *) iter->src_handle.c)->collection.elem_size; |
1121 + ((const struct cx_list_s *) iter->src_handle)->collection.elem_size; |
| 950 } |
1122 } |
| 951 } |
1123 } |
| 952 |
1124 |
| 953 static void cx_arl_iter_prev(void *it) { |
1125 static void cx_arl_iter_prev(void *it) { |
| 954 struct cx_iterator_s *iter = it; |
1126 struct cx_iterator_s *iter = it; |
| 955 const cx_array_list *list = iter->src_handle.c; |
|
| 956 if (iter->base.remove) { |
1127 if (iter->base.remove) { |
| 957 iter->base.remove = false; |
1128 iter->base.remove = false; |
| 958 cx_arl_remove(iter->src_handle.m, iter->index, 1, NULL); |
1129 cx_arl_remove(iter->src_handle, iter->index, 1, NULL); |
| |
1130 iter->elem_count--; |
| 959 } |
1131 } |
| 960 iter->index--; |
1132 iter->index--; |
| |
1133 cx_array_list *list = iter->src_handle; |
| 961 if (iter->index < list->base.collection.size) { |
1134 if (iter->index < list->base.collection.size) { |
| 962 iter->elem_handle = ((char *) list->data) |
1135 iter->elem_handle = ((char *) list->data) |
| 963 + iter->index * list->base.collection.elem_size; |
1136 + iter->index * list->base.collection.elem_size; |
| 964 } |
1137 } |
| 965 } |
1138 } |
| 971 bool backwards |
1144 bool backwards |
| 972 ) { |
1145 ) { |
| 973 struct cx_iterator_s iter; |
1146 struct cx_iterator_s iter; |
| 974 |
1147 |
| 975 iter.index = index; |
1148 iter.index = index; |
| 976 iter.src_handle.c = list; |
1149 iter.src_handle = (void*)list; |
| 977 iter.elem_handle = cx_arl_at(list, index); |
1150 iter.elem_handle = cx_arl_at(list, index); |
| 978 iter.elem_size = list->collection.elem_size; |
1151 iter.elem_size = list->collection.elem_size; |
| 979 iter.elem_count = list->collection.size; |
1152 iter.elem_count = list->collection.size; |
| 980 iter.base.valid = cx_arl_iter_valid; |
1153 iter.base.valid = cx_arl_iter_valid; |
| 981 iter.base.current = cx_arl_iter_current; |
1154 iter.base.current = cx_arl_iter_current; |
| 982 iter.base.next = backwards ? cx_arl_iter_prev : cx_arl_iter_next; |
1155 iter.base.next = backwards ? cx_arl_iter_prev : cx_arl_iter_next; |
| 983 iter.base.remove = false; |
1156 iter.base.remove = false; |
| 984 iter.base.mutating = false; |
1157 iter.base.allow_remove = true; |
| 985 |
1158 |
| 986 return iter; |
1159 return iter; |
| 987 } |
1160 } |
| 988 |
1161 |
| 989 static cx_list_class cx_array_list_class = { |
1162 static cx_list_class cx_array_list_class = { |
| 990 cx_arl_destructor, |
1163 cx_arl_destructor, |
| 991 cx_arl_insert_element, |
1164 cx_arl_insert_element, |
| 992 cx_arl_insert_array, |
1165 cx_arl_insert_array, |
| 993 cx_arl_insert_sorted, |
1166 cx_arl_insert_sorted, |
| |
1167 cx_arl_insert_unique, |
| 994 cx_arl_insert_iter, |
1168 cx_arl_insert_iter, |
| 995 cx_arl_remove, |
1169 cx_arl_remove, |
| 996 cx_arl_clear, |
1170 cx_arl_clear, |
| 997 cx_arl_swap, |
1171 cx_arl_swap, |
| 998 cx_arl_at, |
1172 cx_arl_at, |