/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER.
 *
 * Copyright 2021 Mike Becker, Olaf Wintermann All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *   1. Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *   2. Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in the
 *      documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "cx/array_list.h"
#include "cx/compare.h"
#include <assert.h>
#include <string.h>
#include <errno.h>

// Default array reallocator

static void *cx_array_default_realloc(
        void *array,
        cx_attr_unused size_t old_capacity,
        size_t new_capacity,
        size_t elem_size,
        cx_attr_unused CxArrayReallocator *alloc
) {
    size_t n;
    // LCOV_EXCL_START
    if (cx_szmul(new_capacity, elem_size, &n)) {
        errno = EOVERFLOW;
        return NULL;
    } // LCOV_EXCL_STOP
    return cxReallocDefault(array, n);
}

CxArrayReallocator cx_array_default_reallocator_impl = {
        cx_array_default_realloc, NULL, NULL
};

CxArrayReallocator *cx_array_default_reallocator = &cx_array_default_reallocator_impl;

// Stack-aware array reallocator

static void *cx_array_advanced_realloc(
        void *array,
        size_t old_capacity,
        size_t new_capacity,
        size_t elem_size,
        cx_attr_unused CxArrayReallocator *alloc
) {
    // check for overflow
    size_t n;
    // LCOV_EXCL_START
    if (cx_szmul(new_capacity, elem_size, &n)) {
        errno = EOVERFLOW;
        return NULL;
    } // LCOV_EXCL_STOP

    // retrieve the pointer to the actual allocator
    const CxAllocator *al = alloc->allocator;

    // check if the array is still located on the stack
    void *newmem;
    if (array == alloc->stack_ptr) {
        newmem = cxMalloc(al, n);
        if (newmem != NULL && array != NULL) {
            memcpy(newmem, array, old_capacity*elem_size);
        }
    } else {
        newmem = cxRealloc(al, array, n);
    }
    return newmem;
}

struct cx_array_reallocator_s cx_array_reallocator(
        const struct cx_allocator_s *allocator,
        const void *stack_ptr
) {
    if (allocator == NULL) {
        allocator = cxDefaultAllocator;
    }
    return (struct cx_array_reallocator_s) {
            cx_array_advanced_realloc,
            allocator, stack_ptr,
    };
}
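
/*
 * Illustrative sketch (not part of the library): growing a small stack
 * buffer into heap memory on demand. All variable names are hypothetical;
 * cx_array_copy() is declared in cx/array_list.h. As long as the data fits
 * into buf, no allocation happens; once the capacity is exceeded, the
 * reallocator copies the contents to memory obtained from the default
 * allocator (because NULL was passed to cx_array_reallocator()).
 *
 *   int buf[8];
 *   int *array = buf;
 *   size_t size = 0, capacity = 8;
 *   CxArrayReallocator reallocator = cx_array_reallocator(NULL, buf);
 *   for (int i = 0; i < 20; i++) {
 *       cx_array_copy((void**)&array, &size, &capacity, 0,
 *                     size, &i, sizeof(int), 1, &reallocator);
 *   }
 *   if (array != buf) cxFreeDefault(array);
 */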

// LOW LEVEL ARRAY LIST FUNCTIONS

/**
 * Intelligently calculates a new capacity, reserving some more
 * elements than required to prevent too many allocations.
 *
 * @param current_capacity the current capacity of the array
 * @param needed_capacity the required capacity of the array
 * @return the new capacity
 */
static size_t cx_array_grow_capacity(
        size_t current_capacity,
        size_t needed_capacity
) {
    if (current_capacity >= needed_capacity) {
        return current_capacity;
    }
    size_t cap = needed_capacity;
    size_t alignment;
    if (cap < 128) alignment = 16;
    else if (cap < 1024) alignment = 64;
    else if (cap < 8192) alignment = 512;
    else alignment = 1024;
    return cap - (cap % alignment) + alignment;
}
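
/*
 * A few example values, derived from the alignment rules above:
 *
 *   cx_array_grow_capacity(0, 5)       returns 16   (aligned to 16)
 *   cx_array_grow_capacity(100, 130)   returns 192  (aligned to 64)
 *   cx_array_grow_capacity(1024, 9000) returns 9216 (aligned to 1024)
 *   cx_array_grow_capacity(64, 32)     returns 64   (already sufficient)
 */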

int cx_array_reserve(
        void **array,
        void *size,
        void *capacity,
        unsigned width,
        size_t elem_size,
        size_t elem_count,
        CxArrayReallocator *reallocator
) {
    // assert pointers
    assert(array != NULL);
    assert(size != NULL);
    assert(capacity != NULL);

    // default reallocator
    if (reallocator == NULL) {
        reallocator = cx_array_default_reallocator;
    }

    // determine size and capacity
    size_t oldcap;
    size_t oldsize;
    size_t max_size;
    if (width == 0 || width == sizeof(size_t)) {
        oldcap = *(size_t*) capacity;
        oldsize = *(size_t*) size;
        max_size = SIZE_MAX;
    } else if (width == sizeof(uint16_t)) {
        oldcap = *(uint16_t*) capacity;
        oldsize = *(uint16_t*) size;
        max_size = UINT16_MAX;
    } else if (width == sizeof(uint8_t)) {
        oldcap = *(uint8_t*) capacity;
        oldsize = *(uint8_t*) size;
        max_size = UINT8_MAX;
    }
#if CX_WORDSIZE == 64
    else if (width == sizeof(uint32_t)) {
        oldcap = *(uint32_t*) capacity;
        oldsize = *(uint32_t*) size;
        max_size = UINT32_MAX;
    }
#endif
    else {
        errno = EINVAL;
        return 1;
    }

    // assert that the array is allocated when it has capacity
    assert(*array != NULL || oldcap == 0);

    // check for overflow
    if (elem_count > max_size - oldsize) {
        errno = EOVERFLOW;
        return 1;
    }

    // determine new capacity
    size_t newcap = oldsize + elem_count;

    // reallocate if possible
    if (newcap > oldcap) {
        void *newmem = reallocator->realloc(
                *array, oldcap, newcap, elem_size, reallocator
        );
        if (newmem == NULL) {
            return 1; // LCOV_EXCL_LINE
        }

        // store new pointer
        *array = newmem;

        // store new capacity
        if (width == 0 || width == sizeof(size_t)) {
            *(size_t*) capacity = newcap;
        } else if (width == sizeof(uint16_t)) {
            *(uint16_t*) capacity = (uint16_t) newcap;
        } else if (width == sizeof(uint8_t)) {
            *(uint8_t*) capacity = (uint8_t) newcap;
        }
#if CX_WORDSIZE == 64
        else if (width == sizeof(uint32_t)) {
            *(uint32_t*) capacity = (uint32_t) newcap;
        }
#endif
    }

    return 0;
}

int cx_array_copy(
        void **target,
        void *size,
        void *capacity,
        unsigned width,
        size_t index,
        const void *src,
        size_t elem_size,
        size_t elem_count,
        CxArrayReallocator *reallocator
) {
    // assert pointers
    assert(target != NULL);
    assert(size != NULL);
    assert(capacity != NULL);
    assert(src != NULL);

    // default reallocator
    if (reallocator == NULL) {
        reallocator = cx_array_default_reallocator;
    }

    // determine size and capacity
    size_t oldcap;
    size_t oldsize;
    size_t max_size;
    if (width == 0 || width == sizeof(size_t)) {
        oldcap = *(size_t*) capacity;
        oldsize = *(size_t*) size;
        max_size = SIZE_MAX;
    } else if (width == sizeof(uint16_t)) {
        oldcap = *(uint16_t*) capacity;
        oldsize = *(uint16_t*) size;
        max_size = UINT16_MAX;
    } else if (width == sizeof(uint8_t)) {
        oldcap = *(uint8_t*) capacity;
        oldsize = *(uint8_t*) size;
        max_size = UINT8_MAX;
    }
#if CX_WORDSIZE == 64
    else if (width == sizeof(uint32_t)) {
        oldcap = *(uint32_t*) capacity;
        oldsize = *(uint32_t*) size;
        max_size = UINT32_MAX;
    }
#endif
    else {
        errno = EINVAL;
        return 1;
    }

    // assert that the array is allocated when it has capacity
    assert(*target != NULL || oldcap == 0);

    // check for overflow
    if (index > max_size || elem_count > max_size - index) {
        errno = EOVERFLOW;
        return 1;
    }

    // check if resize is required
    const size_t minsize = index + elem_count;
    const size_t newsize = oldsize < minsize ? minsize : oldsize;

    // reallocate if necessary
    const size_t newcap = cx_array_grow_capacity(oldcap, newsize);
    if (newcap > oldcap) {
        // check if we need to repair the src pointer
        uintptr_t targetaddr = (uintptr_t) *target;
        uintptr_t srcaddr = (uintptr_t) src;
        bool repairsrc = targetaddr <= srcaddr
                && srcaddr < targetaddr + oldcap * elem_size;

        // perform reallocation
        void *newmem = reallocator->realloc(
                *target, oldcap, newcap, elem_size, reallocator
        );
        if (newmem == NULL) {
            return 1; // LCOV_EXCL_LINE
        }

        // repair src pointer, if necessary
        if (repairsrc) {
            src = ((char *) newmem) + (srcaddr - targetaddr);
        }

        // store new pointer
        *target = newmem;
    }

    // determine target pointer
    char *start = *target;
    start += index * elem_size;

    // copy elements and set new size
    // note: no overflow check here, b/c we cannot get here w/o allocation
    memmove(start, src, elem_count * elem_size);

    // if any of size or capacity changed, store them back
    if (newsize != oldsize || newcap != oldcap) {
        if (width == 0 || width == sizeof(size_t)) {
            *(size_t*) capacity = newcap;
            *(size_t*) size = newsize;
        } else if (width == sizeof(uint16_t)) {
            *(uint16_t*) capacity = (uint16_t) newcap;
            *(uint16_t*) size = (uint16_t) newsize;
        } else if (width == sizeof(uint8_t)) {
            *(uint8_t*) capacity = (uint8_t) newcap;
            *(uint8_t*) size = (uint8_t) newsize;
        }
#if CX_WORDSIZE == 64
        else if (width == sizeof(uint32_t)) {
            *(uint32_t*) capacity = (uint32_t) newcap;
            *(uint32_t*) size = (uint32_t) newsize;
        }
#endif
    }

    // return successfully
    return 0;
}
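
/*
 * Illustrative sketch (not part of the library): appending to a plain
 * array whose size and capacity are tracked in size_t variables (width 0).
 * All names are hypothetical; passing NULL as reallocator selects
 * cx_array_default_reallocator.
 *
 *   double *values = NULL;
 *   size_t size = 0, capacity = 0;
 *   double x = 3.14;
 *   if (cx_array_copy((void**)&values, &size, &capacity, 0,
 *                     size, &x, sizeof(double), 1, NULL)) {
 *       // failed, errno is set (e.g. ENOMEM or EOVERFLOW)
 *   }
 *   // ... use values[0 .. size-1] ...
 *   cxFreeDefault(values);
 */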

static int cx_array_insert_sorted_impl(
        void **target,
        size_t *size,
        size_t *capacity,
        cx_compare_func cmp_func,
        const void *sorted_data,
        size_t elem_size,
        size_t elem_count,
        CxArrayReallocator *reallocator,
        bool allow_duplicates
) {
    // assert pointers
    assert(target != NULL);
    assert(size != NULL);
    assert(capacity != NULL);
    assert(cmp_func != NULL);
    assert(sorted_data != NULL);

    // default reallocator
    if (reallocator == NULL) {
        reallocator = cx_array_default_reallocator;
    }

    // corner case
    if (elem_count == 0) return 0;

    // overflow check
    // LCOV_EXCL_START
    if (elem_count > SIZE_MAX - *size) {
        errno = EOVERFLOW;
        return 1;
    }
    // LCOV_EXCL_STOP

    // store some counts
    const size_t old_size = *size;
    const size_t old_capacity = *capacity;
    // the necessary capacity is the worst case assumption, including duplicates
    const size_t needed_capacity = cx_array_grow_capacity(old_capacity, old_size + elem_count);

    // if we need more than we have, try a reallocation
    if (needed_capacity > old_capacity) {
        void *new_mem = reallocator->realloc(
                *target, old_capacity, needed_capacity, elem_size, reallocator
        );
        if (new_mem == NULL) {
            // give it up right away, there is no contract
            // that requires us to insert as much as we can
            return 1; // LCOV_EXCL_LINE
        }
        *target = new_mem;
        *capacity = needed_capacity;
    }

    // now we have guaranteed that we can insert everything
    size_t new_size = old_size + elem_count;
    *size = new_size;

    // declare the source and destination indices/pointers
    size_t si = 0, di = 0;
    const char *src = sorted_data;
    char *dest = *target;

    // find the first insertion point
    di = cx_array_binary_search_sup(dest, old_size, elem_size, src, cmp_func);
    dest += di * elem_size;

    // move the remaining elements in the array completely to the right
    // we will call it the "buffer" for parked elements
    size_t buf_size = old_size - di;
    size_t bi = new_size - buf_size;
    char *bptr = ((char *) *target) + bi * elem_size;
    memmove(bptr, dest, buf_size * elem_size);

    // while there are both source and buffered elements left,
    // copy them interleaving
    while (si < elem_count && bi < new_size) {
        // determine how many source elements can be inserted.
        // the first element that shall not be inserted is the smallest element
        // that is strictly larger than the first buffered element
        // (located at the index of the infimum plus one).
        // the infimum is guaranteed to exist:
        // - if all src elements are larger,
        //   there is no buffer, and this loop is skipped
        // - if any src element is smaller or equal, the infimum exists
        // - when all src elements that are smaller are copied, the second part
        //   of this loop body will copy the remaining buffer (emptying it)
        // Therefore, the buffer can never contain an element that is smaller
        // than any element in the source and the infimum exists.
        size_t copy_len, bytes_copied;
        copy_len = cx_array_binary_search_inf(
                src, elem_count - si, elem_size, bptr, cmp_func
        );
        copy_len++;

        // copy the source elements
        if (copy_len > 0) {
            if (allow_duplicates) {
                // we can copy the entire chunk
                bytes_copied = copy_len * elem_size;
                memcpy(dest, src, bytes_copied);
                dest += bytes_copied;
                src += bytes_copied;
                si += copy_len;
                di += copy_len;
            } else {
                // first, check the end of the source chunk
                // for being a duplicate of the bptr
                const char *end_of_src = src + (copy_len - 1) * elem_size;
                size_t skip_len = 0;
                while (copy_len > 0 && cmp_func(bptr, end_of_src) == 0) {
                    end_of_src -= elem_size;
                    skip_len++;
                    copy_len--;
                }
                char *last = dest == *target ? NULL : dest - elem_size;
                // then iterate through the source chunk
                // and skip all duplicates with the last element in the array
                size_t more_skipped = 0;
                for (unsigned j = 0; j < copy_len; j++) {
                    if (last != NULL && cmp_func(last, src) == 0) {
                        // duplicate - skip
                        src += elem_size;
                        si++;
                        more_skipped++;
                    } else {
                        memcpy(dest, src, elem_size);
                        src += elem_size;
                        last = dest;
                        dest += elem_size;
                        si++;
                        di++;
                    }
                }
                // skip the previously identified elements as well
                src += skip_len * elem_size;
                si += skip_len;
                skip_len += more_skipped;
                // reduce the actual size by the number of skipped elements
                *size -= skip_len;
            }
        }

        // when all source elements are in place, we are done
        if (si >= elem_count) break;

        // determine how many buffered elements need to be restored
        copy_len = cx_array_binary_search_sup(
                bptr,
                new_size - bi,
                elem_size,
                src,
                cmp_func
        );

        // restore the buffered elements
        bytes_copied = copy_len * elem_size;
        memmove(dest, bptr, bytes_copied);
        dest += bytes_copied;
        bptr += bytes_copied;
        di += copy_len;
        bi += copy_len;
    }

    // still source elements left?
    if (si < elem_count) {
        if (allow_duplicates) {
            // duplicates allowed or nothing inserted yet: simply copy everything
            memcpy(dest, src, elem_size * (elem_count - si));
        } else {
            // we must check the remaining source elements one by one
            // to skip the duplicates.
            // Note that no source element can equal the last element in the
            // destination, because that would have created an insertion point
            // and a buffer, s.t. the above loop already handled the duplicates
            while (si < elem_count) {
                // find a chain of elements that can be copied
                size_t copy_len = 1, skip_len = 0;
                {
                    const char *left_src = src;
                    while (si + copy_len + skip_len < elem_count) {
                        const char *right_src = left_src + elem_size;
                        int d = cmp_func(left_src, right_src);
                        if (d < 0) {
                            if (skip_len > 0) {
                                // new larger element found;
                                // handle it in the next cycle
                                break;
                            }
                            left_src += elem_size;
                            copy_len++;
                        } else if (d == 0) {
                            left_src += elem_size;
                            skip_len++;
                        } else {
                            break;
                        }
                    }
                }
                size_t bytes_copied = copy_len * elem_size;
                memcpy(dest, src, bytes_copied);
                dest += bytes_copied;
                src += bytes_copied + skip_len * elem_size;
                si += copy_len + skip_len;
                di += copy_len;
                *size -= skip_len;
            }
        }
    }

    // buffered elements need to be moved when we skipped duplicates
    size_t total_skipped = new_size - *size;
    if (bi < new_size && total_skipped > 0) {
        // move the remaining buffer to the end of the array
        memmove(dest, bptr, elem_size * (new_size - bi));
    }

    return 0;
}

int cx_array_insert_sorted(
        void **target,
        size_t *size,
        size_t *capacity,
        cx_compare_func cmp_func,
        const void *sorted_data,
        size_t elem_size,
        size_t elem_count,
        CxArrayReallocator *reallocator
) {
    return cx_array_insert_sorted_impl(target, size, capacity,
            cmp_func, sorted_data, elem_size, elem_count, reallocator, true);
}

int cx_array_insert_unique(
        void **target,
        size_t *size,
        size_t *capacity,
        cx_compare_func cmp_func,
        const void *sorted_data,
        size_t elem_size,
        size_t elem_count,
        CxArrayReallocator *reallocator
) {
    return cx_array_insert_sorted_impl(target, size, capacity,
            cmp_func, sorted_data, elem_size, elem_count, reallocator, false);
}
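
/*
 * Illustrative sketch (not part of the library): merging a sorted batch
 * into an already sorted array. Variable names are hypothetical; cx_cmp_int
 * is assumed to be the integer compare function from cx/compare.h.
 *
 *   int *arr = NULL;
 *   size_t size = 0, capacity = 0;
 *   const int first[]  = {1, 4, 9};
 *   const int second[] = {2, 4, 8};
 *   cx_array_insert_sorted((void**)&arr, &size, &capacity, cx_cmp_int,
 *                          first, sizeof(int), 3, NULL);
 *   // arr now contains {1, 4, 9}
 *   cx_array_insert_sorted((void**)&arr, &size, &capacity, cx_cmp_int,
 *                          second, sizeof(int), 3, NULL);
 *   // arr now contains {1, 2, 4, 4, 8, 9}; with cx_array_insert_unique()
 *   // the second call would have produced {1, 2, 4, 8, 9} instead
 *   cxFreeDefault(arr);
 */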

// implementation that finds ANY index
static size_t cx_array_binary_search_inf_impl(
        const void *arr,
        size_t size,
        size_t elem_size,
        const void *elem,
        cx_compare_func cmp_func
) {
    // special case: empty array
    if (size == 0) return 0;

    // declare a variable that will contain the compare results
    int result;

    // cast the array pointer to something we can use offsets with
    const char *array = arr;

    // check the first array element
    result = cmp_func(elem, array);
    if (result < 0) {
        return size;
    } else if (result == 0) {
        return 0;
    }

    // special case: there is only one element and that is smaller
    if (size == 1) return 0;

    // check the last array element
    result = cmp_func(elem, array + elem_size * (size - 1));
    if (result >= 0) {
        return size - 1;
    }

    // the element is now guaranteed to be somewhere in the list
    // so start the binary search
    size_t left_index = 1;
    size_t right_index = size - 1;
    size_t pivot_index;

    while (left_index <= right_index) {
        pivot_index = left_index + (right_index - left_index) / 2;
        const char *arr_elem = array + pivot_index * elem_size;
        result = cmp_func(elem, arr_elem);
        if (result == 0) {
            // found it!
            return pivot_index;
        } else if (result < 0) {
            // element is smaller than pivot, continue search left
            right_index = pivot_index - 1;
        } else {
            // element is larger than pivot, continue search right
            left_index = pivot_index + 1;
        }
    }

    // report the index of the infimum
    return result < 0 ? (pivot_index - 1) : pivot_index;
}

size_t cx_array_binary_search_inf(
        const void *arr,
        size_t size,
        size_t elem_size,
        const void *elem,
        cx_compare_func cmp_func
) {
    size_t index = cx_array_binary_search_inf_impl(
            arr, size, elem_size, elem, cmp_func);
    // in case of equality, report the largest index
    const char *e = ((const char *) arr) + (index + 1) * elem_size;
    while (index + 1 < size && cmp_func(e, elem) == 0) {
        e += elem_size;
        index++;
    }
    return index;
}

size_t cx_array_binary_search(
        const void *arr,
        size_t size,
        size_t elem_size,
        const void *elem,
        cx_compare_func cmp_func
) {
    size_t index = cx_array_binary_search_inf(
            arr, size, elem_size, elem, cmp_func
    );
    if (index < size &&
            cmp_func(((const char *) arr) + index * elem_size, elem) == 0) {
        return index;
    } else {
        return size;
    }
}

size_t cx_array_binary_search_sup(
        const void *arr,
        size_t size,
        size_t elem_size,
        const void *elem,
        cx_compare_func cmp_func
) {
    size_t index = cx_array_binary_search_inf_impl(
            arr, size, elem_size, elem, cmp_func
    );
    const char *e = ((const char *) arr) + index * elem_size;
    if (index == size) {
        // no infimum means the first element is the supremum
        return 0;
    } else if (cmp_func(e, elem) == 0) {
        // found an equal element, search the smallest index
        e -= elem_size; // e now points to the element at index-1
        while (index > 0 && cmp_func(e, elem) == 0) {
            e -= elem_size;
            index--;
        }
        return index;
    } else {
        // we already have the largest index of the infimum (by design)
        // the next element is the supremum (or there is no supremum)
        return index + 1;
    }
}
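
/*
 * Illustrative sketch (not part of the library): behaviour of the three
 * search functions on a small sorted array, using cx_cmp_int which is
 * assumed to come from cx/compare.h (names are hypothetical).
 *
 *   const int arr[] = {1, 3, 3, 7};
 *   const int three = 3, four = 4;
 *   cx_array_binary_search_inf(arr, 4, sizeof(int), &three, cx_cmp_int); // 2
 *   cx_array_binary_search_sup(arr, 4, sizeof(int), &three, cx_cmp_int); // 1
 *   cx_array_binary_search    (arr, 4, sizeof(int), &three, cx_cmp_int); // 2
 *   cx_array_binary_search_inf(arr, 4, sizeof(int), &four,  cx_cmp_int); // 2
 *   cx_array_binary_search_sup(arr, 4, sizeof(int), &four,  cx_cmp_int); // 3
 *   cx_array_binary_search    (arr, 4, sizeof(int), &four,  cx_cmp_int); // 4 (== size, not found)
 */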

#ifndef CX_ARRAY_SWAP_SBO_SIZE
#define CX_ARRAY_SWAP_SBO_SIZE 128
#endif
const unsigned cx_array_swap_sbo_size = CX_ARRAY_SWAP_SBO_SIZE;

void cx_array_swap(
        void *arr,
        size_t elem_size,
        size_t idx1,
        size_t idx2
) {
    assert(arr != NULL);

    // short circuit
    if (idx1 == idx2) return;

    char sbo_mem[CX_ARRAY_SWAP_SBO_SIZE];
    void *tmp;

    // decide if we can use the local buffer
    if (elem_size > CX_ARRAY_SWAP_SBO_SIZE) {
        tmp = cxMallocDefault(elem_size);
        // we don't want to enforce error handling
        if (tmp == NULL) abort();
    } else {
        tmp = sbo_mem;
    }

    // calculate memory locations
    char *left = arr, *right = arr;
    left += idx1 * elem_size;
    right += idx2 * elem_size;

    // three-way swap
    memcpy(tmp, left, elem_size);
    memcpy(left, right, elem_size);
    memcpy(right, tmp, elem_size);

    // free dynamic memory, if it was needed
    if (tmp != sbo_mem) {
        cxFreeDefault(tmp);
    }
}
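
/*
 * Illustrative sketch (not part of the library): swapping two elements of a
 * plain array. Elements larger than CX_ARRAY_SWAP_SBO_SIZE bytes are swapped
 * via a temporary heap buffer instead of the stack buffer.
 *
 *   int numbers[] = {10, 20, 30};
 *   cx_array_swap(numbers, sizeof(int), 0, 2);
 *   // numbers is now {30, 20, 10}
 */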

// HIGH LEVEL ARRAY LIST FUNCTIONS

typedef struct {
    struct cx_list_s base;
    void *data;
    size_t capacity;
    CxArrayReallocator reallocator;
} cx_array_list;

static void cx_arl_destructor(struct cx_list_s *list) {
    cx_array_list *arl = (cx_array_list *) list;

    char *ptr = arl->data;

    if (list->collection.simple_destructor) {
        for (size_t i = 0; i < list->collection.size; i++) {
            cx_invoke_simple_destructor(list, ptr);
            ptr += list->collection.elem_size;
        }
    }
    if (list->collection.advanced_destructor) {
        for (size_t i = 0; i < list->collection.size; i++) {
            cx_invoke_advanced_destructor(list, ptr);
            ptr += list->collection.elem_size;
        }
    }

    cxFree(list->collection.allocator, arl->data);
    cxFree(list->collection.allocator, list);
}

static size_t cx_arl_insert_array(
        struct cx_list_s *list,
        size_t index,
        const void *array,
        size_t n
) {
    // out of bounds and special case check
    if (index > list->collection.size || n == 0) return 0;

    // get a correctly typed pointer to the list
    cx_array_list *arl = (cx_array_list *) list;

    // guarantee enough capacity
    if (arl->capacity < list->collection.size + n) {
        const size_t new_capacity = cx_array_grow_capacity(arl->capacity,
                list->collection.size + n);
        if (cxReallocateArray(
                list->collection.allocator,
                &arl->data, new_capacity,
                list->collection.elem_size)
        ) {
            return 0; // LCOV_EXCL_LINE
        }
        arl->capacity = new_capacity;
    }

    // determine insert position
    char *arl_data = arl->data;
    char *insert_pos = arl_data + index * list->collection.elem_size;

    // do we need to move some elements?
    if (index < list->collection.size) {
        size_t elems_to_move = list->collection.size - index;
        char *target = insert_pos + n * list->collection.elem_size;
        memmove(target, insert_pos, elems_to_move * list->collection.elem_size);
    }

    // place the new elements, if any
    if (array != NULL) {
        memcpy(insert_pos, array, n * list->collection.elem_size);
    }
    list->collection.size += n;

    return n;
}

static size_t cx_arl_insert_sorted(
        struct cx_list_s *list,
        const void *sorted_data,
        size_t n
) {
    // get a correctly typed pointer to the list
    cx_array_list *arl = (cx_array_list *) list;

    if (cx_array_insert_sorted(
            &arl->data,
            &list->collection.size,
            &arl->capacity,
            list->collection.cmpfunc,
            sorted_data,
            list->collection.elem_size,
            n,
            &arl->reallocator
    )) {
        // array list implementation is "all or nothing"
        return 0; // LCOV_EXCL_LINE
    } else {
        return n;
    }
}

static size_t cx_arl_insert_unique(
        struct cx_list_s *list,
        const void *sorted_data,
        size_t n
) {
    // get a correctly typed pointer to the list
    cx_array_list *arl = (cx_array_list *) list;

    if (cx_array_insert_unique(
            &arl->data,
            &list->collection.size,
            &arl->capacity,
            list->collection.cmpfunc,
            sorted_data,
            list->collection.elem_size,
            n,
            &arl->reallocator
    )) {
        // array list implementation is "all or nothing"
        return 0; // LCOV_EXCL_LINE
    } else {
        return n;
    }
}

static void *cx_arl_insert_element(
        struct cx_list_s *list,
        size_t index,
        const void *element
) {
    if (cx_arl_insert_array(list, index, element, 1) == 1) {
        return ((char*)((cx_array_list *) list)->data)
                + index * list->collection.elem_size;
    } else {
        return NULL;
    }
}

static int cx_arl_insert_iter(
        struct cx_iterator_s *iter,
        const void *elem,
        int prepend
) {
    struct cx_list_s *list = iter->src_handle;
    if (iter->index < list->collection.size) {
        if (cx_arl_insert_element(list,
                iter->index + 1 - prepend, elem) == NULL) {
            return 1; // LCOV_EXCL_LINE
        }
        iter->elem_count++;
        if (prepend != 0) {
            iter->index++;
            iter->elem_handle = ((char *) iter->elem_handle) + list->collection.elem_size;
        }
        return 0;
    } else {
        if (cx_arl_insert_element(list, list->collection.size, elem) == NULL) {
            return 1; // LCOV_EXCL_LINE
        }
        iter->elem_count++;
        iter->index = list->collection.size;
        return 0;
    }
}

static size_t cx_arl_remove(
        struct cx_list_s *list,
        size_t index,
        size_t num,
        void *targetbuf
) {
    cx_array_list *arl = (cx_array_list *) list;

    // out-of-bounds check
    size_t remove;
    if (index >= list->collection.size) {
        remove = 0;
    } else if (index + num > list->collection.size) {
        remove = list->collection.size - index;
    } else {
        remove = num;
    }

    // easy exit
    if (remove == 0) return 0;

    // destroy or copy contents
    if (targetbuf == NULL) {
        for (size_t idx = index; idx < index + remove; idx++) {
            cx_invoke_destructor(
                    list,
                    ((char *) arl->data) + idx * list->collection.elem_size
            );
        }
    } else {
        memcpy(
                targetbuf,
                ((char *) arl->data) + index * list->collection.elem_size,
                remove * list->collection.elem_size
        );
    }

    // short-circuit removal of last elements
    if (index + remove == list->collection.size) {
        list->collection.size -= remove;
        return remove;
    }

    // just move the elements to the left
    cx_array_copy(
            &arl->data,
            &list->collection.size,
            &arl->capacity,
            0,
            index,
            ((char *) arl->data) + (index + remove) * list->collection.elem_size,
            list->collection.elem_size,
            list->collection.size - index - remove,
            &arl->reallocator
    );

    // decrease the size
    list->collection.size -= remove;

    return remove;
}

static void cx_arl_clear(struct cx_list_s *list) {
    if (list->collection.size == 0) return;

    cx_array_list *arl = (cx_array_list *) list;
    char *ptr = arl->data;

    if (list->collection.simple_destructor) {
        for (size_t i = 0; i < list->collection.size; i++) {
            cx_invoke_simple_destructor(list, ptr);
            ptr += list->collection.elem_size;
        }
    }
    if (list->collection.advanced_destructor) {
        for (size_t i = 0; i < list->collection.size; i++) {
            cx_invoke_advanced_destructor(list, ptr);
            ptr += list->collection.elem_size;
        }
    }

    memset(arl->data, 0, list->collection.size * list->collection.elem_size);
    list->collection.size = 0;
}

static int cx_arl_swap(
        struct cx_list_s *list,
        size_t i,
        size_t j
) {
    if (i >= list->collection.size || j >= list->collection.size) return 1;
    cx_array_list *arl = (cx_array_list *) list;
    cx_array_swap(arl->data, list->collection.elem_size, i, j);
    return 0;
}

static void *cx_arl_at(
        const struct cx_list_s *list,
        size_t index
) {
    if (index < list->collection.size) {
        const cx_array_list *arl = (const cx_array_list *) list;
        char *space = arl->data;
        return space + index * list->collection.elem_size;
    } else {
        return NULL;
    }
}

static size_t cx_arl_find_remove(
        struct cx_list_s *list,
        const void *elem,
        bool remove
) {
    assert(list != NULL);
    assert(list->collection.cmpfunc != NULL);
    if (list->collection.size == 0) return 0;
    char *cur = ((const cx_array_list *) list)->data;

    // optimize with binary search, when sorted
    if (list->collection.sorted) {
        size_t i = cx_array_binary_search(
                cur,
                list->collection.size,
                list->collection.elem_size,
                elem,
                list->collection.cmpfunc
        );
        if (remove && i < list->collection.size) {
            cx_arl_remove(list, i, 1, NULL);
        }
        return i;
    }

    // fallback: linear search
    for (size_t i = 0; i < list->collection.size; i++) {
        if (0 == list->collection.cmpfunc(elem, cur)) {
            if (remove) {
                cx_arl_remove(list, i, 1, NULL);
            }
            return i;
        }
        cur += list->collection.elem_size;
    }
    return list->collection.size;
}

static void cx_arl_sort(struct cx_list_s *list) {
    assert(list->collection.cmpfunc != NULL);
    qsort(((cx_array_list *) list)->data,
            list->collection.size,
            list->collection.elem_size,
            list->collection.cmpfunc
    );
}

static int cx_arl_compare(
        const struct cx_list_s *list,
        const struct cx_list_s *other
) {
    assert(list->collection.cmpfunc != NULL);
    if (list->collection.size == other->collection.size) {
        const char *left = ((const cx_array_list *) list)->data;
        const char *right = ((const cx_array_list *) other)->data;
        for (size_t i = 0; i < list->collection.size; i++) {
            int d = list->collection.cmpfunc(left, right);
            if (d != 0) {
                return d;
            }
            left += list->collection.elem_size;
            right += other->collection.elem_size;
        }
        return 0;
    } else {
        return list->collection.size < other->collection.size ? -1 : 1;
    }
}

static void cx_arl_reverse(struct cx_list_s *list) {
    if (list->collection.size < 2) return;
    void *data = ((const cx_array_list *) list)->data;
    size_t half = list->collection.size / 2;
    for (size_t i = 0; i < half; i++) {
        cx_array_swap(data, list->collection.elem_size, i, list->collection.size - 1 - i);
    }
}

static bool cx_arl_iter_valid(const void *it) {
    const struct cx_iterator_s *iter = it;
    const struct cx_list_s *list = iter->src_handle;
    return iter->index < list->collection.size;
}

static void *cx_arl_iter_current(const void *it) {
    const struct cx_iterator_s *iter = it;
    return iter->elem_handle;
}

static void cx_arl_iter_next(void *it) {
    struct cx_iterator_s *iter = it;
    if (iter->base.remove) {
        iter->base.remove = false;
        cx_arl_remove(iter->src_handle, iter->index, 1, NULL);
        iter->elem_count--;
    } else {
        iter->index++;
        iter->elem_handle =
                ((char *) iter->elem_handle)
                + ((const struct cx_list_s *) iter->src_handle)->collection.elem_size;
    }
}

static void cx_arl_iter_prev(void *it) {
    struct cx_iterator_s *iter = it;
    if (iter->base.remove) {
        iter->base.remove = false;
        cx_arl_remove(iter->src_handle, iter->index, 1, NULL);
        iter->elem_count--;
    }
    iter->index--;
    cx_array_list *list = iter->src_handle;
    if (iter->index < list->base.collection.size) {
        iter->elem_handle = ((char *) list->data)
                + iter->index * list->base.collection.elem_size;
    }
}

static int cx_arl_change_capacity(
        struct cx_list_s *list,
        size_t new_capacity
) {
    cx_array_list *arl = (cx_array_list *) list;
    return cxReallocateArray(list->collection.allocator,
            &arl->data, new_capacity, list->collection.elem_size);
}

static struct cx_iterator_s cx_arl_iterator(
        const struct cx_list_s *list,
        size_t index,
        bool backwards
) {
    struct cx_iterator_s iter;

    iter.index = index;
    iter.src_handle = (void*) list;
    iter.elem_handle = cx_arl_at(list, index);
    iter.elem_size = list->collection.elem_size;
    iter.elem_count = list->collection.size;
    iter.base.valid = cx_arl_iter_valid;
    iter.base.current = cx_arl_iter_current;
    iter.base.next = backwards ? cx_arl_iter_prev : cx_arl_iter_next;
    iter.base.remove = false;
    iter.base.allow_remove = true;

    return iter;
}

static cx_list_class cx_array_list_class = {
    cx_arl_destructor,
    cx_arl_insert_element,
    cx_arl_insert_array,
    cx_arl_insert_sorted,
    cx_arl_insert_unique,
    cx_arl_insert_iter,
    cx_arl_remove,
    cx_arl_clear,
    cx_arl_swap,
    cx_arl_at,
    cx_arl_find_remove,
    cx_arl_sort,
    cx_arl_compare,
    cx_arl_reverse,
    cx_arl_change_capacity,
    cx_arl_iterator,
};

CxList *cxArrayListCreate(
        const CxAllocator *allocator,
        cx_compare_func comparator,
        size_t elem_size,
        size_t initial_capacity
) {
    if (allocator == NULL) {
        allocator = cxDefaultAllocator;
    }

    cx_array_list *list = cxCalloc(allocator, 1, sizeof(cx_array_list));
    if (list == NULL) return NULL;
    cx_list_init((CxList*)list, &cx_array_list_class,
            allocator, comparator, elem_size);
    list->capacity = initial_capacity;

    // allocate the array after the real elem_size is known
    list->data = cxCalloc(allocator, initial_capacity,
            list->base.collection.elem_size);
    if (list->data == NULL) { // LCOV_EXCL_START
        cxFree(allocator, list);
        return NULL;
    } // LCOV_EXCL_STOP

    // configure the reallocator
    list->reallocator = cx_array_reallocator(allocator, NULL);

    return (CxList *) list;
}
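
/*
 * Illustrative sketch (not part of the library): creating and using an
 * array list through the generic CxList interface. Only cxArrayListCreate()
 * is defined in this file; cxListAdd(), cxListAt(), and cxListFree() are
 * assumed to be the usual list API from cx/list.h, and cx_cmp_int the
 * integer compare function from cx/compare.h.
 *
 *   CxList *list = cxArrayListCreate(NULL, cx_cmp_int, sizeof(int), 16);
 *   if (list != NULL) {
 *       int value = 42;
 *       cxListAdd(list, &value);          // copies the element into the array
 *       int *stored = cxListAt(list, 0);  // points into the contiguous storage
 *       cxListFree(list);                 // releases the list via cx_arl_destructor
 *   }
 */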