ucx/array_list.c

changeset 22:112b85020dc9
parent    21:5ea41679e15d
child     23:b26390e77237
@@ -48,11 +48,11 @@
     }
     return cxReallocDefault(array, n);
 }
 
 CxArrayReallocator cx_array_default_reallocator_impl = {
-        cx_array_default_realloc, NULL, NULL, 0, 0
+        cx_array_default_realloc, NULL, NULL
 };
 
 CxArrayReallocator *cx_array_default_reallocator = &cx_array_default_reallocator_impl;
 
 // Stack-aware array reallocator
@@ -70,15 +70,15 @@
         errno = EOVERFLOW;
         return NULL;
     }
 
     // retrieve the pointer to the actual allocator
-    const CxAllocator *al = alloc->ptr1;
+    const CxAllocator *al = alloc->allocator;
 
     // check if the array is still located on the stack
     void *newmem;
-    if (array == alloc->ptr2) {
+    if (array == alloc->stack_ptr) {
         newmem = cxMalloc(al, n);
         if (newmem != NULL && array != NULL) {
             memcpy(newmem, array, old_capacity*elem_size);
         }
     } else {
@@ -87,16 +87,15 @@
     return newmem;
 }
 
 struct cx_array_reallocator_s cx_array_reallocator(
         const struct cx_allocator_s *allocator,
-        const void *stackmem
+        const void *stack_ptr
 ) {
     if (allocator == NULL) {
         allocator = cxDefaultAllocator;
     }
     return (struct cx_array_reallocator_s) {
             cx_array_advanced_realloc,
-            (void*) allocator, (void*) stackmem,
-            0, 0
+            allocator, stack_ptr,
     };
 }
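
For illustration, a rough usage sketch of the stack-aware reallocator created by cx_array_reallocator (buffer and variable names are made up; the behavior follows the code above: while the data still lives in the stack buffer, growth allocates fresh heap memory with cxMalloc and copies, otherwise growth goes through the allocator's realloc):

    int buf[16];                  // small stack buffer holding the initial elements
    void *elems = buf;
    size_t size = 0, capacity = 16;

    // reallocator that knows the array initially resides in buf
    // (NULL selects cxDefaultAllocator, as shown above)
    CxArrayReallocator stack_aware = cx_array_reallocator(NULL, buf);

    // pass &elems, &size, &capacity together with &stack_aware to the
    // cx_array_* functions; once the capacity must grow beyond 16 elements,
    // the contents are transparently moved to the heap
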
@@ -103,15 +102,34 @@
 
 // LOW LEVEL ARRAY LIST FUNCTIONS
 
-static size_t cx_array_align_capacity(
-        size_t cap,
-        size_t alignment,
-        size_t max
-) {
-    if (cap > max - alignment) {
-        return cap;
+/**
+ * Intelligently calculates a new capacity, reserving some more
+ * elements than required to prevent too many allocations.
+ *
+ * @param current_capacity the current capacity of the array
+ * @param needed_capacity the required capacity of the array
+ * @param maximum_capacity the maximum capacity (given by the data type)
+ * @return the new capacity
+ */
+static size_t cx_array_grow_capacity(
+        size_t current_capacity,
+        size_t needed_capacity,
+        size_t maximum_capacity
+) {
+    if (current_capacity >= needed_capacity) {
+        return current_capacity;
+    }
+    size_t cap = needed_capacity;
+    size_t alignment;
+    if (cap < 128) alignment = 16;
+    else if (cap < 1024) alignment = 64;
+    else if (cap < 8192) alignment = 512;
+    else alignment = 1024;
+
+    if (cap - 1 > maximum_capacity - alignment) {
+        return maximum_capacity;
     } else {
         return cap - (cap % alignment) + alignment;
     }
 }
 
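
Worked through the arithmetic of cx_array_grow_capacity above: a needed capacity below 128 is rounded up to the next larger multiple of 16 (100 becomes 112, and an exact multiple such as 16 still gains a full block, becoming 32), below 1024 to the next larger multiple of 64 (130 becomes 192), below 8192 to the next larger multiple of 512, and anything bigger to the next larger multiple of 1024; a result that would exceed the data type's range is clamped to maximum_capacity.
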
@@ -175,14 +193,10 @@
     // determine new capacity
     size_t newcap = oldsize + elem_count;
 
     // reallocate if possible
     if (newcap > oldcap) {
-        // calculate new capacity (next number divisible by 16)
-        newcap = cx_array_align_capacity(newcap, 16, max_size);
-
-        // perform reallocation
         void *newmem = reallocator->realloc(
                 *array, oldcap, newcap, elem_size, reallocator
         );
         if (newmem == NULL) {
             return 1; // LCOV_EXCL_LINE
@@ -268,32 +282,28 @@
         errno = EOVERFLOW;
         return 1;
     }
 
     // check if resize is required
-    size_t minsize = index + elem_count;
-    size_t newsize = oldsize < minsize ? minsize : oldsize;
+    const size_t minsize = index + elem_count;
+    const size_t newsize = oldsize < minsize ? minsize : oldsize;
 
-    // reallocate if possible
-    size_t newcap = oldcap;
-    if (newsize > oldcap) {
-        // check, if we need to repair the src pointer
+    // reallocate if necessary
+    const size_t newcap = cx_array_grow_capacity(oldcap, newsize, max_size);
+    if (newcap > oldcap) {
+        // check if we need to repair the src pointer
         uintptr_t targetaddr = (uintptr_t) *target;
         uintptr_t srcaddr = (uintptr_t) src;
         bool repairsrc = targetaddr <= srcaddr
                 && srcaddr < targetaddr + oldcap * elem_size;
-
-        // calculate new capacity (next number divisible by 16)
-        newcap = cx_array_align_capacity(newsize, 16, max_size);
-        assert(newcap > newsize);
 
         // perform reallocation
         void *newmem = reallocator->realloc(
                 *target, oldcap, newcap, elem_size, reallocator
         );
         if (newmem == NULL) {
-            return 1;
+            return 1; // LCOV_EXCL_LINE
         }
 
         // repair src pointer, if necessary
         if (repairsrc) {
             src = ((char *) newmem) + (srcaddr - targetaddr);
@@ -333,19 +343,20 @@
 
     // return successfully
     return 0;
 }
 
-int cx_array_insert_sorted(
+static int cx_array_insert_sorted_impl(
         void **target,
         size_t *size,
         size_t *capacity,
         cx_compare_func cmp_func,
         const void *sorted_data,
         size_t elem_size,
         size_t elem_count,
-        CxArrayReallocator *reallocator
+        CxArrayReallocator *reallocator,
+        bool allow_duplicates
 ) {
     // assert pointers
     assert(target != NULL);
     assert(size != NULL);
     assert(capacity != NULL);
@@ -365,27 +376,28 @@
         errno = EOVERFLOW;
         return 1;
     }
 
     // store some counts
-    size_t old_size = *size;
-    size_t old_capacity = *capacity;
-    size_t needed_capacity = old_size + elem_count;
+    const size_t old_size = *size;
+    const size_t old_capacity = *capacity;
+    // the necessary capacity is the worst case assumption, including duplicates
+    const size_t needed_capacity = cx_array_grow_capacity(old_capacity,
+            old_size + elem_count, SIZE_MAX);
 
     // if we need more than we have, try a reallocation
     if (needed_capacity > old_capacity) {
-        size_t new_capacity = cx_array_align_capacity(needed_capacity, 16, SIZE_MAX);
         void *new_mem = reallocator->realloc(
-                *target, old_capacity, new_capacity, elem_size, reallocator
+                *target, old_capacity, needed_capacity, elem_size, reallocator
        );
         if (new_mem == NULL) {
             // give it up right away, there is no contract
             // that requires us to insert as much as we can
             return 1; // LCOV_EXCL_LINE
         }
         *target = new_mem;
-        *capacity = new_capacity;
+        *capacity = needed_capacity;
     }
 
     // now we have guaranteed that we can insert everything
     size_t new_size = old_size + elem_count;
     *size = new_size;
@@ -416,17 +428,64 @@
                 elem_count - si,
                 elem_size,
                 bptr,
                 cmp_func
         );
+        // binary search gives us the smallest index;
+        // we also want to include equal elements here
+        while (si + copy_len < elem_count
+                && cmp_func(bptr, src+copy_len*elem_size) == 0) {
+            copy_len++;
+        }
 
         // copy the source elements
-        bytes_copied = copy_len * elem_size;
-        memcpy(dest, src, bytes_copied);
-        dest += bytes_copied;
-        src += bytes_copied;
-        si += copy_len;
+        if (copy_len > 0) {
+            if (allow_duplicates) {
+                // we can copy the entire chunk
+                bytes_copied = copy_len * elem_size;
+                memcpy(dest, src, bytes_copied);
+                dest += bytes_copied;
+                src += bytes_copied;
+                si += copy_len;
+                di += copy_len;
+            } else {
+                // first, check the end of the source chunk
+                // for being a duplicate of the bptr
+                const char *end_of_src = src + (copy_len - 1) * elem_size;
+                size_t skip_len = 0;
+                while (copy_len > 0 && cmp_func(bptr, end_of_src) == 0) {
+                    end_of_src -= elem_size;
+                    skip_len++;
+                    copy_len--;
+                }
+                char *last = dest == *target ? NULL : dest - elem_size;
+                // then iterate through the source chunk
+                // and skip all duplicates with the last element in the array
+                size_t more_skipped = 0;
+                for (unsigned j = 0; j < copy_len; j++) {
+                    if (last != NULL && cmp_func(last, src) == 0) {
+                        // duplicate - skip
+                        src += elem_size;
+                        si++;
+                        more_skipped++;
+                    } else {
+                        memcpy(dest, src, elem_size);
+                        src += elem_size;
+                        last = dest;
+                        dest += elem_size;
+                        si++;
+                        di++;
+                    }
+                }
+                // skip the previously identified elements as well
+                src += skip_len * elem_size;
+                si += skip_len;
+                skip_len += more_skipped;
+                // reduce the actual size by the number of skipped elements
+                *size -= skip_len;
+            }
+        }
 
         // when all source elements are in place, we are done
         if (si >= elem_count) break;
 
         // determine how many buffered elements need to be restored
@@ -441,19 +500,102 @@
         // restore the buffered elements
         bytes_copied = copy_len * elem_size;
         memmove(dest, bptr, bytes_copied);
         dest += bytes_copied;
         bptr += bytes_copied;
+        di += copy_len;
         bi += copy_len;
     }
 
-    // still source elements left? simply append them
+    // still source elements left?
     if (si < elem_count) {
-        memcpy(dest, src, elem_size * (elem_count - si));
-    }
-
-    // still buffer elements left?
-    // don't worry, we already moved them to the correct place
+        if (allow_duplicates) {
+            // duplicates allowed or nothing inserted yet: simply copy everything
+            memcpy(dest, src, elem_size * (elem_count - si));
+        } else {
+            if (dest != *target) {
+                // skip all source elements that equal the last element
+                char *last = dest - elem_size;
+                while (si < elem_count) {
+                    if (last != NULL && cmp_func(last, src) == 0) {
+                        src += elem_size;
+                        si++;
+                        (*size)--;
+                    } else {
+                        break;
+                    }
+                }
+            }
+            // we must check the elements in the chunk one by one
+            while (si < elem_count) {
+                // find a chain of elements that can be copied
+                size_t copy_len = 1, skip_len = 0;
+                {
+                    const char *left_src = src;
+                    while (si + copy_len < elem_count) {
+                        const char *right_src = left_src + elem_size;
+                        int d = cmp_func(left_src, right_src);
+                        if (d < 0) {
+                            if (skip_len > 0) {
+                                // new larger element found;
+                                // handle it in the next cycle
+                                break;
+                            }
+                            left_src += elem_size;
+                            copy_len++;
+                        } else if (d == 0) {
+                            left_src += elem_size;
+                            skip_len++;
+                        } else {
+                            break;
+                        }
+                    }
+                }
+                size_t bytes_copied = copy_len * elem_size;
+                memcpy(dest, src, bytes_copied);
+                dest += bytes_copied;
+                src += bytes_copied + skip_len * elem_size;
+                si += copy_len + skip_len;
+                di += copy_len;
+                *size -= skip_len;
+            }
+        }
+    }
+
+    // buffered elements need to be moved when we skipped duplicates
+    size_t total_skipped = new_size - *size;
+    if (bi < new_size && total_skipped > 0) {
+        // move the remaining buffer to the end of the array
+        memmove(dest, bptr, elem_size * (new_size - bi));
+    }
 
     return 0;
+}
+
+int cx_array_insert_sorted(
+        void **target,
+        size_t *size,
+        size_t *capacity,
+        cx_compare_func cmp_func,
+        const void *sorted_data,
+        size_t elem_size,
+        size_t elem_count,
+        CxArrayReallocator *reallocator
+) {
+    return cx_array_insert_sorted_impl(target, size, capacity,
+            cmp_func, sorted_data, elem_size, elem_count, reallocator, true);
+}
+
+int cx_array_insert_unique(
+        void **target,
+        size_t *size,
+        size_t *capacity,
+        cx_compare_func cmp_func,
+        const void *sorted_data,
+        size_t elem_size,
+        size_t elem_count,
+        CxArrayReallocator *reallocator
+) {
+    return cx_array_insert_sorted_impl(target, size, capacity,
+            cmp_func, sorted_data, elem_size, elem_count, reallocator, false);
 }
 
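
For illustration, a minimal sketch of how the two public entry points might be called (element type, comparator, and data are made up; the comparator is assumed to use the usual qsort-style two-pointer signature behind cx_compare_func):

    static int cmp_int(const void *l, const void *r) {
        const int a = *(const int*) l, b = *(const int*) r;
        return (a > b) - (a < b);
    }

    int *arr = NULL;
    size_t size = 0, capacity = 0;
    const int chunk[] = {1, 2, 2, 5};   // source data must already be sorted

    // keeps duplicates: size grows by 4
    cx_array_insert_sorted((void**) &arr, &size, &capacity, cmp_int,
            chunk, sizeof(int), 4, cx_array_default_reallocator);

    // skips duplicates, both within the chunk and against elements already in arr
    cx_array_insert_unique((void**) &arr, &size, &capacity, cmp_int,
            chunk, sizeof(int), 4, cx_array_default_reallocator);

Both functions return 0 on success and 1 when the reallocation fails.
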
@@ -460,3 +602,3 @@
 size_t cx_array_binary_search_inf(
         const void *arr,
         size_t size,
@@ -500,10 +642,17 @@
         pivot_index = left_index + (right_index - left_index) / 2;
         const char *arr_elem = array + pivot_index * elem_size;
         result = cmp_func(elem, arr_elem);
         if (result == 0) {
             // found it!
+            // check previous elements;
+            // when they are equal, report the smallest index
+            arr_elem -= elem_size;
+            while (pivot_index > 0 && cmp_func(elem, arr_elem) == 0) {
+                pivot_index--;
+                arr_elem -= elem_size;
+            }
             return pivot_index;
         } else if (result < 0) {
             // element is smaller than pivot, continue search left
             right_index = pivot_index - 1;
         } else {
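
With this addition, the binary search reports the smallest matching index when the array contains runs of equal elements: searching for 2 in {1, 2, 2, 2, 3}, for example, now yields index 1 no matter which of the three equal elements the pivot hits first.
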
@@ -641,12 +790,12 @@
     // get a correctly typed pointer to the list
     cx_array_list *arl = (cx_array_list *) list;
 
     // guarantee enough capacity
     if (arl->capacity < list->collection.size + n) {
-        size_t new_capacity = list->collection.size + n;
-        new_capacity = new_capacity - (new_capacity % 16) + 16;
+        const size_t new_capacity = cx_array_grow_capacity(arl->capacity,
+                list->collection.size + n, SIZE_MAX);
         if (cxReallocateArray(
                 list->collection.allocator,
                 &arl->data, new_capacity,
                 list->collection.elem_size)
         ) {
@@ -698,10 +847,35 @@
     } else {
         return n;
     }
 }
 
+static size_t cx_arl_insert_unique(
+        struct cx_list_s *list,
+        const void *sorted_data,
+        size_t n
+) {
+    // get a correctly typed pointer to the list
+    cx_array_list *arl = (cx_array_list *) list;
+
+    if (cx_array_insert_unique(
+            &arl->data,
+            &list->collection.size,
+            &arl->capacity,
+            list->collection.cmpfunc,
+            sorted_data,
+            list->collection.elem_size,
+            n,
+            &arl->reallocator
+    )) {
+        // array list implementation is "all or nothing"
+        return 0;
+    } else {
+        return n;
+    }
+}
+
 static void *cx_arl_insert_element(
         struct cx_list_s *list,
         size_t index,
         const void *element
 ) {
@@ -715,11 +889,11 @@
 static int cx_arl_insert_iter(
         struct cx_iterator_s *iter,
         const void *elem,
         int prepend
 ) {
-    struct cx_list_s *list = iter->src_handle.m;
+    struct cx_list_s *list = iter->src_handle;
     if (iter->index < list->collection.size) {
         if (cx_arl_insert_element(list,
                 iter->index + 1 - prepend, elem) == NULL) {
             return 1;
         }
@@ -926,11 +1100,11 @@
     }
 }
 
 static bool cx_arl_iter_valid(const void *it) {
     const struct cx_iterator_s *iter = it;
-    const struct cx_list_s *list = iter->src_handle.c;
+    const struct cx_list_s *list = iter->src_handle;
     return iter->index < list->collection.size;
 }
 
 static void *cx_arl_iter_current(const void *it) {
     const struct cx_iterator_s *iter = it;
@@ -939,29 +1113,39 @@
 
 static void cx_arl_iter_next(void *it) {
     struct cx_iterator_s *iter = it;
     if (iter->base.remove) {
         iter->base.remove = false;
-        cx_arl_remove(iter->src_handle.m, iter->index, 1, NULL);
+        cx_arl_remove(iter->src_handle, iter->index, 1, NULL);
+        iter->elem_count--;
     } else {
         iter->index++;
         iter->elem_handle =
                 ((char *) iter->elem_handle)
-                + ((const struct cx_list_s *) iter->src_handle.c)->collection.elem_size;
+                + ((const struct cx_list_s *) iter->src_handle)->collection.elem_size;
     }
 }
 
 static void cx_arl_iter_prev(void *it) {
     struct cx_iterator_s *iter = it;
-    const cx_array_list *list = iter->src_handle.c;
     if (iter->base.remove) {
         iter->base.remove = false;
-        cx_arl_remove(iter->src_handle.m, iter->index, 1, NULL);
+        cx_arl_remove(iter->src_handle, iter->index, 1, NULL);
+        iter->elem_count--;
     }
     iter->index--;
+    cx_array_list *list = iter->src_handle;
     if (iter->index < list->base.collection.size) {
         iter->elem_handle = ((char *) list->data)
                 + iter->index * list->base.collection.elem_size;
     }
 }
 
+static int cx_arl_change_capacity(
+        struct cx_list_s *list,
+        size_t new_capacity
+) {
+    cx_array_list *arl = (cx_array_list *)list;
+    return cxReallocateArray(list->collection.allocator,
+            &arl->data, new_capacity, list->collection.elem_size);
+}
 
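
The new cx_arl_change_capacity callback simply forwards to cxReallocateArray. As a rough sketch of that call pattern on a bare array (names are made up; the exact cxReallocateArray prototype is assumed from the call above, with a non-zero return indicating failure):

    void *data = NULL;                 // backing storage, may start out empty
    const size_t elem_size = sizeof(int);

    // make room for 64 elements up front
    if (cxReallocateArray(cxDefaultAllocator, &data, 64, elem_size)) {
        // handle allocation failure
    }
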
@@ -968,40 +1152,42 @@
 static struct cx_iterator_s cx_arl_iterator(
         const struct cx_list_s *list,
         size_t index,
         bool backwards
 ) {
     struct cx_iterator_s iter;
 
     iter.index = index;
-    iter.src_handle.c = list;
+    iter.src_handle = (void*)list;
     iter.elem_handle = cx_arl_at(list, index);
     iter.elem_size = list->collection.elem_size;
     iter.elem_count = list->collection.size;
     iter.base.valid = cx_arl_iter_valid;
     iter.base.current = cx_arl_iter_current;
     iter.base.next = backwards ? cx_arl_iter_prev : cx_arl_iter_next;
     iter.base.remove = false;
-    iter.base.mutating = false;
+    iter.base.allow_remove = true;
 
     return iter;
 }
 
 static cx_list_class cx_array_list_class = {
         cx_arl_destructor,
         cx_arl_insert_element,
         cx_arl_insert_array,
         cx_arl_insert_sorted,
+        cx_arl_insert_unique,
         cx_arl_insert_iter,
         cx_arl_remove,
         cx_arl_clear,
         cx_arl_swap,
         cx_arl_at,
         cx_arl_find_remove,
         cx_arl_sort,
         cx_arl_compare,
         cx_arl_reverse,
+        cx_arl_change_capacity,
         cx_arl_iterator,
 };
 
 CxList *cxArrayListCreate(
         const CxAllocator *allocator,
