|
/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER.
 *
 * Copyright 2021 Mike Becker, Olaf Wintermann All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *   1. Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *   2. Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in the
 *      documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
|
#include "cx/array_list.h"
#include "cx/compare.h"
#include <assert.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
|
// Default array reallocator

static void *cx_array_default_realloc(
        void *array,
        size_t capacity,
        size_t elem_size,
        __attribute__((__unused__)) struct cx_array_reallocator_s *alloc
) {
    return realloc(array, capacity * elem_size);
}

struct cx_array_reallocator_s cx_array_default_reallocator_impl = {
        cx_array_default_realloc, NULL, NULL, 0, 0
};

struct cx_array_reallocator_s *cx_array_default_reallocator = &cx_array_default_reallocator_impl;

// LOW LEVEL ARRAY LIST FUNCTIONS
|
enum cx_array_result cx_array_copy(
        void **target,
        size_t *size,
        size_t *capacity,
        size_t index,
        const void *src,
        size_t elem_size,
        size_t elem_count,
        struct cx_array_reallocator_s *reallocator
) {
    // assert pointers
    assert(target != NULL);
    assert(size != NULL);
    assert(src != NULL);

    // determine capacity
    size_t cap = capacity == NULL ? *size : *capacity;

    // check if resize is required
    size_t minsize = index + elem_count;
    size_t newsize = *size < minsize ? minsize : *size;
    bool needrealloc = newsize > cap;

    // reallocate if possible
    if (needrealloc) {
        // a reallocator and a capacity variable must be available
        if (reallocator == NULL || capacity == NULL) {
            return CX_ARRAY_REALLOC_NOT_SUPPORTED;
        }

        // check if we need to repair the src pointer
        uintptr_t targetaddr = (uintptr_t) *target;
        uintptr_t srcaddr = (uintptr_t) src;
        bool repairsrc = targetaddr <= srcaddr
                         && srcaddr < targetaddr + cap * elem_size;

        // calculate new capacity (next number divisible by 16)
        cap = newsize - (newsize % 16) + 16;
        assert(cap > newsize);

        // perform reallocation
        void *newmem = reallocator->realloc(
                *target, cap, elem_size, reallocator
        );
        if (newmem == NULL) {
            return CX_ARRAY_REALLOC_FAILED;
        }

        // repair src pointer, if necessary
        if (repairsrc) {
            src = ((char *) newmem) + (srcaddr - targetaddr);
        }

        // store new pointer and capacity
        *target = newmem;
        *capacity = cap;
    }

    // determine target pointer
    char *start = *target;
    start += index * elem_size;

    // copy elements and set new size
    memmove(start, src, elem_count * elem_size);
    *size = newsize;

    // return successfully
    return CX_ARRAY_SUCCESS;
}

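// Note on the merge strategy used below: the tail of the target array is
// first parked at the very end of the (possibly reallocated) buffer, and
// source and parked elements are then interleaved back in a single forward
// pass, so every element is moved at most twice.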
|
enum cx_array_result cx_array_insert_sorted(
        void **target,
        size_t *size,
        size_t *capacity,
        cx_compare_func cmp_func,
        const void *sorted_data,
        size_t elem_size,
        size_t elem_count,
        struct cx_array_reallocator_s *reallocator
) {
    // assert pointers
    assert(target != NULL);
    assert(size != NULL);
    assert(capacity != NULL);
    assert(cmp_func != NULL);
    assert(sorted_data != NULL);
    assert(reallocator != NULL);

    // corner case: nothing to insert
    if (elem_count == 0) return CX_ARRAY_SUCCESS;

    // store some counts
    size_t old_size = *size;
    size_t needed_capacity = old_size + elem_count;

    // if we need more than we have, try a reallocation
    if (needed_capacity > *capacity) {
        size_t new_capacity = needed_capacity - (needed_capacity % 16) + 16;
        void *new_mem = reallocator->realloc(
                *target, new_capacity, elem_size, reallocator
        );
        if (new_mem == NULL) {
            // give up right away, there is no contract
            // that requires us to insert as much as we can
            return CX_ARRAY_REALLOC_FAILED;
        }
        *target = new_mem;
        *capacity = new_capacity;
    }

    // now we have guaranteed that we can insert everything
    size_t new_size = old_size + elem_count;
    *size = new_size;

    // declare the source and destination indices/pointers
    size_t si = 0, di = 0;
    const char *src = sorted_data;
    char *dest = *target;

    // find the first insertion point
    di = cx_array_binary_search_sup(dest, old_size, elem_size, src, cmp_func);
    dest += di * elem_size;

    // move the remaining elements in the array completely to the right
    // we will call it the "buffer" for parked elements
    size_t buf_size = old_size - di;
    size_t bi = new_size - buf_size;
    char *bptr = ((char *) *target) + bi * elem_size;
    memmove(bptr, dest, buf_size * elem_size);

    // while there are both source and buffered elements left,
    // copy them interleaving
    while (si < elem_count && bi < new_size) {
        // determine how many source elements can be inserted
        size_t copy_len, bytes_copied;
        copy_len = cx_array_binary_search_sup(
                src,
                elem_count - si,
                elem_size,
                bptr,
                cmp_func
        );

        // copy the source elements
        bytes_copied = copy_len * elem_size;
        memcpy(dest, src, bytes_copied);
        dest += bytes_copied;
        src += bytes_copied;
        si += copy_len;

        // when all source elements are in place, we are done
        if (si >= elem_count) break;

        // determine how many buffered elements need to be restored
        copy_len = cx_array_binary_search_sup(
                bptr,
                new_size - bi,
                elem_size,
                src,
                cmp_func
        );

        // restore the buffered elements
        bytes_copied = copy_len * elem_size;
        memmove(dest, bptr, bytes_copied);
        dest += bytes_copied;
        bptr += bytes_copied;
        bi += copy_len;
    }

    // still source elements left? simply append them
    if (si < elem_count) {
        memcpy(dest, src, elem_size * (elem_count - si));
    }

    // still buffer elements left?
    // don't worry, we already moved them to the correct place

    return CX_ARRAY_SUCCESS;
}

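// Note: cx_array_binary_search_inf() returns the index of the largest element
// that compares less than or equal to the searched element (the infimum), or
// the array size if the searched element is smaller than all elements.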
|
size_t cx_array_binary_search_inf(
        const void *arr,
        size_t size,
        size_t elem_size,
        const void *elem,
        cx_compare_func cmp_func
) {
    // special case: empty array
    if (size == 0) return 0;

    // declare a variable that will contain the compare results
    int result;

    // cast the array pointer to something we can use offsets with
    const char *array = arr;

    // check the first array element
    result = cmp_func(elem, array);
    if (result < 0) {
        return size;
    } else if (result == 0) {
        return 0;
    }

    // check the last array element
    result = cmp_func(elem, array + elem_size * (size - 1));
    if (result >= 0) {
        return size - 1;
    }

    // the search element is now guaranteed to lie strictly between the first
    // and the last element, so start the binary search
    size_t left_index = 1;
    size_t right_index = size - 1;
    size_t pivot_index;

    while (left_index <= right_index) {
        pivot_index = left_index + (right_index - left_index) / 2;
        const char *arr_elem = array + pivot_index * elem_size;
        result = cmp_func(elem, arr_elem);
        if (result == 0) {
            // found it!
            return pivot_index;
        } else if (result < 0) {
            // element is smaller than pivot, continue search left
            right_index = pivot_index - 1;
        } else {
            // element is larger than pivot, continue search right
            left_index = pivot_index + 1;
        }
    }

    // report the largest lower bound
    return result < 0 ? (pivot_index - 1) : pivot_index;
}

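// Threshold for the small buffer optimization in cx_array_swap(): elements up
// to this size are swapped via an on-stack buffer, larger elements via a
// temporary heap allocation.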
|
#ifndef CX_ARRAY_SWAP_SBO_SIZE
#define CX_ARRAY_SWAP_SBO_SIZE 128
#endif
unsigned cx_array_swap_sbo_size = CX_ARRAY_SWAP_SBO_SIZE;

void cx_array_swap(
        void *arr,
        size_t elem_size,
        size_t idx1,
        size_t idx2
) {
    assert(arr != NULL);

    // short circuit
    if (idx1 == idx2) return;

    char sbo_mem[CX_ARRAY_SWAP_SBO_SIZE];
    void *tmp;

    // decide if we can use the local buffer
    if (elem_size > CX_ARRAY_SWAP_SBO_SIZE) {
        tmp = malloc(elem_size);
        // we don't want to enforce error handling
        if (tmp == NULL) abort();
    } else {
        tmp = sbo_mem;
    }

    // calculate memory locations
    char *left = arr, *right = arr;
    left += idx1 * elem_size;
    right += idx2 * elem_size;

    // three-way swap
    memcpy(tmp, left, elem_size);
    memcpy(left, right, elem_size);
    memcpy(right, tmp, elem_size);

    // free dynamic memory, if it was needed
    if (tmp != sbo_mem) {
        free(tmp);
    }
}
|
// HIGH LEVEL ARRAY LIST FUNCTIONS

typedef struct {
    struct cx_list_s base;
    void *data;
    size_t capacity;
    struct cx_array_reallocator_s reallocator;
} cx_array_list;

static void *cx_arl_realloc(
        void *array,
        size_t capacity,
        size_t elem_size,
        struct cx_array_reallocator_s *alloc
) {
    // retrieve the pointer to the list allocator
    const CxAllocator *al = alloc->ptr1;

    // use the list allocator to reallocate the memory
    return cxRealloc(al, array, capacity * elem_size);
}
|
static void cx_arl_destructor(struct cx_list_s *list) {
    cx_array_list *arl = (cx_array_list *) list;

    char *ptr = arl->data;

    if (list->collection.simple_destructor) {
        for (size_t i = 0; i < list->collection.size; i++) {
            cx_invoke_simple_destructor(list, ptr);
            ptr += list->collection.elem_size;
        }
    }
    if (list->collection.advanced_destructor) {
        // rewind in case both destructor types are registered
        ptr = arl->data;
        for (size_t i = 0; i < list->collection.size; i++) {
            cx_invoke_advanced_destructor(list, ptr);
            ptr += list->collection.elem_size;
        }
    }

    cxFree(list->collection.allocator, arl->data);
    cxFree(list->collection.allocator, list);
}
|
static size_t cx_arl_insert_array(
        struct cx_list_s *list,
        size_t index,
        const void *array,
        size_t n
) {
    // out of bounds and special case check
    if (index > list->collection.size || n == 0) return 0;

    // get a correctly typed pointer to the list
    cx_array_list *arl = (cx_array_list *) list;

    // do we need to move some elements?
    if (index < list->collection.size) {
        const char *first_to_move = (const char *) arl->data;
        first_to_move += index * list->collection.elem_size;
        size_t elems_to_move = list->collection.size - index;
        size_t start_of_moved = index + n;

        if (CX_ARRAY_SUCCESS != cx_array_copy(
                &arl->data,
                &list->collection.size,
                &arl->capacity,
                start_of_moved,
                first_to_move,
                list->collection.elem_size,
                elems_to_move,
                &arl->reallocator
        )) {
            // if moving existing elems is unsuccessful, abort
            return 0;
        }
    }

    // note that if we had to move the elements, the following operation
    // is guaranteed to succeed, because we have the memory already allocated
    // therefore, it is impossible to leave this function with an invalid array

    // place the new elements
    if (CX_ARRAY_SUCCESS == cx_array_copy(
            &arl->data,
            &list->collection.size,
            &arl->capacity,
            index,
            array,
            list->collection.elem_size,
            n,
            &arl->reallocator
    )) {
        return n;
    } else {
        // array list implementation is "all or nothing"
        return 0;
    }
}
|
static size_t cx_arl_insert_sorted(
        struct cx_list_s *list,
        const void *sorted_data,
        size_t n
) {
    // get a correctly typed pointer to the list
    cx_array_list *arl = (cx_array_list *) list;

    if (CX_ARRAY_SUCCESS == cx_array_insert_sorted(
            &arl->data,
            &list->collection.size,
            &arl->capacity,
            list->collection.cmpfunc,
            sorted_data,
            list->collection.elem_size,
            n,
            &arl->reallocator
    )) {
        return n;
    } else {
        // array list implementation is "all or nothing"
        return 0;
    }
}
|
static int cx_arl_insert_element(
        struct cx_list_s *list,
        size_t index,
        const void *element
) {
    return 1 != cx_arl_insert_array(list, index, element, 1);
}

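// Note: inserts relative to the current iterator position - behind it, or in
// front of it when prepend is non-zero - and adjusts index and element handle
// so that the iteration continues with the originally current element.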
|
static int cx_arl_insert_iter(
        struct cx_iterator_s *iter,
        const void *elem,
        int prepend
) {
    struct cx_list_s *list = iter->src_handle.m;
    if (iter->index < list->collection.size) {
        int result = cx_arl_insert_element(
                list,
                iter->index + 1 - prepend,
                elem
        );
        if (result == 0) {
            iter->elem_count++;
            if (prepend != 0) {
                iter->index++;
                iter->elem_handle = ((char *) iter->elem_handle) + list->collection.elem_size;
            }
        }
        return result;
    } else {
        int result = cx_arl_insert_element(list, list->collection.size, elem);
        if (result == 0) {
            iter->elem_count++;
            iter->index = list->collection.size;
        }
        return result;
    }
}
|
static int cx_arl_remove(
        struct cx_list_s *list,
        size_t index
) {
    cx_array_list *arl = (cx_array_list *) list;

    // out-of-bounds check
    if (index >= list->collection.size) {
        return 1;
    }

    // content destruction
    cx_invoke_destructor(list, ((char *) arl->data) + index * list->collection.elem_size);

    // short-circuit removal of last element
    if (index == list->collection.size - 1) {
        list->collection.size--;
        return 0;
    }

    // just move the elements starting at index to the left
    int result = cx_array_copy(
            &arl->data,
            &list->collection.size,
            &arl->capacity,
            index,
            ((char *) arl->data) + (index + 1) * list->collection.elem_size,
            list->collection.elem_size,
            list->collection.size - index - 1,
            &arl->reallocator
    );

    // cx_array_copy cannot fail, array cannot grow
    assert(result == 0);

    // decrease the size
    list->collection.size--;

    return 0;
}
|
static void cx_arl_clear(struct cx_list_s *list) {
    if (list->collection.size == 0) return;

    cx_array_list *arl = (cx_array_list *) list;
    char *ptr = arl->data;

    if (list->collection.simple_destructor) {
        for (size_t i = 0; i < list->collection.size; i++) {
            cx_invoke_simple_destructor(list, ptr);
            ptr += list->collection.elem_size;
        }
    }
    if (list->collection.advanced_destructor) {
        // rewind in case both destructor types are registered
        ptr = arl->data;
        for (size_t i = 0; i < list->collection.size; i++) {
            cx_invoke_advanced_destructor(list, ptr);
            ptr += list->collection.elem_size;
        }
    }

    memset(arl->data, 0, list->collection.size * list->collection.elem_size);
    list->collection.size = 0;
}
|
static int cx_arl_swap(
        struct cx_list_s *list,
        size_t i,
        size_t j
) {
    if (i >= list->collection.size || j >= list->collection.size) return 1;
    cx_array_list *arl = (cx_array_list *) list;
    cx_array_swap(arl->data, list->collection.elem_size, i, j);
    return 0;
}
|
static void *cx_arl_at(
        const struct cx_list_s *list,
        size_t index
) {
    if (index < list->collection.size) {
        const cx_array_list *arl = (const cx_array_list *) list;
        char *space = arl->data;
        return space + index * list->collection.elem_size;
    } else {
        return NULL;
    }
}
|
static ssize_t cx_arl_find_remove(
        struct cx_list_s *list,
        const void *elem,
        bool remove
) {
    assert(list->collection.cmpfunc != NULL);
    assert(list->collection.size < SIZE_MAX / 2);
    char *cur = ((const cx_array_list *) list)->data;

    for (ssize_t i = 0; i < (ssize_t) list->collection.size; i++) {
        if (0 == list->collection.cmpfunc(elem, cur)) {
            if (remove) {
                if (0 == cx_arl_remove(list, i)) {
                    return i;
                } else {
                    return -1;
                }
            } else {
                return i;
            }
        }
        cur += list->collection.elem_size;
    }

    return -1;
}
|
static void cx_arl_sort(struct cx_list_s *list) {
    assert(list->collection.cmpfunc != NULL);
    qsort(((cx_array_list *) list)->data,
          list->collection.size,
          list->collection.elem_size,
          list->collection.cmpfunc
    );
}
|
static int cx_arl_compare(
        const struct cx_list_s *list,
        const struct cx_list_s *other
) {
    assert(list->collection.cmpfunc != NULL);
    if (list->collection.size == other->collection.size) {
        const char *left = ((const cx_array_list *) list)->data;
        const char *right = ((const cx_array_list *) other)->data;
        for (size_t i = 0; i < list->collection.size; i++) {
            int d = list->collection.cmpfunc(left, right);
            if (d != 0) {
                return d;
            }
            left += list->collection.elem_size;
            right += other->collection.elem_size;
        }
        return 0;
    } else {
        return list->collection.size < other->collection.size ? -1 : 1;
    }
}
|
static void cx_arl_reverse(struct cx_list_s *list) {
    if (list->collection.size < 2) return;
    void *data = ((const cx_array_list *) list)->data;
    size_t half = list->collection.size / 2;
    for (size_t i = 0; i < half; i++) {
        cx_array_swap(data, list->collection.elem_size, i, list->collection.size - 1 - i);
    }
}
|
static bool cx_arl_iter_valid(const void *it) {
    const struct cx_iterator_s *iter = it;
    const struct cx_list_s *list = iter->src_handle.c;
    return iter->index < list->collection.size;
}
|
static void *cx_arl_iter_current(const void *it) {
    const struct cx_iterator_s *iter = it;
    return iter->elem_handle;
}
|
static void cx_arl_iter_next(void *it) {
    struct cx_iterator_s *iter = it;
    if (iter->base.remove) {
        iter->base.remove = false;
        cx_arl_remove(iter->src_handle.m, iter->index);
    } else {
        iter->index++;
        iter->elem_handle =
                ((char *) iter->elem_handle)
                + ((const struct cx_list_s *) iter->src_handle.c)->collection.elem_size;
    }
}

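// Note: when the index is decremented below zero, the unsigned wrap-around
// makes it larger than the list size, so the bounds checks below and in
// cx_arl_iter_valid() treat the iterator as exhausted.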
|
static void cx_arl_iter_prev(void *it) {
    struct cx_iterator_s *iter = it;
    const cx_array_list *list = iter->src_handle.c;
    if (iter->base.remove) {
        iter->base.remove = false;
        cx_arl_remove(iter->src_handle.m, iter->index);
    }
    iter->index--;
    if (iter->index < list->base.collection.size) {
        iter->elem_handle = ((char *) list->data)
                            + iter->index * list->base.collection.elem_size;
    }
}
|
static struct cx_iterator_s cx_arl_iterator(
        const struct cx_list_s *list,
        size_t index,
        bool backwards
) {
    struct cx_iterator_s iter;

    iter.index = index;
    iter.src_handle.c = list;
    iter.elem_handle = cx_arl_at(list, index);
    iter.elem_size = list->collection.elem_size;
    iter.elem_count = list->collection.size;
    iter.base.valid = cx_arl_iter_valid;
    iter.base.current = cx_arl_iter_current;
    iter.base.next = backwards ? cx_arl_iter_prev : cx_arl_iter_next;
    iter.base.remove = false;
    iter.base.mutating = false;

    return iter;
}
|
static cx_list_class cx_array_list_class = {
        cx_arl_destructor,
        cx_arl_insert_element,
        cx_arl_insert_array,
        cx_arl_insert_sorted,
        cx_arl_insert_iter,
        cx_arl_remove,
        cx_arl_clear,
        cx_arl_swap,
        cx_arl_at,
        cx_arl_find_remove,
        cx_arl_sort,
        cx_arl_compare,
        cx_arl_reverse,
        cx_arl_iterator,
};
|
CxList *cxArrayListCreate(
        const CxAllocator *allocator,
        cx_compare_func comparator,
        size_t elem_size,
        size_t initial_capacity
) {
    if (allocator == NULL) {
        allocator = cxDefaultAllocator;
    }

    cx_array_list *list = cxCalloc(allocator, 1, sizeof(cx_array_list));
    if (list == NULL) return NULL;

    list->base.cl = &cx_array_list_class;
    list->base.collection.allocator = allocator;
    list->capacity = initial_capacity;

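    // an element size of zero requests a list that stores pointers;
    // in that case a missing comparator defaults to cx_cmp_ptr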
|
    if (elem_size > 0) {
        list->base.collection.elem_size = elem_size;
        list->base.collection.cmpfunc = comparator;
    } else {
        elem_size = sizeof(void *);
        list->base.collection.cmpfunc = comparator == NULL ? cx_cmp_ptr : comparator;
        cxListStorePointers((CxList *) list);
    }

    // allocate the array after the real elem_size is known
    list->data = cxCalloc(allocator, initial_capacity, elem_size);
    if (list->data == NULL) {
        cxFree(allocator, list);
        return NULL;
    }

    // configure the reallocator
    list->reallocator.realloc = cx_arl_realloc;
    list->reallocator.ptr1 = (void *) allocator;

    return (CxList *) list;
}