#include "cx/array_list.h"
#include "cx/compare.h"
#include <assert.h>
#include <string.h>
#include <errno.h>

// Default array reallocator

static void *cx_array_default_realloc(
        void *array,
        size_t capacity,
        size_t elem_size,
        cx_attr_unused CxArrayReallocator *alloc
) {
    size_t n;
    if (cx_szmul(capacity, elem_size, &n)) {
        errno = EOVERFLOW;
        return NULL;
    }
    return realloc(array, n);
}

CxArrayReallocator cx_array_default_reallocator_impl = {
        cx_array_default_realloc, NULL, NULL, 0, 0
};

CxArrayReallocator *cx_array_default_reallocator = &cx_array_default_reallocator_impl;

// Stack-aware array reallocator

static void *cx_array_advanced_realloc(
        void *array,
        size_t capacity,
        size_t elem_size,
        cx_attr_unused CxArrayReallocator *alloc
) {
    // check for overflow
    size_t n;
    if (cx_szmul(capacity, elem_size, &n)) {
        errno = EOVERFLOW;
        return NULL;
    }

    // retrieve the pointer to the actual allocator
    const CxAllocator *al = alloc->ptr1;

    // check if the array is still located on the stack
    void *newmem;
    if (array == alloc->ptr2) {
        newmem = cxMalloc(al, n);
        if (newmem != NULL && array != NULL) {
            memcpy(newmem, array, n);
        }
    } else {
        newmem = cxRealloc(al, array, n);
    }
    return newmem;
}

struct cx_array_reallocator_s cx_array_reallocator(
        const struct cx_allocator_s *allocator,
        const void *stackmem
) {
    if (allocator == NULL) {
        allocator = cxDefaultAllocator;
    }
    return (struct cx_array_reallocator_s) {
            cx_array_advanced_realloc,
            (void*) allocator, (void*) stackmem,
            0, 0
    };
}
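
/*
 * Illustrative sketch (not part of the library): growing a small
 * stack-allocated array with the stack-aware reallocator. The first
 * reallocation copies the contents to heap memory obtained from the
 * given allocator (NULL selects cxDefaultAllocator); later
 * reallocations go through cxRealloc().
 *
 *     int stackbuf[8];
 *     void *arr = stackbuf;
 *     size_t size = 0, capacity = 8;
 *     CxArrayReallocator re = cx_array_reallocator(NULL, stackbuf);
 *     // pass &re to cx_array_copy() or cx_array_reserve() so the
 *     // array can leave the stack buffer when it needs to grow
 */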

// LOW LEVEL ARRAY LIST FUNCTIONS

static size_t cx_array_align_capacity(
        size_t cap,
        size_t alignment,
        size_t max
) {
    if (cap > max - alignment) {
        return cap;
    } else {
        return cap - (cap % alignment) + alignment;
    }
}
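
/*
 * Example (illustrative): cx_array_align_capacity(20, 16, SIZE_MAX)
 * yields 32. If cap is within one alignment step of max, cap is
 * returned unchanged so the result cannot exceed max.
 */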

int cx_array_reserve(
        void **array,
        void *size,
        void *capacity,
        unsigned width,
        size_t elem_size,
        size_t elem_count,
        CxArrayReallocator *reallocator
) {
    // assert pointers
    assert(array != NULL);
    assert(size != NULL);
    assert(capacity != NULL);
    assert(reallocator != NULL);

    // determine size and capacity
    size_t oldcap;
    size_t oldsize;
    size_t max_size;
    if (width == 0 || width == 8*sizeof(size_t)) {
        oldcap = *(size_t*) capacity;
        oldsize = *(size_t*) size;
        max_size = SIZE_MAX;
    } else if (width == 16) {
        oldcap = *(uint16_t*) capacity;
        oldsize = *(uint16_t*) size;
        max_size = UINT16_MAX;
    } else if (width == 8) {
        oldcap = *(uint8_t*) capacity;
        oldsize = *(uint8_t*) size;
        max_size = UINT8_MAX;
    }
#if CX_WORDSIZE == 64
    else if (width == 32) {
        oldcap = *(uint32_t*) capacity;
        oldsize = *(uint32_t*) size;
        max_size = UINT32_MAX;
    }
#endif
    else {
        errno = EINVAL;
        return 1;
    }

    // assert that the array is allocated when it has capacity
    assert(*array != NULL || oldcap == 0);

    // check for overflow
    if (elem_count > max_size - oldsize) {
        errno = EOVERFLOW;
        return 1;
    }

    // determine new capacity
    size_t newcap = oldsize + elem_count;

    // reallocate if possible
    if (newcap > oldcap) {
        // calculate new capacity (next number divisible by 16)
        newcap = cx_array_align_capacity(newcap, 16, max_size);

        // perform reallocation
        void *newmem = reallocator->realloc(
                *array, newcap, elem_size, reallocator
        );
        if (newmem == NULL) {
            return 1; // LCOV_EXCL_LINE
        }

        // store new pointer
        *array = newmem;

        // store new capacity
        if (width == 0 || width == 8*sizeof(size_t)) {
            *(size_t*) capacity = newcap;
        } else if (width == 16) {
            *(uint16_t*) capacity = (uint16_t) newcap;
        } else if (width == 8) {
            *(uint8_t*) capacity = (uint8_t) newcap;
        }
#if CX_WORDSIZE == 64
        else if (width == 32) {
            *(uint32_t*) capacity = (uint32_t) newcap;
        }
#endif
    }

    return 0;
}
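
/*
 * Illustrative sketch (not part of the library): make sure a plain
 * heap array has room for 100 more elements beyond its current size.
 * A width of zero means that size and capacity are plain size_t
 * variables.
 *
 *     int *numbers = NULL;
 *     size_t size = 0, capacity = 0;
 *     if (cx_array_reserve((void**) &numbers, &size, &capacity, 0,
 *             sizeof(int), 100, cx_array_default_reallocator)) {
 *         // reallocation failed or the request overflowed
 *     }
 */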

int cx_array_copy(
        void **target,
        void *size,
        void *capacity,
        unsigned width,
        size_t index,
        const void *src,
        size_t elem_size,
        size_t elem_count,
        CxArrayReallocator *reallocator
) {
    // assert pointers
    assert(target != NULL);
    assert(size != NULL);
    assert(capacity != NULL);
    assert(src != NULL);
    assert(reallocator != NULL);

    // determine size and capacity
    size_t oldcap;
    size_t oldsize;
    size_t max_size;
    if (width == 0 || width == 8*sizeof(size_t)) {
        oldcap = *(size_t*) capacity;
        oldsize = *(size_t*) size;
        max_size = SIZE_MAX;
    } else if (width == 16) {
        oldcap = *(uint16_t*) capacity;
        oldsize = *(uint16_t*) size;
        max_size = UINT16_MAX;
    } else if (width == 8) {
        oldcap = *(uint8_t*) capacity;
        oldsize = *(uint8_t*) size;
        max_size = UINT8_MAX;
    }
#if CX_WORDSIZE == 64
    else if (width == 32) {
        oldcap = *(uint32_t*) capacity;
        oldsize = *(uint32_t*) size;
        max_size = UINT32_MAX;
    }
#endif
    else {
        errno = EINVAL;
        return 1;
    }

    // assert that the array is allocated when it has capacity
    assert(*target != NULL || oldcap == 0);

    // check for overflow
    if (index > max_size || elem_count > max_size - index) {
        errno = EOVERFLOW;
        return 1;
    }

    // check if resize is required
    size_t minsize = index + elem_count;
    size_t newsize = oldsize < minsize ? minsize : oldsize;

    // reallocate if possible
    size_t newcap = oldcap;
    if (newsize > oldcap) {
        // check, if we need to repair the src pointer
        uintptr_t targetaddr = (uintptr_t) *target;
        uintptr_t srcaddr = (uintptr_t) src;
        bool repairsrc = targetaddr <= srcaddr
                && srcaddr < targetaddr + oldcap * elem_size;

        // calculate new capacity (next number divisible by 16)
        newcap = cx_array_align_capacity(newsize, 16, max_size);
        assert(newcap > newsize);

        // perform reallocation
        void *newmem = reallocator->realloc(
                *target, newcap, elem_size, reallocator
        );
        if (newmem == NULL) {
            return 1;
        }

        // repair src pointer, if necessary
        if (repairsrc) {
            src = ((char *) newmem) + (srcaddr - targetaddr);
        }

        // store new pointer
        *target = newmem;
    }

    // determine target pointer
    char *start = *target;
    start += index * elem_size;

    // copy elements and set new size
    // note: no overflow check here, b/c we cannot get here w/o allocation
    memmove(start, src, elem_count * elem_size);

    // if any of size or capacity changed, store them back
    if (newsize != oldsize || newcap != oldcap) {
        if (width == 0 || width == 8*sizeof(size_t)) {
            *(size_t*) capacity = newcap;
            *(size_t*) size = newsize;
        } else if (width == 16) {
            *(uint16_t*) capacity = (uint16_t) newcap;
            *(uint16_t*) size = (uint16_t) newsize;
        } else if (width == 8) {
            *(uint8_t*) capacity = (uint8_t) newcap;
            *(uint8_t*) size = (uint8_t) newsize;
        }
#if CX_WORDSIZE == 64
        else if (width == 32) {
            *(uint32_t*) capacity = (uint32_t) newcap;
            *(uint32_t*) size = (uint32_t) newsize;
        }
#endif
    }

    // return successfully
    return 0;
}
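
/*
 * Illustrative sketch (not part of the library): append one element
 * to a size_t-indexed array, growing it with the default reallocator
 * when necessary. Appending means copying to index == size.
 *
 *     int *numbers = NULL;
 *     size_t size = 0, capacity = 0;
 *     int value = 42;
 *     if (cx_array_copy((void**) &numbers, &size, &capacity, 0,
 *             size, &value, sizeof(int), 1,
 *             cx_array_default_reallocator)) {
 *         // reallocation failed
 *     }
 */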

int cx_array_insert_sorted(
        void **target,
        size_t *size,
        size_t *capacity,
        cx_compare_func cmp_func,
        const void *sorted_data,
        size_t elem_size,
        size_t elem_count,
        CxArrayReallocator *reallocator
) {
    // assert pointers
    assert(target != NULL);
    assert(size != NULL);
    assert(capacity != NULL);
    assert(cmp_func != NULL);
    assert(sorted_data != NULL);
    assert(reallocator != NULL);

    // corner case
    if (elem_count == 0) return 0;

    // overflow check
    if (elem_count > SIZE_MAX - *size) {
        errno = EOVERFLOW;
        return 1;
    }

    // store some counts
    size_t old_size = *size;
    size_t needed_capacity = old_size + elem_count;

    // if we need more than we have, try a reallocation
    if (needed_capacity > *capacity) {
        size_t new_capacity = cx_array_align_capacity(needed_capacity, 16, SIZE_MAX);
        void *new_mem = reallocator->realloc(
                *target, new_capacity, elem_size, reallocator
        );
        if (new_mem == NULL) {
            // give it up right away, there is no contract
            // that requires us to insert as much as we can
            return 1; // LCOV_EXCL_LINE
        }
        *target = new_mem;
        *capacity = new_capacity;
    }

    // now we have guaranteed that we can insert everything
    size_t new_size = old_size + elem_count;
    *size = new_size;

    // declare the source and destination indices/pointers
    size_t si = 0, di = 0;
    const char *src = sorted_data;
    char *dest = *target;

    // find the first insertion point
    di = cx_array_binary_search_sup(dest, old_size, elem_size, src, cmp_func);
    dest += di * elem_size;

    // move the remaining elements in the array completely to the right
    // we will call it the "buffer" for parked elements
    size_t buf_size = old_size - di;
    size_t bi = new_size - buf_size;
    char *bptr = ((char *) *target) + bi * elem_size;
    memmove(bptr, dest, buf_size * elem_size);

    // while there are both source and buffered elements left,
    // copy them interleaving
    while (si < elem_count && bi < new_size) {
        // determine how many source elements can be inserted
        size_t copy_len, bytes_copied;
        copy_len = cx_array_binary_search_sup(
                src,
                elem_count - si,
                elem_size,
                bptr,
                cmp_func
        );

        // copy the source elements
        bytes_copied = copy_len * elem_size;
        memcpy(dest, src, bytes_copied);
        dest += bytes_copied;
        src += bytes_copied;
        si += copy_len;

        // when all source elements are in place, we are done
        if (si >= elem_count) break;

        // determine how many buffered elements need to be restored
        copy_len = cx_array_binary_search_sup(
                bptr,
                new_size - bi,
                elem_size,
                src,
                cmp_func
        );

        // restore the buffered elements
        bytes_copied = copy_len * elem_size;
        memmove(dest, bptr, bytes_copied);
        dest += bytes_copied;
        bptr += bytes_copied;
        bi += copy_len;
    }

    // still source elements left? simply append them
    if (si < elem_count) {
        memcpy(dest, src, elem_size * (elem_count - si));
    }

    // still buffer elements left?
    // don't worry, we already moved them to the correct place

    return 0;
}
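
/*
 * Illustrative sketch (not part of the library): merge a batch of
 * already sorted values into a sorted array. cx_cmp_int from
 * cx/compare.h is assumed as the compare function here.
 *
 *     int *sorted = NULL;
 *     size_t size = 0, capacity = 0;
 *     int batch[] = {1, 3, 5, 7};   // must itself be sorted
 *     if (cx_array_insert_sorted((void**) &sorted, &size, &capacity,
 *             cx_cmp_int, batch, sizeof(int), 4,
 *             cx_array_default_reallocator)) {
 *         // reallocation failed, nothing was inserted
 *     }
 */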

size_t cx_array_binary_search_inf(
        const void *arr,
        size_t size,
        size_t elem_size,
        const void *elem,
        cx_compare_func cmp_func
) {
    // special case: empty array
    if (size == 0) return 0;

    // declare a variable that will contain the compare results
    int result;

    // cast the array pointer to something we can use offsets with
    const char *array = arr;

    // check the first array element
    result = cmp_func(elem, array);
    if (result < 0) {
        return size;
    } else if (result == 0) {
        return 0;
    }

    // special case: there is only one element and that is smaller
    if (size == 1) return 0;

    // check the last array element
    result = cmp_func(elem, array + elem_size * (size - 1));
    if (result >= 0) {
        return size - 1;
    }

    // the element is now guaranteed to be somewhere in the list
    // so start the binary search
    size_t left_index = 1;
    size_t right_index = size - 1;
    size_t pivot_index;

    while (left_index <= right_index) {
        pivot_index = left_index + (right_index - left_index) / 2;
        const char *arr_elem = array + pivot_index * elem_size;
        result = cmp_func(elem, arr_elem);
        if (result == 0) {
            // found it!
            return pivot_index;
        } else if (result < 0) {
            // element is smaller than pivot, continue search left
            right_index = pivot_index - 1;
        } else {
            // element is larger than pivot, continue search right
            left_index = pivot_index + 1;
        }
    }
    // report the greatest lower bound
    return result < 0 ? (pivot_index - 1) : pivot_index;
}

size_t cx_array_binary_search(
        const void *arr,
        size_t size,
        size_t elem_size,
        const void *elem,
        cx_compare_func cmp_func
) {
    size_t index = cx_array_binary_search_inf(
            arr, size, elem_size, elem, cmp_func
    );
    if (index < size &&
            cmp_func(((const char *) arr) + index * elem_size, elem) == 0) {
        return index;
    } else {
        return size;
    }
}

size_t cx_array_binary_search_sup(
        const void *arr,
        size_t size,
        size_t elem_size,
        const void *elem,
        cx_compare_func cmp_func
) {
    size_t inf = cx_array_binary_search_inf(
            arr, size, elem_size, elem, cmp_func
    );
    if (inf == size) {
        // no infimum means, first element is supremum
        return 0;
    } else if (cmp_func(((const char *) arr) + inf * elem_size, elem) == 0) {
        return inf;
    } else {
        return inf + 1;
    }
}
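
/*
 * Worked example (illustrative): for arr = {10, 20, 20, 30} the three
 * search functions behave as follows.
 *
 *   elem = 25: _inf returns 2 (largest element <= 25 is arr[2] == 20),
 *              _sup returns 3 (smallest element >= 25 is arr[3] == 30),
 *              _search returns 4 (== size, no exact match).
 *   elem =  5: _inf returns 4 (== size, no element <= 5),
 *              _sup returns 0, _search returns 4.
 */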

#ifndef CX_ARRAY_SWAP_SBO_SIZE
#define CX_ARRAY_SWAP_SBO_SIZE 128
#endif
const unsigned cx_array_swap_sbo_size = CX_ARRAY_SWAP_SBO_SIZE;

void cx_array_swap(
        void *arr,
        size_t elem_size,
        size_t idx1,

// [...]

    // note that if we had to move the elements, the following operation
    // is guaranteed to succeed, because we have the memory already allocated
    // therefore, it is impossible to leave this function with an invalid array

    // place the new elements
    if (cx_array_copy(
            &arl->data,
            &list->collection.size,
            &arl->capacity,
            0,
            index,
            array,
            list->collection.elem_size,
            n,
            &arl->reallocator
    )) {
        // array list implementation is "all or nothing"
        return 0;
    } else {
        return n;
    }
}

static size_t cx_arl_insert_sorted(
        struct cx_list_s *list,
        const void *sorted_data,
        size_t n
) {
    // get a correctly typed pointer to the list
    cx_array_list *arl = (cx_array_list *) list;

    if (cx_array_insert_sorted(
            &arl->data,
            &list->collection.size,
            &arl->capacity,
            list->collection.cmpfunc,
            sorted_data,
            list->collection.elem_size,
            n,
            &arl->reallocator
    )) {
        // array list implementation is "all or nothing"
        return 0;
    } else {
        return n;
    }
}

static int cx_arl_insert_element(
        struct cx_list_s *list,
        size_t index,
        const void *element
) {
    return 1 != cx_arl_insert_array(list, index, element, 1);
}

static int cx_arl_insert_iter(
        struct cx_iterator_s *iter,
        const void *elem,
        int prepend
) {
    struct cx_list_s *list = iter->src_handle.m;
    if (iter->index < list->collection.size) {
        int result = cx_arl_insert_element(
                list,
                iter->index + 1 - prepend,
                elem
        );
        if (result == 0) {
            iter->elem_count++;
            if (prepend != 0) {
                iter->index++;
                iter->elem_handle = ((char *) iter->elem_handle) + list->collection.elem_size;
            }
        }
        return result;
    } else {
        int result = cx_arl_insert_element(list, list->collection.size, elem);
        if (result == 0) {
            iter->elem_count++;
            iter->index = list->collection.size;
        }
        return result;
    }
}
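
/*
 * Removes up to num elements starting at index and returns the number
 * of elements actually removed. If targetbuf is NULL, the destructors
 * are invoked for the removed elements; otherwise the removed elements
 * are copied to targetbuf and no destructor is called.
 */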

static size_t cx_arl_remove(
        struct cx_list_s *list,
        size_t index,
        size_t num,
        void *targetbuf
) {
    cx_array_list *arl = (cx_array_list *) list;

    // out-of-bounds check
    size_t remove;
    if (index >= list->collection.size) {
        remove = 0;
    } else if (index + num > list->collection.size) {
        remove = list->collection.size - index;
    } else {
        remove = num;
    }

    // easy exit
    if (remove == 0) return 0;

    // destroy or copy contents
    if (targetbuf == NULL) {
        for (size_t idx = index; idx < index + remove; idx++) {
            cx_invoke_destructor(
                    list,
                    ((char *) arl->data) + idx * list->collection.elem_size
            );
        }
    } else {
        memcpy(
                targetbuf,
                ((char *) arl->data) + index * list->collection.elem_size,
                remove * list->collection.elem_size
        );
    }

    // short-circuit removal of last elements
    if (index + remove == list->collection.size) {
        list->collection.size -= remove;
        return remove;
    }

    // just move the elements to the left
    cx_array_copy(
            &arl->data,
            &list->collection.size,
            &arl->capacity,
            0,
            index,
            ((char *) arl->data) + (index + remove) * list->collection.elem_size,
            list->collection.elem_size,
            list->collection.size - index - remove,
            &arl->reallocator
    );

    // decrease the size
    list->collection.size -= remove;

    return remove;
}

static void cx_arl_clear(struct cx_list_s *list) {
    if (list->collection.size == 0) return;

// [...]
    }
}

static void cx_arl_reverse(struct cx_list_s *list) {
    if (list->collection.size < 2) return;
    void *data = ((const cx_array_list *) list)->data;
    size_t half = list->collection.size / 2;
    for (size_t i = 0; i < half; i++) {
        cx_array_swap(data, list->collection.elem_size, i, list->collection.size - 1 - i);
    }
}

static bool cx_arl_iter_valid(const void *it) {
    const struct cx_iterator_s *iter = it;
    const struct cx_list_s *list = iter->src_handle.c;
    return iter->index < list->collection.size;
}

static void *cx_arl_iter_current(const void *it) {
    const struct cx_iterator_s *iter = it;
    return iter->elem_handle;
}

static void cx_arl_iter_next(void *it) {
    struct cx_iterator_s *iter = it;
    if (iter->base.remove) {
        iter->base.remove = false;
        cx_arl_remove(iter->src_handle.m, iter->index, 1, NULL);
    } else {
        iter->index++;
        iter->elem_handle =
                ((char *) iter->elem_handle)
                + ((const struct cx_list_s *) iter->src_handle.c)->collection.elem_size;
    }
}

static void cx_arl_iter_prev(void *it) {
    struct cx_iterator_s *iter = it;
    const cx_array_list *list = iter->src_handle.c;
    if (iter->base.remove) {
        iter->base.remove = false;
        cx_arl_remove(iter->src_handle.m, iter->index, 1, NULL);
    }
    iter->index--;
    if (iter->index < list->base.collection.size) {
        iter->elem_handle = ((char *) list->data)
                + iter->index * list->base.collection.elem_size;
    }
}


static struct cx_iterator_s cx_arl_iterator(
        const struct cx_list_s *list,
        size_t index,
        bool backwards
) {
    struct cx_iterator_s iter;