 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "cx/array_list.h"
#include "cx/compare.h"
#include <assert.h>
#include <string.h>
#include <errno.h>

// Default array reallocator

static void *cx_array_default_realloc(
        void *array,
        size_t capacity,
        size_t elem_size,
        cx_attr_unused CxArrayReallocator *alloc
) {
    size_t n;
    if (cx_szmul(capacity, elem_size, &n)) {
        errno = EOVERFLOW;
        return NULL;
    }
    return realloc(array, n);
}

CxArrayReallocator cx_array_default_reallocator_impl = {
        cx_array_default_realloc, NULL, NULL, 0, 0
};

CxArrayReallocator *cx_array_default_reallocator = &cx_array_default_reallocator_impl;

// Stack-aware array reallocator

static void *cx_array_advanced_realloc(
        void *array,
        size_t capacity,
        size_t elem_size,
        cx_attr_unused CxArrayReallocator *alloc
) {
    // check for overflow
    size_t n;
    if (cx_szmul(capacity, elem_size, &n)) {
        errno = EOVERFLOW;
        return NULL;
    }

    // retrieve the pointer to the actual allocator
    const CxAllocator *al = alloc->ptr1;

    // check if the array is still located on the stack
    void *newmem;
    if (array == alloc->ptr2) {
        newmem = cxMalloc(al, n);
        if (newmem != NULL && array != NULL) {
            memcpy(newmem, array, n);
        }
    } else {
        newmem = cxRealloc(al, array, n);
    }
    return newmem;
}

struct cx_array_reallocator_s cx_array_reallocator(
        const struct cx_allocator_s *allocator,
        const void *stackmem
) {
    if (allocator == NULL) {
        allocator = cxDefaultAllocator;
    }
    return (struct cx_array_reallocator_s) {
            cx_array_advanced_realloc,
            (void*) allocator, (void*) stackmem,
            0, 0
    };
}

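/*
 * Illustration of the intended use of the stack-aware reallocator (a sketch
 * with hypothetical variable names, inferred from cx_array_advanced_realloc
 * above): an array may start out in a stack buffer and migrate to heap
 * memory on its first growth, because the reallocator recognizes the stack
 * address stored in ptr2 and performs malloc+memcpy instead of attempting to
 * realloc stack memory.
 *
 *   int stackbuf[8];
 *   int *arr = stackbuf;
 *   size_t size = 0, capacity = 8;
 *   CxArrayReallocator realloc_stack = cx_array_reallocator(NULL, stackbuf);
 *   // pass &realloc_stack to the low level functions below; once the
 *   // capacity of 8 is exceeded, arr points to heap memory instead
 */
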
// LOW LEVEL ARRAY LIST FUNCTIONS

static size_t cx_array_align_capacity(
        size_t cap,
        size_t alignment,
        size_t max
) {
    if (cap > max - alignment) {
        return cap;
    } else {
        return cap - (cap % alignment) + alignment;
    }
}

int cx_array_reserve(
        void **array,
        void *size,
        void *capacity,
        unsigned width,
        size_t elem_size,
        size_t elem_count,
        CxArrayReallocator *reallocator
) {
    // assert pointers
    assert(array != NULL);
    assert(size != NULL);
    assert(capacity != NULL);

    // default reallocator
    if (reallocator == NULL) {
        reallocator = cx_array_default_reallocator;
    }

    // determine size and capacity
    size_t oldcap;
    size_t oldsize;
    size_t max_size;
    if (width == 0 || width == sizeof(size_t)) {
        oldcap = *(size_t*) capacity;
        oldsize = *(size_t*) size;
        max_size = SIZE_MAX;
    } else if (width == sizeof(uint16_t)) {
        oldcap = *(uint16_t*) capacity;
        oldsize = *(uint16_t*) size;
        max_size = UINT16_MAX;
    } else if (width == sizeof(uint8_t)) {
        oldcap = *(uint8_t*) capacity;
        oldsize = *(uint8_t*) size;
        max_size = UINT8_MAX;
    }
#if CX_WORDSIZE == 64
    else if (width == sizeof(uint32_t)) {
        oldcap = *(uint32_t*) capacity;
        oldsize = *(uint32_t*) size;
        max_size = UINT32_MAX;
    }
#endif
    else {
        errno = EINVAL;
        return 1;
    }

    // assert that the array is allocated when it has capacity
    assert(*array != NULL || oldcap == 0);

    // check for overflow
    if (elem_count > max_size - oldsize) {
        errno = EOVERFLOW;
        return 1;
    }

    // determine new capacity
    size_t newcap = oldsize + elem_count;

    // reallocate if possible
    if (newcap > oldcap) {
        // calculate new capacity (next number divisible by 16)
        newcap = cx_array_align_capacity(newcap, 16, max_size);

        // perform reallocation
        void *newmem = reallocator->realloc(
                *array, newcap, elem_size, reallocator
        );
        if (newmem == NULL) {
            return 1; // LCOV_EXCL_LINE
        }

        // store new pointer
        *array = newmem;

        // store new capacity
        if (width == 0 || width == sizeof(size_t)) {
            *(size_t*) capacity = newcap;
        } else if (width == sizeof(uint16_t)) {
            *(uint16_t*) capacity = (uint16_t) newcap;
        } else if (width == sizeof(uint8_t)) {
            *(uint8_t*) capacity = (uint8_t) newcap;
        }
#if CX_WORDSIZE == 64
        else if (width == sizeof(uint32_t)) {
            *(uint32_t*) capacity = (uint32_t) newcap;
        }
#endif
    }

    return 0;
}

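/*
 * Usage sketch for cx_array_reserve() (hypothetical example; the width
 * parameter is the byte width of the size and capacity variables, 0 meaning
 * sizeof(size_t), so containers may keep 8-, 16- or 32-bit bookkeeping):
 *
 *   int *numbers = NULL;
 *   uint8_t size = 0, capacity = 0;
 *   // make room for 32 additional ints, growing the capacity if necessary
 *   if (cx_array_reserve((void**) &numbers, &size, &capacity,
 *           sizeof(uint8_t), sizeof(int), 32, NULL)) {
 *       // failed: EINVAL, EOVERFLOW, or an allocation failure
 *   }
 */
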
int cx_array_copy(
        void **target,
        void *size,
        void *capacity,
        unsigned width,
        size_t index,
        const void *src,
        size_t elem_size,
        size_t elem_count,
        CxArrayReallocator *reallocator
) {
    // assert pointers
    assert(target != NULL);
    assert(size != NULL);
    assert(capacity != NULL);
    assert(src != NULL);

    // default reallocator
    if (reallocator == NULL) {
        reallocator = cx_array_default_reallocator;
    }

    // determine size and capacity
    size_t oldcap;
    size_t oldsize;
    size_t max_size;
    if (width == 0 || width == sizeof(size_t)) {
        oldcap = *(size_t*) capacity;
        oldsize = *(size_t*) size;
        max_size = SIZE_MAX;
    } else if (width == sizeof(uint16_t)) {
        oldcap = *(uint16_t*) capacity;
        oldsize = *(uint16_t*) size;
        max_size = UINT16_MAX;
    } else if (width == sizeof(uint8_t)) {
        oldcap = *(uint8_t*) capacity;
        oldsize = *(uint8_t*) size;
        max_size = UINT8_MAX;
    }
#if CX_WORDSIZE == 64
    else if (width == sizeof(uint32_t)) {
        oldcap = *(uint32_t*) capacity;
        oldsize = *(uint32_t*) size;
        max_size = UINT32_MAX;
    }
#endif
    else {
        errno = EINVAL;
        return 1;
    }

    // assert that the array is allocated when it has capacity
    assert(*target != NULL || oldcap == 0);

    // check for overflow
    if (index > max_size || elem_count > max_size - index) {
        errno = EOVERFLOW;
        return 1;
    }

    // check if resize is required
    size_t minsize = index + elem_count;
    size_t newsize = oldsize < minsize ? minsize : oldsize;

    // reallocate if possible
    size_t newcap = oldcap;
    if (newsize > oldcap) {
        // check if we need to repair the src pointer
        uintptr_t targetaddr = (uintptr_t) *target;
        uintptr_t srcaddr = (uintptr_t) src;
        bool repairsrc = targetaddr <= srcaddr
                         && srcaddr < targetaddr + oldcap * elem_size;

        // calculate new capacity (next number divisible by 16)
        newcap = cx_array_align_capacity(newsize, 16, max_size);
        assert(newcap > newsize);

        // perform reallocation
        void *newmem = reallocator->realloc(
                *target, newcap, elem_size, reallocator
        );
        if (newmem == NULL) {
            return 1;
        }

        // repair src pointer, if necessary
        if (repairsrc) {
            src = ((char *) newmem) + (srcaddr - targetaddr);
        }

        // store new pointer
        *target = newmem;
    }

    // determine target pointer
    char *start = *target;
    start += index * elem_size;

    // copy elements and set new size
    // note: no overflow check here, because we cannot get here without allocation
    memmove(start, src, elem_count * elem_size);

    // if any of size or capacity changed, store them back
    if (newsize != oldsize || newcap != oldcap) {
        if (width == 0 || width == sizeof(size_t)) {
            *(size_t*) capacity = newcap;
            *(size_t*) size = newsize;
        } else if (width == sizeof(uint16_t)) {
            *(uint16_t*) capacity = (uint16_t) newcap;
            *(uint16_t*) size = (uint16_t) newsize;
        } else if (width == sizeof(uint8_t)) {
            *(uint8_t*) capacity = (uint8_t) newcap;
            *(uint8_t*) size = (uint8_t) newsize;
        }
#if CX_WORDSIZE == 64
        else if (width == sizeof(uint32_t)) {
            *(uint32_t*) capacity = (uint32_t) newcap;
            *(uint32_t*) size = (uint32_t) newsize;
        }
#endif
    }

    // return successfully
    return 0;
}

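/*
 * The function below merges a sorted chunk into an already sorted array.
 * Sketch of the strategy on hypothetical data: inserting {2,4} into {1,3,5}
 * first parks the tail {3,5} at the end of the grown array (the "buffer"),
 * then alternately copies runs from the source and restores runs from the
 * buffer: {1} stays, {2} is copied, {3} is restored, {4} is copied, and {5}
 * already sits at its final position.
 */
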
int cx_array_insert_sorted(
        void **target,
        size_t *size,
        size_t *capacity,
        cx_compare_func cmp_func,
        const void *sorted_data,
        size_t elem_size,
        size_t elem_count,
        CxArrayReallocator *reallocator
) {
    // assert pointers
    assert(target != NULL);
    assert(size != NULL);
    assert(capacity != NULL);
    assert(cmp_func != NULL);
    assert(sorted_data != NULL);

    // default reallocator
    if (reallocator == NULL) {
        reallocator = cx_array_default_reallocator;
    }

    // corner case
    if (elem_count == 0) return 0;

    // overflow check
    if (elem_count > SIZE_MAX - *size) {
        errno = EOVERFLOW;
        return 1;
    }

    // store some counts
    size_t old_size = *size;
    size_t needed_capacity = old_size + elem_count;

    // if we need more than we have, try a reallocation
    if (needed_capacity > *capacity) {
        size_t new_capacity = cx_array_align_capacity(needed_capacity, 16, SIZE_MAX);
        void *new_mem = reallocator->realloc(
                *target, new_capacity, elem_size, reallocator
        );
        if (new_mem == NULL) {
            // give it up right away, there is no contract
            // that requires us to insert as much as we can
            return 1; // LCOV_EXCL_LINE
        }
        *target = new_mem;
        *capacity = new_capacity;
    }

    // now we have guaranteed that we can insert everything
    size_t new_size = old_size + elem_count;
    *size = new_size;

    // declare the source and destination indices/pointers
    size_t si = 0, di = 0;
    const char *src = sorted_data;
    char *dest = *target;

    // find the first insertion point
    di = cx_array_binary_search_sup(dest, old_size, elem_size, src, cmp_func);
    dest += di * elem_size;

    // move the remaining elements in the array completely to the right
    // we will call it the "buffer" for parked elements
    size_t buf_size = old_size - di;
    size_t bi = new_size - buf_size;
    char *bptr = ((char *) *target) + bi * elem_size;
    memmove(bptr, dest, buf_size * elem_size);

    // while there are both source and buffered elements left,
    // copy them interleaving
    while (si < elem_count && bi < new_size) {
        // determine how many source elements can be inserted
        size_t copy_len, bytes_copied;
        copy_len = cx_array_binary_search_sup(
                src,
                elem_count - si,
                elem_size,
                bptr,
                cmp_func
        );

        // copy the source elements
        bytes_copied = copy_len * elem_size;
        memcpy(dest, src, bytes_copied);
        dest += bytes_copied;
        src += bytes_copied;
        si += copy_len;

        // when all source elements are in place, we are done
        if (si >= elem_count) break;

        // determine how many buffered elements need to be restored
        copy_len = cx_array_binary_search_sup(
                bptr,
                new_size - bi,
                elem_size,
                src,
                cmp_func
        );

        // restore the buffered elements
        bytes_copied = copy_len * elem_size;
        memmove(dest, bptr, bytes_copied);
        dest += bytes_copied;
        bptr += bytes_copied;
        bi += copy_len;
    }

    // still source elements left? simply append them
    if (si < elem_count) {
        memcpy(dest, src, elem_size * (elem_count - si));
    }

    // still buffer elements left?
    // don't worry, we already moved them to the correct place

    return 0;
}

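/*
 * The three search functions below share one convention, illustrated on the
 * hypothetical sorted array {10, 20, 30} with an integer compare function:
 *   cx_array_binary_search_inf(..., 25)  returns 1  (largest element <= 25)
 *   cx_array_binary_search(..., 25)      returns 3  (== size, no exact match)
 *   cx_array_binary_search_sup(..., 25)  returns 2  (smallest element >= 25)
 * If no such element exists, the search returns size.
 */
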
size_t cx_array_binary_search_inf(
        const void *arr,
        size_t size,
        size_t elem_size,
        const void *elem,
        cx_compare_func cmp_func
) {
    // special case: empty array
    if (size == 0) return 0;

    // declare a variable that will contain the compare results
    int result;

    // cast the array pointer to something we can use offsets with
    const char *array = arr;

    // check the first array element
    result = cmp_func(elem, array);
    if (result < 0) {
        return size;
    } else if (result == 0) {
        return 0;
    }

    // special case: there is only one element and that is smaller
    if (size == 1) return 0;

    // check the last array element
    result = cmp_func(elem, array + elem_size * (size - 1));
    if (result >= 0) {
        return size - 1;
    }

    // the element is now guaranteed to be somewhere in the list
    // so start the binary search
    size_t left_index = 1;
    size_t right_index = size - 1;
    size_t pivot_index;

    while (left_index <= right_index) {
        pivot_index = left_index + (right_index - left_index) / 2;
        const char *arr_elem = array + pivot_index * elem_size;
        result = cmp_func(elem, arr_elem);
        if (result == 0) {
            // found it!
            return pivot_index;
        } else if (result < 0) {
            // element is smaller than pivot, continue search left
            right_index = pivot_index - 1;
        } else {
            // element is larger than pivot, continue search right
            left_index = pivot_index + 1;
        }
    }

    // report the infimum (largest element that is less than or equal)
    return result < 0 ? (pivot_index - 1) : pivot_index;
}

size_t cx_array_binary_search(
        const void *arr,
        size_t size,
        size_t elem_size,
        const void *elem,
        cx_compare_func cmp_func
) {
    size_t index = cx_array_binary_search_inf(
            arr, size, elem_size, elem, cmp_func
    );
    if (index < size &&
        cmp_func(((const char *) arr) + index * elem_size, elem) == 0) {
        return index;
    } else {
        return size;
    }
}

size_t cx_array_binary_search_sup(
        const void *arr,
        size_t size,
        size_t elem_size,
        const void *elem,
        cx_compare_func cmp_func
) {
    size_t inf = cx_array_binary_search_inf(
            arr, size, elem_size, elem, cmp_func
    );
    if (inf == size) {
        // no infimum means the first element is the supremum
        return 0;
    } else if (cmp_func(((const char *) arr) + inf * elem_size, elem) == 0) {
        return inf;
    } else {
        return inf + 1;
    }
}

#ifndef CX_ARRAY_SWAP_SBO_SIZE
#define CX_ARRAY_SWAP_SBO_SIZE 128
#endif
const unsigned cx_array_swap_sbo_size = CX_ARRAY_SWAP_SBO_SIZE;

void cx_array_swap(
        void *arr,
        size_t elem_size,
        size_t idx1,
        // ... (code omitted in this excerpt: the remainder of cx_array_swap
        // through the beginning of cx_arl_insert_array) ...
    // note that if we had to move the elements, the following operation
    // is guaranteed to succeed, because we have the memory already allocated
    // therefore, it is impossible to leave this function with an invalid array

    // place the new elements
    if (cx_array_copy(
            &arl->data,
            &list->collection.size,
            &arl->capacity,
            0,
            index,
            array,
            list->collection.elem_size,
            n,
            &arl->reallocator
    )) {
        // array list implementation is "all or nothing"
        return 0;
    } else {
        return n;
    }
}

static size_t cx_arl_insert_sorted(
        struct cx_list_s *list,
        const void *sorted_data,
        size_t n
) {
    // get a correctly typed pointer to the list
    cx_array_list *arl = (cx_array_list *) list;

    if (cx_array_insert_sorted(
            &arl->data,
            &list->collection.size,
            &arl->capacity,
            list->collection.cmpfunc,
            sorted_data,
            list->collection.elem_size,
            n,
            &arl->reallocator
    )) {
        // array list implementation is "all or nothing"
        return 0;
    } else {
        return n;
    }
}

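// Single-element wrapper: cx_arl_insert_array() reports how many elements it
// inserted, so inserting exactly one element succeeded iff that count is 1;
// the comparison below maps this onto the 0-on-success convention of the
// list class interface.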
static int cx_arl_insert_element(
        struct cx_list_s *list,
        size_t index,
        const void *element
) {
    return 1 != cx_arl_insert_array(list, index, element, 1);
}

static int cx_arl_insert_iter(
        struct cx_iterator_s *iter,
        const void *elem,
        int prepend
) {
    struct cx_list_s *list = iter->src_handle.m;
    if (iter->index < list->collection.size) {
        int result = cx_arl_insert_element(
                list,
                iter->index + 1 - prepend,
                elem
        );
        if (result == 0) {
            iter->elem_count++;
            if (prepend != 0) {
                iter->index++;
                iter->elem_handle = ((char *) iter->elem_handle) + list->collection.elem_size;
            }
        }
        return result;
    } else {
        int result = cx_arl_insert_element(list, list->collection.size, elem);
        if (result == 0) {
            iter->elem_count++;
            iter->index = list->collection.size;
        }
        return result;
    }
}

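// Removes up to num elements starting at index. The count is clamped to the
// end of the list; removed elements are either passed to the configured
// destructors (targetbuf == NULL) or copied verbatim into targetbuf.
// Returns the number of elements actually removed.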
static size_t cx_arl_remove(
        struct cx_list_s *list,
        size_t index,
        size_t num,
        void *targetbuf
) {
    cx_array_list *arl = (cx_array_list *) list;

    // out-of-bounds check
    size_t remove;
    if (index >= list->collection.size) {
        remove = 0;
    } else if (index + num > list->collection.size) {
        remove = list->collection.size - index;
    } else {
        remove = num;
    }

    // easy exit
    if (remove == 0) return 0;

    // destroy or copy contents
    if (targetbuf == NULL) {
        for (size_t idx = index; idx < index + remove; idx++) {
            cx_invoke_destructor(
                    list,
                    ((char *) arl->data) + idx * list->collection.elem_size
            );
        }
    } else {
        memcpy(
                targetbuf,
                ((char *) arl->data) + index * list->collection.elem_size,
                remove * list->collection.elem_size
        );
    }

    // short-circuit removal of last elements
    if (index + remove == list->collection.size) {
        list->collection.size -= remove;
        return remove;
    }

    // just move the elements to the left
    cx_array_copy(
            &arl->data,
            &list->collection.size,
            &arl->capacity,
            0,
            index,
            ((char *) arl->data) + (index + remove) * list->collection.elem_size,
            list->collection.elem_size,
            list->collection.size - index - remove,
            &arl->reallocator
    );

    // decrease the size
    list->collection.size -= remove;

    return remove;
}

static void cx_arl_clear(struct cx_list_s *list) {
    if (list->collection.size == 0) return;

    cx_array_list *arl = (cx_array_list *) list;
    char *ptr = arl->data;

    if (list->collection.simple_destructor) {
        for (size_t i = 0; i < list->collection.size; i++) {
            cx_invoke_simple_destructor(list, ptr);
            ptr += list->collection.elem_size;
        }
    }
    if (list->collection.advanced_destructor) {
        for (size_t i = 0; i < list->collection.size; i++) {
            cx_invoke_advanced_destructor(list, ptr);
            ptr += list->collection.elem_size;
        }
    }

    memset(arl->data, 0, list->collection.size * list->collection.elem_size);
    list->collection.size = 0;
}

static int cx_arl_swap(
        struct cx_list_s *list,
        size_t i,
        size_t j
) {
    if (i >= list->collection.size || j >= list->collection.size) return 1;
    cx_array_list *arl = (cx_array_list *) list;
    cx_array_swap(arl->data, list->collection.elem_size, i, j);
    return 0;
}

static void *cx_arl_at(
        const struct cx_list_s *list,
        size_t index
) {
    if (index < list->collection.size) {
        const cx_array_list *arl = (const cx_array_list *) list;
        char *space = arl->data;
        return space + index * list->collection.elem_size;
    } else {
        return NULL;
    }
}

static size_t cx_arl_find_remove(
        struct cx_list_s *list,
        const void *elem,
        bool remove
) {
    assert(list != NULL);
    assert(list->collection.cmpfunc != NULL);
    if (list->collection.size == 0) return 0;
    char *cur = ((const cx_array_list *) list)->data;

    // optimize with binary search, when sorted
    if (list->collection.sorted) {
        size_t i = cx_array_binary_search(
                cur,
                list->collection.size,
                list->collection.elem_size,
                elem,
                list->collection.cmpfunc
        );
        if (remove && i < list->collection.size) {
            cx_arl_remove(list, i, 1, NULL);
        }
        return i;
    }

    // fallback: linear search
    for (size_t i = 0; i < list->collection.size; i++) {
        if (0 == list->collection.cmpfunc(elem, cur)) {
            if (remove) {
                cx_arl_remove(list, i, 1, NULL);
            }
            return i;
        }
        cur += list->collection.elem_size;
    }
    return list->collection.size;
}

static void cx_arl_sort(struct cx_list_s *list) {
    assert(list->collection.cmpfunc != NULL);
    qsort(((cx_array_list *) list)->data,
          list->collection.size,
          list->collection.elem_size,
          list->collection.cmpfunc
    );
}

static int cx_arl_compare(
        const struct cx_list_s *list,
        const struct cx_list_s *other
) {
    assert(list->collection.cmpfunc != NULL);
    if (list->collection.size == other->collection.size) {
        const char *left = ((const cx_array_list *) list)->data;
        const char *right = ((const cx_array_list *) other)->data;
        for (size_t i = 0; i < list->collection.size; i++) {
            int d = list->collection.cmpfunc(left, right);
            if (d != 0) {
                return d;
            }
            left += list->collection.elem_size;
            right += other->collection.elem_size;
        }
        return 0;
    } else {
        return list->collection.size < other->collection.size ? -1 : 1;
    }
}

static void cx_arl_reverse(struct cx_list_s *list) {
    if (list->collection.size < 2) return;
    void *data = ((const cx_array_list *) list)->data;
    size_t half = list->collection.size / 2;
    for (size_t i = 0; i < half; i++) {
        cx_array_swap(data, list->collection.elem_size, i, list->collection.size - 1 - i);
    }
}

static bool cx_arl_iter_valid(const void *it) {
    const struct cx_iterator_s *iter = it;
    const struct cx_list_s *list = iter->src_handle.c;
    return iter->index < list->collection.size;
}

static void *cx_arl_iter_current(const void *it) {
    const struct cx_iterator_s *iter = it;
    return iter->elem_handle;
}

static void cx_arl_iter_next(void *it) {
    struct cx_iterator_s *iter = it;
    if (iter->base.remove) {
        iter->base.remove = false;
        cx_arl_remove(iter->src_handle.m, iter->index, 1, NULL);
    } else {
        iter->index++;
        iter->elem_handle =
                ((char *) iter->elem_handle)
                + ((const struct cx_list_s *) iter->src_handle.c)->collection.elem_size;
    }
}

static void cx_arl_iter_prev(void *it) {
    struct cx_iterator_s *iter = it;
    const cx_array_list *list = iter->src_handle.c;
    if (iter->base.remove) {
        iter->base.remove = false;
        cx_arl_remove(iter->src_handle.m, iter->index, 1, NULL);
    }
    iter->index--;
    if (iter->index < list->base.collection.size) {
        iter->elem_handle = ((char *) list->data)
                            + iter->index * list->base.collection.elem_size;
    }
}

static struct cx_iterator_s cx_arl_iterator(
        const struct cx_list_s *list,
        size_t index,
        bool backwards
) {
    struct cx_iterator_s iter;

    iter.index = index;
    iter.src_handle.c = list;
    iter.elem_handle = cx_arl_at(list, index);
    iter.elem_size = list->collection.elem_size;
    iter.elem_count = list->collection.size;
    iter.base.valid = cx_arl_iter_valid;
    iter.base.current = cx_arl_iter_current;
    iter.base.next = backwards ? cx_arl_iter_prev : cx_arl_iter_next;
    iter.base.remove = false;
    iter.base.mutating = false;

    return iter;
}

static cx_list_class cx_array_list_class = {
        cx_arl_destructor,
        cx_arl_insert_element,
        cx_arl_insert_array,
        cx_arl_insert_sorted,
        cx_arl_insert_iter,
        cx_arl_remove,
        cx_arl_clear,
        cx_arl_swap,
        cx_arl_at,
        cx_arl_find_remove,
        cx_arl_sort,
        cx_arl_compare,
        cx_arl_reverse,
        cx_arl_iterator,
};

CxList *cxArrayListCreate(
        const CxAllocator *allocator,
        cx_compare_func comparator,
        size_t elem_size,
        size_t initial_capacity
) {
    if (allocator == NULL) {
        allocator = cxDefaultAllocator;
    }

    cx_array_list *list = cxCalloc(allocator, 1, sizeof(cx_array_list));
    if (list == NULL) return NULL;
    cx_list_init((CxList*)list, &cx_array_list_class,
            allocator, comparator, elem_size);
    list->capacity = initial_capacity;

    // allocate the array after the real elem_size is known
    list->data = cxCalloc(allocator, initial_capacity,
            list->base.collection.elem_size);
    if (list->data == NULL) { // LCOV_EXCL_START
        cxFree(allocator, list);
        return NULL;
    } // LCOV_EXCL_STOP

    // configure the reallocator
    list->reallocator = cx_array_reallocator(allocator, NULL);

    return (CxList *) list;
}
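
/*
 * Minimal usage sketch (assumptions: cx_cmp_int and the generic cxListAdd()
 * and cxListFree() wrappers are provided by the public headers; they are not
 * defined in this file):
 *
 *   CxList *list = cxArrayListCreate(NULL, cx_cmp_int, sizeof(int), 16);
 *   if (list != NULL) {
 *       int x = 42;
 *       cxListAdd(list, &x);   // the list stores a copy of x
 *       cxListFree(list);
 *   }
 */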