@@ -126,10 +60,74 @@
     else if (cap < 8192) alignment = 512;
     else alignment = 1024;
     return cap - (cap % alignment) + alignment;
 }
 
-int cx_array_reserve(
-        void **array,
-        void *size,
-        void *capacity,
-        unsigned width,
+int cx_array_init_(const CxAllocator *allocator, CxArray *array, size_t elem_size, size_t capacity) {
+    memset(array, 0, sizeof(CxArray));
+    return cx_array_reserve_(allocator, array, elem_size, capacity);
+}
+
+void cx_array_init_fixed_(CxArray *array, const void *data, size_t capacity, size_t size) {
+    array->data = (void*) data;
+    array->capacity = capacity;
+    array->size = size;
+}
+
+int cx_array_reserve_(const CxAllocator *allocator, CxArray *array, size_t elem_size, size_t capacity) {
+    if (cxReallocateArray(allocator, &array->data, capacity, elem_size)) {
+        return -1; // LCOV_EXCL_LINE
+    }
+    array->capacity = capacity;
+    if (array->size > capacity) {
+        array->size = capacity;
+    }
+    return 0;
+}
+
+int cx_array_copy_to_new_(const CxAllocator *allocator, CxArray *array, size_t elem_size, size_t capacity) {
+    CxArray heap_array;
+    if (cx_array_init_(allocator, &heap_array, elem_size, capacity)) {
+        return -1; // LCOV_EXCL_LINE
+    }
+    heap_array.size = array->size;
+    memcpy(heap_array.data, array->data, elem_size * array->size);
+    *array = heap_array;
+    return 0;
+}
+
+int cx_array_insert_(const CxAllocator *allocator, CxArray *array,
+        size_t elem_size, size_t index, const void *other, size_t n) {
+    // out of bounds and special case check
+    if (index > array->size) return -1;
+    if (n == 0) return 0;
+
+    // guarantee enough capacity
+    if (array->capacity < array->size + n) {
+        const size_t new_capacity = cx_array_grow_capacity(array->capacity,array->size + n);
+        if (cxReallocateArray(allocator, &array->data, new_capacity, elem_size)) {
+            return -1; // LCOV_EXCL_LINE
+        }
+        array->capacity = new_capacity;
+    }
+
+    // determine insert position
+    char *dst = array->data;
+    dst += index * elem_size;
+
+    // do we need to move some elements?
+    if (index < array->size) {
+        size_t elems_to_move = array->size - index;
+        char *target = dst + n * elem_size;
+        memmove(target, dst, elems_to_move * elem_size);
+    }
+
+    // place the new elements, if any
+    // otherwise, this function just reserved the memory (a.k.a emplace)
+    if (other != NULL) {
+        memcpy(dst, other, n * elem_size);
+    }
+    array->size += n;
+
+    return 0;
+}
+
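For orientation, a minimal usage sketch of the new CxArray-based functions introduced above. It is not taken from the change itself: the allocator handle cxDefaultAllocator, the header path, and the demo function are assumptions, while the signatures and struct fields are the ones shown in the hunk.

#include <assert.h>
#include "cx/array_list.h" // assumed header providing CxArray and the cx_array_* functions

static void demo_cx_array(void) {
    CxArray numbers;
    // create an int array with an initial capacity of 8 elements (size starts at 0)
    if (cx_array_init_(cxDefaultAllocator, &numbers, sizeof(int), 8)) return;

    // append three elements (inserting at index == size appends)
    int values[] = {10, 20, 30};
    cx_array_insert_(cxDefaultAllocator, &numbers, sizeof(int), numbers.size, values, 3);

    // insert one element in the middle; the tail is moved to the right
    int x = 15;
    cx_array_insert_(cxDefaultAllocator, &numbers, sizeof(int), 1, &x, 1);
    assert(numbers.size == 4); // contents: 10, 15, 20, 30

    // release the buffer and reset size and capacity to zero
    cx_array_free_(cxDefaultAllocator, &numbers);
}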
@@ -136,289 +134,69 @@
+int cx_array_insert_sorted_c_(
+        const CxAllocator *allocator,
+        CxArray *array,
         size_t elem_size,
-        size_t elem_count,
-        CxArrayReallocator *reallocator
+        const void *sorted_data,
+        size_t n,
+        cx_compare_func2 cmp_func,
+        void *context,
+        bool allow_duplicates
 ) {
     // assert pointers
+    assert(allocator != NULL);
     assert(array != NULL);
-    assert(size != NULL);
-    assert(capacity != NULL);
+    assert(cmp_func != NULL);
+    assert(sorted_data != NULL);
 
-    // default reallocator
-    if (reallocator == NULL) {
-        reallocator = cx_array_default_reallocator;
-    }
-
-    // determine size and capacity
-    size_t oldcap;
-    size_t oldsize;
-    size_t max_size;
-    if (width == 0 || width == sizeof(size_t)) {
-        oldcap = *(size_t*) capacity;
-        oldsize = *(size_t*) size;
-        max_size = SIZE_MAX;
-    } else if (width == sizeof(uint16_t)) {
-        oldcap = *(uint16_t*) capacity;
-        oldsize = *(uint16_t*) size;
-        max_size = UINT16_MAX;
-    } else if (width == sizeof(uint8_t)) {
-        oldcap = *(uint8_t*) capacity;
-        oldsize = *(uint8_t*) size;
-        max_size = UINT8_MAX;
-    }
-#if CX_WORDSIZE == 64
-    else if (width == sizeof(uint32_t)) {
-        oldcap = *(uint32_t*) capacity;
-        oldsize = *(uint32_t*) size;
-        max_size = UINT32_MAX;
-    }
-#endif
-    else {
-        errno = EINVAL;
-        return 1;
-    }
-
-    // assert that the array is allocated when it has capacity
-    assert(*array != NULL || oldcap == 0);
-
-    // check for overflow
-    if (elem_count > max_size - oldsize) {
+    // corner case
+    if (n == 0) return 0;
+
+    // overflow check
+    // LCOV_EXCL_START
+    if (n > SIZE_MAX - array->size) {
         errno = EOVERFLOW;
         return 1;
     }
-
-    // determine new capacity
-    size_t newcap = oldsize + elem_count;
-
-    // reallocate if possible
-    if (newcap > oldcap) {
-        void *newmem = reallocator->realloc(
-                *array, oldcap, newcap, elem_size, reallocator
-        );
-        if (newmem == NULL) {
-            return 1; // LCOV_EXCL_LINE
-        }
-
-        // store new pointer
-        *array = newmem;
-
-        // store new capacity
-        if (width == 0 || width == sizeof(size_t)) {
-            *(size_t*) capacity = newcap;
-        } else if (width == sizeof(uint16_t)) {
-            *(uint16_t*) capacity = (uint16_t) newcap;
-        } else if (width == sizeof(uint8_t)) {
-            *(uint8_t*) capacity = (uint8_t) newcap;
-        }
-#if CX_WORDSIZE == 64
-        else if (width == sizeof(uint32_t)) {
-            *(uint32_t*) capacity = (uint32_t) newcap;
-        }
-#endif
-    }
-
-    return 0;
-}
-
-int cx_array_copy(
-        void **target,
-        void *size,
-        void *capacity,
-        unsigned width,
-        size_t index,
-        const void *src,
-        size_t elem_size,
-        size_t elem_count,
-        CxArrayReallocator *reallocator
-) {
-    // assert pointers
-    assert(target != NULL);
-    assert(size != NULL);
-    assert(capacity != NULL);
-    assert(src != NULL);
-
-    // default reallocator
-    if (reallocator == NULL) {
-        reallocator = cx_array_default_reallocator;
-    }
-
-    // determine size and capacity
-    size_t oldcap;
-    size_t oldsize;
-    size_t max_size;
-    if (width == 0 || width == sizeof(size_t)) {
-        oldcap = *(size_t*) capacity;
-        oldsize = *(size_t*) size;
-        max_size = SIZE_MAX;
-    } else if (width == sizeof(uint16_t)) {
-        oldcap = *(uint16_t*) capacity;
-        oldsize = *(uint16_t*) size;
-        max_size = UINT16_MAX;
-    } else if (width == sizeof(uint8_t)) {
-        oldcap = *(uint8_t*) capacity;
-        oldsize = *(uint8_t*) size;
-        max_size = UINT8_MAX;
-    }
-#if CX_WORDSIZE == 64
-    else if (width == sizeof(uint32_t)) {
-        oldcap = *(uint32_t*) capacity;
-        oldsize = *(uint32_t*) size;
-        max_size = UINT32_MAX;
-    }
-#endif
-    else {
-        errno = EINVAL;
-        return 1;
-    }
-
-    // assert that the array is allocated when it has capacity
-    assert(*target != NULL || oldcap == 0);
-
-    // check for overflow
-    if (index > max_size || elem_count > max_size - index) {
-        errno = EOVERFLOW;
-        return 1;
-    }
-
-    // check if resize is required
-    const size_t minsize = index + elem_count;
-    const size_t newsize = oldsize < minsize ? minsize : oldsize;
-
-    // reallocate if necessary
-    const size_t newcap = cx_array_grow_capacity(oldcap, newsize);
-    if (newcap > oldcap) {
-        // check if we need to repair the src pointer
-        uintptr_t targetaddr = (uintptr_t) *target;
-        uintptr_t srcaddr = (uintptr_t) src;
-        bool repairsrc = targetaddr <= srcaddr
-                && srcaddr < targetaddr + oldcap * elem_size;
-
-        // perform reallocation
-        void *newmem = reallocator->realloc(
-                *target, oldcap, newcap, elem_size, reallocator
-        );
-        if (newmem == NULL) {
-            return 1; // LCOV_EXCL_LINE
-        }
-
-        // repair src pointer, if necessary
-        if (repairsrc) {
-            src = ((char *) newmem) + (srcaddr - targetaddr);
-        }
-
-        // store new pointer
-        *target = newmem;
-    }
-
-    // determine target pointer
-    char *start = *target;
-    start += index * elem_size;
-
-    // copy elements and set new size
-    // note: no overflow check here, b/c we cannot get here w/o allocation
-    memmove(start, src, elem_count * elem_size);
-
-    // if any of size or capacity changed, store them back
-    if (newsize != oldsize || newcap != oldcap) {
-        if (width == 0 || width == sizeof(size_t)) {
-            *(size_t*) capacity = newcap;
-            *(size_t*) size = newsize;
-        } else if (width == sizeof(uint16_t)) {
-            *(uint16_t*) capacity = (uint16_t) newcap;
-            *(uint16_t*) size = (uint16_t) newsize;
-        } else if (width == sizeof(uint8_t)) {
-            *(uint8_t*) capacity = (uint8_t) newcap;
-            *(uint8_t*) size = (uint8_t) newsize;
-        }
-#if CX_WORDSIZE == 64
-        else if (width == sizeof(uint32_t)) {
-            *(uint32_t*) capacity = (uint32_t) newcap;
-            *(uint32_t*) size = (uint32_t) newsize;
-        }
-#endif
-    }
-
-    // return successfully
-    return 0;
-}
-
-static int cx_array_insert_sorted_impl(
-        void **target,
-        size_t *size,
-        size_t *capacity,
-        cx_compare_func cmp_func,
-        const void *sorted_data,
-        size_t elem_size,
-        size_t elem_count,
-        CxArrayReallocator *reallocator,
-        bool allow_duplicates
-) {
-    // assert pointers
-    assert(target != NULL);
-    assert(size != NULL);
-    assert(capacity != NULL);
-    assert(cmp_func != NULL);
-    assert(sorted_data != NULL);
-
-    // default reallocator
-    if (reallocator == NULL) {
-        reallocator = cx_array_default_reallocator;
-    }
-
-    // corner case
-    if (elem_count == 0) return 0;
-
-    // overflow check
-    // LCOV_EXCL_START
-    if (elem_count > SIZE_MAX - *size) {
-        errno = EOVERFLOW;
-        return 1;
-    }
     // LCOV_EXCL_STOP
 
     // store some counts
-    const size_t old_size = *size;
-    const size_t old_capacity = *capacity;
+    const size_t old_size = array->size;
+    const size_t old_capacity = array->capacity;
     // the necessary capacity is the worst case assumption, including duplicates
-    const size_t needed_capacity = cx_array_grow_capacity(old_capacity, old_size + elem_count);
+    const size_t needed_capacity = cx_array_grow_capacity(old_capacity, old_size + n);
 
     // if we need more than we have, try a reallocation
     if (needed_capacity > old_capacity) {
-        void *new_mem = reallocator->realloc(
-                *target, old_capacity, needed_capacity, elem_size, reallocator
-        );
-        if (new_mem == NULL) {
-            // give it up right away, there is no contract
-            // that requires us to insert as much as we can
-            return 1; // LCOV_EXCL_LINE
-        }
-        *target = new_mem;
-        *capacity = needed_capacity;
+        if (cxReallocateArray(allocator, &array->data, needed_capacity, elem_size)) {
+            return -1; // LCOV_EXCL_LINE
+        }
+        array->capacity = needed_capacity;
     }
 
     // now we have guaranteed that we can insert everything
-    size_t new_size = old_size + elem_count;
-    *size = new_size;
+    size_t new_size = old_size + n;
+    array->size = new_size;
 
     // declare the source and destination indices/pointers
     size_t si = 0, di = 0;
     const char *src = sorted_data;
-    char *dest = *target;
+    char *dest = array->data;
 
     // find the first insertion point
-    di = cx_array_binary_search_sup(dest, old_size, elem_size, src, cmp_func);
+    di = cx_array_binary_search_sup_c(dest, old_size, elem_size, src, cmp_func, context);
     dest += di * elem_size;
 
     // move the remaining elements in the array completely to the right
     // we will call it the "buffer" for parked elements
     size_t buf_size = old_size - di;
     size_t bi = new_size - buf_size;
-    char *bptr = ((char *) *target) + bi * elem_size;
+    char *bptr = ((char *) array->data) + bi * elem_size;
     memmove(bptr, dest, buf_size * elem_size);
 
     // while there are both source and buffered elements left,
     // copy them interleaving
-    while (si < elem_count && bi < new_size) {
+    while (si < n && bi < new_size) {
         // determine how many source elements can be inserted.
         // the first element that shall not be inserted is the smallest element
         // that is strictly larger than the first buffered element
         // (located at the index of the infimum plus one).
         // the infimum is guaranteed to exist:
@@ -543,20 +322,20 @@
                 memcpy(dest, src, bytes_copied);
                 dest += bytes_copied;
                 src += bytes_copied + skip_len * elem_size;
                 si += copy_len + skip_len;
                 di += copy_len;
-                *size -= skip_len;
+                array->size -= skip_len;
             }
         }
     }
 
     // buffered elements need to be moved when we skipped duplicates
-    size_t total_skipped = new_size - *size;
+    size_t total_skipped = new_size - array->size;
     if (bi < new_size && total_skipped > 0) {
         // move the remaining buffer to the end of the array
         memmove(dest, bptr, elem_size * (new_size - bi));
     }
 
     return 0;
 }
 
@@ -563,27 +342,101 @@
-int cx_array_insert_sorted(
-        void **target,
-        size_t *size,
-        size_t *capacity,
+int cx_array_insert_sorted_(
+        const CxAllocator *allocator,
+        CxArray *array,
+        size_t elem_size,
+        const void *sorted_data,
+        size_t n,
         cx_compare_func cmp_func,
-        const void *sorted_data,
-        size_t elem_size,
-        size_t elem_count,
-        CxArrayReallocator *reallocator
-) {
-    return cx_array_insert_sorted_impl(target, size, capacity,
-            cmp_func, sorted_data, elem_size, elem_count, reallocator, true);
-}
-
-int cx_array_insert_unique(
-        void **target,
-        size_t *size,
-        size_t *capacity,
-        cx_compare_func cmp_func,
-        const void *sorted_data,
-        size_t elem_size,
-        size_t elem_count,
-        CxArrayReallocator *reallocator
-) {
-    return cx_array_insert_sorted_impl(target, size, capacity,
-            cmp_func, sorted_data, elem_size, elem_count, reallocator, false);
-}
+        bool allow_duplicates
+) {
+    cx_compare_func_wrapper wrapper = {cmp_func};
+    return cx_array_insert_sorted_c_(allocator, array, elem_size, sorted_data,
+            n, cx_ccmp_wrap, &wrapper, allow_duplicates);
+}
+
+#ifndef WITH_QSORT_R
+static thread_local cx_compare_func2 cx_array_fn_for_qsort;
+static thread_local void *cx_array_context_for_qsort;
+static int cx_array_qsort_wrapper(const void *l, const void *r) {
+    return cx_array_fn_for_qsort(l, r, cx_array_context_for_qsort);
+}
+#endif
+
+void cx_array_qsort_c(void *array, size_t nmemb, size_t size,
+        cx_compare_func2 fn, void *context) {
+#ifdef WITH_QSORT_R
+    qsort_r(array, nmemb, size, fn, context);
+#else
+    cx_array_fn_for_qsort = fn;
+    cx_array_context_for_qsort = context;
+    qsort(array, nmemb, size, cx_array_qsort_wrapper);
+#endif
+}
+
+void cx_array_sort_(CxArray *array, size_t elem_size,
+        cx_compare_func fn) {
+    qsort(array->data, array->size, elem_size, fn);
+}
+
+void cx_array_sort_c_(CxArray *array, size_t elem_size,
+        cx_compare_func2 fn, void *context) {
+    cx_array_qsort_c(array->data, array->size, elem_size, fn, context);
+}
+
+CxIterator cx_array_iterator_(CxArray *array, size_t elem_size) {
+    return cxIterator(array->data, elem_size, array->size);
+}
+
+CxIterator cx_array_iterator_ptr_(CxArray *array) {
+    return cxIteratorPtr(array->data, array->size);
+}
+
+void cx_array_remove_(CxArray *array, size_t elem_size, size_t index, size_t n, bool fast) {
+    if (n == 0) return;
+    if (index >= array->size) return;
+    if (index + n >= array->size) {
+        // only tail elements are removed
+        array->size = index;
+        return;
+    }
+    array->size -= n;
+    size_t remaining = array->size - index;
+    char *dest = ((char*)array->data) + index * elem_size;
+    if (fast) {
+        char *src = dest + remaining * elem_size;
+        if (n == 1 && elem_size <= CX_WORDSIZE/8) {
+            // try to optimize int-sized values
+            // (from likely to unlikely)
+            if (elem_size == sizeof(int32_t)) {
+                *(int32_t*)dest = *(int32_t*)src;
+                return;
+            }
+#if CX_WORDSIZE == 64
+            if (elem_size == sizeof(int64_t)) {
+                *(int64_t*)dest = *(int64_t*)src;
+                return;
+            }
+#endif
+            if (elem_size == sizeof(int8_t)) {
+                *(int8_t*)dest = *(int8_t*)src;
+                return;
+            }
+            if (elem_size == sizeof(int16_t)) {
+                *(int16_t*)dest = *(int16_t*)src;
+                return;
+            }
+            // note we cannot optimize the last branch, because
+            // the elem_size could be crazily misaligned
+        }
+        memcpy(dest, src, n * elem_size);
+    } else {
+        char *src = dest + n * elem_size;
+        memmove(dest, src, remaining * elem_size);
+    }
+}
+
+void cx_array_free_(const CxAllocator *allocator, CxArray *array) {
+    cxFree(allocator, array->data);
+    array->data = NULL;
+    array->size = array->capacity = 0;
+}
+
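The fast-removal branch in cx_array_remove_ above fills the gap with the trailing element(s) instead of shifting the whole tail, so element order is not preserved. A small sketch of the difference, using the fixed-size initializer from this change (assuming a 32-bit int; the demo function itself is not part of the change):

static void demo_remove(void) {
    int data[4] = {10, 20, 30, 40};
    CxArray arr;
    cx_array_init_fixed_(&arr, data, 4, 4);

    // fast removal: the last element is copied into the gap
    cx_array_remove_(&arr, sizeof(int), 1, 1, true);
    // contents are now 10, 40, 30 with size == 3

    // with fast == false the tail would have been shifted instead: 10, 30, 40
}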
@@ -590,13 +443,14 @@
 
 // implementation that finds ANY index
 static size_t cx_array_binary_search_inf_impl(
         const void *arr,
         size_t size,
         size_t elem_size,
         const void *elem,
-        cx_compare_func cmp_func
+        cx_compare_func2 cmp_func,
+        void *context
 ) {
     // special case: empty array
     if (size == 0) return 0;
 
     // declare a variable that will contain the compare results
@@ -646,73 +500,109 @@
 
     // report the largest upper bound
     return result < 0 ? (pivot_index - 1) : pivot_index;
 }
 
+size_t cx_array_binary_search_inf_c(
+        const void *arr,
+        size_t size,
+        size_t elem_size,
+        const void *elem,
+        cx_compare_func2 cmp_func,
+        void *context
+) {
+    size_t index = cx_array_binary_search_inf_impl(
+            arr, size, elem_size, elem, cmp_func, context);
+    // in case of equality, report the largest index
+    const char *e = ((const char *) arr) + (index + 1) * elem_size;
+    while (index + 1 < size && cmp_func(e, elem, context) == 0) {
+        e += elem_size;
+        index++;
+    }
+    return index;
+}
+
+size_t cx_array_binary_search_c(
+        const void *arr,
+        size_t size,
+        size_t elem_size,
+        const void *elem,
+        cx_compare_func2 cmp_func,
+        void *context
+) {
+    size_t index = cx_array_binary_search_inf_c(
+            arr, size, elem_size, elem, cmp_func, context
+    );
+    if (index < size && cmp_func(((const char *) arr) + index * elem_size,
+            elem, context) == 0) {
+        return index;
+    } else {
+        return size;
+    }
+}
+
+size_t cx_array_binary_search_sup_c(
+        const void *arr,
+        size_t size,
+        size_t elem_size,
+        const void *elem,
+        cx_compare_func2 cmp_func,
+        void *context
+) {
+    size_t index = cx_array_binary_search_inf_impl(
+            arr, size, elem_size, elem, cmp_func, context
+    );
+    const char *e = ((const char *) arr) + index * elem_size;
+    if (index == size) {
+        // no infimum means the first element is supremum
+        return 0;
+    } else if (cmp_func(e, elem, context) == 0) {
+        // found an equal element, search the smallest index
+        e -= elem_size; // e now contains the element at index-1
+        while (index > 0 && cmp_func(e, elem, context) == 0) {
+            e -= elem_size;
+            index--;
+        }
+        return index;
+    } else {
+        // we already have the largest index of the infimum (by design)
+        // the next element is the supremum (or there is no supremum)
+        return index + 1;
+    }
+}
+
 size_t cx_array_binary_search_inf(
         const void *arr,
         size_t size,
         size_t elem_size,
         const void *elem,
         cx_compare_func cmp_func
 ) {
-    size_t index = cx_array_binary_search_inf_impl(
-            arr, size, elem_size, elem, cmp_func);
-    // in case of equality, report the largest index
-    const char *e = ((const char *) arr) + (index + 1) * elem_size;
-    while (index + 1 < size && cmp_func(e, elem) == 0) {
-        e += elem_size;
-        index++;
-    }
-    return index;
+    cx_compare_func_wrapper wrapper = {cmp_func};
+    return cx_array_binary_search_inf_c(arr, size, elem_size, elem, cx_ccmp_wrap, &wrapper);
 }
 
 size_t cx_array_binary_search(
         const void *arr,
         size_t size,
         size_t elem_size,
         const void *elem,
         cx_compare_func cmp_func
 ) {
-    size_t index = cx_array_binary_search_inf(
-            arr, size, elem_size, elem, cmp_func
-    );
-    if (index < size &&
-            cmp_func(((const char *) arr) + index * elem_size, elem) == 0) {
-        return index;
-    } else {
-        return size;
-    }
+    cx_compare_func_wrapper wrapper = {cmp_func};
+    return cx_array_binary_search_c(arr, size, elem_size, elem, cx_ccmp_wrap, &wrapper);
 }
 
 size_t cx_array_binary_search_sup(
         const void *arr,
         size_t size,
         size_t elem_size,
         const void *elem,
         cx_compare_func cmp_func
 ) {
-    size_t index = cx_array_binary_search_inf_impl(
-            arr, size, elem_size, elem, cmp_func
-    );
-    const char *e = ((const char *) arr) + index * elem_size;
-    if (index == size) {
-        // no infimum means the first element is supremum
-        return 0;
-    } else if (cmp_func(e, elem) == 0) {
-        // found an equal element, search the smallest index
-        e -= elem_size; // e now contains the element at index-1
-        while (index > 0 && cmp_func(e, elem) == 0) {
-            e -= elem_size;
-            index--;
-        }
-        return index;
-    } else {
-        // we already have the largest index of the infimum (by design)
-        // the next element is the supremum (or there is no supremum)
-        return index + 1;
-    }
+    cx_compare_func_wrapper wrapper = {cmp_func};
+    return cx_array_binary_search_sup_c(arr, size, elem_size, elem, cx_ccmp_wrap, &wrapper);
 }
 
 #ifndef CX_ARRAY_SWAP_SBO_SIZE
 #define CX_ARRAY_SWAP_SBO_SIZE 128
 #endif
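To make the infimum/supremum terminology used above concrete, a short sketch of the results one should expect for a sorted int array. The comparator has the plain, qsort-style cx_compare_func signature; the demo function is not part of the change.

static int cmp_int(const void *l, const void *r) {
    const int a = *(const int*) l, b = *(const int*) r;
    return (a > b) - (a < b);
}

static void demo_binary_search(void) {
    const int arr[] = {1, 3, 3, 5};
    const int key = 4;

    // infimum: largest element <= key; here the second 3, so index 2
    size_t inf = cx_array_binary_search_inf(arr, 4, sizeof(int), &key, cmp_int);

    // supremum: smallest element >= key; here the 5, so index 3
    size_t sup = cx_array_binary_search_sup(arr, 4, sizeof(int), &key, cmp_int);

    // exact search: no element equals 4, so the array size (4) is returned
    size_t found = cx_array_binary_search(arr, 4, sizeof(int), &key, cmp_int);

    (void) inf; (void) sup; (void) found;
}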
@@ -792,97 +681,68 @@
         struct cx_list_s *list,
         size_t index,
         const void *array,
         size_t n
 ) {
-    // out of bounds and special case check
-    if (index > list->collection.size || n == 0) return 0;
-
-    // get a correctly typed pointer to the list
     cx_array_list *arl = (cx_array_list *) list;
-
-    // guarantee enough capacity
-    if (arl->capacity < list->collection.size + n) {
-        const size_t new_capacity = cx_array_grow_capacity(arl->capacity,list->collection.size + n);
-        if (cxReallocateArray(
-                list->collection.allocator,
-                &arl->data, new_capacity,
-                list->collection.elem_size)
-        ) {
-            return 0; // LCOV_EXCL_LINE
-        }
-        arl->capacity = new_capacity;
-    }
-
-    // determine insert position
-    char *arl_data = arl->data;
-    char *insert_pos = arl_data + index * list->collection.elem_size;
-
-    // do we need to move some elements?
-    if (index < list->collection.size) {
-        size_t elems_to_move = list->collection.size - index;
-        char *target = insert_pos + n * list->collection.elem_size;
-        memmove(target, insert_pos, elems_to_move * list->collection.elem_size);
-    }
-
-    // place the new elements, if any
-    if (array != NULL) {
-        memcpy(insert_pos, array, n * list->collection.elem_size);
-    }
-    list->collection.size += n;
-
+    CxArray wrap = {
+            arl->data, list->collection.size, arl->capacity
+    };
+    if (cx_array_insert_(list->collection.allocator, &wrap,
+            list->collection.elem_size, index, array, n)) {
+        return 0;
+    }
+    arl->data = wrap.data;
+    arl->capacity = wrap.capacity;
+    list->collection.size = wrap.size;
+    return n;
+}
+
+static size_t cx_arl_insert_sorted_impl(
+        struct cx_list_s *list,
+        const void *sorted_data,
+        size_t n,
+        bool allow_duplicates
+) {
+    cx_array_list *arl = (cx_array_list *) list;
+    CxArray wrap = {
+            arl->data, list->collection.size, arl->capacity
+    };
+
+    if (cx_array_insert_sorted_c_(
+            list->collection.allocator,
+            &wrap,
+            list->collection.elem_size,
+            sorted_data,
+            n,
+            cx_list_compare_wrapper,
+            list,
+            allow_duplicates
+    )) {
+        // array list implementation is "all or nothing"
+        return 0; // LCOV_EXCL_LINE
+    }
+    arl->data = wrap.data;
+    arl->capacity = wrap.capacity;
+    list->collection.size = wrap.size;
     return n;
 }
 
 static size_t cx_arl_insert_sorted(
         struct cx_list_s *list,
         const void *sorted_data,
         size_t n
 ) {
-    // get a correctly typed pointer to the list
-    cx_array_list *arl = (cx_array_list *) list;
-
-    if (cx_array_insert_sorted(
-            &arl->data,
-            &list->collection.size,
-            &arl->capacity,
-            list->collection.cmpfunc,
-            sorted_data,
-            list->collection.elem_size,
-            n,
-            &arl->reallocator
-    )) {
-        // array list implementation is "all or nothing"
-        return 0; // LCOV_EXCL_LINE
-    } else {
-        return n;
-    }
+    return cx_arl_insert_sorted_impl(list, sorted_data, n, true);
 }
 
 static size_t cx_arl_insert_unique(
         struct cx_list_s *list,
         const void *sorted_data,
         size_t n
 ) {
-    // get a correctly typed pointer to the list
-    cx_array_list *arl = (cx_array_list *) list;
-
-    if (cx_array_insert_unique(
-            &arl->data,
-            &list->collection.size,
-            &arl->capacity,
-            list->collection.cmpfunc,
-            sorted_data,
-            list->collection.elem_size,
-            n,
-            &arl->reallocator
-    )) {
-        // array list implementation is "all or nothing"
-        return 0; // LCOV_EXCL_LINE
-    } else {
-        return n;
-    }
+    return cx_arl_insert_sorted_impl(list, sorted_data, n, false);
 }
 
 static void *cx_arl_insert_element(
         struct cx_list_s *list,
         size_t index,