    else if (cap < 8192) alignment = 512;
    else alignment = 1024;
    return cap - (cap % alignment) + alignment;
}

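// Example of the rounding above: for cap == 1000 the "< 8192" bucket applies,
// so the result is the next multiple of 512 that is strictly larger:
// 1000 - (1000 % 512) + 512 = 1024. A cap that is already a multiple of the
// alignment still grows by one full block, e.g. 1024 -> 1536.
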
// Default array reallocator

static void *cx_array_default_realloc(
        void *array,
        cx_attr_unused size_t old_capacity,
        size_t new_capacity,
        size_t elem_size,
        cx_attr_unused CxArrayReallocator *alloc
) {
    size_t n;
    // LCOV_EXCL_START
    if (cx_szmul(new_capacity, elem_size, &n)) {
        errno = EOVERFLOW;
        return NULL;
    } // LCOV_EXCL_STOP
    return cxReallocDefault(array, n);
}

CxArrayReallocator cx_array_default_reallocator_impl = {
        cx_array_default_realloc, NULL, NULL
};

CxArrayReallocator *cx_array_default_reallocator = &cx_array_default_reallocator_impl;

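// Note: the functions below that take a CxArrayReallocator* treat a NULL
// argument as a request for cx_array_default_reallocator, so callers that are
// happy with default-allocator growth do not need to construct their own.
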
// Stack-aware array reallocator

static void *cx_array_advanced_realloc(
        void *array,
        size_t old_capacity,
        size_t new_capacity,
        size_t elem_size,
        cx_attr_unused CxArrayReallocator *alloc
) {
    // check for overflow
    size_t n;
    // LCOV_EXCL_START
    if (cx_szmul(new_capacity, elem_size, &n)) {
        errno = EOVERFLOW;
        return NULL;
    } // LCOV_EXCL_STOP

    // retrieve the pointer to the actual allocator
    const CxAllocator *al = alloc->allocator;

    // check if the array is still located on the stack
    void *newmem;
    if (array == alloc->stack_ptr) {
        newmem = cxMalloc(al, n);
        if (newmem != NULL && array != NULL) {
            memcpy(newmem, array, old_capacity*elem_size);
        }
    } else {
        newmem = cxRealloc(al, array, n);
    }
    return newmem;
}

struct cx_array_reallocator_s cx_array_reallocator(
        const struct cx_allocator_s *allocator,
        const void *stack_ptr
) {
    if (allocator == NULL) {
        allocator = cxDefaultAllocator;
    }
    return (struct cx_array_reallocator_s) {
            cx_array_advanced_realloc,
            allocator, stack_ptr,
    };
}

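// Usage sketch (illustrative): wrap a buffer that initially lives on the
// stack; the first time the realloc callback has to grow it, the contents are
// copied into memory obtained from the (default) allocator instead of trying
// to reallocate the stack pointer.
//
//     int smallbuf[8];
//     CxArrayReallocator stack_realloc = cx_array_reallocator(NULL, smallbuf);
//     // pass &stack_realloc wherever a CxArrayReallocator* is accepted
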
int cx_array_init_(const CxAllocator *allocator, CxArray *array, size_t elem_size, size_t capacity) {
    memset(array, 0, sizeof(CxArray));
    return cx_array_reserve_(allocator, array, elem_size, capacity);
}

void cx_array_init_fixed_(CxArray *array, const void *data, size_t capacity, size_t size) {
    array->data = (void*) data;
    array->capacity = capacity;
    array->size = size;
}

int cx_array_reserve_(const CxAllocator *allocator, CxArray *array, size_t elem_size, size_t capacity) {
    if (cxReallocateArray(allocator, &array->data, capacity, elem_size)) {
        return -1; // LCOV_EXCL_LINE
    }
    array->capacity = capacity;
    if (array->size > capacity) {
        array->size = capacity;
    }
    return 0;
}

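// Usage sketch (illustrative): set up an int array with an initial capacity
// of 32 elements using the default allocator.
//
//     CxArray numbers;
//     if (cx_array_init_(cxDefaultAllocator, &numbers, sizeof(int), 32)) {
//         // allocation failed
//     }
//     // ... use the array ...
//     cx_array_free_(cxDefaultAllocator, &numbers);
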
int cx_array_move_to_new_(const CxAllocator *allocator, CxArray *array, size_t elem_size, size_t capacity) {
    CxArray heap_array;
        }
        array->capacity = new_capacity;
    }

    // determine insert position
    char *arl_data = array->data;
    char *insert_pos = arl_data + index * elem_size;

    // do we need to move some elements?
    if (index < array->size) {
        size_t elems_to_move = array->size - index;
        char *target = insert_pos + n * elem_size;
        memmove(target, insert_pos, elems_to_move * elem_size);
    }

    // place the new elements, if any
    // otherwise, this function just reserved the memory (a.k.a emplace)
    if (other != NULL) {
        memcpy(insert_pos, other, n * elem_size);
    }
    array->size += n;

    return 0;
}

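// Example (illustrative): calling the function above with other == NULL and
// n == 1 shifts the tail of the array one slot to the right and leaves the
// element at the requested index uninitialized, so the caller can construct
// it in place (emplace) instead of copying it.
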
CxIterator cx_array_iterator_(CxArray *array, size_t elem_size) {
    return cxIterator(array->data, elem_size, array->size);
}

CxIterator cx_array_iterator_ptr_(CxArray *array) {
    return cxIteratorPtr(array->data, array->size);
}

void cx_array_free_(const CxAllocator *allocator, CxArray *array) {
    cxFree(allocator, array->data);
    array->data = NULL;
    array->size = array->capacity = 0;
}

int cx_array_copy(
        void **target,
        void *size,
        void *capacity,
        unsigned width,
        size_t index,
        const void *src,
        size_t elem_size,
        size_t elem_count,
        CxArrayReallocator *reallocator
) {
    // assert pointers
    assert(target != NULL);
    assert(size != NULL);
    assert(capacity != NULL);
    assert(src != NULL);

    // default reallocator
    if (reallocator == NULL) {
        reallocator = cx_array_default_reallocator;
    }

    // determine size and capacity
    size_t oldcap;
    size_t oldsize;
    size_t max_size;
    if (width == 0 || width == sizeof(size_t)) {
        oldcap = *(size_t*) capacity;
        oldsize = *(size_t*) size;
        max_size = SIZE_MAX;
    } else if (width == sizeof(uint16_t)) {
        oldcap = *(uint16_t*) capacity;
        oldsize = *(uint16_t*) size;
        max_size = UINT16_MAX;
    } else if (width == sizeof(uint8_t)) {
        oldcap = *(uint8_t*) capacity;
        oldsize = *(uint8_t*) size;
        max_size = UINT8_MAX;
    }
#if CX_WORDSIZE == 64
    else if (width == sizeof(uint32_t)) {
        oldcap = *(uint32_t*) capacity;
        oldsize = *(uint32_t*) size;
        max_size = UINT32_MAX;
    }
#endif
    else {
        errno = EINVAL;
        return 1;
    }

    // assert that the array is allocated when it has capacity
    assert(*target != NULL || oldcap == 0);

    // check for overflow
    if (index > max_size || elem_count > max_size - index) {
        errno = EOVERFLOW;
        return 1;
    }

    // check if resize is required
    const size_t minsize = index + elem_count;
    const size_t newsize = oldsize < minsize ? minsize : oldsize;

    // reallocate if necessary
    const size_t newcap = cx_array_grow_capacity(oldcap, newsize);
    if (newcap > oldcap) {
        // check if we need to repair the src pointer
        uintptr_t targetaddr = (uintptr_t) *target;
        uintptr_t srcaddr = (uintptr_t) src;
        bool repairsrc = targetaddr <= srcaddr
                && srcaddr < targetaddr + oldcap * elem_size;

        // perform reallocation
        void *newmem = reallocator->realloc(
                *target, oldcap, newcap, elem_size, reallocator
        );
        if (newmem == NULL) {
            return 1; // LCOV_EXCL_LINE
        }

        // repair src pointer, if necessary
        if (repairsrc) {
            src = ((char *) newmem) + (srcaddr - targetaddr);
        }

        // store new pointer
        *target = newmem;
    }

    // determine target pointer
    char *start = *target;
    start += index * elem_size;

    // copy elements and set new size
    // note: no overflow check here, b/c we cannot get here w/o allocation
    memmove(start, src, elem_count * elem_size);

    // if any of size or capacity changed, store them back
    if (newsize != oldsize || newcap != oldcap) {
        if (width == 0 || width == sizeof(size_t)) {
            *(size_t*) capacity = newcap;
            *(size_t*) size = newsize;
        } else if (width == sizeof(uint16_t)) {
            *(uint16_t*) capacity = (uint16_t) newcap;
            *(uint16_t*) size = (uint16_t) newsize;
        } else if (width == sizeof(uint8_t)) {
            *(uint8_t*) capacity = (uint8_t) newcap;
            *(uint8_t*) size = (uint8_t) newsize;
        }
#if CX_WORDSIZE == 64
        else if (width == sizeof(uint32_t)) {
            *(uint32_t*) capacity = (uint32_t) newcap;
            *(uint32_t*) size = (uint32_t) newsize;
        }
#endif
    }

    // return successfully
    return 0;
}

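// Usage sketch (illustrative): append one element to a size_t-indexed array,
// letting cx_array_copy grow the storage as needed.
//
//     int *values = NULL;
//     size_t size = 0, capacity = 0;
//     int x = 42;
//     // width 0 selects the native size_t width for size/capacity,
//     // a NULL reallocator selects cx_array_default_reallocator
//     if (cx_array_copy((void**) &values, &size, &capacity, 0,
//                       size, &x, sizeof(x), 1, NULL)) {
//         // allocation failed or overflow (see errno)
//     }
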
static int cx_array_insert_sorted_impl(
        void **target,
        size_t *size,
        size_t *capacity,
        cx_compare_func cmp_func,
        const void *sorted_data,
        size_t elem_size,
        size_t elem_count,
        CxArrayReallocator *reallocator,
        bool allow_duplicates
) {
    // assert pointers
    assert(target != NULL);
    assert(size != NULL);
    assert(capacity != NULL);
    assert(cmp_func != NULL);
    assert(sorted_data != NULL);

    // default reallocator
    if (reallocator == NULL) {
        reallocator = cx_array_default_reallocator;
    }

    // corner case
    if (elem_count == 0) return 0;

    // overflow check
    // LCOV_EXCL_START
    if (elem_count > SIZE_MAX - *size) {
        errno = EOVERFLOW;
        return 1;
    }
    // LCOV_EXCL_STOP

    // store some counts
    const size_t old_size = *size;
    const size_t old_capacity = *capacity;
    // the necessary capacity is the worst case assumption, including duplicates
    const size_t needed_capacity = cx_array_grow_capacity(old_capacity, old_size + elem_count);

    // if we need more than we have, try a reallocation
    if (needed_capacity > old_capacity) {
        void *new_mem = reallocator->realloc(
                *target, old_capacity, needed_capacity, elem_size, reallocator
        );
        if (new_mem == NULL) {
            // give it up right away, there is no contract
            // that requires us to insert as much as we can
            return 1; // LCOV_EXCL_LINE
        }
        *target = new_mem;
        *capacity = needed_capacity;
    }

    // now we have guaranteed that we can insert everything
    size_t new_size = old_size + elem_count;
    *size = new_size;

    // declare the source and destination indices/pointers
    size_t si = 0, di = 0;
    const char *src = sorted_data;
    char *dest = *target;

    // find the first insertion point
    di = cx_array_binary_search_sup(dest, old_size, elem_size, src, cmp_func);
    dest += di * elem_size;

    // move the remaining elements in the array completely to the right
    // we will call it the "buffer" for parked elements
    size_t buf_size = old_size - di;
    size_t bi = new_size - buf_size;
    char *bptr = ((char *) *target) + bi * elem_size;
    memmove(bptr, dest, buf_size * elem_size);

    // while there are both source and buffered elements left,
    // copy them interleaving
    while (si < elem_count && bi < new_size) {
        // determine how many source elements can be inserted.
        // the first element that shall not be inserted is the smallest element
        // that is strictly larger than the first buffered element
        // (located at the index of the infimum plus one).
        // the infimum is guaranteed to exist:
                memcpy(dest, src, bytes_copied);
                dest += bytes_copied;
                src += bytes_copied + skip_len * elem_size;
                si += copy_len + skip_len;
                di += copy_len;
                *size -= skip_len;
            }
        }
    }

    // buffered elements need to be moved when we skipped duplicates
    size_t total_skipped = new_size - *size;
    if (bi < new_size && total_skipped > 0) {
        // move the remaining buffer to the end of the array
        memmove(dest, bptr, elem_size * (new_size - bi));
    }

    return 0;
}

int cx_array_insert_sorted(
        void **target,
        size_t *size,
        size_t *capacity,
        cx_compare_func cmp_func,
        const void *sorted_data,
        size_t elem_size,
        size_t elem_count,
        CxArrayReallocator *reallocator
) {
    return cx_array_insert_sorted_impl(target, size, capacity,
            cmp_func, sorted_data, elem_size, elem_count, reallocator, true);
}

int cx_array_insert_unique(
        void **target,
        size_t *size,
        size_t *capacity,
        cx_compare_func cmp_func,
        const void *sorted_data,
        size_t elem_size,
        size_t elem_count,
        CxArrayReallocator *reallocator
) {
    return cx_array_insert_sorted_impl(target, size, capacity,
            cmp_func, sorted_data, elem_size, elem_count, reallocator, false);
}

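// Usage sketch (illustrative): insert a pre-sorted batch into a sorted int
// array, assuming a comparator with the usual cx_compare_func contract
// (negative/zero/positive, like strcmp).
//
//     static int cmp_int(const void *l, const void *r) {
//         int a = *(const int*) l, b = *(const int*) r;
//         return (a > b) - (a < b);
//     }
//
//     int *sorted = NULL;
//     size_t size = 0, capacity = 0;
//     int batch[] = {1, 3, 5};  // must already be sorted
//     if (cx_array_insert_sorted((void**) &sorted, &size, &capacity,
//             cmp_int, batch, sizeof(int), 3, NULL)) {
//         // allocation failed or overflow
//     }
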
// implementation that finds ANY index
static size_t cx_array_binary_search_inf_impl(
        const void *arr,
        size_t size,
) {
    cx_array_list *arl = (cx_array_list *) list;
    CxArray wrap = {
            arl->data, list->collection.size, arl->capacity
    };
    if (cx_array_insert_array_(list->collection.allocator, &wrap,
            list->collection.elem_size, index, array, n)) {
        return 0;
    }
    arl->data = wrap.data;
    arl->capacity = wrap.capacity;
    list->collection.size = wrap.size;
    return n;
}

static size_t cx_arl_insert_sorted(
        struct cx_list_s *list,
        const void *sorted_data,
        size_t n
) {
    // get a correctly typed pointer to the list
    cx_array_list *arl = (cx_array_list *) list;

    if (cx_array_insert_sorted(
            &arl->data,
            &list->collection.size,
            &arl->capacity,
            list->collection.cmpfunc,
            sorted_data,
            list->collection.elem_size,
            n,
            &arl->reallocator
    )) {
        // array list implementation is "all or nothing"
        return 0; // LCOV_EXCL_LINE
    } else {
        return n;
    }
}

static size_t cx_arl_insert_unique(
        struct cx_list_s *list,
        const void *sorted_data,
        size_t n
) {
    // get a correctly typed pointer to the list
    cx_array_list *arl = (cx_array_list *) list;

    if (cx_array_insert_unique(
            &arl->data,
            &list->collection.size,
            &arl->capacity,
            list->collection.cmpfunc,
            sorted_data,
            list->collection.elem_size,
            n,
            &arl->reallocator
    )) {
        // array list implementation is "all or nothing"
        return 0; // LCOV_EXCL_LINE
    } else {
        return n;
    }
}

static void *cx_arl_insert_element(
        struct cx_list_s *list,
        size_t index,