src/array_list.c

author       Mike Becker <universe@uap-core.de>
date         Sun, 23 Nov 2025 13:15:19 +0100
changeset    1508:dfc0ddd9571e
parent       1507:f4010cda9a2a
child        1509:0437871200d6
permissions  -rw-r--r--

optimize sorted insertion by using the infimum instead of the supremum

The reason is that the supremum returns the equal element with the smallest index, and we want the largest.
Therefore, we use the infimum, which already gives us the largest index when there are equal elements, and increase the index by one. The infimum is also guaranteed to exist in that case.
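For example, when inserting another 2 into the sorted array [1, 2, 2, 4], the supremum search yields index 1 (the first equal element) while the infimum search yields index 2 (the last equal element); adding one gives insertion index 3, placing the new element directly behind all existing equal elements.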

/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER.
 *
 * Copyright 2021 Mike Becker, Olaf Wintermann All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *   1. Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *   2. Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in the
 *      documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "cx/array_list.h"
#include "cx/compare.h"
#include <assert.h>
#include <string.h>
#include <errno.h>

// Default array reallocator

static void *cx_array_default_realloc(
        void *array,
        cx_attr_unused size_t old_capacity,
        size_t new_capacity,
        size_t elem_size,
        cx_attr_unused CxArrayReallocator *alloc
) {
    size_t n;
    // LCOV_EXCL_START
    if (cx_szmul(new_capacity, elem_size, &n)) {
        errno = EOVERFLOW;
        return NULL;
    } // LCOV_EXCL_STOP
    return cxReallocDefault(array, n);
}

CxArrayReallocator cx_array_default_reallocator_impl = {
        cx_array_default_realloc, NULL, NULL
};

CxArrayReallocator *cx_array_default_reallocator = &cx_array_default_reallocator_impl;

// Stack-aware array reallocator

static void *cx_array_advanced_realloc(
        void *array,
        size_t old_capacity,
        size_t new_capacity,
        size_t elem_size,
        cx_attr_unused CxArrayReallocator *alloc
) {
    // check for overflow
    size_t n;
    // LCOV_EXCL_START
    if (cx_szmul(new_capacity, elem_size, &n)) {
        errno = EOVERFLOW;
        return NULL;
    } // LCOV_EXCL_STOP

    // retrieve the pointer to the actual allocator
    const CxAllocator *al = alloc->allocator;

    // check if the array is still located on the stack
    void *newmem;
    if (array == alloc->stack_ptr) {
        newmem = cxMalloc(al, n);
        if (newmem != NULL && array != NULL) {
            memcpy(newmem, array, old_capacity*elem_size);
        }
    } else {
        newmem = cxRealloc(al, array, n);
    }
    return newmem;
}

struct cx_array_reallocator_s cx_array_reallocator(
        const struct cx_allocator_s *allocator,
        const void *stack_ptr
) {
    if (allocator == NULL) {
        allocator = cxDefaultAllocator;
    }
    return (struct cx_array_reallocator_s) {
            cx_array_advanced_realloc,
            allocator, stack_ptr,
    };
}
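
/*
 * Usage sketch (illustrative, not part of the library): a stack-aware
 * reallocator lets an array start in a stack buffer and transparently
 * moves it to heap memory on the first growth.
 *
 *     int buf[8];
 *     int *arr = buf;
 *     size_t size = 0, capacity = 8;
 *     CxArrayReallocator re = cx_array_reallocator(NULL, buf);
 *     // growing past the stack capacity: since arr still equals buf,
 *     // the reallocator uses malloc+memcpy instead of realloc
 *     cx_array_reserve((void**) &arr, &size, &capacity, 0,
 *                      sizeof(int), 100, &re);
 *     // ... use arr ...
 *     if (arr != buf) cxFreeDefault(arr);
 */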

// LOW LEVEL ARRAY LIST FUNCTIONS

/**
 * Calculates a new capacity, reserving more elements than required
 * to reduce the number of future reallocations.
 *
 * @param current_capacity the current capacity of the array
 * @param needed_capacity the required capacity of the array
 * @return the new capacity
 */
static size_t cx_array_grow_capacity(
    size_t current_capacity,
    size_t needed_capacity
) {
    if (current_capacity >= needed_capacity) {
        return current_capacity;
    }
    size_t cap = needed_capacity;
    size_t alignment;
    if (cap < 128) alignment = 16;
    else if (cap < 1024) alignment = 64;
    else if (cap < 8192) alignment = 512;
    else alignment = 1024;
    return cap - (cap % alignment) + alignment;
}
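
/*
 * Examples for the rounding performed above (illustrative):
 *     cx_array_grow_capacity(16, 12)   == 16    (capacity already sufficient)
 *     cx_array_grow_capacity(16, 17)   == 32    (rounded to a multiple of 16)
 *     cx_array_grow_capacity(0, 1000)  == 1024  (rounded to a multiple of 64)
 * Note that a needed capacity which is already a multiple of the alignment
 * still receives one extra block, e.g. cx_array_grow_capacity(0, 128) == 192.
 */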

int cx_array_reserve(
        void **array,
        void *size,
        void *capacity,
        unsigned width,
        size_t elem_size,
        size_t elem_count,
        CxArrayReallocator *reallocator
) {
    // assert pointers
    assert(array != NULL);
    assert(size != NULL);
    assert(capacity != NULL);

    // default reallocator
    if (reallocator == NULL) {
        reallocator = cx_array_default_reallocator;
    }

    // determine size and capacity
    size_t oldcap;
    size_t oldsize;
    size_t max_size;
    if (width == 0 || width == sizeof(size_t)) {
        oldcap = *(size_t*) capacity;
        oldsize = *(size_t*) size;
        max_size = SIZE_MAX;
    } else if (width == sizeof(uint16_t)) {
        oldcap = *(uint16_t*) capacity;
        oldsize = *(uint16_t*) size;
        max_size = UINT16_MAX;
    } else if (width == sizeof(uint8_t)) {
        oldcap = *(uint8_t*) capacity;
        oldsize = *(uint8_t*) size;
        max_size = UINT8_MAX;
    }
#if CX_WORDSIZE == 64
    else if (width == sizeof(uint32_t)) {
        oldcap = *(uint32_t*) capacity;
        oldsize = *(uint32_t*) size;
        max_size = UINT32_MAX;
    }
#endif
    else {
        errno = EINVAL;
        return 1;
    }

    // assert that the array is allocated when it has capacity
    assert(*array != NULL || oldcap == 0);

    // check for overflow
    if (elem_count > max_size - oldsize) {
        errno = EOVERFLOW;
        return 1;
    }

    // determine new capacity
    size_t newcap = oldsize + elem_count;

    // reallocate if possible
    if (newcap > oldcap) {
        void *newmem = reallocator->realloc(
                *array, oldcap, newcap, elem_size, reallocator
        );
        if (newmem == NULL) {
            return 1; // LCOV_EXCL_LINE
        }

        // store new pointer
        *array = newmem;

        // store new capacity
        if (width == 0 || width == sizeof(size_t)) {
            *(size_t*) capacity = newcap;
        } else if (width == sizeof(uint16_t)) {
            *(uint16_t*) capacity = (uint16_t) newcap;
        } else if (width == sizeof(uint8_t)) {
            *(uint8_t*) capacity = (uint8_t) newcap;
        }
#if CX_WORDSIZE == 64
        else if (width == sizeof(uint32_t)) {
            *(uint32_t*) capacity = (uint32_t) newcap;
        }
#endif
    }

    return 0;
}
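
/*
 * Usage sketch (illustrative): make sure there is room for ten more elements
 * before appending them manually. Note that cx_array_reserve() only updates
 * the capacity; the size has to be maintained by the caller.
 *
 *     int *arr = NULL;
 *     size_t size = 0, capacity = 0;
 *     if (cx_array_reserve((void**) &arr, &size, &capacity, 0,
 *                          sizeof(int), 10, NULL) == 0) {
 *         for (int i = 0; i < 10; i++) arr[size++] = i;
 *     }
 *     cxFreeDefault(arr);
 */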

int cx_array_copy(
        void **target,
        void *size,
        void *capacity,
        unsigned width,
        size_t index,
        const void *src,
        size_t elem_size,
        size_t elem_count,
        CxArrayReallocator *reallocator
) {
    // assert pointers
    assert(target != NULL);
    assert(size != NULL);
    assert(capacity != NULL);
    assert(src != NULL);

    // default reallocator
    if (reallocator == NULL) {
        reallocator = cx_array_default_reallocator;
    }

    // determine size and capacity
    size_t oldcap;
    size_t oldsize;
    size_t max_size;
    if (width == 0 || width == sizeof(size_t)) {
        oldcap = *(size_t*) capacity;
        oldsize = *(size_t*) size;
        max_size = SIZE_MAX;
    } else if (width == sizeof(uint16_t)) {
        oldcap = *(uint16_t*) capacity;
        oldsize = *(uint16_t*) size;
        max_size = UINT16_MAX;
    } else if (width == sizeof(uint8_t)) {
        oldcap = *(uint8_t*) capacity;
        oldsize = *(uint8_t*) size;
        max_size = UINT8_MAX;
    }
#if CX_WORDSIZE == 64
    else if (width == sizeof(uint32_t)) {
        oldcap = *(uint32_t*) capacity;
        oldsize = *(uint32_t*) size;
        max_size = UINT32_MAX;
    }
#endif
    else {
        errno = EINVAL;
        return 1;
    }

    // assert that the array is allocated when it has capacity
    assert(*target != NULL || oldcap == 0);

    // check for overflow
    if (index > max_size || elem_count > max_size - index) {
        errno = EOVERFLOW;
        return 1;
    }

    // check if resize is required
    const size_t minsize = index + elem_count;
    const size_t newsize = oldsize < minsize ? minsize : oldsize;

    // reallocate if necessary
    const size_t newcap = cx_array_grow_capacity(oldcap, newsize);
    if (newcap > oldcap) {
        // check if we need to repair the src pointer
        uintptr_t targetaddr = (uintptr_t) *target;
        uintptr_t srcaddr = (uintptr_t) src;
        bool repairsrc = targetaddr <= srcaddr
                         && srcaddr < targetaddr + oldcap * elem_size;

        // perform reallocation
        void *newmem = reallocator->realloc(
                *target, oldcap, newcap, elem_size, reallocator
        );
        if (newmem == NULL) {
            return 1; // LCOV_EXCL_LINE
        }

        // repair src pointer, if necessary
        if (repairsrc) {
            src = ((char *) newmem) + (srcaddr - targetaddr);
        }

        // store new pointer
        *target = newmem;
    }

    // determine target pointer
    char *start = *target;
    start += index * elem_size;

    // copy elements and set new size
    // note: the multiplication cannot overflow, because the array already
    //       holds at least newcap * elem_size successfully allocated bytes
    memmove(start, src, elem_count * elem_size);

    // if any of size or capacity changed, store them back
    if (newsize != oldsize || newcap != oldcap) {
        if (width == 0 || width == sizeof(size_t)) {
            *(size_t*) capacity = newcap;
            *(size_t*) size = newsize;
        } else if (width == sizeof(uint16_t)) {
            *(uint16_t*) capacity = (uint16_t) newcap;
            *(uint16_t*) size = (uint16_t) newsize;
        } else if (width == sizeof(uint8_t)) {
            *(uint8_t*) capacity = (uint8_t) newcap;
            *(uint8_t*) size = (uint8_t) newsize;
        }
#if CX_WORDSIZE == 64
        else if (width == sizeof(uint32_t)) {
            *(uint32_t*) capacity = (uint32_t) newcap;
            *(uint32_t*) size = (uint32_t) newsize;
        }
#endif
    }

    // return successfully
    return 0;
}
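
/*
 * Usage sketch (illustrative): cx_array_copy() grows the target as needed and
 * updates the size, so copying to index == size implements a simple append.
 *
 *     double *arr = NULL;
 *     size_t size = 0, capacity = 0;
 *     const double values[] = {1.0, 2.0, 3.0};
 *     if (cx_array_copy((void**) &arr, &size, &capacity, 0,
 *                       size, values, sizeof(double), 3, NULL) == 0) {
 *         // size == 3, capacity >= 3
 *     }
 *     cxFreeDefault(arr);
 */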

static int cx_array_insert_sorted_impl(
        void **target,
        size_t *size,
        size_t *capacity,
        cx_compare_func cmp_func,
        const void *sorted_data,
        size_t elem_size,
        size_t elem_count,
        CxArrayReallocator *reallocator,
        bool allow_duplicates
) {
    // assert pointers
    assert(target != NULL);
    assert(size != NULL);
    assert(capacity != NULL);
    assert(cmp_func != NULL);
    assert(sorted_data != NULL);

    // default reallocator
    if (reallocator == NULL) {
        reallocator = cx_array_default_reallocator;
    }

    // corner case
    if (elem_count == 0) return 0;

    // overflow check
    // LCOV_EXCL_START
    if (elem_count > SIZE_MAX - *size) {
        errno = EOVERFLOW;
        return 1;
    }
    // LCOV_EXCL_STOP

    // store some counts
    const size_t old_size = *size;
    const size_t old_capacity = *capacity;
    // the necessary capacity is the worst case assumption, including duplicates
    const size_t needed_capacity = cx_array_grow_capacity(old_capacity, old_size + elem_count);

    // if we need more than we have, try a reallocation
    if (needed_capacity > old_capacity) {
        void *new_mem = reallocator->realloc(
                *target, old_capacity, needed_capacity, elem_size, reallocator
        );
        if (new_mem == NULL) {
            // give up right away, there is no contract
            // that requires us to insert as much as we can
            return 1;  // LCOV_EXCL_LINE
        }
        *target = new_mem;
        *capacity = needed_capacity;
    }

    // now we have guaranteed that we can insert everything
    size_t new_size = old_size + elem_count;
    *size = new_size;

    // declare the source and destination indices/pointers
    size_t si = 0, di = 0;
    const char *src = sorted_data;
    char *dest = *target;

    // find the first insertion point
    di = cx_array_binary_search_sup(dest, old_size, elem_size, src, cmp_func);
    dest += di * elem_size;

    // move the remaining elements in the array completely to the right
    // we will call it the "buffer" for parked elements
    size_t buf_size = old_size - di;
    size_t bi = new_size - buf_size;
    char *bptr = ((char *) *target) + bi * elem_size;
    memmove(bptr, dest, buf_size * elem_size);

    // while there are both source and buffered elements left,
    // copy them interleaving
    while (si < elem_count && bi < new_size) {
        // determine how many source elements can be inserted.
        // the first element that shall not be inserted is the smallest element
        // that is strictly larger than the first buffered element
        // (located at the index of the infimum plus one).
        // the infimum is guaranteed to exist:
        // - if all src elements are larger,
        //   there is no buffer, and this loop is skipped
        // - if any src element is smaller or equal, the infimum exists
        // - when all src elements that are smaller are copied, the second part
        //   of this loop body will copy the remaining buffer (emptying it)
        // Therefore, the buffer can never contain an element that is smaller
        // than any element in the source and the infimum exists.
        size_t copy_len, bytes_copied;
        copy_len = cx_array_binary_search_inf(
            src, elem_count - si, elem_size, bptr, cmp_func
        );
        copy_len++;

        // copy the source elements
        if (copy_len > 0) {
            if (allow_duplicates) {
                // we can copy the entire chunk
                bytes_copied = copy_len * elem_size;
                memcpy(dest, src, bytes_copied);
                dest += bytes_copied;
                src += bytes_copied;
                si += copy_len;
                di += copy_len;
            } else {
                // first, check the end of the source chunk
                // for being a duplicate of the bptr
                const char *end_of_src = src + (copy_len - 1) * elem_size;
                size_t skip_len = 0;
                while (copy_len > 0 && cmp_func(bptr, end_of_src) == 0) {
                    end_of_src -= elem_size;
                    skip_len++;
                    copy_len--;
                }
                char *last = dest == *target ? NULL : dest - elem_size;
                // then iterate through the source chunk
                // and skip all duplicates with the last element in the array
                size_t more_skipped = 0;
                for (unsigned j = 0; j < copy_len; j++) {
                    if (last != NULL && cmp_func(last, src) == 0) {
                        // duplicate - skip
                        src += elem_size;
                        si++;
                        more_skipped++;
                    } else {
                        memcpy(dest, src, elem_size);
                        src += elem_size;
                        last = dest;
                        dest += elem_size;
                        si++;
                        di++;
                    }
                }
                // skip the previously identified elements as well
                src += skip_len * elem_size;
                si += skip_len;
                skip_len += more_skipped;
                // reduce the actual size by the number of skipped elements
                *size -= skip_len;
            }
        }

        // when all source elements are in place, we are done
        if (si >= elem_count) break;

        // determine how many buffered elements need to be restored
        copy_len = cx_array_binary_search_sup(
                bptr,
                new_size - bi,
                elem_size,
                src,
                cmp_func
        );

        // restore the buffered elements
        bytes_copied = copy_len * elem_size;
        memmove(dest, bptr, bytes_copied);
        dest += bytes_copied;
        bptr += bytes_copied;
        di += copy_len;
        bi += copy_len;
    }

    // still source elements left?
    if (si < elem_count) {
        if (allow_duplicates) {
            // duplicates allowed: simply copy everything
            memcpy(dest, src, elem_size * (elem_count - si));
        } else {
            if (dest != *target) {
                // skip all source elements that equal the last element
                char *last = dest - elem_size;
                while (si < elem_count) {
                    if (last != NULL && cmp_func(last, src) == 0) {
                        src += elem_size;
                        si++;
                        (*size)--;
                    } else {
                        break;
                    }
                }
            }
            // we must check the elements in the chunk one by one
            while (si < elem_count) {
                // find a chain of elements that can be copied
                size_t copy_len = 1, skip_len = 0;
                {
                    const char *left_src = src;
                    while (si + copy_len < elem_count) {
                        const char *right_src = left_src + elem_size;
                        int d = cmp_func(left_src,  right_src);
                        if (d < 0) {
                            if (skip_len > 0) {
                                // new larger element found;
                                // handle it in the next cycle
                                break;
                            }
                            left_src += elem_size;
                            copy_len++;
                        } else if (d == 0) {
                            left_src += elem_size;
                            skip_len++;
                        } else {
                            break;
                        }
                    }
                }
                size_t bytes_copied = copy_len * elem_size;
                memcpy(dest, src, bytes_copied);
                dest += bytes_copied;
                src += bytes_copied + skip_len * elem_size;
                si += copy_len + skip_len;
                di += copy_len;
                *size -= skip_len;
            }
        }
    }

    // buffered elements need to be moved when we skipped duplicates
    size_t total_skipped = new_size - *size;
    if (bi < new_size && total_skipped > 0) {
        // move the remaining buffer to the end of the array
        memmove(dest, bptr, elem_size * (new_size - bi));
    }

    return 0;
}

int cx_array_insert_sorted(
        void **target,
        size_t *size,
        size_t *capacity,
        cx_compare_func cmp_func,
        const void *sorted_data,
        size_t elem_size,
        size_t elem_count,
        CxArrayReallocator *reallocator
) {
    return cx_array_insert_sorted_impl(target, size, capacity,
        cmp_func, sorted_data, elem_size, elem_count, reallocator, true);
}

int cx_array_insert_unique(
        void **target,
        size_t *size,
        size_t *capacity,
        cx_compare_func cmp_func,
        const void *sorted_data,
        size_t elem_size,
        size_t elem_count,
        CxArrayReallocator *reallocator
) {
    return cx_array_insert_sorted_impl(target, size, capacity,
        cmp_func, sorted_data, elem_size, elem_count, reallocator, false);
}
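
/*
 * Usage sketch (illustrative): both functions expect the target array and the
 * inserted data to be sorted with respect to cmp_func.
 *
 *     static int cmp_int(const void *l, const void *r) {
 *         int a = *(const int*) l, b = *(const int*) r;
 *         return a < b ? -1 : (a > b ? 1 : 0);
 *     }
 *
 *     int *arr = NULL;
 *     size_t size = 0, capacity = 0;
 *     const int first[]  = {1, 3, 5};
 *     const int second[] = {2, 3, 4};
 *     cx_array_insert_sorted((void**) &arr, &size, &capacity, cmp_int,
 *                            first, sizeof(int), 3, NULL);
 *     // arr == {1, 3, 5}
 *     cx_array_insert_sorted((void**) &arr, &size, &capacity, cmp_int,
 *                            second, sizeof(int), 3, NULL);
 *     // arr == {1, 2, 3, 3, 4, 5} - duplicates are kept
 *     // cx_array_insert_unique() would have skipped the duplicate 3
 *     cxFreeDefault(arr);
 */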

// implementation that finds ANY index
static size_t cx_array_binary_search_inf_impl(
        const void *arr,
        size_t size,
        size_t elem_size,
        const void *elem,
        cx_compare_func cmp_func
) {
    // special case: empty array
    if (size == 0) return 0;

    // declare a variable that will contain the compare results
    int result;

    // cast the array pointer to something we can use offsets with
    const char *array = arr;

    // check the first array element
    result = cmp_func(elem, array);
    if (result < 0) {
        return size;
    } else if (result == 0) {
        return 0;
    }

    // special case: there is only one element, and it is smaller than elem
    if (size == 1) return 0;

    // check the last array element
    result = cmp_func(elem, array + elem_size * (size - 1));
    if (result >= 0) {
        return size - 1;
    }

    // the element is now guaranteed to be somewhere in the list
    // so start the binary search
    size_t left_index = 1;
    size_t right_index = size - 1;
    size_t pivot_index;

    while (left_index <= right_index) {
        pivot_index = left_index + (right_index - left_index) / 2;
        const char *arr_elem = array + pivot_index * elem_size;
        result = cmp_func(elem, arr_elem);
        if (result == 0) {
            // found it!
            return pivot_index;
        } else if (result < 0) {
            // element is smaller than pivot, continue search left
            right_index = pivot_index - 1;
        } else {
            // element is larger than pivot, continue search right
            left_index = pivot_index + 1;
        }
    }

    // report the index of the infimum (the greatest lower bound)
    return result < 0 ? (pivot_index - 1) : pivot_index;
}

size_t cx_array_binary_search_inf(
        const void *arr,
        size_t size,
        size_t elem_size,
        const void *elem,
        cx_compare_func cmp_func
) {
    size_t index = cx_array_binary_search_inf_impl(
        arr, size, elem_size, elem, cmp_func);
    // in case of equality, report the largest index
    const char *e = ((const char *) arr) + (index + 1) * elem_size;
    while (index + 1 < size && cmp_func(e, elem) == 0) {
        e += elem_size;
        index++;
    }
    return index;
}

size_t cx_array_binary_search(
        const void *arr,
        size_t size,
        size_t elem_size,
        const void *elem,
        cx_compare_func cmp_func
) {
    size_t index = cx_array_binary_search_inf(
            arr, size, elem_size, elem, cmp_func
    );
    if (index < size &&
            cmp_func(((const char *) arr) + index * elem_size, elem) == 0) {
        return index;
    } else {
        return size;
    }
}

size_t cx_array_binary_search_sup(
        const void *arr,
        size_t size,
        size_t elem_size,
        const void *elem,
        cx_compare_func cmp_func
) {
    size_t index = cx_array_binary_search_inf_impl(
            arr, size, elem_size, elem, cmp_func
    );
    const char *e = ((const char *) arr) + index * elem_size;
    if (index == size) {
        // no infimum means the first element is supremum
        return 0;
    } else if (cmp_func(e, elem) == 0) {
        // found an equal element, search the smallest index
        e -= elem_size; // e now contains the element at index-1
        while (index > 0 && cmp_func(e, elem) == 0) {
            e -= elem_size;
            index--;
        }
        return index;
    } else {
        // we already have the largest index of the infimum (by design)
        // the next element is the supremum (or there is no supremum)
        return index + 1;
    }
}
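
/*
 * Behavior of the three search functions on a sorted array with duplicates
 * (illustrative; cmp_int is an int comparator as in the sketch above):
 *
 *     const int arr[] = {1, 2, 2, 2, 5};
 *     int key = 2, missing = 4;
 *     cx_array_binary_search_inf(arr, 5, sizeof(int), &key, cmp_int);     // 3
 *     cx_array_binary_search_sup(arr, 5, sizeof(int), &key, cmp_int);     // 1
 *     cx_array_binary_search(arr, 5, sizeof(int), &key, cmp_int);         // 3
 *     cx_array_binary_search_inf(arr, 5, sizeof(int), &missing, cmp_int); // 3
 *     cx_array_binary_search_sup(arr, 5, sizeof(int), &missing, cmp_int); // 4
 *     cx_array_binary_search(arr, 5, sizeof(int), &missing, cmp_int);     // 5 (not found)
 */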

#ifndef CX_ARRAY_SWAP_SBO_SIZE
#define CX_ARRAY_SWAP_SBO_SIZE 128
#endif
const unsigned cx_array_swap_sbo_size = CX_ARRAY_SWAP_SBO_SIZE;

void cx_array_swap(
        void *arr,
        size_t elem_size,
        size_t idx1,
        size_t idx2
) {
    assert(arr != NULL);

    // short circuit
    if (idx1 == idx2) return;

    char sbo_mem[CX_ARRAY_SWAP_SBO_SIZE];
    void *tmp;

    // decide if we can use the local buffer
    if (elem_size > CX_ARRAY_SWAP_SBO_SIZE) {
        tmp = cxMallocDefault(elem_size);
        // we don't want to enforce error handling
        if (tmp == NULL) abort();
    } else {
        tmp = sbo_mem;
    }

    // calculate memory locations
    char *left = arr, *right = arr;
    left += idx1 * elem_size;
    right += idx2 * elem_size;

    // three-way swap
    memcpy(tmp, left, elem_size);
    memcpy(left, right, elem_size);
    memcpy(right, tmp, elem_size);

    // free dynamic memory, if it was needed
    if (tmp != sbo_mem) {
        cxFreeDefault(tmp);
    }
}
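
/*
 * Example (illustrative): swap the first and the last element of an array.
 *
 *     int arr[] = {10, 20, 30};
 *     cx_array_swap(arr, sizeof(int), 0, 2);   // arr == {30, 20, 10}
 */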

// HIGH LEVEL ARRAY LIST FUNCTIONS

typedef struct {
    struct cx_list_s base;
    void *data;
    size_t capacity;
    CxArrayReallocator reallocator;
} cx_array_list;

static void cx_arl_destructor(struct cx_list_s *list) {
    cx_array_list *arl = (cx_array_list *) list;

    char *ptr = arl->data;

    if (list->collection.simple_destructor) {
        for (size_t i = 0; i < list->collection.size; i++) {
            cx_invoke_simple_destructor(list, ptr);
            ptr += list->collection.elem_size;
        }
    }
    if (list->collection.advanced_destructor) {
        for (size_t i = 0; i < list->collection.size; i++) {
            cx_invoke_advanced_destructor(list, ptr);
            ptr += list->collection.elem_size;
        }
    }

    cxFree(list->collection.allocator, arl->data);
    cxFree(list->collection.allocator, list);
}

static size_t cx_arl_insert_array(
        struct cx_list_s *list,
        size_t index,
        const void *array,
        size_t n
) {
    // out of bounds and special case check
    if (index > list->collection.size || n == 0) return 0;

    // get a correctly typed pointer to the list
    cx_array_list *arl = (cx_array_list *) list;

    // guarantee enough capacity
    if (arl->capacity < list->collection.size + n) {
        const size_t new_capacity = cx_array_grow_capacity(
                arl->capacity, list->collection.size + n);
        if (cxReallocateArray(
                list->collection.allocator,
                &arl->data, new_capacity,
                list->collection.elem_size)
        ) {
            return 0; // LCOV_EXCL_LINE
        }
        arl->capacity = new_capacity;
    }

    // determine insert position
    char *arl_data = arl->data;
    char *insert_pos = arl_data + index * list->collection.elem_size;

    // do we need to move some elements?
    if (index < list->collection.size) {
        size_t elems_to_move = list->collection.size - index;
        char *target = insert_pos + n * list->collection.elem_size;
        memmove(target, insert_pos, elems_to_move * list->collection.elem_size);
    }

    // place the new elements, if any
    if (array != NULL) {
        memcpy(insert_pos, array, n * list->collection.elem_size);
    }
    list->collection.size += n;

    return n;
}

static size_t cx_arl_insert_sorted(
        struct cx_list_s *list,
        const void *sorted_data,
        size_t n
) {
    // get a correctly typed pointer to the list
    cx_array_list *arl = (cx_array_list *) list;

    if (cx_array_insert_sorted(
            &arl->data,
            &list->collection.size,
            &arl->capacity,
            list->collection.cmpfunc,
            sorted_data,
            list->collection.elem_size,
            n,
            &arl->reallocator
    )) {
        // array list implementation is "all or nothing"
        return 0;  // LCOV_EXCL_LINE
    } else {
        return n;
    }
}

static size_t cx_arl_insert_unique(
        struct cx_list_s *list,
        const void *sorted_data,
        size_t n
) {
    // get a correctly typed pointer to the list
    cx_array_list *arl = (cx_array_list *) list;

    if (cx_array_insert_unique(
            &arl->data,
            &list->collection.size,
            &arl->capacity,
            list->collection.cmpfunc,
            sorted_data,
            list->collection.elem_size,
            n,
            &arl->reallocator
    )) {
        // array list implementation is "all or nothing"
        return 0;  // LCOV_EXCL_LINE
    } else {
        return n;
    }
}

static void *cx_arl_insert_element(
        struct cx_list_s *list,
        size_t index,
        const void *element
) {
    if (cx_arl_insert_array(list, index, element, 1) == 1) {
        return ((char*)((cx_array_list *) list)->data) + index * list->collection.elem_size;
    } else {
        return NULL;
    }
}

static int cx_arl_insert_iter(
        struct cx_iterator_s *iter,
        const void *elem,
        int prepend
) {
    struct cx_list_s *list = iter->src_handle;
    if (iter->index < list->collection.size) {
        if (cx_arl_insert_element(list,
                iter->index + 1 - prepend, elem) == NULL) {
            return 1; // LCOV_EXCL_LINE
        }
        iter->elem_count++;
        if (prepend != 0) {
            iter->index++;
            iter->elem_handle = ((char *) iter->elem_handle) + list->collection.elem_size;
        }
        return 0;
    } else {
        if (cx_arl_insert_element(list, list->collection.size, elem) == NULL) {
            return 1;  // LCOV_EXCL_LINE
        }
        iter->elem_count++;
        iter->index = list->collection.size;
        return 0;
    }
}

static size_t cx_arl_remove(
        struct cx_list_s *list,
        size_t index,
        size_t num,
        void *targetbuf
) {
    cx_array_list *arl = (cx_array_list *) list;

    // out-of-bounds check
    size_t remove;
    if (index >= list->collection.size) {
        remove = 0;
    } else if (index + num > list->collection.size) {
        remove = list->collection.size - index;
    } else {
        remove = num;
    }

    // easy exit
    if (remove == 0) return 0;

    // destroy or copy contents
    if (targetbuf == NULL) {
        for (size_t idx = index; idx < index + remove; idx++) {
            cx_invoke_destructor(
                    list,
                    ((char *) arl->data) + idx * list->collection.elem_size
            );
        }
    } else {
        memcpy(
                targetbuf,
                ((char *) arl->data) + index * list->collection.elem_size,
                remove * list->collection.elem_size
        );
    }

    // short-circuit removal of last elements
    if (index + remove == list->collection.size) {
        list->collection.size -= remove;
        return remove;
    }

    // just move the elements to the left
    cx_array_copy(
            &arl->data,
            &list->collection.size,
            &arl->capacity,
            0,
            index,
            ((char *) arl->data) + (index + remove) * list->collection.elem_size,
            list->collection.elem_size,
            list->collection.size - index - remove,
            &arl->reallocator
    );

    // decrease the size
    list->collection.size -= remove;

    return remove;
}

static void cx_arl_clear(struct cx_list_s *list) {
    if (list->collection.size == 0) return;

    cx_array_list *arl = (cx_array_list *) list;
    char *ptr = arl->data;

    if (list->collection.simple_destructor) {
        for (size_t i = 0; i < list->collection.size; i++) {
            cx_invoke_simple_destructor(list, ptr);
            ptr += list->collection.elem_size;
        }
    }
    if (list->collection.advanced_destructor) {
        for (size_t i = 0; i < list->collection.size; i++) {
            cx_invoke_advanced_destructor(list, ptr);
            ptr += list->collection.elem_size;
        }
    }

    memset(arl->data, 0, list->collection.size * list->collection.elem_size);
    list->collection.size = 0;
}

static int cx_arl_swap(
        struct cx_list_s *list,
        size_t i,
        size_t j
) {
    if (i >= list->collection.size || j >= list->collection.size) return 1;
    cx_array_list *arl = (cx_array_list *) list;
    cx_array_swap(arl->data, list->collection.elem_size, i, j);
    return 0;
}

static void *cx_arl_at(
        const struct cx_list_s *list,
        size_t index
) {
    if (index < list->collection.size) {
        const cx_array_list *arl = (const cx_array_list *) list;
        char *space = arl->data;
        return space + index * list->collection.elem_size;
    } else {
        return NULL;
    }
}

static size_t cx_arl_find_remove(
        struct cx_list_s *list,
        const void *elem,
        bool remove
) {
    assert(list != NULL);
    assert(list->collection.cmpfunc != NULL);
    if (list->collection.size == 0) return 0;
    char *cur = ((const cx_array_list *) list)->data;

    // optimize with binary search, when sorted
    if (list->collection.sorted) {
        size_t i = cx_array_binary_search(
            cur,
            list->collection.size,
            list->collection.elem_size,
            elem,
            list->collection.cmpfunc
        );
        if (remove && i < list->collection.size) {
            cx_arl_remove(list, i, 1, NULL);
        }
        return i;
    }

    // fallback: linear search
    for (size_t i = 0; i < list->collection.size; i++) {
        if (0 == list->collection.cmpfunc(elem, cur)) {
            if (remove) {
                cx_arl_remove(list, i, 1, NULL);
            }
            return i;
        }
        cur += list->collection.elem_size;
    }
    return list->collection.size;
}

static void cx_arl_sort(struct cx_list_s *list) {
    assert(list->collection.cmpfunc != NULL);
    qsort(((cx_array_list *) list)->data,
          list->collection.size,
          list->collection.elem_size,
          list->collection.cmpfunc
    );
}

static int cx_arl_compare(
        const struct cx_list_s *list,
        const struct cx_list_s *other
) {
    assert(list->collection.cmpfunc != NULL);
    if (list->collection.size == other->collection.size) {
        const char *left = ((const cx_array_list *) list)->data;
        const char *right = ((const cx_array_list *) other)->data;
        for (size_t i = 0; i < list->collection.size; i++) {
            int d = list->collection.cmpfunc(left, right);
            if (d != 0) {
                return d;
            }
            left += list->collection.elem_size;
            right += other->collection.elem_size;
        }
        return 0;
    } else {
        return list->collection.size < other->collection.size ? -1 : 1;
    }
}

static void cx_arl_reverse(struct cx_list_s *list) {
    if (list->collection.size < 2) return;
    void *data = ((const cx_array_list *) list)->data;
    size_t half = list->collection.size / 2;
    for (size_t i = 0; i < half; i++) {
        cx_array_swap(data, list->collection.elem_size, i, list->collection.size - 1 - i);
    }
}

static bool cx_arl_iter_valid(const void *it) {
    const struct cx_iterator_s *iter = it;
    const struct cx_list_s *list = iter->src_handle;
    return iter->index < list->collection.size;
}

static void *cx_arl_iter_current(const void *it) {
    const struct cx_iterator_s *iter = it;
    return iter->elem_handle;
}

static void cx_arl_iter_next(void *it) {
    struct cx_iterator_s *iter = it;
    if (iter->base.remove) {
        iter->base.remove = false;
        cx_arl_remove(iter->src_handle, iter->index, 1, NULL);
        iter->elem_count--;
    } else {
        iter->index++;
        iter->elem_handle =
                ((char *) iter->elem_handle)
                + ((const struct cx_list_s *) iter->src_handle)->collection.elem_size;
    }
}

static void cx_arl_iter_prev(void *it) {
    struct cx_iterator_s *iter = it;
    if (iter->base.remove) {
        iter->base.remove = false;
        cx_arl_remove(iter->src_handle, iter->index, 1, NULL);
        iter->elem_count--;
    }
    iter->index--;
    cx_array_list *list = iter->src_handle;
    if (iter->index < list->base.collection.size) {
        iter->elem_handle = ((char *) list->data)
                            + iter->index * list->base.collection.elem_size;
    }
}

static int cx_arl_change_capacity(
        struct cx_list_s *list,
        size_t new_capacity
) {
    cx_array_list *arl = (cx_array_list *)list;
    return cxReallocateArray(list->collection.allocator,
        &arl->data, new_capacity, list->collection.elem_size);
}

static struct cx_iterator_s cx_arl_iterator(
        const struct cx_list_s *list,
        size_t index,
        bool backwards
) {
    struct cx_iterator_s iter;

    iter.index = index;
    iter.src_handle = (void*)list;
    iter.elem_handle = cx_arl_at(list, index);
    iter.elem_size = list->collection.elem_size;
    iter.elem_count = list->collection.size;
    iter.base.valid = cx_arl_iter_valid;
    iter.base.current = cx_arl_iter_current;
    iter.base.next = backwards ? cx_arl_iter_prev : cx_arl_iter_next;
    iter.base.remove = false;
    iter.base.allow_remove = true;

    return iter;
}

static cx_list_class cx_array_list_class = {
        cx_arl_destructor,
        cx_arl_insert_element,
        cx_arl_insert_array,
        cx_arl_insert_sorted,
        cx_arl_insert_unique,
        cx_arl_insert_iter,
        cx_arl_remove,
        cx_arl_clear,
        cx_arl_swap,
        cx_arl_at,
        cx_arl_find_remove,
        cx_arl_sort,
        cx_arl_compare,
        cx_arl_reverse,
        cx_arl_change_capacity,
        cx_arl_iterator,
};

CxList *cxArrayListCreate(
        const CxAllocator *allocator,
        cx_compare_func comparator,
        size_t elem_size,
        size_t initial_capacity
) {
    if (allocator == NULL) {
        allocator = cxDefaultAllocator;
    }

    cx_array_list *list = cxCalloc(allocator, 1, sizeof(cx_array_list));
    if (list == NULL) return NULL;
    cx_list_init((CxList*)list, &cx_array_list_class,
        allocator, comparator, elem_size);
    list->capacity = initial_capacity;

    // allocate the array after the real elem_size is known
    list->data = cxCalloc(allocator, initial_capacity,
        list->base.collection.elem_size);
    if (list->data == NULL) { // LCOV_EXCL_START
        cxFree(allocator, list);
        return NULL;
    } // LCOV_EXCL_STOP

    // configure the reallocator
    list->reallocator = cx_array_reallocator(allocator, NULL);

    return (CxList *) list;
}
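
/*
 * Usage sketch (illustrative; cx_cmp_int, cxListAdd() and cxListFree() are
 * assumed from the generic UCX headers cx/compare.h and cx/list.h and are
 * not defined in this file):
 *
 *     CxList *list = cxArrayListCreate(NULL, cx_cmp_int, sizeof(int), 16);
 *     int x = 42;
 *     cxListAdd(list, &x);   // the element value is copied into the array
 *     cxListFree(list);
 */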
