43 if (pool->capacity > newcap |
43 if (pool->capacity > newcap |
44 || cx_szmul(newcap, sizeof(void*), &newmsize)) { |
44 || cx_szmul(newcap, sizeof(void*), &newmsize)) { |
45 errno = EOVERFLOW; |
45 errno = EOVERFLOW; |
46 return 1; |
46 return 1; |
47 } // LCOV_EXCL_STOP |
47 } // LCOV_EXCL_STOP |
48 void **newdata = cxReallocDefault(pool->data, newmsize); |
48 void **newdata = cxRealloc(pool->base_allocator, pool->data, newmsize); |
49 if (newdata == NULL) return 1; |
49 if (newdata == NULL) return 1; |
50 pool->data = newdata; |
50 pool->data = newdata; |
51 pool->capacity = newcap; |
51 pool->capacity = newcap; |
52 return 0; |
52 return 0; |
53 } |
53 } |
64 if (pool->registered_capacity > newcap || cx_szmul(newcap, |
64 if (pool->registered_capacity > newcap || cx_szmul(newcap, |
65 sizeof(struct cx_mempool_foreign_memory_s), &newmsize)) { |
65 sizeof(struct cx_mempool_foreign_memory_s), &newmsize)) { |
66 errno = EOVERFLOW; |
66 errno = EOVERFLOW; |
67 return 1; |
67 return 1; |
68 } // LCOV_EXCL_STOP |
68 } // LCOV_EXCL_STOP |
69 void *newdata = cxReallocDefault(pool->registered, newmsize); |
69 void *newdata = cxRealloc(pool->base_allocator, pool->registered, newmsize); |
70 if (newdata == NULL) return 1; |
70 if (newdata == NULL) return 1; |
71 pool->registered = newdata; |
71 pool->registered = newdata; |
72 pool->registered_capacity = newcap; |
72 pool->registered_capacity = newcap; |
73 return 0; |
73 return 0; |
74 } |
74 } |
82 if (cx_mempool_ensure_capacity(pool, pool->size + 1)) { |
82 if (cx_mempool_ensure_capacity(pool, pool->size + 1)) { |
83 return NULL; // LCOV_EXCL_LINE |
83 return NULL; // LCOV_EXCL_LINE |
84 } |
84 } |
85 |
85 |
86 struct cx_mempool_memory_s *mem = |
86 struct cx_mempool_memory_s *mem = |
87 cxMallocDefault(sizeof(struct cx_mempool_memory_s) + n); |
87 cxMalloc(pool->base_allocator, sizeof(struct cx_mempool_memory_s) + n); |
88 if (mem == NULL) return NULL; |
88 if (mem == NULL) return NULL; |
89 mem->destructor = NULL; |
89 mem->destructor = NULL; |
90 pool->data[pool->size] = mem; |
90 pool->data[pool->size] = mem; |
91 pool->size++; |
91 pool->size++; |
92 |
92 |
128 pool->destr(mem->c); |
128 pool->destr(mem->c); |
129 } |
129 } |
130 if (pool->destr2) { |
130 if (pool->destr2) { |
131 pool->destr2(pool->destr2_data, mem->c); |
131 pool->destr2(pool->destr2_data, mem->c); |
132 } |
132 } |
133 cxFreeDefault(mem); |
133 cxFree(pool->base_allocator, mem); |
134 size_t last_index = pool->size - 1; |
134 size_t last_index = pool->size - 1; |
135 if (i != last_index) { |
135 if (i != last_index) { |
136 pool->data[i] = pool->data[last_index]; |
136 pool->data[i] = pool->data[last_index]; |
137 pool->data[last_index] = NULL; |
137 pool->data[last_index] = NULL; |
138 } |
138 } |
159 |
159 |
160 const unsigned overhead = sizeof(struct cx_mempool_memory_s); |
160 const unsigned overhead = sizeof(struct cx_mempool_memory_s); |
161 struct cx_mempool_memory_s *mem = |
161 struct cx_mempool_memory_s *mem = |
162 (void *) (((char *) ptr) - overhead); |
162 (void *) (((char *) ptr) - overhead); |
163 struct cx_mempool_memory_s *newm = |
163 struct cx_mempool_memory_s *newm = |
164 cxReallocDefault(mem, n + overhead); |
164 cxRealloc(pool->base_allocator, mem, n + overhead); |
165 |
165 |
166 if (newm == NULL) return NULL; |
166 if (newm == NULL) return NULL; |
167 if (mem != newm) { |
167 if (mem != newm) { |
168 for (size_t i = 0; i < pool->size; i++) { |
168 for (size_t i = 0; i < pool->size; i++) { |
169 if (pool->data[i] == mem) { |
169 if (pool->data[i] == mem) { |
212 if (cx_mempool_ensure_capacity(pool, pool->size + 1)) { |
212 if (cx_mempool_ensure_capacity(pool, pool->size + 1)) { |
213 return NULL; // LCOV_EXCL_LINE |
213 return NULL; // LCOV_EXCL_LINE |
214 } |
214 } |
215 |
215 |
216 struct cx_mempool_memory2_s *mem = |
216 struct cx_mempool_memory2_s *mem = |
217 cxMallocDefault(sizeof(struct cx_mempool_memory2_s) + n); |
217 cxMalloc(pool->base_allocator, sizeof(struct cx_mempool_memory2_s) + n); |
218 if (mem == NULL) return NULL; |
218 if (mem == NULL) return NULL; |
219 mem->destructor = NULL; |
219 mem->destructor = NULL; |
220 mem->data = NULL; |
220 mem->data = NULL; |
221 pool->data[pool->size] = mem; |
221 pool->data[pool->size] = mem; |
222 pool->size++; |
222 pool->size++; |
259 pool->destr(mem->c); |
259 pool->destr(mem->c); |
260 } |
260 } |
261 if (pool->destr2) { |
261 if (pool->destr2) { |
262 pool->destr2(pool->destr2_data, mem->c); |
262 pool->destr2(pool->destr2_data, mem->c); |
263 } |
263 } |
264 cxFreeDefault(mem); |
264 cxFree(pool->base_allocator, mem); |
265 size_t last_index = pool->size - 1; |
265 size_t last_index = pool->size - 1; |
266 if (i != last_index) { |
266 if (i != last_index) { |
267 pool->data[i] = pool->data[last_index]; |
267 pool->data[i] = pool->data[last_index]; |
268 pool->data[last_index] = NULL; |
268 pool->data[last_index] = NULL; |
269 } |
269 } |
290 |
290 |
291 const unsigned overhead = sizeof(struct cx_mempool_memory2_s); |
291 const unsigned overhead = sizeof(struct cx_mempool_memory2_s); |
292 struct cx_mempool_memory2_s *mem = |
292 struct cx_mempool_memory2_s *mem = |
293 (void *) (((char *) ptr) - overhead); |
293 (void *) (((char *) ptr) - overhead); |
294 struct cx_mempool_memory2_s *newm = |
294 struct cx_mempool_memory2_s *newm = |
295 cxReallocDefault(mem, n + overhead); |
295 cxRealloc(pool->base_allocator, mem, n + overhead); |
296 |
296 |
297 if (newm == NULL) return NULL; |
297 if (newm == NULL) return NULL; |
298 if (mem != newm) { |
298 if (mem != newm) { |
299 for (size_t i = 0; i < pool->size; i++) { |
299 for (size_t i = 0; i < pool->size; i++) { |
300 if (pool->data[i] == mem) { |
300 if (pool->data[i] == mem) { |
343 |
343 |
344 if (cx_mempool_ensure_capacity(pool, pool->size + 1)) { |
344 if (cx_mempool_ensure_capacity(pool, pool->size + 1)) { |
345 return NULL; // LCOV_EXCL_LINE |
345 return NULL; // LCOV_EXCL_LINE |
346 } |
346 } |
347 |
347 |
348 void *mem = cxMallocDefault(n); |
348 void *mem = cxMalloc(pool->base_allocator, n); |
349 if (mem == NULL) return NULL; |
349 if (mem == NULL) return NULL; |
350 pool->data[pool->size] = mem; |
350 pool->data[pool->size] = mem; |
351 pool->size++; |
351 pool->size++; |
352 |
352 |
353 return mem; |
353 return mem; |
382 pool->destr(ptr); |
382 pool->destr(ptr); |
383 } |
383 } |
384 if (pool->destr2) { |
384 if (pool->destr2) { |
385 pool->destr2(pool->destr2_data, ptr); |
385 pool->destr2(pool->destr2_data, ptr); |
386 } |
386 } |
387 cxFreeDefault(ptr); |
387 cxFree(pool->base_allocator, ptr); |
388 size_t last_index = pool->size - 1; |
388 size_t last_index = pool->size - 1; |
389 if (i != last_index) { |
389 if (i != last_index) { |
390 pool->data[i] = pool->data[last_index]; |
390 pool->data[i] = pool->data[last_index]; |
391 pool->data[last_index] = NULL; |
391 pool->data[last_index] = NULL; |
392 } |
392 } |
408 if (n == 0) { |
408 if (n == 0) { |
409 cx_mempool_free_pure(p, ptr); |
409 cx_mempool_free_pure(p, ptr); |
410 return NULL; |
410 return NULL; |
411 } |
411 } |
412 struct cx_mempool_s *pool = p; |
412 struct cx_mempool_s *pool = p; |
413 void *newm = cxReallocDefault(ptr, n); |
413 void *newm = cxRealloc(pool->base_allocator, ptr, n); |
414 if (newm == NULL) return NULL; |
414 if (newm == NULL) return NULL; |
415 if (ptr != newm) { |
415 if (ptr != newm) { |
416 for (size_t i = 0; i < pool->size; i++) { |
416 for (size_t i = 0; i < pool->size; i++) { |
417 if (pool->data[i] == ptr) { |
417 if (pool->data[i] == ptr) { |
418 pool->data[i] = newm; |
418 pool->data[i] = newm; |
469 cx_mempool_free_all_advanced(pool); |
469 cx_mempool_free_all_advanced(pool); |
470 } else { |
470 } else { |
471 cx_mempool_free_all_pure(pool); |
471 cx_mempool_free_all_pure(pool); |
472 } |
472 } |
473 cx_mempool_free_foreign(pool); |
473 cx_mempool_free_foreign(pool); |
474 cxFreeDefault(pool->data); |
474 cxFree(pool->base_allocator, pool->data); |
475 cxFreeDefault(pool->registered); |
475 cxFree(pool->base_allocator, pool->registered); |
476 cxFreeDefault((void*) pool->allocator); |
476 cxFree(pool->base_allocator, (void*) pool->allocator); |
477 cxFreeDefault(pool); |
477 cxFree(pool->base_allocator, pool); |
478 } |
478 } |
479 |
479 |
480 void cxMempoolSetDestructor( |
480 void cxMempoolSetDestructor( |
481 void *ptr, |
481 void *ptr, |
482 cx_destructor_func func |
482 cx_destructor_func func |
567 cxFreeDefault(provided_allocator); |
567 cxFreeDefault(provided_allocator); |
568 return NULL; |
568 return NULL; |
569 } // LCOV_EXCL_STOP |
569 } // LCOV_EXCL_STOP |
570 |
570 |
571 provided_allocator->data = pool; |
571 provided_allocator->data = pool; |
|
572 *((const CxAllocator**)&pool->base_allocator) = cxDefaultAllocator; |
572 pool->allocator = provided_allocator; |
573 pool->allocator = provided_allocator; |
573 if (type == CX_MEMPOOL_TYPE_SIMPLE) { |
574 if (type == CX_MEMPOOL_TYPE_SIMPLE) { |
574 provided_allocator->cl = &cx_mempool_simple_allocator_class; |
575 provided_allocator->cl = &cx_mempool_simple_allocator_class; |
575 } else if (type == CX_MEMPOOL_TYPE_ADVANCED) { |
576 } else if (type == CX_MEMPOOL_TYPE_ADVANCED) { |
576 provided_allocator->cl = &cx_mempool_advanced_allocator_class; |
577 provided_allocator->cl = &cx_mempool_advanced_allocator_class; |
598 void cxMempoolGlobalDestructor2(CxMempool *pool, cx_destructor_func2 fnc, void *data) { |
599 void cxMempoolGlobalDestructor2(CxMempool *pool, cx_destructor_func2 fnc, void *data) { |
599 pool->destr2 = fnc; |
600 pool->destr2 = fnc; |
600 pool->destr2_data = data; |
601 pool->destr2_data = data; |
601 } |
602 } |
602 |
603 |
/**
 * Destructor for an allocator structure that was transferred to another pool.
 *
 * Registered via cxMempoolRegister2() during a pool transfer, with the source
 * pool's base allocator as the user data, so the destination pool knows how
 * to release the transferred allocator structure.
 *
 * @param base_al the base allocator that originally allocated @p al
 * @param al the transferred allocator structure to free
 */
static void cx_mempool_free_transferred_allocator(void *base_al, void *al) {
    cxFree(base_al, al);
}
606 |
607 |
607 int cxMempoolTransfer( |
608 int cxMempoolTransfer( |
608 CxMempool *source, |
609 CxMempool *source, |
609 CxMempool *dest |
610 CxMempool *dest |
610 ) { |
611 ) { |
611 // safety checks |
612 // safety checks |
612 if (source == dest) return 1; |
613 if (source == dest) return 1; |
613 if (source->allocator->cl != dest->allocator->cl) return 1; |
614 if (source->allocator->cl != dest->allocator->cl) return 1; |
|
615 if (source->base_allocator->cl != dest->base_allocator->cl) return 1; |
614 |
616 |
615 // ensure enough capacity in the destination pool |
617 // ensure enough capacity in the destination pool |
616 if (cx_mempool_ensure_capacity(dest, dest->size + source->size)) { |
618 if (cx_mempool_ensure_capacity(dest, dest->size + source->size)) { |
617 return 1; // LCOV_EXCL_LINE |
619 return 1; // LCOV_EXCL_LINE |
618 } |
620 } |
620 dest->registered_size + source->registered_size)) { |
622 dest->registered_size + source->registered_size)) { |
621 return 1; // LCOV_EXCL_LINE |
623 return 1; // LCOV_EXCL_LINE |
622 } |
624 } |
623 |
625 |
624 // allocate a replacement allocator for the source pool |
626 // allocate a replacement allocator for the source pool |
625 CxAllocator *new_source_allocator = cxMallocDefault(sizeof(CxAllocator)); |
627 CxAllocator *new_source_allocator = |
|
628 cxMalloc(source->base_allocator, sizeof(CxAllocator)); |
626 if (new_source_allocator == NULL) { // LCOV_EXCL_START |
629 if (new_source_allocator == NULL) { // LCOV_EXCL_START |
627 return 1; |
630 return 1; |
628 } // LCOV_EXCL_STOP |
631 } // LCOV_EXCL_STOP |
629 new_source_allocator->cl = source->allocator->cl; |
632 new_source_allocator->cl = source->allocator->cl; |
630 new_source_allocator->data = source; |
633 new_source_allocator->data = source; |
638 sizeof(struct cx_mempool_foreign_memory_s) * source->size); |
641 sizeof(struct cx_mempool_foreign_memory_s) * source->size); |
639 dest->registered_size += source->registered_size; |
642 dest->registered_size += source->registered_size; |
640 |
643 |
641 // register the old allocator with the new pool |
644 // register the old allocator with the new pool |
642 // we have to remove const-ness for this, but that's okay here |
645 // we have to remove const-ness for this, but that's okay here |
|
646 // also register the base allocator, s.t. the pool knows how to free it |
643 CxAllocator *transferred_allocator = (CxAllocator*) source->allocator; |
647 CxAllocator *transferred_allocator = (CxAllocator*) source->allocator; |
644 transferred_allocator->data = dest; |
648 transferred_allocator->data = dest; |
645 cxMempoolRegister(dest, transferred_allocator, |
649 cxMempoolRegister2(dest, transferred_allocator, |
646 cx_mempool_free_transferred_allocator); |
650 cx_mempool_free_transferred_allocator, (void*)source->base_allocator); |
647 |
651 |
648 // prepare the source pool for re-use |
652 // prepare the source pool for re-use |
649 source->allocator = new_source_allocator; |
653 source->allocator = new_source_allocator; |
650 memset(source->data, 0, source->size * sizeof(void*)); |
654 memset(source->data, 0, source->size * sizeof(void*)); |
651 memset(source->registered, 0, |
655 memset(source->registered, 0, |
659 int cxMempoolTransferObject( |
663 int cxMempoolTransferObject( |
660 CxMempool *source, |
664 CxMempool *source, |
661 CxMempool *dest, |
665 CxMempool *dest, |
662 const void *obj |
666 const void *obj |
663 ) { |
667 ) { |
664 // safety check |
668 // safety checks |
665 if (source == dest) return 1; |
669 if (source == dest) return 1; |
|
670 if (source->allocator->cl != dest->allocator->cl) return 1; |
|
671 if (source->base_allocator->cl != dest->base_allocator->cl) return 1; |
666 |
672 |
667 // search for the object |
673 // search for the object |
668 for (size_t i = 0; i < source->size; i++) { |
674 for (size_t i = 0; i < source->size; i++) { |
669 struct cx_mempool_memory_s *mem = source->data[i]; |
675 struct cx_mempool_memory_s *mem = source->data[i]; |
670 if (mem->c == obj) { |
676 if (mem->c == obj) { |