Thank you very much for the clarification; I'm kinda laughing at myself.
I knew that function pointers play a big role in the Objective-C runtime, but it's neat to see that they also play a role in dynamic memory management.
Your insight led me to exactly what I was looking for → "How does the OS fulfill requests for large chunks of memory?"
It boils down to static void *large_and_huge_malloc(szone_t *szone, size_t num_pages), which is invoked from
static INLINE void *szone_malloc_should_clear(szone_t *szone, size_t size, boolean_t cleared_requested):
static void *
large_and_huge_malloc(szone_t *szone, size_t num_pages)
{
    void          *addr;
    vm_range_t    range_to_deallocate;
    huge_entry_t  huge_entry;
    size_t        size;
    large_entry_t large_entry;

    if (!num_pages)
        num_pages = 1; // minimal allocation size for this szone
    size = (size_t)num_pages << vm_page_shift;
    range_to_deallocate.size = 0;
    if (num_pages >= (1 << vm_page_shift)) {
        addr = allocate_pages(szone, size, 0, szone->debug_flags, VM_MEMORY_MALLOC_HUGE);
        if (addr == NULL)
            return NULL;
        huge_entry.size = size;
        huge_entry.address = (vm_address_t)addr;
        if (!huge_entry_append(szone, huge_entry))
            return NULL; // we are leaking the allocation here
        SZONE_LOCK(szone);
        szone->num_bytes_in_huge_objects += size;
    } else {
        addr = allocate_pages(szone, size, 0, szone->debug_flags, VM_MEMORY_MALLOC_LARGE);
#if DEBUG_MALLOC
        if (LOG(szone, addr))
            malloc_printf("in szone_malloc true large allocation at %p for %ly\n", (void *)addr, size);
#endif
        SZONE_LOCK(szone);
        if (addr == NULL) {
            SZONE_UNLOCK(szone);
            return NULL;
        }
#if DEBUG_MALLOC
        if (large_entry_for_pointer_no_lock(szone, addr)) {
            malloc_printf("freshly allocated is already in use: %p\n", addr);
            large_debug_print(szone);
            szone_sleep();
        }
#endif
        if ((szone->num_large_objects_in_use + 1) * 4 > szone->num_large_entries) {
            // density of hash table too high; grow table
            // we do that under lock to avoid a race
            large_entry_t *entries = large_entries_grow_no_lock(szone, &range_to_deallocate);
            if (entries == NULL) {
                SZONE_UNLOCK(szone);
                return NULL;
            }
        }
        large_entry.address_and_num_pages = (uintptr_t)addr | num_pages;
#if DEBUG_MALLOC
        if (large_entry_for_pointer_no_lock(szone, addr)) {
            malloc_printf("entry about to be added already in use: %p\n", addr);
            large_debug_print(szone);
            szone_sleep();
        }
#endif
        large_entry_insert_no_lock(szone, large_entry);
#if DEBUG_MALLOC
        if (!large_entry_for_pointer_no_lock(szone, (void *)addr)) {
            malloc_printf("can't find entry just added\n");
            large_debug_print(szone);
            szone_sleep();
        }
#endif
        szone->num_large_objects_in_use ++;
        szone->num_bytes_in_large_objects += size;
    }
    SZONE_UNLOCK(szone);
    if (range_to_deallocate.size) {
        deallocate_pages(szone, (void *)range_to_deallocate.address, range_to_deallocate.size, 0); // we deallocate outside the lock
    }
    return (void *)addr;
}
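Just to make that first test concrete for anyone else reading along, here's a tiny standalone sketch of the large-vs-huge decision (my own code, not Apple's). The 4 KiB page size (vm_page_shift = 12) is only an assumption for illustration; with it, anything spanning at least 2^12 = 4096 pages, i.e. 16 MiB, gets tracked as a "huge" entry, everything below that as a "large" one.

#include <stdio.h>
#include <stddef.h>

int main(void)
{
    const unsigned vm_page_shift = 12;      /* assumed: 4 KiB pages */
    size_t request = 64 * 1024 * 1024;      /* pretend we called malloc(64 MiB) */

    /* round the request up to whole pages, as szone_malloc_should_clear() does */
    size_t num_pages = (request + (1u << vm_page_shift) - 1) >> vm_page_shift;
    size_t size = num_pages << vm_page_shift;

    /* same test as in large_and_huge_malloc(): >= 2^vm_page_shift pages means "huge" */
    if (num_pages >= ((size_t)1 << vm_page_shift))
        printf("%zu bytes -> huge  (%zu pages, %zu bytes mapped)\n", request, num_pages, size);
    else
        printf("%zu bytes -> large (%zu pages, %zu bytes mapped)\n", request, num_pages, size);
    return 0;
}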
static INLINE void *
szone_malloc_should_clear(szone_t *szone, size_t size, boolean_t cleared_requested)
{
    void    *ptr;
    msize_t msize;

    if (size <= 31*TINY_QUANTUM) {
        // think tiny
        msize = TINY_MSIZE_FOR_BYTES(size + TINY_QUANTUM - 1);
        if (!msize)
            msize = 1;
        ptr = tiny_malloc_should_clear(szone, msize, cleared_requested);
    } else if (!((szone->debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES) && PROTECT_SMALL) && (size < LARGE_THRESHOLD)) {
        // think small
        msize = SMALL_MSIZE_FOR_BYTES(size + SMALL_QUANTUM - 1);
        if (!msize)
            msize = 1;
        ptr = small_malloc_should_clear(szone, msize, cleared_requested);
    } else {
        // large or huge
        size_t num_pages = round_page(size) >> vm_page_shift;
        if (num_pages == 0) /* Overflowed */
            ptr = 0;
        else
            ptr = large_and_huge_malloc(szone, num_pages);
    }
#if DEBUG_MALLOC
    if (LOG(szone, ptr))
        malloc_printf("szone_malloc returned %p\n", ptr);
#endif
    /*
     * If requested, scribble on allocated memory.
     */
    if ((szone->debug_flags & SCALABLE_MALLOC_DO_SCRIBBLE) && ptr && !cleared_requested && size)
        memset(ptr, 0xaa, size);

    return ptr;
}
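And the way szone_malloc_should_clear() routes a request to the tiny, small, or large/huge path really comes down to two comparisons. Below is a rough sketch of that ladder, again my own code; TINY_QUANTUM = 16 and LARGE_THRESHOLD = 15 * 1024 are assumptions taken from the version of scalable_malloc.c I was reading and may differ in other releases (the sketch also ignores the guard-page debug flag, which pushes small requests onto the large path).

#include <stdio.h>
#include <stddef.h>

/* Assumed values; check the constants in your copy of scalable_malloc.c. */
#define TINY_QUANTUM     16
#define LARGE_THRESHOLD  (15 * 1024)

/* Mirrors the if/else ladder in szone_malloc_should_clear(). */
static const char *classify(size_t size)
{
    if (size <= 31 * TINY_QUANTUM)   /* up to 496 bytes -> tiny regions              */
        return "tiny";
    if (size < LARGE_THRESHOLD)      /* up to ~15 KiB   -> small regions             */
        return "small";
    return "large/huge";             /* whole VM pages via large_and_huge_malloc()   */
}

int main(void)
{
    size_t sizes[] = { 32, 496, 497, 8 * 1024, 15 * 1024, 64 * 1024 * 1024 };
    size_t i;

    for (i = 0; i < sizeof sizes / sizeof sizes[0]; i++)
        printf("malloc(%zu) -> %s allocator\n", sizes[i], classify(sizes[i]));
    return 0;
}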