NodePools

Hi Mark,

I was running nodepool.m and decided to conduct some performance testing, as you recommended.

On page 169 you provided some timing data. The time command reports “real”, “user”, and “sys” measurements. Which one did you document (“real” vs. “user”)?

Thanks

Total time - if you use the csh version of ‘time’, it’s the third entry.
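If you want to see the three numbers from inside the program rather than from the shell, here’s a rough sketch (not from the book) that uses gettimeofday() for wall-clock (“real”) time and getrusage() for the “user”/“sys” CPU times:

#include <stdio.h>
#include <sys/time.h>
#include <sys/resource.h>

/* Sketch only: compares "real" (wall clock) against "user"/"sys" (CPU time)
   for a chunk of work. Field names are the standard BSD ones. */
static double tv_seconds(struct timeval tv) {
    return tv.tv_sec + tv.tv_usec / 1e6;
}

int main(void) {
    struct timeval start, end;
    struct rusage ru;
    volatile double x = 0;

    gettimeofday(&start, NULL);
    for (long i = 0; i < 50000000; i++)   /* stand-in for the real work */
        x += i;
    gettimeofday(&end, NULL);

    getrusage(RUSAGE_SELF, &ru);
    printf("real: %.3f s\n", tv_seconds(end) - tv_seconds(start));
    printf("user: %.3f s\n", tv_seconds(ru.ru_utime));
    printf("sys:  %.3f s\n", tv_seconds(ru.ru_stime));
    return 0;
}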

Cheers,
++md

Thanks for the reply.

Do you have any examples of dynamic memory allocation that uses sbrk() or brk()? I want to see how this is done manually.

TB

The *brk family of calls has been relegated to the bin of (as the man page calls it) “historical curiosities”. Interestingly enough, it looks like brk() always fails: opensource.apple.com/source/ … ated/brk.c
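If you just want to see the mechanics, sbrk() still links (it’s emulated on OS X these days), so a toy like this works, with the caveat that nothing real should allocate this way anymore:

#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Toy example only: grow the process "heap" by asking for more address space
   via sbrk(). Deprecated/emulated on OS X; real allocators use mmap()/vm_allocate(). */
int main(void) {
    void *old_break = sbrk(0);          /* current program break */
    printf("current break: %p\n", old_break);

    char *buf = sbrk(4096);             /* ask for 4 KB more */
    if (buf == (void *)-1) {
        perror("sbrk");
        return 1;
    }
    strcpy(buf, "hand-rolled allocation");
    printf("new break:     %p, contents: \"%s\"\n", sbrk(0), buf);
    return 0;
}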

Thanks for the link and the insight :wink:

I decided to look at the internals of malloc … That made me go here -> opensource.apple.com/source/ … n/malloc.c

I found two functions of interest:
/****************************************
void *malloc_zone_malloc(malloc_zone_t *zone, size_t size) {
    void *ptr;
    if (malloc_check_start && (malloc_check_counter++ >= malloc_check_start)) {
        internal_check();
    }
    if (size > MALLOC_ABSOLUTE_MAX_SIZE) {
        return NULL;
    }
    ptr = zone->malloc(zone, size);
    if (malloc_logger)
        malloc_logger(MALLOC_LOG_TYPE_ALLOCATE | MALLOC_LOG_TYPE_HAS_ZONE, (uintptr_t)zone, (uintptr_t)size, 0, (uintptr_t)ptr, 0);
    return ptr;
}
****************************/

/********************************
void * malloc(size_t size) {
    void *retval;
    retval = malloc_zone_malloc(inline_malloc_default_zone(), size);
    if (retval == NULL) {
        errno = ENOMEM;
    }
    return retval;
}
********************************/
So when a call is made to void *malloc(), the function malloc_zone_malloc() is invoked… but malloc_zone_malloc() appears to call back into malloc()…
This seems somewhat circular, or maybe it’s a misinterpretation on my part. How is this actually resolved?

You’re in a twisty maze of function pointers, named the same :slight_smile:

malloc_zone_malloc vectors through the “zone”, which is actually a bunch of function pointers (opensource.apple.com/source/ … c/malloc.h):

typedef struct _malloc_zone_t {
    void *reserved1;
    void *reserved2;
    size_t (*size)(struct _malloc_zone_t *zone, const void *ptr);
    void *(*malloc)(struct _malloc_zone_t *zone, size_t size);
    ...

The malloc function pointer is set up in “scalable malloc” (opensource.apple.com/source/ … e_malloc.c) :

...
szone->num_small_regions_allocated = INITIAL_NUM_REGIONS;
szone->basic_zone.version = 3;
szone->basic_zone.size = (void *)szone_size;
szone->basic_zone.malloc = (void *)szone_malloc;
...

And that file has the implementation of szone_malloc (which actually just turns around and calls szone_malloc_should_clear, etc).
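If the indirection still feels slippery, here’s a stripped-down model (made-up names, nothing to do with the real szone internals) of the same pattern: the public entry point just vectors through whatever function pointer the zone was loaded with at creation time, so there’s no recursion anywhere.

#include <stdio.h>
#include <stdlib.h>

/* A made-up, miniature "zone": a struct of function pointers,
   like malloc_zone_t with everything else stripped away. */
typedef struct my_zone {
    void *(*malloc)(struct my_zone *zone, size_t size);
} my_zone_t;

/* The "backend" (stand-in for szone_malloc). */
static void *backend_malloc(my_zone_t *zone, size_t size) {
    printf("backend_malloc(%zu)\n", size);
    return malloc(size);   /* delegate to the real allocator for the demo */
}

/* The "frontend" (stand-in for malloc_zone_malloc): it never calls the libc
   malloc() by name, it calls whatever the zone's malloc pointer holds. */
static void *zone_malloc(my_zone_t *zone, size_t size) {
    return zone->malloc(zone, size);
}

int main(void) {
    my_zone_t zone = { .malloc = backend_malloc };  /* the "scalable malloc" setup step */
    void *p = zone_malloc(&zone, 64);
    free(p);
    return 0;
}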

If you’ve got an interest in the guts of stuff like this, Amit Singh’s epic tome (Mac OS X Internals) has a section on malloc’s guts.

Thank you very much for the clarification :slight_smile: :slight_smile: I’m kinda laughing at myself :laughing: :laughing:
I knew that function pointers play a big role in the Objective-C runtime, but it’s neat to see that they also play a role in the game of dynamic memory management.

Your insight led me to exactly what I was looking for → “how the OS fulfills requests for large chunks of memory”.
It boils down to static void *large_and_huge_malloc(szone_t *szone, size_t num_pages), which is invoked from
static INLINE void *szone_malloc_should_clear(szone_t *szone, size_t size, boolean_t cleared_requested):

static void *
large_and_huge_malloc(szone_t *szone, size_t num_pages)
{
    void		*addr;
    vm_range_t		range_to_deallocate;
    huge_entry_t	huge_entry;
    size_t		size;
    large_entry_t	large_entry;
    
    if (!num_pages)
	    num_pages = 1; // minimal allocation size for this szone
    size = (size_t)num_pages << vm_page_shift;
    range_to_deallocate.size = 0;
    if (num_pages >= (1 << vm_page_shift)) {
	addr = allocate_pages(szone, size, 0, szone->debug_flags, VM_MEMORY_MALLOC_HUGE);
	if (addr == NULL)
	    return NULL;
	huge_entry.size = size;
	huge_entry.address = (vm_address_t)addr;
	if (!huge_entry_append(szone, huge_entry))
	    return NULL;	// we are leaking the allocation here
	SZONE_LOCK(szone);
	szone->num_bytes_in_huge_objects += size;
    } else {

	addr = allocate_pages(szone, size, 0, szone->debug_flags, VM_MEMORY_MALLOC_LARGE);
#if DEBUG_MALLOC
	if (LOG(szone, addr))
	    malloc_printf("in szone_malloc true large allocation at %p for %ly\n", (void *)addr, size);
#endif
	SZONE_LOCK(szone);
	if (addr == NULL) {
	    SZONE_UNLOCK(szone);
	    return NULL;
	}
#if DEBUG_MALLOC
	if (large_entry_for_pointer_no_lock(szone, addr)) {
	    malloc_printf("freshly allocated is already in use: %p\n", addr);
	    large_debug_print(szone);
	    szone_sleep();
	}
#endif
	if ((szone->num_large_objects_in_use + 1) * 4 > szone->num_large_entries) {
	    // density of hash table too high; grow table
	    // we do that under lock to avoid a race
	    large_entry_t *entries = large_entries_grow_no_lock(szone, &range_to_deallocate);
	    if (entries == NULL) {
	    	SZONE_UNLOCK(szone);
	    	return NULL;
	    }
	}
	large_entry.address_and_num_pages = (uintptr_t)addr | num_pages;
#if DEBUG_MALLOC
	if (large_entry_for_pointer_no_lock(szone, addr)) {
	    malloc_printf("entry about to be added already in use: %p\n", addr);
	    large_debug_print(szone);
	    szone_sleep();
	}
#endif
	large_entry_insert_no_lock(szone, large_entry);
#if DEBUG_MALLOC
	if (!large_entry_for_pointer_no_lock(szone, (void *)addr)) {
	    malloc_printf("can't find entry just added\n");
	    large_debug_print(szone);
	    szone_sleep();
	}
#endif
	szone->num_large_objects_in_use ++;
	szone->num_bytes_in_large_objects += size;
    }
    SZONE_UNLOCK(szone);
    if (range_to_deallocate.size) {
	deallocate_pages(szone, (void *)range_to_deallocate.address, range_to_deallocate.size, 0); // we deallocate outside the lock
    }
    return (void *)addr;
}
static INLINE void *
szone_malloc_should_clear(szone_t *szone, size_t size, boolean_t cleared_requested)
{
    void	*ptr;
    msize_t	msize;

    if (size <= 31*TINY_QUANTUM) {
	// think tiny
	msize = TINY_MSIZE_FOR_BYTES(size + TINY_QUANTUM - 1);
	if (!msize)
	    msize = 1;
	ptr = tiny_malloc_should_clear(szone, msize, cleared_requested);
    } else if (!((szone->debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES) && PROTECT_SMALL) && (size < LARGE_THRESHOLD)) {
	// think small
	msize = SMALL_MSIZE_FOR_BYTES(size + SMALL_QUANTUM - 1);
	if (! msize) msize = 1;
	ptr = small_malloc_should_clear(szone, msize, cleared_requested);
    } else {
	// large or huge
	size_t num_pages = round_page(size) >> vm_page_shift;
	if (num_pages == 0)	/* Overflowed */
		ptr = 0;
	else
	ptr = large_and_huge_malloc(szone, num_pages);
    }
#if DEBUG_MALLOC
    if (LOG(szone, ptr))
	malloc_printf("szone_malloc returned %p\n", ptr);
#endif
    /*
     * If requested, scribble on allocated memory.
     */
    if ((szone->debug_flags & SCALABLE_MALLOC_DO_SCRIBBLE) && ptr && !cleared_requested && size)
	memset(ptr, 0xaa, size);

    return ptr;
}
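A quick way to watch the tiny / small / large routing from user space is the public malloc_size() call. This is my own little test, not from the source above, and the exact thresholds and quanta vary by OS release, so treat the numbers as illustrative: the block size reported back reflects the quantum rounding each path does.

#include <stdio.h>
#include <stdlib.h>
#include <malloc/malloc.h>

/* Illustrative only: malloc_size() reports the size of the block the zone
   actually handed back, which shows the tiny/small/large quantum rounding.
   Thresholds differ between OS versions. */
int main(void) {
    size_t requests[] = { 1, 24, 496, 512, 4000, 20000, 200000 };
    size_t i;
    for (i = 0; i < sizeof(requests) / sizeof(requests[0]); i++) {
        void *p = malloc(requests[i]);
        printf("requested %7zu bytes, block is %7zu bytes\n",
               requests[i], malloc_size(p));
        free(p);
    }
    return 0;
}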