之前对rt-thread中如何解决
分配内存时间的确定性问题
内存碎片问题
资源受限的设备的内存管理问题
比较好奇,所以查看了rt-thread关于内存管理算法的源码,在此和官方文档一起整理出来。
rt-thread对于内存管理主要有三种方式:小内存管理算法、slab管理算法和memheap管理算法,分别在src/mem.c、src/slab.c和src/memheap.c中。
小内存管理算法
小内存管理算法是一个简单的内存分配算法。初始时,它是一块大的内存。当需要分配内存块时,将从这个大的内存块上分割出相匹配的内存块,然后把分割出来的空闲内存块还回给堆管理系统中。并有一个指针指向空闲内存块的第一个。
struct rt_small_mem
{
struct rt_memory parent; / < inherit from rt_memory /
rt_uint8_t heap_ptr; / < pointer to the heap */
struct rt_small_mem_item *heap_end;
struct rt_small_mem_item *lfree;
rt_size_t mem_size_aligned; /< aligned memory size */
};
每个内存块都包含一个管理用的数据头,而通过这个数据头和数据大小计算来偏移得到下一个内存块,本质上是通过数组的方式。
每个内存块(不管是已分配的内存块还是空闲的内存块)都包含一个数据头,其中包括:
1)magic:魔数(或称为幻数),它会被初始化成 0x1ea0(即英文单词 heap),用于标记这个内存块是一个内存管理用的内存数据块;魔数不仅仅用于标识这个数据块是一个内存管理用的内存数据块,实质也是一个内存保护字:如果这个区域被改写,那么也就意味着这块内存块被非法改写(正常情况下只有内存管理器才会去碰这块内存)。
2)used:指示出当前内存块是否已经分配。
struct rt_small_mem_item
{
rt_ubase_t pool_ptr; / < small memory object addr /
#ifdef arch_cpu_64bit
rt_uint32_t resv;
#endif / arch_cpu_64bit /
rt_size_t next; / < next free item */
rt_size_t prev; / < prev free item /
#ifdef rt_using_memtrace
#ifdef arch_cpu_64bit
rt_uint8_t thread[8]; / < thread name */
#else
rt_uint8_t thread[4]; /parent) == rt_object_class_memory);
rt_assert(rt_object_is_systemobject(&m->parent));
if (size != rt_align(size, rt_align_size))
{
rt_debug_log(rt_debug_mem, (malloc size %d, but align to %dn,
size, rt_align(size, rt_align_size)));
}
else
{
rt_debug_log(rt_debug_mem, (malloc size %dn, size));
}
small_mem = (struct rt_small_mem )m;
/ alignment size /
size = rt_align(size, rt_align_size);
/ every data block must be at least min_size_aligned long */
if (size small_mem->mem_size_aligned)
{
rt_debug_log(rt_debug_mem, (no memoryn));
return rt_null;
}
for (ptr = (rt_uint8_t *)small_mem->lfree - small_mem->heap_ptr;
ptr mem_size_aligned - size;
ptr = ((struct rt_small_mem_item *)&small_mem->heap_ptr[ptr])->next)
{
mem = (struct rt_small_mem_item )&small_mem->heap_ptr[ptr];
if ((!mem_isused(mem)) && (mem->next - (ptr + sizeof_struct_mem)) >= size)
{
/ mem is not used and at least perfect fit is possible:
mem->next - (ptr + sizeof_struct_mem) gives us the 'user data size' of mem /
if (mem->next - (ptr + sizeof_struct_mem) >=
(size + sizeof_struct_mem + min_size_aligned))
{
/ (in addition to the above, we test if another struct rt_small_mem_item (sizeof_struct_mem) containing
at least min_size_aligned of data also fits in the 'user data space' of 'mem')
-> split large block, create empty remainder,
remainder must be large enough to contain min_size_aligned data: if
mem->next - (ptr + (2*sizeof_struct_mem)) == size,
struct rt_small_mem_item would fit in but no data between mem2 and mem2->next
@todo we could leave out min_size_aligned. we would create an empty
region that couldn't hold data, but when mem->next gets freed,
the 2 regions would be combined, resulting in more free memory
/
ptr2 = ptr + sizeof_struct_mem + size;
/ create mem2 struct */
mem2 = (struct rt_small_mem_item )&small_mem->heap_ptr[ptr2];
mem2->pool_ptr = mem_freed();
mem2->next = mem->next;
mem2->prev = ptr;
#ifdef rt_using_memtrace
rt_smem_setname(mem2, );
#endif / rt_using_memtrace /
/ and insert it between mem and mem->next */
mem->next = ptr2;
if (mem2->next != small_mem->mem_size_aligned + sizeof_struct_mem)
{
((struct rt_small_mem_item )&small_mem->heap_ptr[mem2->next])->prev = ptr2;
}
small_mem->parent.used += (size + sizeof_struct_mem);
if (small_mem->parent.max parent.used)
small_mem->parent.max = small_mem->parent.used;
}
else
{
/ (a mem2 struct does no fit into the user data space of mem and mem->next will always
be used at this point: if not we have 2 unused structs in a row, plug_holes should have
take care of this).
-> near fit or excact fit: do not split, no mem2 creation
also can't move mem->next directly behind mem, since mem->next
will always be used at this point!
*/
small_mem->parent.used += mem->next - ((rt_uint8_t )mem - small_mem->heap_ptr);
if (small_mem->parent.max parent.used)
small_mem->parent.max = small_mem->parent.used;
}
/ set small memory object /
mem->pool_ptr = mem_used();
#ifdef rt_using_memtrace
if (rt_thread_self())
rt_smem_setname(mem, rt_thread_self()->parent.name);
else
rt_smem_setname(mem, none);
#endif / rt_using_memtrace /
if (mem == small_mem->lfree)
{
/ find next free block after mem and update lowest free pointer */
while (mem_isused(small_mem->lfree) && small_mem->lfree != small_mem->heap_end)
small_mem->lfree = (struct rt_small_mem_item *)&small_mem->heap_ptr[small_mem->lfree->next];
rt_assert(((small_mem->lfree == small_mem->heap_end) || (!mem_isused(small_mem->lfree))));
}
rt_assert((rt_ubase_t)mem + sizeof_struct_mem + size heap_end);
rt_assert((rt_ubase_t)((rt_uint8_t *)mem + sizeof_struct_mem) % rt_align_size == 0);
rt_assert((((rt_ubase_t)mem) & (rt_align_size - 1)) == 0);
rt_debug_log(rt_debug_mem,
(allocate memory at 0x%x, size: %dn,
(rt_ubase_t)((rt_uint8_t *)mem + sizeof_struct_mem),
(rt_ubase_t)(mem->next - ((rt_uint8_t )mem - small_mem->heap_ptr))));
/ return the memory data except mem struct */
return (rt_uint8_t *)mem + sizeof_struct_mem;
}
}
return rt_null;
}
释放时则是相反的过程,但分配器会查看前后相邻的内存块是否空闲,如果空闲则合并成一个大的空闲内存块。
主要就是把要释放的内存块放入空闲内存块链表
调用plug_holes合并空闲内存块
只会判断前后内存块,保证了效率
void rt_smem_free(void *rmem)
{
struct rt_small_mem_item *mem;
struct rt_small_mem small_mem;
if (rmem == rt_null)
return;
rt_assert((((rt_ubase_t)rmem) & (rt_align_size - 1)) == 0);
/ get the corresponding struct rt_small_mem_item ... */
mem = (struct rt_small_mem_item *)((rt_uint8_t )rmem - sizeof_struct_mem);
/ ... which has to be in a used state ... */
small_mem = mem_pool(mem);
rt_assert(small_mem != rt_null);
rt_assert(mem_isused(mem));
rt_assert(rt_object_get_type(&small_mem->parent.parent) == rt_object_class_memory);
rt_assert(rt_object_is_systemobject(&small_mem->parent.parent));
rt_assert((rt_uint8_t *)rmem >= (rt_uint8_t *)small_mem->heap_ptr &&
(rt_uint8_t *)rmem heap_end);
rt_assert(mem_pool(&small_mem->heap_ptr[mem->next]) == small_mem);
rt_debug_log(rt_debug_mem,
(release memory 0x%x, size: %dn,
(rt_ubase_t)rmem,
(rt_ubase_t)(mem->next - ((rt_uint8_t )mem - small_mem->heap_ptr))));
/ ... and is now unused. /
mem->pool_ptr = mem_freed();
#ifdef rt_using_memtrace
rt_smem_setname(mem, );
#endif / rt_using_memtrace /
if (mem lfree)
{
/ the newly freed struct is now the lowest */
small_mem->lfree = mem;
}
small_mem->parent.used -= (mem->next - ((rt_uint8_t )mem - small_mem->heap_ptr));
/ finally, see if prev or next are free also */
plug_holes(small_mem, mem);
}
/**
 * Coalesce the freed block @p mem with its immediate neighbours.
 * Only the directly adjacent previous and next blocks are examined, which
 * keeps freeing O(1).
 *
 * @param m   the small-memory object owning the heap
 * @param mem a just-freed block header inside m's heap
 */
static void plug_holes(struct rt_small_mem *m, struct rt_small_mem_item *mem)
{
    struct rt_small_mem_item *nmem;
    struct rt_small_mem_item *pmem;

    RT_ASSERT((rt_uint8_t *)mem >= m->heap_ptr);
    RT_ASSERT((rt_uint8_t *)mem < (rt_uint8_t *)m->heap_end);

    /* plug hole forward */
    nmem = (struct rt_small_mem_item *)&m->heap_ptr[mem->next];
    if (mem != nmem && !MEM_ISUSED(nmem) &&
        (rt_uint8_t *)nmem != (rt_uint8_t *)m->heap_end)
    {
        /* if mem->next is unused and not end of m->heap_ptr,
         * combine mem and mem->next
         */
        if (m->lfree == nmem)
        {
            m->lfree = mem;
        }
        nmem->pool_ptr = 0;
        mem->next = nmem->next;
        ((struct rt_small_mem_item *)&m->heap_ptr[nmem->next])->prev = (rt_uint8_t *)mem - m->heap_ptr;
    }

    /* plug hole backward */
    pmem = (struct rt_small_mem_item *)&m->heap_ptr[mem->prev];
    if (pmem != mem && !MEM_ISUSED(pmem))
    {
        /* if mem->prev is unused, combine mem and mem->prev */
        if (m->lfree == mem)
        {
            m->lfree = pmem;
        }
        mem->pool_ptr = 0;
        pmem->next = mem->next;
        ((struct rt_small_mem_item *)&m->heap_ptr[mem->next])->prev = (rt_uint8_t *)pmem - m->heap_ptr;
    }
}
slab 管理算法
rt-thread 的 slab 分配器实现是纯粹的缓冲型的内存池算法。slab 分配器会根据对象的大小分成多个区(zone),也可以看成每类对象有一个内存池,如下图所示:
一个 zone 的大小在 32k 到 128k 字节之间,分配器会在堆初始化时根据堆的大小自动调整。系统中的 zone 最多包括 72 种对象,一次最大能够分配 16k 的内存空间,如果超出了 16k 那么直接从页分配器中分配。每个 zone 上分配的内存块大小是固定的,能够分配相同大小内存块的 zone 会链接在一个链表中,而 72 种对象的 zone 链表则放在一个数组(zone_array[])中统一管理。
整个slab由rt_slab进行管理,zone_array存放着所有zone,zone_free存放着空闲的zone。每个zone当中的基本单位又是rt_slab_chunk,作为最低一级内存分配单位。
struct rt_slab
{
struct rt_memory parent; / *< inherit from rt_memory /
rt_ubase_t heap_start; / < memory start address */
rt_ubase_t heap_end; / ** 0 */
struct rt_slab_zone zone_free; / whole zones that have become free /
rt_uint32_t zone_free_cnt;
rt_uint32_t zone_size;
rt_uint32_t zone_limit;
rt_uint32_t zone_page_cnt;
struct rt_slab_page page_list;
};
struct rt_slab_zone
{
rt_uint32_t z_magic; / < magic number for sanity check */
rt_uint32_t z_nfree; / *< total free chunks / ualloc space in zone /
rt_uint32_t z_nmax; / < maximum free chunks */
struct rt_slab_zone *z_next; / **< zoneary[] link if z_nfree non-zero /
rt_uint8_t z_baseptr; / < pointer to start of chunk array */
rt_uint32_t z_uindex; / *< current initial allocation index /
rt_uint32_t z_chunksize; / < chunk size for validation */
rt_uint32_t z_zoneindex; / **< zone index /
struct rt_slab_chunk z_freechunk; / < free chunk list */
};
/* The smallest allocation unit inside a zone.  Free chunks of a zone are
 * threaded into a singly-linked list headed by z_freechunk. */
struct rt_slab_chunk
{
struct rt_slab_chunk *c_next; /* next free chunk in this zone's free list */
};
其中在初始化时会初始化page_list,相当于slab的内存池,每次分配都会从这个内存池当中进行分配。其中关于page的初始化、分配和释放和小内存管理算法是类似的,本质上都是使用数组和计算偏移的方式。具体可见rt_slab_page_init、rt_slab_page_alloc和rt_slab_page_free。
struct rt_slab_page
{
    struct rt_slab_page *next;  /**< next valid page */
    rt_size_t            page;  /**< number of pages */
    /* dummy: pad the structure to exactly one page */
    char                 dummy[RT_MM_PAGE_SIZE - (sizeof(struct rt_slab_page *) + sizeof(rt_size_t))];
};
对于内存分配:如果申请的内存过大(size >= slab->zone_limit),会单独进行处理,不分配进zone
其次就是计算当前size应该放入哪个zone,由zoneindex进行计算
如果这个zone存在
如果zone加上此次操作即会满,则移除出zone_array
如果当前chunk还没达到最大值,则直接分配一个chunk
否则就从freechunk当中进行分配
最后更新内存信息直接返回
如果不存在这个zone
如果存在zone_free,则直接分配出一个
如果不存在zone_free,zone_free本质上是还未使用的zone
分配一个新的page并设置memusage
初始化这个zone,然后分配一个chunk
/**
 * Allocate @p size bytes from the slab allocator.
 *
 * Requests >= zone_limit bypass the zones and go straight to the page
 * allocator.  Otherwise the request is rounded into a size class
 * (zoneindex) and served from an existing zone, a cached free zone, or a
 * newly allocated zone.
 *
 * @param m    the slab object
 * @param size requested size in bytes (0 returns RT_NULL)
 * @return pointer to the allocated chunk, or RT_NULL on failure
 */
void *rt_slab_alloc(rt_slab_t m, rt_size_t size)
{
    struct rt_slab_zone *z;
    rt_int32_t zi;
    struct rt_slab_chunk *chunk;
    struct rt_slab_memusage *kup;
    struct rt_slab *slab = (struct rt_slab *)m;

    /* zero size, return RT_NULL */
    if (size == 0)
        return RT_NULL;

    /*
     * Handle large allocations directly.  There should not be very many of
     * these so performance is not a big issue.
     */
    if (size >= slab->zone_limit)
    {
        size = RT_ALIGN(size, RT_MM_PAGE_SIZE);
        chunk = rt_slab_page_alloc(m, size >> RT_MM_PAGE_BITS);
        if (chunk == RT_NULL)
            return RT_NULL;

        /* set kup */
        kup = btokup(chunk);
        kup->type = PAGE_TYPE_LARGE;
        kup->size = size >> RT_MM_PAGE_BITS;

        RT_DEBUG_LOG(RT_DEBUG_SLAB,
                     ("alloc a large memory 0x%x, page cnt %d, kup %d\n",
                      size,
                      size >> RT_MM_PAGE_BITS,
                      ((rt_ubase_t)chunk - slab->heap_start) >> RT_MM_PAGE_BITS));
        /* mem stat */
        slab->parent.used += size;
        if (slab->parent.used > slab->parent.max)
            slab->parent.max = slab->parent.used;
        return chunk;
    }

    /*
     * Attempt to allocate out of an existing zone.  First try the free list,
     * then allocate out of unallocated space.  If we find a good zone move
     * it to the head of the list so later allocations find it quickly
     * (we might have thousands of zones in the list).
     *
     * Note: zoneindex() will panic if size is too large.
     */
    zi = zoneindex(&size);
    RT_ASSERT(zi < RT_SLAB_NZONES);

    RT_DEBUG_LOG(RT_DEBUG_SLAB, ("try to malloc 0x%x on zone: %d\n", size, zi));

    if ((z = slab->zone_array[zi]) != RT_NULL)
    {
        RT_ASSERT(z->z_nfree > 0);

        /* Remove us from the zone_array[] when we become full */
        if (--z->z_nfree == 0)
        {
            slab->zone_array[zi] = z->z_next;
            z->z_next = RT_NULL;
        }

        /*
         * No chunks are available but nfree said we had some memory, so
         * it must be available in the never-before-used-memory area
         * governed by uindex.  The consequences are very serious if our zone
         * got corrupted so we use an explicit rt_kprintf rather then a KASSERT.
         */
        if (z->z_uindex + 1 != z->z_nmax)
        {
            z->z_uindex = z->z_uindex + 1;
            chunk = (struct rt_slab_chunk *)(z->z_baseptr + z->z_uindex * size);
        }
        else
        {
            /* find on free chunk list */
            chunk = z->z_freechunk;

            /* remove this chunk from list */
            z->z_freechunk = z->z_freechunk->c_next;
        }
        /* mem stats */
        slab->parent.used += z->z_chunksize;
        if (slab->parent.used > slab->parent.max)
            slab->parent.max = slab->parent.used;

        return chunk;
    }

    /*
     * If all zones are exhausted we need to allocate a new zone for this
     * index.
     *
     * At least one subsystem, the tty code (see CROUND) expects power-of-2
     * allocations to be power-of-2 aligned.  We maintain compatibility by
     * adjusting the base offset below.
     */
    {
        rt_uint32_t off;

        if ((z = slab->zone_free) != RT_NULL)
        {
            /* remove zone from free zone list */
            slab->zone_free = z->z_next;
            -- slab->zone_free_cnt;
        }
        else
        {
            /* allocate a zone from page */
            z = rt_slab_page_alloc(m, slab->zone_size / RT_MM_PAGE_SIZE);
            if (z == RT_NULL)
            {
                return RT_NULL;
            }

            RT_DEBUG_LOG(RT_DEBUG_SLAB, ("alloc a new zone: 0x%x\n",
                                         (rt_ubase_t)z));

            /* set message usage */
            for (off = 0, kup = btokup(z); off < slab->zone_page_cnt; off ++)
            {
                kup->type = PAGE_TYPE_SMALL;
                kup->size = off;

                kup ++;
            }
        }

        /* clear to zero */
        rt_memset(z, 0, sizeof(struct rt_slab_zone));

        /* offset of slab zone struct in zone */
        off = sizeof(struct rt_slab_zone);

        /*
         * Guarentee power-of-2 alignment for power-of-2-sized chunks.
         * Otherwise just 8-byte align the data.
         */
        if ((size | (size - 1)) + 1 == (size << 1))
            off = (off + size - 1) & ~(size - 1);
        else
            off = (off + ALIGN_SIZE - 1) & ~(ALIGN_SIZE - 1);

        z->z_magic     = ZALLOC_SLAB_MAGIC;
        z->z_zoneindex = zi;
        z->z_nmax      = (slab->zone_size - off) / size;
        z->z_nfree     = z->z_nmax - 1;
        z->z_baseptr   = (rt_uint8_t *)z + off;
        z->z_uindex    = 0;
        z->z_chunksize = size;

        chunk = (struct rt_slab_chunk *)(z->z_baseptr + z->z_uindex * size);

        /* link to zone array */
        z->z_next = slab->zone_array[zi];
        slab->zone_array[zi] = z;
        /* mem stats */
        slab->parent.used += z->z_chunksize;
        if (slab->parent.used > slab->parent.max)
            slab->parent.max = slab->parent.used;
    }

    return chunk;
}
RTM_EXPORT(rt_slab_alloc);
(2)内存释放
分配器需要找到内存块所在的 zone 节点,然后把内存块链接到 zone 的空闲内存块链表中。如果此时 zone 的空闲链表指示出 zone 的所有内存块都已经释放,即 zone 是完全空闲的,那么当 zone 链表中全空闲 zone 达到一定数目后,系统就会把这个全空闲的 zone 释放到页面分配器中去。
首先找到当前要释放的内存块的memusage
如果是前面所述的特大内存,那就直接使用rt_slab_page_free释放。
根据memuage计算偏移找到zone和chunk,将chunk放入freechunk中,下次直接使用
如果当前的zone是完全空闲的
移除这个zone,并更新rt_slab的信息
并释放page
/**
 * Release memory previously returned by rt_slab_alloc().
 *
 * Large allocations go straight back to the page allocator.  Zone chunks
 * are pushed onto their zone's free list; a completely free zone is moved
 * to the free-zone cache, and once the cache exceeds ZONE_RELEASE_THRESH a
 * zone is returned to the page allocator.
 *
 * @param m   the slab object
 * @param ptr pointer to free (RT_NULL is a no-op)
 */
void rt_slab_free(rt_slab_t m, void *ptr)
{
    struct rt_slab_zone *z;
    struct rt_slab_chunk *chunk;
    struct rt_slab_memusage *kup;
    struct rt_slab *slab = (struct rt_slab *)m;

    /* free a RT_NULL pointer */
    if (ptr == RT_NULL)
        return ;

    /* get memory usage */
#if RT_DEBUG_SLAB
    {
        rt_ubase_t addr = ((rt_ubase_t)ptr & ~RT_MM_PAGE_MASK);
        RT_DEBUG_LOG(RT_DEBUG_SLAB,
                     ("free a memory 0x%x and align to 0x%x, kup index %d\n",
                      (rt_ubase_t)ptr,
                      (rt_ubase_t)addr,
                      ((rt_ubase_t)(addr) - slab->heap_start) >> RT_MM_PAGE_BITS));
    }
#endif /* RT_DEBUG_SLAB */

    kup = btokup((rt_ubase_t)ptr & ~RT_MM_PAGE_MASK);
    /* release large allocation */
    if (kup->type == PAGE_TYPE_LARGE)
    {
        rt_ubase_t size;

        /* clear page counter */
        size = kup->size;
        kup->size = 0;
        /* mem stats */
        slab->parent.used -= size * RT_MM_PAGE_SIZE;

        RT_DEBUG_LOG(RT_DEBUG_SLAB,
                     ("free large memory block 0x%x, page count %d\n",
                      (rt_ubase_t)ptr, size));

        /* free this page */
        rt_slab_page_free(m, ptr, size);

        return;
    }

    /* zone case. get out zone. */
    z = (struct rt_slab_zone *)(((rt_ubase_t)ptr & ~RT_MM_PAGE_MASK) -
                                kup->size * RT_MM_PAGE_SIZE);
    RT_ASSERT(z->z_magic == ZALLOC_SLAB_MAGIC);

    chunk = (struct rt_slab_chunk *)ptr;
    chunk->c_next = z->z_freechunk;
    z->z_freechunk = chunk;
    /* mem stats */
    slab->parent.used -= z->z_chunksize;

    /*
     * Bump the number of free chunks.  If it becomes non-zero the zone
     * must be added back onto the appropriate list.
     */
    if (z->z_nfree++ == 0)
    {
        z->z_next = slab->zone_array[z->z_zoneindex];
        slab->zone_array[z->z_zoneindex] = z;
    }

    /*
     * If the zone becomes totally free, and there are other zones we
     * can allocate from, move this zone to the free-zones list.  Since
     * this code can be called from an IPI callback, do not try to mess
     * with kernel_map here.  Hysteresis will be performed at malloc() time.
     */
    if (z->z_nfree == z->z_nmax &&
        (z->z_next || slab->zone_array[z->z_zoneindex] != z))
    {
        struct rt_slab_zone **pz;

        RT_DEBUG_LOG(RT_DEBUG_SLAB, ("free zone 0x%x\n",
                                     (rt_ubase_t)z, z->z_zoneindex));

        /* remove zone from zone array list */
        for (pz = &slab->zone_array[z->z_zoneindex]; z != *pz; pz = &(*pz)->z_next)
            ;
        *pz = z->z_next;

        /* reset zone */
        z->z_magic = RT_UINT32_MAX;

        /* insert to free zone list */
        z->z_next = slab->zone_free;
        slab->zone_free = z;

        ++ slab->zone_free_cnt;

        /* release zone to page allocator */
        if (slab->zone_free_cnt > ZONE_RELEASE_THRESH)
        {
            register rt_uint32_t i;

            z               = slab->zone_free;
            slab->zone_free = z->z_next;

            -- slab->zone_free_cnt;

            /* set message usage */
            for (i = 0, kup = btokup(z); i < slab->zone_page_cnt; i ++)
            {
                kup->type = PAGE_TYPE_FREE;
                kup->size = 0;
                kup ++;
            }

            /* release pages */
            rt_slab_page_free(m, z, slab->zone_size / RT_MM_PAGE_SIZE);

            return;
        }
    }
}
memheap 管理算法
memheap管理算法适用于系统含有多个地址可不连续的内存堆。使用memheap内存管理可以简化系统存在多个内存堆时的使用:当系统中存在多个内存堆的时候,用户只需要在系统初始化时将多个所需的memheap初始化,并开启memheap功能就可以很方便地把多个memheap(地址可不连续)粘合起来用于系统的 heap 分配。首先,rt-thread的实现用rt_memheap来表示一个memheap,使用rt_memheap_item来表示堆中的一个节点或者说一个内存块。
除了包含基本信息外,rt_memheap当中主要包含着一个链接所有内存块的block_list和链接所有空闲内存块的free_list,而rt_memheap_item也主要围绕这两个结构,主要是指向前后两个块和前后两个空闲块。此外,rt_memheap还有用来处理并发的信号量。
/**
 * Header of one block in a memheap.  next/prev chain all blocks in address
 * order; next_free/prev_free chain only the free blocks.
 */
struct rt_memheap_item
{
    rt_uint32_t             magic;      /**< magic number for memheap */
    struct rt_memheap      *pool_ptr;   /**< point of pool */
    struct rt_memheap_item *next;       /**< next memheap item */
    struct rt_memheap_item *prev;       /**< prev memheap item */
    struct rt_memheap_item *next_free;  /**< next free memheap item */
    struct rt_memheap_item *prev_free;  /**< prev free memheap item */
#ifdef RT_USING_MEMTRACE
    rt_uint8_t owner_thread_name[4];    /**< owner thread name */
#endif /* RT_USING_MEMTRACE */
};
/
base structure of memory heap object
*/
struct rt_memheap
{
struct rt_object parent; / **< inherit from rt_object /
void start_addr; / < pool start address and size */
rt_size_t pool_size; / *< pool size /
rt_size_t available_size; / < available size */
rt_size_t max_used_size; / **< maximum allocated size /
struct rt_memheap_item block_list; / < used block list */
struct rt_memheap_item *free_list; / *< free block list /
struct rt_memheap_item free_header; / < free block list header */
struct rt_semaphore lock; / *< semaphore lock /
rt_bool_t locked; / parent) == rt_object_class_memheap);
/ align allocated size */
size = rt_align(size, rt_align_size);
if (size parent.name));
if (size available_size)
{
/ search on free list /
free_size = 0;
/ lock memheap /
if (heap->locked == rt_false)
{
result = rt_sem_take(&(heap->lock), rt_waiting_forever);
if (result != rt_eok)
{
rt_set_errno(result);
return rt_null;
}
}
/ get the first free memory block /
header_ptr = heap->free_list->next_free;
while (header_ptr != heap->free_list && free_size < size)
{
/ get current freed memory block size /
free_size = memitem_size(header_ptr);
if (free_size next_free;
}
}
/ determine if the memory is available. /
if (free_size >= size)
{
/ a block that satisfies the request has been found. /
/ determine if the block needs to be split. */
if (free_size >= (size + rt_memheap_size + rt_memheap_minialloc))
{
struct rt_memheap_item new_ptr;
/ split the block. */
new_ptr = (struct rt_memheap_item *)
(((rt_uint8_t )header_ptr) + size + rt_memheap_size);
rt_debug_log(rt_debug_memheap,
(split: block[0x%08x] nextm[0x%08x] prevm[0x%08x] to new[0x%08x]n,
header_ptr,
header_ptr->next,
header_ptr->prev,
new_ptr));
/ mark the new block as a memory block and freed. /
new_ptr->magic = (rt_memheap_magic | rt_memheap_freed);
/ put the pool pointer into the new block. /
new_ptr->pool_ptr = heap;
#ifdef rt_using_memtrace
rt_memset(new_ptr->owner_thread_name, ' ', sizeof(new_ptr->owner_thread_name));
#endif / rt_using_memtrace /
/ break down the block list /
new_ptr->prev = header_ptr;
new_ptr->next = header_ptr->next;
header_ptr->next->prev = new_ptr;
header_ptr->next = new_ptr;
/ remove header ptr from free list /
header_ptr->next_free->prev_free = header_ptr->prev_free;
header_ptr->prev_free->next_free = header_ptr->next_free;
header_ptr->next_free = rt_null;
header_ptr->prev_free = rt_null;
/ insert new_ptr to free list /
new_ptr->next_free = heap->free_list->next_free;
new_ptr->prev_free = heap->free_list;
heap->free_list->next_free->prev_free = new_ptr;
heap->free_list->next_free = new_ptr;
rt_debug_log(rt_debug_memheap, (new ptr: next_free 0x%08x, prev_free 0x%08xn,
new_ptr->next_free,
new_ptr->prev_free));
/ decrement the available byte count. /
heap->available_size = heap->available_size -
size -
rt_memheap_size;
if (heap->pool_size - heap->available_size > heap->max_used_size)
heap->max_used_size = heap->pool_size - heap->available_size;
}
else
{
/ decrement the entire free size from the available bytes count. /
heap->available_size = heap->available_size - free_size;
if (heap->pool_size - heap->available_size > heap->max_used_size)
heap->max_used_size = heap->pool_size - heap->available_size;
/ remove header_ptr from free list /
rt_debug_log(rt_debug_memheap,
(one block: block[0x%08x], next_free 0x%08x, prev_free 0x%08xn,
header_ptr,
header_ptr->next_free,
header_ptr->prev_free));
header_ptr->next_free->prev_free = header_ptr->prev_free;
header_ptr->prev_free->next_free = header_ptr->next_free;
header_ptr->next_free = rt_null;
header_ptr->prev_free = rt_null;
}
/ mark the allocated block as not available. /
header_ptr->magic = (rt_memheap_magic | rt_memheap_used);
#ifdef rt_using_memtrace
if (rt_thread_self())
rt_memcpy(header_ptr->owner_thread_name, rt_thread_self()->parent.name, sizeof(header_ptr->owner_thread_name));
else
rt_memcpy(header_ptr->owner_thread_name, none, sizeof(header_ptr->owner_thread_name));
#endif / rt_using_memtrace /
if (heap->locked == rt_false)
{
/ release lock /
rt_sem_release(&(heap->lock));
}
/ return a memory address to the caller. */
rt_debug_log(rt_debug_memheap,
(alloc mem: memory[0x%08x], heap[0x%08x], size: %dn,
(void *)((rt_uint8_t *)header_ptr + rt_memheap_size),
header_ptr,
size));
return (void *)((rt_uint8_t )header_ptr + rt_memheap_size);
}
if (heap->locked == rt_false)
{
/ release lock /
rt_sem_release(&(heap->lock));
}
}
rt_debug_log(rt_debug_memheap, (allocate memory: failedn));
/ return the completion status. */
return rt_null;
}
对于内存释放来说,主要操作如下:
首先会检查内存块的magic number,然后更新堆的信息
然后会判断前后两块内存块是否能够合并
如果能够合并则更新链表和内存块信息
如果没有和前一内存块进行合并,则需要重新放入free_list
遍历free_list,按照大小排序放入链表
void rt_memheap_free(void *ptr)
{
rt_err_t result;
struct rt_memheap *heap;
struct rt_memheap_item *header_ptr, new_ptr;
rt_bool_t insert_header;
/ null check /
if (ptr == rt_null) return;
/ set initial status as ok */
insert_header = rt_true;
new_ptr = rt_null;
header_ptr = (struct rt_memheap_item *)
((rt_uint8_t )ptr - rt_memheap_size);
rt_debug_log(rt_debug_memheap, (free memory: memory[0x%08x], block[0x%08x]n,
ptr, header_ptr));
/ check magic /
if (header_ptr->magic != (rt_memheap_magic | rt_memheap_used) ||
(header_ptr->next->magic & rt_memheap_mask) != rt_memheap_magic)
{
rt_debug_log(rt_debug_memheap, (bad magic:0x%08x @ memheapn,
header_ptr->magic));
rt_assert(header_ptr->magic == (rt_memheap_magic | rt_memheap_used));
/ check whether this block of memory has been over-written. /
rt_assert((header_ptr->next->magic & rt_memheap_mask) == rt_memheap_magic);
}
/ get pool ptr /
heap = header_ptr->pool_ptr;
rt_assert(heap);
rt_assert(rt_object_get_type(&heap->parent) == rt_object_class_memheap);
if (heap->locked == rt_false)
{
/ lock memheap /
result = rt_sem_take(&(heap->lock), rt_waiting_forever);
if (result != rt_eok)
{
rt_set_errno(result);
return ;
}
}
/ mark the memory as available. /
header_ptr->magic = (rt_memheap_magic | rt_memheap_freed);
/ adjust the available number of bytes. /
heap->available_size += memitem_size(header_ptr);
/ determine if the block can be merged with the previous neighbor. /
if (!rt_memheap_is_used(header_ptr->prev))
{
rt_debug_log(rt_debug_memheap, (merge: left node 0x%08xn,
header_ptr->prev));
/ adjust the available number of bytes. /
heap->available_size += rt_memheap_size;
/ yes, merge block with previous neighbor. /
(header_ptr->prev)->next = header_ptr->next;
(header_ptr->next)->prev = header_ptr->prev;
/ move header pointer to previous. /
header_ptr = header_ptr->prev;
/ don't insert header to free list /
insert_header = rt_false;
}
/ determine if the block can be merged with the next neighbor. /
if (!rt_memheap_is_used(header_ptr->next))
{
/ adjust the available number of bytes. /
heap->available_size += rt_memheap_size;
/ merge block with next neighbor. /
new_ptr = header_ptr->next;
rt_debug_log(rt_debug_memheap,
(merge: right node 0x%08x, next_free 0x%08x, prev_free 0x%08xn,
new_ptr, new_ptr->next_free, new_ptr->prev_free));
new_ptr->next->prev = header_ptr;
header_ptr->next = new_ptr->next;
/ remove new ptr from free list */
new_ptr->next_free->prev_free = new_ptr->prev_free;
new_ptr->prev_free->next_free = new_ptr->next_free;
}
if (insert_header)
{
struct rt_memheap_item n = heap->free_list->next_free;;
#if defined(rt_memheap_best_mode)
rt_size_t blk_size = memitem_size(header_ptr);
for (;n != heap->free_list; n = n->next_free)
{
rt_size_t m = memitem_size(n);
if (blk_size next_free = n;
header_ptr->prev_free = n->prev_free;
n->prev_free->next_free = header_ptr;
n->prev_free = header_ptr;
rt_debug_log(rt_debug_memheap,
(insert to free list: next_free 0x%08x, prev_free 0x%08xn,
header_ptr->next_free, header_ptr->prev_free));
}
#ifdef rt_using_memtrace
rt_memset(header_ptr->owner_thread_name, ' ', sizeof(header_ptr->owner_thread_name));
#endif / rt_using_memtrace /
if (heap->locked == rt_false)
{
/ release lock */
rt_sem_release(&(heap->lock));
}
}
上层抽象
因为rt-thread支持多种内存管理算法,所以必须有一个统一的上层抽象,其中主要由rtconfig.h中的宏来控制使用哪种算法
#define rt_page_max_order 11
#define rt_using_mempool
#define rt_using_small_mem
#define rt_using_memheap
#define rt_memheap_fast_mode
#define rt_using_small_mem_as_heap
#define rt_using_memtrace
#define rt_using_heap
最后在src/kservice.c当中将接口进行统一抽象,也就是统一为rt_malloc、rt_free和rt_realloc等等
#define _mem_init(_name, _start, _size)
system_heap = rt_smem_init(_name, _start, _size)
#define _mem_malloc(_size)
rt_smem_alloc(system_heap, _size)
#define _mem_realloc(_ptr, _newsize)
rt_smem_realloc(system_heap, _ptr, _newsize)
#define _mem_free(_ptr)
rt_smem_free(_ptr)
#define _mem_info(_total, _used, _max)
_smem_info(_total, _used, _max)
#elif defined(rt_using_memheap_as_heap)
static struct rt_memheap system_heap;
void *_memheap_alloc(struct rt_memheap *heap, rt_size_t size);
void _memheap_free(void *rmem);
void *_memheap_realloc(struct rt_memheap *heap, void *rmem, rt_size_t newsize);
#define _mem_init(_name, _start, _size)
rt_memheap_init(&system_heap, _name, _start, _size)
#define _mem_malloc(_size)
_memheap_alloc(&system_heap, _size)
#define _mem_realloc(_ptr, _newsize)
_memheap_realloc(&system_heap, _ptr, _newsize)
#define _mem_free(_ptr)
_memheap_free(_ptr)
#define _mem_info(_total, _used, _max)
rt_memheap_info(&system_heap, _total, _used, _max)
#elif defined(rt_using_slab_as_heap)
static rt_slab_t system_heap;
rt_inline void _slab_info(rt_size_t *total,
rt_size_t *used, rt_size_t *max_used)
{
if (total)
*total = system_heap->total;
if (used)
*used = system_heap->used;
if (max_used)
*max_used = system_heap->max;
}
#define _mem_init(_name, _start, _size)
system_heap = rt_slab_init(_name, _start, _size)
#define _mem_malloc(_size)
rt_slab_alloc(system_heap, _size)
#define _mem_realloc(_ptr, _newsize)
rt_slab_realloc(system_heap, _ptr, _newsize)
#define _mem_free(_ptr)
rt_slab_free(system_heap, _ptr)
#define _mem_info _slab_info
#else
#define _mem_init(...)
#define _mem_malloc(...) rt_null
#define _mem_realloc(...) rt_null
#define _mem_free(...)
#define _mem_info(...)
#endif
/**
 * Unified allocation entry point: delegates to whichever backend
 * (_mem_malloc) was selected at configuration time, under the heap lock.
 *
 * @param size requested size in bytes
 * @return pointer to the allocated memory, or RT_NULL on failure
 */
rt_weak void *rt_malloc(rt_size_t size)
{
    rt_base_t level;
    void *ptr;

    /* Enter critical zone */
    level = _heap_lock();
    /* allocate memory block from system heap */
    ptr = _mem_malloc(size);
    /* Exit critical zone */
    _heap_unlock(level);
    /* call 'rt_malloc' hook */
    RT_OBJECT_HOOK_CALL(rt_malloc_hook, (ptr, size));

    return ptr;
}
RTM_EXPORT(rt_malloc);
详解PLC和MCU单片机的区别
电咖首款高端车型ME7正式开启预订 定位为纯电动中型SUV
专注“工业牙齿”的这家企业如何冲刺行业老大宝座?
华为Mate 30系列两款入网,操作系统均为安卓 于9月19日发布
福耀玻璃以云计算为突破口 实现智能制造创造价值
RT-Thread内存管理算法源码阅读
荣耀9什么时候上市最新消息:华为荣耀9旗舰配置消息汇总,不会取消耳机孔!
空气尘埃粒子计数器在无尘生产车间的应用
SII推出最低功耗S-5844系列温度开关IC
PanopticNeRF-360:快速生成大量新视点全景分割图像!
薄膜倒装UVB发光二极管设计方案
小米移动电源3 45W/20000mAh高配版拆解:做工用料上佳
红外热成像测温系统可以从哪方面评估电梯安全?它具有哪些优势?
type-c接口的重要性
Razer推出生产力系列三款新产品 空客与 Eramet 签署协议
触发器输出波形又是如何的呢?
基于DWC_ether_qos的以太网驱动开发-LWIP的ARP模块介绍
基于RFID技术的智能医疗是未来趋势所在
温度测量仪原理_温度测量仪特点
载波聚合(CA)的概念和设计难点解析