Memory Management in Depth: memblock

Introduction to memblock


The memblock memory manager is used during the Linux kernel boot stage, from kernel entry until the generic memory-management subsystem is fully initialized, roughly until free_initmem(). An allocator used this early does not need to be sophisticated: memblock keeps its state in static arrays and allocates with a reverse (top-down) first-fit policy.

memblock Data Structures


memblock

struct memblock {
    bool bottom_up;  /* is bottom up direction? */
    phys_addr_t current_limit;
    struct memblock_type memory;
    struct memblock_type reserved;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
    struct memblock_type physmem;
#endif
};

struct memblock is the core data structure of the memblock allocator.

bottom_up: the direction in which allocations are performed

current_limit: the upper bound for allocations

memblock tracks three types of memory: memory, reserved and physmem

memory: the set of memory available to the kernel

reserved: the set of memory that has already been allocated or reserved

physmem: all physical memory present, regardless of restrictions (only available with CONFIG_HAVE_MEMBLOCK_PHYS_MAP)
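From memblock's point of view, "free" memory is whatever is in memory but not in reserved. A minimal sketch of walking those free ranges, assuming the for_each_free_mem_range() iterator from include/linux/memblock.h in its 4.x-era form (the function name dump_free_ranges is hypothetical):

static void __init dump_free_ranges(void)
{
    phys_addr_t start, end;
    u64 i;

    /* walk every range that is in memblock.memory but not in memblock.reserved */
    for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end, NULL)
        pr_info("free range: [%pa-%pa)\n", &start, &end);
}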

memblock_type

struct memblock_type {
    unsigned long cnt;  /* number of regions */
    unsigned long max;  /* size of the allocated array */
    phys_addr_t total_size; /* size of all regions */
    struct memblock_region *regions;
    char *name;
};

memblock_type describes one class of regions within the current memblock: regions points to the array of regions, cnt is how many entries are in use, max is the capacity of the array, and total_size is the combined size of all regions.

memblock_region

struct memblock_region {
    phys_addr_t base;
    phys_addr_t size;
    unsigned long flags;
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
    int nid;
#endif
};

memblock_region describes a single region by its physical base address and size.
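The regions array of each type is kept sorted by base address, so a simple walk yields the memory map in order. A small sketch using the for_each_memblock() helper from include/linux/memblock.h (the printing function is hypothetical):

static void __init dump_memory_regions(void)
{
    struct memblock_region *reg;

    /* iterate over memblock.memory.regions[0..cnt-1], sorted by base */
    for_each_memblock(memory, reg)
        pr_info("memory region: base=%pa size=%pa flags=%#lx\n",
                &reg->base, &reg->size, reg->flags);
}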

The flags values are defined in include/linux/memblock.h:

/* Definition of memblock flags. */
enum {
    MEMBLOCK_NONE       = 0x0,  /* No special request */
    MEMBLOCK_HOTPLUG    = 0x1,  /* hotpluggable region */
    MEMBLOCK_MIRROR     = 0x2,  /* mirrored region */
    MEMBLOCK_NOMAP      = 0x4,  /* don't add to kernel direct mapping */
};
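For example, a firmware-owned range can be kept out of the kernel's direct mapping by marking it MEMBLOCK_NOMAP. A hedged sketch, assuming the memblock_mark_nomap() and memblock_is_map_memory() helpers are available in this kernel and using a hypothetical address range:

/* hypothetical firmware-reserved range that must stay unmapped */
#define FW_BASE 0x8f000000UL
#define FW_SIZE 0x00100000UL

static void __init hide_firmware_range(void)
{
    memblock_mark_nomap(FW_BASE, FW_SIZE);

    /* memblock_is_map_memory() should now report this range as not mapped */
    WARN_ON(memblock_is_map_memory(FW_BASE));
}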

memblock API


Find a free area of the requested size within a given node and address range:

phys_addr_t memblock_find_in_range_node(phys_addr_t size, phys_addr_t align,
                    phys_addr_t start, phys_addr_t end,
                    int nid, ulong flags);
phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end,
                   phys_addr_t size, phys_addr_t align);

Add or remove regions of the memory type:

int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid);
int memblock_add(phys_addr_t base, phys_addr_t size);
int memblock_remove(phys_addr_t base, phys_addr_t size);

Allocate and free memory through memblock, together with the remaining helpers (reservation, trimming, flag marking):

phys_addr_t memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid);
phys_addr_t memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid);
phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align);
int memblock_free(phys_addr_t base, phys_addr_t size);
void memblock_allow_resize(void);
int memblock_reserve(phys_addr_t base, phys_addr_t size);
void memblock_trim_memory(phys_addr_t align);
bool memblock_overlaps_region(struct memblock_type *type,
                  phys_addr_t base, phys_addr_t size);
int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_mark_mirror(phys_addr_t base, phys_addr_t size);
int memblock_mark_nomap(phys_addr_t base, phys_addr_t size);
int memblock_clear_nomap(phys_addr_t base, phys_addr_t size);
ulong choose_memblock_flags(void);
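A minimal sketch of how these calls are typically combined during early boot; the addresses and the helper name early_mem_setup_example are hypothetical, and the prototypes are the ones listed above (plus the usual linker symbols _stext/_end):

static void __init early_mem_setup_example(void)
{
    phys_addr_t buf;

    /* make a DRAM bank known to memblock */
    memblock_add(0x80000000, SZ_512M);

    /* keep the allocator away from the kernel image */
    memblock_reserve(__pa(_stext), _end - _stext);

    /* grab one page before the buddy allocator exists */
    buf = memblock_alloc(PAGE_SIZE, PAGE_SIZE);

    /* give it back if it is no longer needed */
    memblock_free(buf, PAGE_SIZE);
}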

memblock Implementation


memblock_init

#define INIT_MEMBLOCK_REGIONS   128
#define INIT_PHYSMEM_REGIONS    4

static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS] __initdata_memblock;
#endif

struct memblock memblock __initdata_memblock = {
    .memory.regions     = memblock_memory_init_regions,
    .memory.cnt     = 1,    /* empty dummy entry */
    .memory.max     = INIT_MEMBLOCK_REGIONS,
    .memory.name        = "memory",

    .reserved.regions   = memblock_reserved_init_regions,
    .reserved.cnt       = 1,    /* empty dummy entry */
    .reserved.max       = INIT_MEMBLOCK_REGIONS,
    .reserved.name      = "reserved",

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
    .physmem.regions    = memblock_physmem_init_regions,
    .physmem.cnt        = 1,    /* empty dummy entry */
    .physmem.max        = INIT_PHYSMEM_REGIONS,
    .physmem.name       = "physmem",
#endif

    .bottom_up      = false,
    .current_limit      = MEMBLOCK_ALLOC_ANYWHERE,
};

The memory and reserved types are each statically backed by an array of 128 (INIT_MEMBLOCK_REGIONS) memblock_regions.

bottom_up = false: by default allocations are satisfied from the top down (high addresses first).

At this point memblock does not yet know about any usable memory. The first usable memory is added in setup_machine_fdt(), which parses the physical memory layout from the device tree and calls memblock_add() to register it, roughly as sketched below.
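On device-tree based systems the path from the /memory node into memblock looks roughly like this simplified sketch, modelled on early_init_dt_add_memory_arch() in drivers/of/fdt.c (the real function performs additional alignment and range checks, and details vary by kernel version and architecture):

/* called for each bank parsed from the /memory node's "reg" property */
void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size)
{
    /* sanity checks on base/size omitted here */
    memblock_add(base, size);
}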

current_limit is set later during boot, in sanity_check_meminfo().

By this point in the boot sequence memblock can already be used to allocate memory, for example to build the page tables.
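For instance, the early ARM page-table code allocates its tables straight from memblock. A simplified sketch in the spirit of early_alloc() in arch/arm/mm/mmu.c (the function name used here is hypothetical):

static void * __init early_pgtable_alloc(unsigned long sz)
{
    /* physical pages come from memblock, well before the buddy allocator exists */
    void *ptr = __va(memblock_alloc(sz, sz));

    memset(ptr, 0, sz);     /* page tables must start out zeroed */
    return ptr;
}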

memblock_add

int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
{
    phys_addr_t end = base + size - 1;

    memblock_dbg("memblock_add: [%pa-%pa] %pF\n",
             &base, &end, (void *)_RET_IP_);

    return memblock_add_range(&memblock.memory, base, size, MAX_NUMNODES, 0);
}

int __init_memblock memblock_add_range(struct memblock_type *type,
                phys_addr_t base, phys_addr_t size,
                int nid, unsigned long flags)
{
    bool insert = false;
    phys_addr_t obase = base;
    phys_addr_t end = base + memblock_cap_size(base, &size);
    int idx, nr_new;
    struct memblock_region *rgn;

    if (!size)
        return 0;

    /* the very first addition of usable memory to this memblock_type */
    /* special case for empty array */
    if (type->regions[0].size == 0) {
        WARN_ON(type->cnt != 1 || type->total_size);
        type->regions[0].base = base;
        type->regions[0].size = size;
        type->regions[0].flags = flags;
        memblock_set_region_node(&type->regions[0], nid);
        type->total_size = size;
        return 0;
    }
repeat:
    /*
     * Every range to be added is processed in two passes: the first pass
     * only counts how many new regions are needed (and whether the regions
     * array must be grown), the second pass actually inserts them.
     */
    base = obase;
    nr_new = 0;

    for_each_memblock_type(type, rgn) {
        phys_addr_t rbase = rgn->base;
        phys_addr_t rend = rbase + rgn->size;

        if (rbase >= end)
            break;
        /* skip regions that end below @base to find the insertion point */
        if (rend <= base)
            continue;

        /* insert the part of the new range that does not overlap @rgn */
        if (rbase > base) {
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
            WARN_ON(nid != memblock_get_region_node(rgn));
#endif
            WARN_ON(flags != rgn->flags);
            nr_new++;
            if (insert)
                memblock_insert_region(type, idx++, base,
                               rbase - base, nid,
                               flags);
        }
        /* area below @rend is dealt with, forget about it */
        base = min(rend, end);
    }

    /* insert the remaining part, i.e. the tail of the new range */
    if (base < end) {
        nr_new++;
        if (insert)
            memblock_insert_region(type, idx, base, end - base,
                           nid, flags);
    }

    /* nothing new to insert: the range was already fully covered */
    if (!nr_new)
        return 0;

    /*
     * First pass: grow the regions array if needed, then repeat.
     * Second pass: the regions have been inserted, merge adjacent ones.
     */
    if (!insert) {
        while (type->cnt + nr_new > type->max)
            if (memblock_double_array(type, obase, size) < 0)
                return -ENOMEM;
        insert = true;
        goto repeat;
    } else {
        /* merge regions that have become adjacent and compatible */
        memblock_merge_regions(type);
        return 0;
    }
}
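The merge step at the end means that back-to-back additions with compatible flags and node end up as a single region. A small illustration with hypothetical addresses:

memblock_add(0x80000000, SZ_16M);   /* memory: [0x80000000, 0x81000000) */
memblock_add(0x81000000, SZ_16M);   /* merged: [0x80000000, 0x82000000) */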

memblock_alloc

static phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
                    phys_addr_t align, phys_addr_t start,
                    phys_addr_t end, int nid)
{
    phys_addr_t found;

    if (!align)
        align = SMP_CACHE_BYTES;

    /* look for a suitable free area within the memory type regions */
    found = memblock_find_in_range_node(size, align, start, end, nid);
    /* record the found range in the reserved type, marking it as allocated */
    if (found && !memblock_reserve(found, size)) {
        /*
         * The min_count is set to 0 so that memblock allocations are
         * never reported as leaks.
         */
        /* inform kmemleak about this allocation so it is tracked */
        kmemleak_alloc(__va(found), size, 0, 0);
        return found;
    }
    return 0;
}

memblock_alloc() ultimately ends up in memblock_alloc_range_nid(), which performs the actual allocation: find a free range within memory, then reserve it.

memblock_find_in_range_node

phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
                    phys_addr_t align, phys_addr_t start,
                    phys_addr_t end, int nid)
{
    phys_addr_t kernel_end, ret;

    /* pump up @end */
    if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
        end = memblock.current_limit;

    /* the first page is never handed out */
    start = max_t(phys_addr_t, start, PAGE_SIZE);
    end = max(start, end);
    kernel_end = __pa_symbol(_end);

    /* honour the requested allocation direction */
    if (memblock_bottom_up() && end > kernel_end) {
        phys_addr_t bottom_up_start;

        /* make sure we will allocate above the kernel */
        bottom_up_start = max(start, kernel_end);

        /* ok, try bottom-up allocation first */
        ret = __memblock_find_range_bottom_up(bottom_up_start, end,
                              size, align, nid);
        if (ret)
            return ret;
    }

    /* by default fall back to a top-down search for free memory */
    return __memblock_find_range_top_down(start, end, size, align, nid);
}
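The top-down search walks the free ranges from high to low addresses and returns the highest aligned candidate that fits, which is the reverse first-fit policy mentioned in the introduction. A simplified sketch of __memblock_find_range_top_down() (the real helper in mm/memblock.c also handles per-node and flag filtering; the function name here is hypothetical):

static phys_addr_t __init find_top_down_sketch(phys_addr_t start, phys_addr_t end,
                                               phys_addr_t size, phys_addr_t align)
{
    phys_addr_t this_start, this_end, cand;
    u64 i;

    /* iterate free ranges (memory minus reserved) from high to low */
    for_each_free_mem_range_reverse(i, NUMA_NO_NODE, MEMBLOCK_NONE,
                                    &this_start, &this_end, NULL) {
        this_start = clamp(this_start, start, end);
        this_end = clamp(this_end, start, end);

        if (this_end < size)
            continue;

        /* highest aligned address inside this range that still fits */
        cand = round_down(this_end - size, align);
        if (cand >= this_start)
            return cand;
    }

    return 0;   /* nothing large enough was found */
}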

memblock_reserve

static int __init_memblock memblock_reserve_region(phys_addr_t base,
                           phys_addr_t size,
                           int nid,
                           unsigned long flags)
{
    struct memblock_type *_rgn = &memblock.reserved;

    memblock_dbg("memblock_reserve: [%#016llx-%#016llx] flags %#02lx %pF\n",
             (unsigned long long)base,
             (unsigned long long)base + size - 1,
             flags, (void *)_RET_IP_);

    return memblock_add_range(_rgn, base, size, nid, flags);
}

Like memblock_add(), memblock_reserve() ends up calling memblock_add_range(); the only difference is that the target type is memblock.reserved instead of memblock.memory.
