什麼是內存描述符
什麼是進程?進程是正在執行的程序,是可執行程序的動態實例,它是一個承擔分配系統資源的實體。當操作系統創建進程時,會爲進程創建相應的內存空間,這個內存空間稱爲進程的地址空間,每一個進程的地址空間都是獨立的!
當一個進程有了進程的地址空間,那麼進程的地址空間就必須被相應的工具所管理,這個工具被稱爲內存描述符mm_struct,它被定義在/usr/src/kernels/include/linux/mm_types.h中。在Linux操作系統中是這樣管理進程的地址空間的,如下圖所示:
其中task_struct被稱爲進程描述符,當操作系統創建進程時也會一起創建它,task_struct中含有進程的相關信息,操作系統就是通過管理task_struct來達到管理進程的;
關於task_struct大家可以看看下面這篇文章:
http://blog.csdn.net/bit_clearoff/article/details/54292300
在這裏,task_struct有一個mm指針指向mm_struct!
源碼分析
下面我們就來解析一下mm_struct的源碼:
/*
 * Memory descriptor: describes a process's entire virtual address space.
 * One mm_struct per user address space; task_struct->mm points here.
 * (Copied from an older kernel's include/linux/mm_types.h.)
 */
struct mm_struct {
/* head of the singly linked list of VMAs in this address space */
struct vm_area_struct * mmap; /* list of VMAs */
/* root of the red-black tree used to look VMAs up by address */
struct rb_root mm_rb;
/* cache of the most recently accessed VMA */
struct vm_area_struct * mmap_cache; /* last find_vma result */
/* hook that picks a free address range for a new mapping */
unsigned long (*get_unmapped_area) (struct file *filp,
unsigned long addr, unsigned long len,
unsigned long pgoff, unsigned long flags);
void (*unmap_area) (struct mm_struct *mm, unsigned long addr);
unsigned long mmap_base; /* base of mmap area */
unsigned long task_size; /* size of task vm space */
unsigned long cached_hole_size; /* if non-zero, the largest hole below free_area_cache */
unsigned long free_area_cache; /* first hole of size cached_hole_size or larger */
/* pointer to this process's page global directory */
pgd_t * pgd;
/* number of users sharing this user address space */
atomic_t mm_users; /* How many users with user space? */
/* reference count: how many pointers refer to this mm_struct */
atomic_t mm_count; /* How many references to "struct mm_struct" (users count as 1) */
/* number of VMAs in this address space */
int map_count; /* number of VMAs */
/* NOTE(review): presumably serializes map/unmap operations on this mm — see kernel docs */
struct rw_semaphore mmap_sem;
/* protects the task's page tables */
spinlock_t page_table_lock; /* Protects page tables and some counters */
/* links this mm into the global list of (possibly swapped) mm's */
struct list_head mmlist; /* List of maybe swapped mm's. These are globally strung
* together off init_mm.mmlist, and are protected
* by mmlist_lock
*/
/* Special counters, in some configurations protected by the
* page_table_lock, in other configurations by being atomic.
*/
mm_counter_t _file_rss;
mm_counter_t _anon_rss;
unsigned long hiwater_rss; /* High-watermark of RSS usage */
unsigned long hiwater_vm; /* High-water virtual memory usage */
unsigned long total_vm, locked_vm, shared_vm, exec_vm;
unsigned long stack_vm, reserved_vm, def_flags, nr_ptes;
/* start_code: start address of the code (text) segment */
/* end_code:   end address of the code segment */
/* start_data: start address of the data segment */
/* end_data:   end address of the data segment */
unsigned long start_code, end_code, start_data, end_data;
/* start_brk:   start address of the heap */
/* brk:         current end address of the heap */
/* start_stack: start address of the stack */
unsigned long start_brk, brk, start_stack;
/* arg_start, arg_end: start/end addresses of the argument area */
/* env_start, env_end: start/end addresses of the environment area */
unsigned long arg_start, arg_end, env_start, env_end;
unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */
struct linux_binfmt *binfmt;
cpumask_t cpu_vm_mask;
/* Architecture-specific MM context */
mm_context_t context;
/* Swap token stuff */
/*
* Last value of global fault stamp as seen by this process.
* In other words, this value gives an indication of how long
* it has been since this task got the token.
* Look at mm/thrash.c
*/
unsigned int faultstamp;
unsigned int token_priority;
unsigned int last_interval;
unsigned long flags; /* Must use atomic bitops to access the bits */
struct core_state *core_state; /* coredumping support */
#ifdef CONFIG_AIO
spinlock_t ioctx_lock;
struct hlist_head ioctx_list;
#endif
#ifdef CONFIG_MM_OWNER
/*
* "owner" points to a task that is regarded as the canonical
* user/owner of this mm. All of the following must be true in
* order for it to be changed:
*
* current == mm->owner
* current->mm != mm
* new_owner->mm == mm
* new_owner->alloc_lock is held
*/
struct task_struct *owner;
#endif
#ifdef CONFIG_PROC_FS
/* store ref to file /proc/<pid>/exe symlink points to */
struct file *exe_file;
unsigned long num_exe_file_vmas;
#endif
#ifdef CONFIG_MMU_NOTIFIER
struct mmu_notifier_mm *mmu_notifier_mm;
#endif
};
mm_struct對進程地址空間中的虛擬空間的組織方式有下面這兩種:
當虛擬區間較少時,採用單鏈表即mmap管理這些虛擬區間;
當虛擬區間較多時,採用紅黑樹管理這些虛擬區間。
並且mm_struct還把最近用到的虛擬區間放到高速緩存,由mm_struct中的mmap_cache所管理。
另外:我們還要了解一個進程中的進程地址空間由兩個結構體所管理,它們分別是mm_struct和vm_area_struct。其中mm_struct描述了整個虛擬地址空間,vm_area_struct描述了虛擬地址空間中的一個區間;每個進程只有一個mm_struct結構,但可以有多個vm_area_struct結構。
下面是vm_area_struct結構的定義:
/*
 * Describes one contiguous region (VMA) of a process's virtual address
 * space: [vm_start, vm_end). VMAs are chained on mm->mmap and indexed by
 * the red-black tree rooted at mm->mm_rb.
 */
struct vm_area_struct {
struct mm_struct * vm_mm; /* The address space we belong to. */
unsigned long vm_start; /* Our start address within vm_mm. */
unsigned long vm_end; /* The first byte after our end address
within vm_mm. */
/* linked list of VM areas per task, sorted by address */
struct vm_area_struct *vm_next;
pgprot_t vm_page_prot; /* Access permissions of this VMA. */
unsigned long vm_flags; /* Flags, see mm.h. */
/* node in the mm's red-black tree of VMAs */
struct rb_node vm_rb;
/*
* For areas with an address space and backing store,
* linkage into the address_space->i_mmap prio tree, or
* linkage to the list of like vmas hanging off its node, or
* linkage of vma in the address_space->i_mmap_nonlinear list.
*/
union {
struct {
struct list_head list;
void *parent; /* aligns with prio_tree_node parent */
struct vm_area_struct *head;
} vm_set;
struct raw_prio_tree_node prio_tree_node;
} shared;
/*
* A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
* list, after a COW of one of the file pages. A MAP_SHARED vma
* can only be in the i_mmap tree. An anonymous MAP_PRIVATE, stack
* or brk vma (with NULL file) can only be in an anon_vma list.
*/
struct list_head anon_vma_node; /* Serialized by anon_vma->lock */
struct anon_vma *anon_vma; /* Serialized by page_table_lock */
/* Function pointers to deal with this struct. */
const struct vm_operations_struct *vm_ops;
/* Information about our backing store: */
unsigned long vm_pgoff; /* Offset (within vm_file) in PAGE_SIZE
units, *not* PAGE_CACHE_SIZE */
struct file * vm_file; /* File we map to (can be NULL). */
void * vm_private_data; /* was vm_pte (shared mem) */
unsigned long vm_truncate_count;/* truncate_count or restart_addr */
#ifndef CONFIG_MMU
struct vm_region *vm_region; /* NOMMU mapping region */
#endif
#ifdef CONFIG_NUMA
struct mempolicy *vm_policy; /* NUMA policy for the VMA */
#endif
};