Linux Kernel Virtual Memory Management Algorithms


Operating Systems Lab 10

张焓

1. Experiment Title:

Virtual Memory Management Algorithms

2. Objectives:

1. Analyze the design principles of the algorithms;
2. Write pseudocode for the algorithms;
3. Analyze the performance of the algorithms, for example their execution efficiency.

3. Method

Study the principles of the virtual memory management algorithms by reading the code in the Linux kernel that manages virtual memory (chiefly the mm/ directory, e.g. mm/mmap.c and mm/memory.c, together with the data structures under include/linux/).

4. Procedure

(1) The mm_struct structure describes the user virtual address space of a process. Each task structure (task_struct, in include/linux/sched.h) holds a pointer to its mm_struct; the structure itself is defined in include/linux/mm_types.h (the listing below is from a 4.x kernel).
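Before the listing, a minimal sketch of how kernel code typically reaches this structure (the helper dump_current_vmas below was written for this report and is not a kernel function): current points at the running task, current->mm at its address space, and mm->mmap at the head of its VMA list.

#include <linux/kernel.h>	/* printk */
#include <linux/sched.h>	/* current, struct task_struct */
#include <linux/mm_types.h>	/* struct mm_struct, struct vm_area_struct */

/* Illustrative helper, not kernel code: print every VMA of the
 * current process. current->mm is NULL for kernel threads, so that
 * case is checked first. */
static void dump_current_vmas(void)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	if (!mm)
		return;			/* kernel thread: no user address space */

	down_read(&mm->mmap_sem);	/* the VMA list is read under mmap_sem */
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		printk(KERN_INFO "vma %08lx-%08lx\n", vma->vm_start, vma->vm_end);
	up_read(&mm->mmap_sem);
}

The fields this sketch touches (mmap, mmap_sem) appear in the (abridged) definition below.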

struct mm_struct {
	struct vm_area_struct *mmap;		/* list of VMAs */
	struct rb_root mm_rb;
	u32 vmacache_seqnum;			/* per-thread vmacache */
#ifdef CONFIG_MMU
	unsigned long (*get_unmapped_area) (struct file *filp,
				unsigned long addr, unsigned long len,
				unsigned long pgoff, unsigned long flags);
#endif
	unsigned long mmap_base;		/* base of mmap area */
	unsigned long mmap_legacy_base;		/* base of mmap area in bottom-up allocations */
	unsigned long task_size;		/* size of task vm space */
	unsigned long highest_vm_end;		/* highest vma end address */
	pgd_t * pgd;
	atomic_t mm_users;			/* How many users with user space? */
	atomic_t mm_count;			/* How many references to "struct mm_struct" (users count as 1) */
	atomic_long_t nr_ptes;			/* PTE page table pages */
#if CONFIG_PGTABLE_LEVELS > 2
	atomic_long_t nr_pmds;			/* PMD page table pages */
#endif
	int map_count;				/* number of VMAs */

	spinlock_t page_table_lock;		/* Protects page tables and some counters */
	struct rw_semaphore mmap_sem;

	struct list_head mmlist;		/* List of maybe swapped mm's.  These are globally
						 * strung together off init_mm.mmlist, and are protected
						 * by mmlist_lock
						 */

	unsigned long hiwater_rss;	/* High-watermark of RSS usage */
	unsigned long hiwater_vm;	/* High-water virtual memory usage */

	unsigned long total_vm;		/* Total pages mapped */
	unsigned long locked_vm;	/* Pages that have PG_mlocked set */
	unsigned long pinned_vm;	/* Refcount permanently increased */
	unsigned long data_vm;		/* VM_WRITE & ~VM_SHARED & ~VM_STACK */
	unsigned long exec_vm;		/* VM_EXEC & ~VM_WRITE & ~VM_STACK */
	unsigned long stack_vm;		/* VM_STACK */
	unsigned long def_flags;
	unsigned long start_code, end_code, start_data, end_data;
	unsigned long start_brk, brk, start_stack;
	unsigned long arg_start, arg_end, env_start, env_end;

	unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */

	/*
	 * Special counters, in some configurations protected by the
	 * page_table_lock, in other configurations by being atomic.
	 */
	struct mm_rss_stat rss_stat;

	struct linux_binfmt *binfmt;

	cpumask_var_t cpu_vm_mask_var;

	/* Architecture-specific MM context */
	mm_context_t context;

	unsigned long flags;		/* Must use atomic bitops to access the bits */

	struct core_state *core_state;	/* coredumping support */
#ifdef CONFIG_AIO
	spinlock_t			ioctx_lock;
	struct kioctx_table __rcu	*ioctx_table;
#endif
#ifdef CONFIG_MEMCG
	/*
	 * "owner" points to a task that is regarded as the canonical
	 * user/owner of this mm. All of the following must be true in
	 * order for it to be changed:
	 *
	 * current == mm->owner
	 * current->mm != mm
	 * new_owner->mm == mm
	 * new_owner->alloc_lock is held
	 */
	struct task_struct __rcu *owner;
#endif

	/* store ref to file /proc/<pid>/exe symlink points to */
	struct file __rcu *exe_file;
#ifdef CONFIG_MMU_NOTIFIER
	struct mmu_notifier_mm *mmu_notifier_mm;
#endif
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
	pgtable_t pmd_huge_pte;		/* protected by page_table_lock */
#endif
#ifdef CONFIG_CPUMASK_OFFSTACK
	struct cpumask cpumask_allocation;
#endif
#ifdef CONFIG_NUMA_BALANCING
	/*
	 * numa_next_scan is the next time that the PTEs will be marked
	 * pte_numa. NUMA hinting faults will gather statistics and migrate
	 * pages to new nodes if necessary.
	 */
	unsigned long numa_next_scan;

	/* Restart point for scanning and setting pte_numa */
	unsigned long numa_scan_offset;

	/* numa_scan_seq prevents two threads setting pte_numa */
	int numa_scan_seq;
#endif
#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
	/*
	 * An operation with batched TLB flushing is going on. Anything that
	 * can move process memory needs to flush the TLB when moving a