add lab12 code

KAAAsS 2021-05-13 20:37:51 +08:00
parent 9e13404b75
commit 4282c20e93
Signed by: KAAAsS
GPG Key ID: D56625F3E671882F
10 changed files with 358 additions and 110 deletions

2
lab3/.gitignore vendored

@@ -14,3 +14,5 @@ build
*.out
*.pyc
chcore.out
cmake-build-debug

8
lab3/.idea/.gitignore generated vendored Normal file

@@ -0,0 +1,8 @@
# Default ignored files
/shelf/
/workspace.xml
# Datasource local storage ignored files
/dataSources/
/dataSources.local.xml
# Editor-based HTTP Client requests
/httpRequests/

1
lab3/.idea/.name generated Normal file

@@ -0,0 +1 @@
chos

4
lab3/.idea/misc.xml generated Normal file

@@ -0,0 +1,4 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="CMakeWorkspace" PROJECT_DIR="$PROJECT_DIR$" />
</project>

6
lab3/.idea/vcs.xml generated Normal file

@@ -0,0 +1,6 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="VcsDirectoryMappings">
    <mapping directory="$PROJECT_DIR$/.." vcs="Git" />
  </component>
</project>

View File

@@ -119,7 +119,25 @@ static int printk_write_num(char **out, long long i, int base, int sign,
    // store the digits in the buffer `print_buf`:
    // 1. the last position of this buffer must be '\0'
    // 2. the format is only decided by `base` and `letbase` here
    int len = 0;
    s = print_buf + 1;
    // produce the digits least-significant first
    while (u > 0) {
        t = u % base;
        u /= base;
        if (t <= 9)
            s[len++] = t + '0';
        else
            s[len++] = t - 10 + (letbase ? 'a' : 'A');
    }
    s[len] = '\0';
    // the digits came out reversed, so swap them in place
    char ch;
    for (int i = 0; i < len / 2; i++) {
        ch = s[i];
        s[i] = s[len - 1 - i];
        s[len - 1 - i] = ch;
    }

    if (neg) {
        if (width && (flags & PAD_ZERO)) {
            simple_outputchar(out, '-');
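
For reference, a minimal userspace sketch of the same convert-then-reverse idea; the helper name and signature here are made up for illustration and are not part of ChCore's printk:

#include <stdio.h>

/* Illustrative only: convert `u` to a string in `base`, mirroring the
 * produce-then-reverse loop above. */
static void u64_to_str(unsigned long long u, int base, int lowercase, char *buf)
{
    int len = 0;

    if (u == 0)
        buf[len++] = '0';
    while (u > 0) {
        int t = u % base;
        u /= base;
        buf[len++] = (t <= 9) ? (t + '0') : (t - 10 + (lowercase ? 'a' : 'A'));
    }
    buf[len] = '\0';
    /* digits were produced least-significant first, so reverse in place */
    for (int i = 0; i < len / 2; i++) {
        char ch = buf[i];
        buf[i] = buf[len - 1 - i];
        buf[len - 1 - i] = ch;
    }
}

int main(void)
{
    char buf[32];
    u64_to_str(255, 16, 1, buf);
    printf("%s\n", buf); /* prints "ff" */
    return 0;
}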

View File

@@ -11,68 +11,69 @@
 * The usable memory: [pool_start_addr, pool_start_addr + pool_mem_size).
 */
void init_buddy(struct phys_mem_pool *pool, struct page *start_page,
                vaddr_t start_addr, u64 page_num) {
    int order;
    int page_idx;
    struct page *page;

    /* Init the physical memory pool. */
    pool->pool_start_addr = start_addr;
    pool->page_metadata = start_page;
    pool->pool_mem_size = page_num * BUDDY_PAGE_SIZE;
    /* This field is for unit test only. */
    pool->pool_phys_page_num = page_num;

    /* Init the free lists */
    for (order = 0; order < BUDDY_MAX_ORDER; ++order) {
        pool->free_lists[order].nr_free = 0;
        init_list_head(&(pool->free_lists[order].free_list)); // every free list starts empty
    }

    /* Clear the page_metadata area. */
    memset((char *) start_page, 0, page_num * sizeof(struct page));

    /* Init the page_metadata area. */
    for (page_idx = 0; page_idx < page_num; ++page_idx) {
        page = start_page + page_idx;
        page->allocated = 1; // mark every page as allocated for now
        page->order = 0;
    }

    /* Put each physical memory page into the free lists. */
    for (page_idx = 0; page_idx < page_num; ++page_idx) {
        page = start_page + page_idx;
        buddy_free_pages(pool, page); // free each page in turn to populate the free lists
    }
}
static struct page *get_buddy_chunk(struct phys_mem_pool *pool,
                                    struct page *chunk) {
    u64 chunk_addr;
    u64 buddy_chunk_addr;
    int order;

    /* Get the address of the chunk. */
    chunk_addr = (u64) page_to_virt(pool, chunk);
    order = chunk->order;
    /*
     * Calculate the address of the buddy chunk according to the address
     * relationship between buddies.
     */
#define BUDDY_PAGE_SIZE_ORDER (12)
    buddy_chunk_addr = chunk_addr ^
                       (1UL << (order + BUDDY_PAGE_SIZE_ORDER));

    /* Check whether the buddy_chunk_addr belongs to pool. */
    if ((buddy_chunk_addr < pool->pool_start_addr) ||
        (buddy_chunk_addr >= (pool->pool_start_addr +
                              pool->pool_mem_size))) {
        return NULL;
    }

    return virt_to_page(pool, (void *) buddy_chunk_addr);
}
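
The XOR above works because buddies of order k differ only in bit (k + BUDDY_PAGE_SIZE_ORDER) of their address, assuming the pool start is aligned to the largest chunk size. A tiny standalone illustration:

#include <stdio.h>

#define BUDDY_PAGE_SIZE_ORDER 12

/* Address of the buddy of a chunk at `chunk_addr` of the given order
 * (addresses here are offsets from a suitably aligned pool start). */
static unsigned long buddy_of(unsigned long chunk_addr, int order)
{
    return chunk_addr ^ (1UL << (order + BUDDY_PAGE_SIZE_ORDER));
}

int main(void)
{
    printf("0x%lx\n", buddy_of(0x0000, 0)); /* 0x1000: the next 4 KiB page */
    printf("0x%lx\n", buddy_of(0x1000, 0)); /* 0x0000: and vice versa */
    printf("0x%lx\n", buddy_of(0x0000, 1)); /* 0x2000: order-1 chunks pair up 8 KiB apart */
    return 0;
}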
/*
@@ -81,104 +82,181 @@ static struct page *get_buddy_chunk(struct phys_mem_pool *pool,
 * pool @ physical memory structure reserved in the kernel
 * order @ order for origin page block
 * page @ split page
 *
 * Hints: don't forget to subtract the free page number for the corresponding free_list.
 * you can invoke split_page recursively until the given page can not be split into two
 * smaller sub-pages.
 */
static struct page *split_page(struct phys_mem_pool *pool, u64 order,
                               struct page *page) {
    // <lab2>
    struct page *buddy = NULL;

    // My understanding of this function: recursively split `page` down to `order`
    // and return the result; only unallocated chunks are touched along the way.

    // Recursion base case
    if (page->order == order) {
        return page;
    } else if (page->order < order) {
        return NULL;
    }

    // Split: remove the current chunk from its free list
    list_del(&page->node);
    pool->free_lists[page->order].nr_free--;
    // Demote the chunk by one order
    page->order--;
    // Find its buddy
    buddy = get_buddy_chunk(pool, page);
    buddy->order = page->order;
    // Put both halves on the free list of the lower order
    list_add(&page->node, &pool->free_lists[page->order].free_list);
    list_add(&buddy->node, &pool->free_lists[page->order].free_list);
    pool->free_lists[page->order].nr_free += 2;
    // Recurse until the requested order is reached
    return split_page(pool, order, page);
    // </lab2>
}
/*
 * buddy_get_pages: get free page from buddy system.
 * pool @ physical memory structure reserved in the kernel
 * order @ get the (1<<order) continuous pages from the buddy system
 *
 * Hints: Find the corresponding free_list which can allocate 1<<order
 * continuous pages and don't forget to split the list node after allocation
 */
struct page *buddy_get_pages(struct phys_mem_pool *pool, u64 order) {
    // <lab2>
    struct page *page = NULL, *free_page = NULL;

    // Find the lowest order that still has a free chunk
    u64 free_order = order;
    while (free_order < BUDDY_MAX_ORDER && pool->free_lists[free_order].nr_free == 0) {
        free_order++;
    }
    // Nothing is available right now
    if (free_order == BUDDY_MAX_ORDER) {
        return NULL;
    }
    // Take one chunk and split it down to the requested order
    free_page = (struct page *) pool->free_lists[free_order].free_list.next;
    page = split_page(pool, order, free_page);
    // Remove it from the free list and mark it allocated
    list_del(&page->node);
    pool->free_lists[page->order].nr_free--;
    page->allocated = true;
    return page;
    // </lab2>
}
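
A sketch of how a caller might use this, assuming an already initialised pool; the demo function itself is hypothetical:

/* Illustrative only: grab one 4 KiB page and one 8-page chunk, then return them. */
static void buddy_alloc_demo(struct phys_mem_pool *pool)
{
    struct page *single = buddy_get_pages(pool, 0); /* 1 page */
    struct page *chunk = buddy_get_pages(pool, 3);  /* 2^3 = 8 contiguous pages */

    if (single == NULL || chunk == NULL)
        return; /* pool exhausted */

    kdebug("single: order %d, va 0x%lx\n", single->order, (u64) page_to_virt(pool, single));
    kdebug("chunk:  order %d, va 0x%lx\n", chunk->order, (u64) page_to_virt(pool, chunk));

    buddy_free_pages(pool, chunk);
    buddy_free_pages(pool, single);
}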
/*
 * merge_page: merge the given page with the buddy page
 * pool @ physical memory structure reserved in the kernel
 * page @ merged page (attempted)
 *
 * Hints: you can invoke the merge_page recursively until
 * there is no corresponding buddy page. get_buddy_chunk
 * is helpful in this function.
 */
static struct page *merge_page(struct phys_mem_pool *pool, struct page *page) {
    // <lab2>
    struct page *try_merge = NULL, *buddy = get_buddy_chunk(pool, page);

    // Cannot merge if the buddy does not exist, is allocated, or has a different order
    if (buddy == NULL || buddy->allocated || page->order != buddy->order) {
        return NULL;
    }
    // Already at the maximum order: do not merge
    if (page->order >= BUDDY_MAX_ORDER - 1) {
        return NULL;
    }

    // Both halves are free, so merge once:
    // remove the two chunks from their free list
    list_del(&page->node);
    list_del(&buddy->node);
    pool->free_lists[page->order].nr_free -= 2;
    // keep the lower-addressed half as the merged chunk
    if (buddy < page) {
        page = buddy;
    }
    // promote it by one order
    page->order++;
    // and link it into the free list one level up
    list_add(&page->node, &pool->free_lists[page->order].free_list);
    pool->free_lists[page->order].nr_free++;

    // Try to keep merging recursively
    try_merge = merge_page(pool, page);
    if (try_merge != NULL) {
        return try_merge;
    }
    return page;
    // </lab2>
}
/*
 * buddy_free_pages: give back the pages to buddy system
 * pool @ physical memory structure reserved in the kernel
 * page @ free page structure
 *
 * Hints: you can invoke merge_page.
 */
void buddy_free_pages(struct phys_mem_pool *pool, struct page *page) {
    // <lab2>
    if (!page->allocated)
        return;
    // Mark the page as free
    page->allocated = false;
    // Put it back on the free list of its order
    list_add(&page->node, &pool->free_lists[page->order].free_list);
    pool->free_lists[page->order].nr_free++;
    // Try to merge it with its buddy
    merge_page(pool, page);
    // </lab2>
}
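
Taken together, splitting on allocation and merging on free keep the total free size an invariant across an alloc/free round trip. A test-style sketch, assuming an initialised pool and a BUG_ON-style assert macro:

/* Illustrative only: after an alloc/free round trip the buddies must have
 * merged back, so the amount of free memory is unchanged. */
static void buddy_merge_demo(struct phys_mem_pool *pool)
{
    u64 before = get_free_mem_size_from_buddy(pool);
    struct page *page = buddy_get_pages(pool, 0);

    if (page == NULL)
        return;
    buddy_free_pages(pool, page);

    BUG_ON(get_free_mem_size_from_buddy(pool) != before);
}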
void *page_to_virt(struct phys_mem_pool *pool, struct page *page) {
    u64 addr;

    /* page_idx * BUDDY_PAGE_SIZE + start_addr */
    addr = (page - pool->page_metadata) * BUDDY_PAGE_SIZE +
           pool->pool_start_addr;
    return (void *) addr;
}

struct page *virt_to_page(struct phys_mem_pool *pool, void *addr) {
    struct page *page;

    page = pool->page_metadata +
           (((u64) addr - pool->pool_start_addr) / BUDDY_PAGE_SIZE);
    return page;
}

u64 get_free_mem_size_from_buddy(struct phys_mem_pool *pool) {
    int order;
    struct free_list *list;
    u64 current_order_size;
    u64 total_size = 0;

    for (order = 0; order < BUDDY_MAX_ORDER; order++) {
        /* 2^order * 4K */
        current_order_size = BUDDY_PAGE_SIZE * (1 << order);
        list = pool->free_lists + order;
        total_size += list->nr_free * current_order_size;

        /* debug : print info about current order */
        kdebug("buddy memory chunk order: %d, size: 0x%lx, num: %d\n",
               order, current_order_size, list->nr_free);
    }
    return total_size;
}

View File

@@ -16,6 +16,7 @@
#include "buddy.h"
#include "slab.h"
#include "page_table.h"

extern unsigned long *img_end;
@@ -51,6 +52,35 @@ unsigned long get_ttbr1(void)
void map_kernel_space(vaddr_t va, paddr_t pa, size_t len)
{
    // <lab2>
#define IS_VALID (1UL << 0)
#define UXN (0x1UL << 54)
#define ACCESSED (0x1UL << 10)
#define INNER_SHARABLE (0x3UL << 8)
#define SIZE_2M (2UL * 1024 * 1024)
#define PAGE_SHIFT (12)
#define GET_PADDR_IN_PTE(entry) \
    (((u64) entry.table.next_table_addr) << PAGE_SHIFT)

    paddr_t addr_l0, addr_l1, addr_l2;
    pte_t pte_l0, pte_l1;
    u64 *table_l2;

    // Locate the L2 page table.
    // For simplicity, assume len is below 1 GiB and the range stays within one L1 entry.
    addr_l0 = get_ttbr1();
    pte_l0.pte = *((u64 *) phys_to_virt(addr_l0) + GET_L0_INDEX(va));
    addr_l1 = GET_PADDR_IN_PTE(pte_l0);
    pte_l1.pte = *((u64 *) phys_to_virt(addr_l1) + GET_L1_INDEX(va));
    addr_l2 = GET_PADDR_IN_PTE(pte_l1);
    table_l2 = (u64 *) phys_to_virt(addr_l2);

    // Fill the L2 entries: one 2 MiB block descriptor per entry.
    u32 start_entry_idx = GET_L2_INDEX(va);
    u32 end_entry_idx = GET_L2_INDEX((va + len));
    for (u32 idx = start_entry_idx; idx < end_entry_idx; ++idx) {
        // the physical offset is relative to the first mapped entry
        table_l2[idx] = (pa + (idx - start_entry_idx) * SIZE_2M) | UXN | ACCESSED |
                        INNER_SHARABLE | NORMAL_MEMORY | IS_VALID;
    }
    // </lab2>
}
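
For reference, the bits OR-ed together above form an AArch64 L2 block descriptor: bits 1:0 = 0b01 mark a valid block, AttrIndx (via NORMAL_MEMORY) selects the MAIR entry, bits 9:8 set inner-shareable, bit 10 is the access flag, and bit 54 is UXN. A hedged sketch of the same composition, treating NORMAL_MEMORY's value (which comes from ChCore's headers) as an opaque parameter:

#include <stdint.h>

#define BLK_UXN            (1UL << 54) /* not executable at EL0 */
#define BLK_ACCESSED       (1UL << 10) /* access flag, avoids AF faults */
#define BLK_INNER_SHARABLE (3UL << 8)  /* SH = inner shareable */
#define BLK_IS_VALID       (1UL << 0)  /* bits 1:0 = 0b01: block descriptor */
#define BLK_SIZE_2M        (2UL * 1024 * 1024)

/* Illustrative only: the n-th 2 MiB block descriptor for a region whose
 * first block maps physical address `pa`; `attr_idx_bits` stands in for
 * ChCore's NORMAL_MEMORY (the MAIR AttrIndx field, bits 4:2). */
static uint64_t l2_block_desc(uint64_t pa, uint32_t n, uint64_t attr_idx_bits)
{
    return (pa + n * BLK_SIZE_2M) | BLK_UXN | BLK_ACCESSED |
           BLK_INNER_SHARABLE | attr_idx_bits | BLK_IS_VALID;
}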

View File

@@ -162,7 +162,24 @@ static int get_next_ptp(ptp_t * cur_ptp, u32 level, vaddr_t va,
int query_in_pgtbl(vaddr_t * pgtbl, vaddr_t va, paddr_t * pa, pte_t ** entry)
{
    // <lab2>
    ptp_t *cur_ptp = (ptp_t *) pgtbl, *next_ptp;
    int page_type;

    // Walk levels L0 through L3
    for (u32 cur_level = 0; cur_level < 4; cur_level++) {
        page_type = get_next_ptp(cur_ptp, cur_level, va, &next_ptp, entry, false);
        if (page_type < 0) {
            // No mapping for this va
            return page_type;
        } else if (page_type == BLOCK_PTP) {
            // A block entry terminates the walk
            break;
        } else {
            // A table entry: descend one level
            cur_ptp = next_ptp;
        }
    }
    *pa = GET_PADDR_IN_PTE((*entry)) | (va & PAGE_MASK);
    // </lab2>
    return 0;
}
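
A minimal usage sketch; the helper is hypothetical and assumes a 4 KiB mapping is installed for the queried address:

/* Illustrative only: print the physical translation of one virtual address. */
static void dump_translation(vaddr_t *pgtbl, vaddr_t va)
{
    paddr_t pa;
    pte_t *entry;

    if (query_in_pgtbl(pgtbl, va, &pa, &entry) == 0)
        kdebug("va 0x%lx -> pa 0x%lx\n", va, pa);
    else
        kdebug("va 0x%lx is not mapped\n", va);
}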
@@ -186,7 +203,40 @@ int map_range_in_pgtbl(vaddr_t * pgtbl, vaddr_t va, paddr_t pa,
                       size_t len, vmr_prop_t flags)
{
    // <lab2>
    vaddr_t va_start = va, va_end = va + len, va_cur;
    ptp_t *cur_ptp, *next_ptp;
    int page_type;
    u32 cur_level;
    pte_t *entry;
    u64 cur_pfn;

    // Iterate over every page in [va, va + len)
    cur_pfn = pa >> PAGE_SHIFT;
    va_start &= ~PAGE_MASK;
    va_end &= ~PAGE_MASK;
    if (va_end < va + len) {
        va_end++; // the range ends in a partial page; nudge va_end so the loop includes it
    }
    for (va_cur = va_start; va_cur < va_end; va_cur += PAGE_SIZE) {
        // Walk (and allocate, if needed) the page-table levels for this page
        cur_ptp = (ptp_t *) pgtbl;
        for (cur_level = 0; cur_level < 4; cur_level++) {
            page_type = get_next_ptp(cur_ptp, cur_level, va_cur, &next_ptp, &entry, true);
            if (page_type < 0) {
                // Walk failed
                return page_type;
            } else if (page_type == BLOCK_PTP) {
                // Only 4 KiB page mappings are supported; bail out on a block entry
                return -ENOMAPPING;
            } else {
                // Table entry: descend one level
                cur_ptp = next_ptp;
            }
        }
        // Fill in the L3 entry
        set_pte_flags(entry, flags, USER_PTE);
        entry->l3_page.pfn = cur_pfn++;
    }
    flush_tlb();
    // </lab2>
    return 0;
}
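
The va_end adjustment above is easy to misread: rounding the end down and then adding a single byte makes the strict `<` in the loop still include the final partial page. A standalone sketch of the same page-count logic, with the macros redefined locally for illustration:

#include <stdio.h>

#define PAGE_SIZE 0x1000UL
#define PAGE_MASK (PAGE_SIZE - 1)

/* How many pages does [va, va + len) touch? */
static unsigned long pages_touched(unsigned long va, unsigned long len)
{
    unsigned long va_start = va & ~PAGE_MASK;
    unsigned long va_end = (va + len) & ~PAGE_MASK;
    unsigned long count = 0;

    if (va_end < va + len)
        va_end++; /* range ends mid-page: nudge the bound past that page's start */
    for (unsigned long cur = va_start; cur < va_end; cur += PAGE_SIZE)
        count++;
    return count;
}

int main(void)
{
    printf("%lu\n", pages_touched(0x800, 0x1000)); /* 2: straddles a page boundary */
    printf("%lu\n", pages_touched(0x0, 0x1000));   /* 1: exactly one page */
    return 0;
}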
@@ -207,7 +257,40 @@ int map_range_in_pgtbl(vaddr_t * pgtbl, vaddr_t va, paddr_t pa,
int unmap_range_in_pgtbl(vaddr_t * pgtbl, vaddr_t va, size_t len)
{
    // <lab2>
    vaddr_t va_start = va, va_end = va + len, va_cur;
    ptp_t *cur_ptp, *next_ptp;
    int page_type;
    u32 cur_level;
    pte_t *entry;

    // Iterate over every page in [va, va + len)
    va_start &= ~PAGE_MASK;
    va_end &= ~PAGE_MASK;
    if (va_end < va + len) {
        va_end++; // the range ends in a partial page; nudge va_end so the loop includes it
    }
    for (va_cur = va_start; va_cur < va_end; va_cur += PAGE_SIZE) {
        // Walk the page-table levels for this page (without allocating)
        cur_ptp = (ptp_t *) pgtbl;
        for (cur_level = 0; cur_level < 4; cur_level++) {
            page_type = get_next_ptp(cur_ptp, cur_level, va_cur, &next_ptp, &entry, false);
            if (page_type < 0) {
                // This page is not mapped; skip it
                break;
            } else if (page_type == BLOCK_PTP) {
                // Only 4 KiB page mappings are supported; bail out on a block entry
                return -ENOMAPPING;
            } else {
                // Table entry: descend one level
                cur_ptp = next_ptp;
            }
        }
        if (cur_level != 4) {
            continue;
        }
        // Clear the L3 entry
        entry->pte = 0;
    }
    flush_tlb();
    // </lab2>
    return 0;
}
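
A test-style round trip of the three routines above. This is a sketch only: BUG_ON and the VMR_READ flag are assumed from ChCore's headers, and pgtbl must be a zeroed L0 table:

/* Illustrative only: map four pages, check one translation, unmap, re-check. */
static void pgtbl_round_trip(vaddr_t *pgtbl)
{
    vaddr_t va = 0x1000000;    /* arbitrary page-aligned VA */
    paddr_t pa_in = 0x2000000; /* arbitrary page-aligned PA */
    paddr_t pa_out;
    pte_t *entry;

    BUG_ON(map_range_in_pgtbl(pgtbl, va, pa_in, 4 * PAGE_SIZE, VMR_READ) != 0);

    BUG_ON(query_in_pgtbl(pgtbl, va + PAGE_SIZE, &pa_out, &entry) != 0);
    BUG_ON(pa_out != pa_in + PAGE_SIZE);

    BUG_ON(unmap_range_in_pgtbl(pgtbl, va, 4 * PAGE_SIZE) != 0);
    /* after unmapping, the walk should fail with a negative error */
    BUG_ON(query_in_pgtbl(pgtbl, va, &pa_out, &entry) >= 0);
}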

View File

@@ -15,6 +15,9 @@
#include <common/printk.h>
#include <common/types.h>
#include <common/machine.h>

extern char kernel_stack[PLAT_CPU_NUM][KERNEL_STACK_SIZE];

static inline __attribute__ ((always_inline))
u64 read_fp()
@@ -30,6 +33,21 @@ int stack_backtrace()
printk("Stack backtrace:\n"); printk("Stack backtrace:\n");
// Your code here. // Your code here.
u64 *fp, *preFp;
fp = read_fp();
// until stack end
while (true) {
preFp = *fp;
// entry frame
if (preFp == 0)
break;
printk("LR %lx FP %lx Args", *(preFp + 1), preFp);
for (int i = 0; i < 5; i++) {
printk(" %lx", *(fp + 2 + i));
}
printk("\n");
fp = preFp;
}
return 0; return 0;
} }
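
The loop relies on the AAPCS64 frame-record chain: x29 points at two consecutive words holding the caller's saved x29 and the saved x30 (return address). A sketch of the layout being assumed; the struct is illustrative, not a ChCore type:

/* Illustrative only: the shape of an AArch64 frame record walked above. */
struct frame_record {
    struct frame_record *prev_fp; /* saved x29; 0 marks the entry frame */
    unsigned long saved_lr;       /* saved x30: return address into the caller */
    /* the code above additionally reads the five words just past the
     * record (fp + 2 .. fp + 6) as a best-effort dump of arguments */
};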