merge lab3 to lab4

This commit is contained in:
KAAAsS 2021-05-16 18:58:22 +08:00
parent c84b991273
commit 63ce2d4a49
Signed by: KAAAsS
GPG Key ID: D56625F3E671882F
20 changed files with 522 additions and 136 deletions

lab4/.gitignore vendored
View File

@ -14,3 +14,5 @@ build
*.out *.out
*.pyc *.pyc
chcore.out chcore.out
cmake-build-debug

View File

@ -2,7 +2,7 @@ cmake_minimum_required (VERSION 3.14)
set(CMAKE_VERBOSE_MAKEFILE on) set(CMAKE_VERBOSE_MAKEFILE on)
set(CMAKE_BUILD_TYPE "Release") # "Release" or "Debug" set(CMAKE_BUILD_TYPE "Debug") # "Release" or "Debug"
set(CHCORE_PLAT "raspi3") set(CHCORE_PLAT "raspi3")
set(CHCORE_ARCH "aarch64") set(CHCORE_ARCH "aarch64")

View File

@ -119,7 +119,25 @@ static int printk_write_num(char **out, long long i, int base, int sign,
// store the digits in the buffer `print_buf`: // store the digits in the buffer `print_buf`:
// 1. the last position of this buffer must be '\0' // 1. the last position of this buffer must be '\0'
// 2. the format is only decided by `base` and `letbase` here // 2. the format is only decided by `base` and `letbase` here
int len = 0;
s = print_buf + 1;
while (u > 0) {
t = u % base;
u /= base;
if (t <= 9)
s[len++] = t + '0';
else
s[len++] = t - 10 + (letbase ? 'a': 'A');
}
s[len] = '\0';
// reverse the digits in print_buf (they were produced least-significant first)
char ch;
for (int i = 0; i < len / 2; i++) {
ch = s[i];
s[i] = s[len - 1 - i];
s[len - 1 - i] = ch;
}
if (neg) { if (neg) {
if (width && (flags & PAD_ZERO)) { if (width && (flags & PAD_ZERO)) {
simple_outputchar(out, '-'); simple_outputchar(out, '-');

View File

@ -38,6 +38,7 @@ void exception_init_per_cpu(void)
* Lab3: Your code here * Lab3: Your code here
* Setup the exception vector with the asm function written in exception.S * Setup the exception vector with the asm function written in exception.S
*/ */
set_exception_vector();
disable_irq(); disable_irq();
} }
@ -49,7 +50,7 @@ void exception_init(void)
void handle_entry_c(int type, u64 esr, u64 address) void handle_entry_c(int type, u64 esr, u64 address)
{ {
/** /**
* Lab4 * Lab4
* Acquire the big kernel lock, if the exception is not from kernel * Acquire the big kernel lock, if the exception is not from kernel
*/ */
@ -67,6 +68,15 @@ void handle_entry_c(int type, u64 esr, u64 address)
* Handle exceptions as required in the lab document. Checking exception codes in * Handle exceptions as required in the lab document. Checking exception codes in
* esr.h may help. * esr.h may help.
*/ */
case ESR_EL1_EC_UNKNOWN:
// Unknown instruction
kinfo("%s", UNKNOWN);
sys_exit(-ESUPPORT);
break;
case ESR_EL1_EC_DABT_LEL:
case ESR_EL1_EC_DABT_CEL:
do_page_fault(esr, address);
break;
default: default:
kdebug("Unsupported Exception ESR %lx\n", esr); kdebug("Unsupported Exception ESR %lx\n", esr);
break; break;
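
One way the Lab4 step above could be filled in, as a sketch only: it assumes the lab's big-kernel-lock helper lock_kernel() and that exception type codes at or above SYNC_EL0_64 denote exceptions taken from user mode (EL0).

	if (type >= SYNC_EL0_64) {
		/* the exception did not come from the kernel: take the big kernel lock */
		lock_kernel();
	}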

View File

@ -86,8 +86,8 @@
.endm .endm
/** /**
* Lab4 * Lab4
* unlock the big kernel lock before returning to the user mode * unlock the big kernel lock before returning to the user mode
*/ */
.macro exception_return .macro exception_return
@ -131,6 +131,30 @@
.align 11 .align 11
EXPORT(el1_vector) EXPORT(el1_vector)
// ELx SP_EL0
exception_entry sync_el1t
exception_entry irq_el1t
exception_entry fiq_el1t
exception_entry error_el1t
// ELx SP_ELx
exception_entry sync_el1h
exception_entry irq_el1h
exception_entry fiq_el1h
exception_entry error_el1h
// EL0 AArch64
exception_entry sync_el0_64
exception_entry irq_el0_64
exception_entry fiq_el0_64
exception_entry error_el0_64
// EL0 AArch32
exception_entry sync_el0_32
exception_entry irq_el0_32
exception_entry fiq_el0_32
exception_entry error_el0_32
sync_el1t: sync_el1t:
handle_entry 1, SYNC_EL1t handle_entry 1, SYNC_EL1t
@ -167,9 +191,10 @@ sync_el0_64:
exception_return exception_return
el0_syscall: el0_syscall:
/* Lab4 /* Lab4
* Acquire the big kernel lock for syscall * Acquire the big kernel lock for syscall
*/ */
//
sub sp, sp, #16 * 8 sub sp, sp, #16 * 8
stp x0, x1, [sp, #16 * 0] stp x0, x1, [sp, #16 * 0]
stp x2, x3, [sp, #16 * 1] stp x2, x3, [sp, #16 * 1]
@ -192,6 +217,7 @@ el0_syscall:
ldp x14, x15, [sp, #16 * 7] ldp x14, x15, [sp, #16 * 7]
add sp, sp, #16 * 8 add sp, sp, #16 * 8
// dispatch through the syscall table
adr x27, syscall_table // syscall table in x27 adr x27, syscall_table // syscall table in x27
uxtw x16, w8 // syscall number in x16 uxtw x16, w8 // syscall number in x16
ldr x16, [x27, x16, lsl #3] // find the syscall entry ldr x16, [x27, x16, lsl #3] // find the syscall entry
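
For the two Lab4 comments in this file (take the lock on the el0_syscall path, release it in exception_return), the usual shape is a plain call into the C lock helpers. A sketch, assuming lock_kernel/unlock_kernel exist and relying on the stp/ldp block already present to preserve the syscall arguments across the call:

	/* in el0_syscall, between the stp and ldp blocks shown above */
	bl	lock_kernel
	/* in exception_return, before the general-purpose registers are restored */
	bl	unlock_kernel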

View File

@ -69,6 +69,7 @@ int handle_trans_fault(struct vmspace *vmspace, vaddr_t fault_addr)
struct pmobject *pmo; struct pmobject *pmo;
paddr_t pa; paddr_t pa;
u64 offset; u64 offset;
int ret;
/* /*
* Lab3: your code here * Lab3: your code here
@ -86,6 +87,28 @@ int handle_trans_fault(struct vmspace *vmspace, vaddr_t fault_addr)
* are recorded in a radix tree for easy management. Such code * are recorded in a radix tree for easy management. Such code
* has been omitted in our lab for simplification. * has been omitted in our lab for simplification.
*/ */
vmr = find_vmr_for_va(vmspace, fault_addr);
if (vmr == NULL) {
return -ENOMAPPING;
}
pmo = vmr->pmo;
if (pmo->type != PMO_ANONYM) {
return -ENOMAPPING;
}
// Allocate a physical page to back the anonymous pmo
void *kva = kmalloc(PAGE_SIZE);
if (kva == NULL) {
return -ENOMAPPING;
}
pa = virt_to_phys(kva);
// commit_page_to_pmo(pmo, ..., pa) should record the page here, but is omitted in this lab
// Map the faulting page
ret = map_range_in_pgtbl(vmspace->pgtbl, ROUND_DOWN(fault_addr, PAGE_SIZE), pa, PAGE_SIZE, vmr->perm);
if (ret != 0) {
return -ENOMAPPING;
}
return 0; return 0;
} }

View File

@ -90,9 +90,9 @@ void main(void *addr)
BUG("No given TEST!"); BUG("No given TEST!");
#endif #endif
/** /**
* Where the primary CPU first returns to the user mode * Where the primary CPU first returns to the user mode
* Leave the scheduler to do its job * Leave the scheduler to do its job
*/ */
sched(); sched();
@ -107,7 +107,7 @@ void secondary_start(void)
kinfo("AP %u is activated!\n", smp_get_cpu_id()); kinfo("AP %u is activated!\n", smp_get_cpu_id());
exception_init_per_cpu(); exception_init_per_cpu();
/** /**
* Lab4 * Lab4
* Inform the BSP at last to start cpu one by one * Inform the BSP at last to start cpu one by one
* Hints: use cpu_status * Hints: use cpu_status
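
A minimal sketch of the step this hint asks for, assuming the per-CPU state array is the cpu_status[] named in the hint and that it has a "running" value (called cpu_run here, which is an assumption):

	/* mark this AP as up so the BSP can release the next core */
	cpu_status[smp_get_cpu_id()] = cpu_run;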

View File

@ -11,68 +11,69 @@
* The usable memory: [pool_start_addr, pool_start_addr + pool_mem_size). * The usable memory: [pool_start_addr, pool_start_addr + pool_mem_size).
*/ */
void init_buddy(struct phys_mem_pool *pool, struct page *start_page, void init_buddy(struct phys_mem_pool *pool, struct page *start_page,
vaddr_t start_addr, u64 page_num) vaddr_t start_addr, u64 page_num) {
{ int order;
int order; int page_idx;
int page_idx; struct page *page;
struct page *page;
/* Init the physical memory pool. */ /* Init the physical memory pool. */
pool->pool_start_addr = start_addr; pool->pool_start_addr = start_addr;
pool->page_metadata = start_page; pool->page_metadata = start_page;
pool->pool_mem_size = page_num * BUDDY_PAGE_SIZE; pool->pool_mem_size = page_num * BUDDY_PAGE_SIZE;
/* This field is for unit test only. */ /* This field is for unit test only. */
pool->pool_phys_page_num = page_num; pool->pool_phys_page_num = page_num;
/* Init the free lists */ /* Init the free lists */
for (order = 0; order < BUDDY_MAX_ORDER; ++order) { for (order = 0; order < BUDDY_MAX_ORDER; ++order) {
pool->free_lists[order].nr_free = 0; pool->free_lists[order].nr_free = 0;
init_list_head(&(pool->free_lists[order].free_list)); init_list_head(&(pool->free_lists[order].free_list)); // initialize each free list as an empty list
} }
/* Clear the page_metadata area. */ /* Clear the page_metadata area. */
memset((char *)start_page, 0, page_num * sizeof(struct page)); memset((char *) start_page, 0, page_num * sizeof(struct page));
/* Init the page_metadata area. */ /* Init the page_metadata area. */
for (page_idx = 0; page_idx < page_num; ++page_idx) { for (page_idx = 0; page_idx < page_num; ++page_idx) {
page = start_page + page_idx; page = start_page + page_idx;
page->allocated = 1; page->allocated = 1; // every page starts out marked as allocated
page->order = 0; page->order = 0;
} }
/* Put each physical memory page into the free lists. */ /* Put each physical memory page into the free lists. */
for (page_idx = 0; page_idx < page_num; ++page_idx) { for (page_idx = 0; page_idx < page_num; ++page_idx) {
page = start_page + page_idx; page = start_page + page_idx;
buddy_free_pages(pool, page); buddy_free_pages(pool, page); // free the pages one by one to populate the free lists
} }
} }
/*
*
*/
static struct page *get_buddy_chunk(struct phys_mem_pool *pool, static struct page *get_buddy_chunk(struct phys_mem_pool *pool,
struct page *chunk) struct page *chunk) {
{ u64 chunk_addr;
u64 chunk_addr; u64 buddy_chunk_addr;
u64 buddy_chunk_addr; int order;
int order;
/* Get the address of the chunk. */ /* Get the address of the chunk. */
chunk_addr = (u64) page_to_virt(pool, chunk); chunk_addr = (u64) page_to_virt(pool, chunk);
order = chunk->order; order = chunk->order;
/* /*
* Calculate the address of the buddy chunk according to the address * Calculate the address of the buddy chunk according to the address
* relationship between buddies. * relationship between buddies.
*/ */
#define BUDDY_PAGE_SIZE_ORDER (12) #define BUDDY_PAGE_SIZE_ORDER (12)
buddy_chunk_addr = chunk_addr ^ buddy_chunk_addr = chunk_addr ^
(1UL << (order + BUDDY_PAGE_SIZE_ORDER)); (1UL << (order + BUDDY_PAGE_SIZE_ORDER));
/* Check whether the buddy_chunk_addr belongs to pool. */ /* Check whether the buddy_chunk_addr belongs to pool. */
if ((buddy_chunk_addr < pool->pool_start_addr) || if ((buddy_chunk_addr < pool->pool_start_addr) ||
(buddy_chunk_addr >= (pool->pool_start_addr + (buddy_chunk_addr >= (pool->pool_start_addr +
pool->pool_mem_size))) { pool->pool_mem_size))) {
return NULL; return NULL;
} }
return virt_to_page(pool, (void *)buddy_chunk_addr); return virt_to_page(pool, (void *) buddy_chunk_addr);
} }
/* /*
@ -81,104 +82,181 @@ static struct page *get_buddy_chunk(struct phys_mem_pool *pool,
* pool @ physical memory structure reserved in the kernel * pool @ physical memory structure reserved in the kernel
* order @ order for origin page block * order @ order for origin page block
* page @ split page * page @ split page
* *
* Hints: don't forget to subtract the free page number for the corresponding free_list. * you can invoke split_page recursively until the given page cannot be split into two
* you can invoke split_page recursively until the given page can not be splitted into two * you can invoke split_page recursively until the given page can not be splitted into two
* smaller sub-pages. * smaller sub-pages.
*/ */
static struct page *split_page(struct phys_mem_pool *pool, u64 order, static struct page *split_page(struct phys_mem_pool *pool, u64 order,
struct page *page) struct page *page) {
{ // <lab2>
// <lab2> struct page *buddy = NULL;
struct page *split_page = NULL;
return split_page; // What this function does: recursively split the given chunk down to the requested order and return it; only free (unallocated) chunks are modified along the way.
// </lab2>
// base case of the recursion
if (page->order == order) {
return page;
} else if (page->order < order) {
return NULL;
}
// split one level
// remove the current chunk from its free list
list_del(&page->node);
pool->free_lists[page->order].nr_free--;
// demote the chunk by one order
page->order--;
// locate the buddy of the demoted chunk
buddy = get_buddy_chunk(pool, page);
buddy->order = page->order;
// put both halves on the lower-order free list
list_add(&page->node, &pool->free_lists[page->order].free_list);
list_add(&buddy->node, &pool->free_lists[page->order].free_list);
pool->free_lists[page->order].nr_free += 2;
// recurse until the requested order is reached
return split_page(pool, order, page);
// </lab2>
} }
/* /*
* buddy_get_pages: get free page from buddy system. * buddy_get_pages: get free page from buddy system.
* pool @ physical memory structure reserved in the kernel * pool @ physical memory structure reserved in the kernel
* order @ get the (1<<order) continuous pages from the buddy system * order @ get the (1<<order) continuous pages from the buddy system
* *
* Hints: Find the corresonding free_list which can allocate 1<<order * Hints: Find the corresponding free_list which can allocate 1<<order
* continuous pages and don't forget to split the list node after allocation * continuous pages and don't forget to split the list node after allocation
*/ */
struct page *buddy_get_pages(struct phys_mem_pool *pool, u64 order) struct page *buddy_get_pages(struct phys_mem_pool *pool, u64 order) {
{ // <lab2>
// <lab2> struct page *page = NULL, *free_page = NULL;
struct page *page = NULL;
return page; // find the lowest order that still has a free chunk
// </lab2> u64 free_order = order;
while (free_order < BUDDY_MAX_ORDER && pool->free_lists[free_order].nr_free == 0) {
free_order++;
}
// nothing large enough is currently free
if (free_order == BUDDY_MAX_ORDER) {
return NULL;
}
// take one free chunk and split it down to the requested order
free_page = (struct page *) pool->free_lists[free_order].free_list.next;
page = split_page(pool, order, free_page);
// take it off the free list and mark it as allocated
list_del(&page->node);
pool->free_lists[page->order].nr_free--;
page->allocated = true;
return page;
// </lab2>
} }
/* /*
* merge_page: merge the given page with the buddy page * merge_page: merge the given page with the buddy page
* pool @ physical memory structure reserved in the kernel * pool @ physical memory structure reserved in the kernel
* page @ merged page (attempted) * page @ merged page (attempted)
* *
* Hints: you can invoke the merge_page recursively until * Hints: you can invoke the merge_page recursively until
* there is not corresponding buddy page. get_buddy_chunk * there is not corresponding buddy page. get_buddy_chunk
* is helpful in this function. * is helpful in this function.
*/ */
static struct page *merge_page(struct phys_mem_pool *pool, struct page *page) static struct page *merge_page(struct phys_mem_pool *pool, struct page *page) {
{ // <lab2>
// <lab2>
struct page *merge_page = NULL; struct page *try_merge = NULL, *buddy = get_buddy_chunk(pool, page);
return merge_page;
// </lab2> // 不存在伙伴、伙伴已分配、order 不同则无法合并
if (buddy == NULL || buddy->allocated || page->order != buddy->order) {
return NULL;
}
// already at the maximum order, do not merge any further
if (page->order >= BUDDY_MAX_ORDER - 1) {
return NULL;
}
// both chunks are free, so one merge step is possible:
// remove both of them from their free list
list_del(&page->node);
list_del(&buddy->node);
pool->free_lists[page->order].nr_free -= 2;
// keep the lower-addressed chunk as the head of the merged block
if (buddy < page) {
page = buddy;
}
// promote the merged chunk by one order
page->order++;
// link it into the next-higher free list
list_add(&page->node, &pool->free_lists[page->order].free_list);
pool->free_lists[page->order].nr_free++;
// try to merge further up
try_merge = merge_page(pool, page);
if (try_merge != NULL) {
return try_merge;
}
return page;
// </lab2>
} }
/* /*
* buddy_free_pages: give back the pages to buddy system * buddy_free_pages: give back the pages to buddy system
* pool @ physical memory structure reserved in the kernel * pool @ physical memory structure reserved in the kernel
* page @ free page structure * page @ free page structure
* *
* Hints: you can invoke merge_page. * Hints: you can invoke merge_page.
*/ */
void buddy_free_pages(struct phys_mem_pool *pool, struct page *page) void buddy_free_pages(struct phys_mem_pool *pool, struct page *page) {
{ // <lab2>
// <lab2>
// </lab2> if (!page->allocated)
return;
// mark the page as free
page->allocated = false;
// put it back on its free list
list_add(&page->node, &pool->free_lists[page->order].free_list);
pool->free_lists[page->order].nr_free++;
// try to merge it with its buddy
merge_page(pool, page);
// </lab2>
} }
void *page_to_virt(struct phys_mem_pool *pool, struct page *page) void *page_to_virt(struct phys_mem_pool *pool, struct page *page) {
{ u64 addr;
u64 addr;
/* page_idx * BUDDY_PAGE_SIZE + start_addr */ /* page_idx * BUDDY_PAGE_SIZE + start_addr */
addr = (page - pool->page_metadata) * BUDDY_PAGE_SIZE + addr = (page - pool->page_metadata) * BUDDY_PAGE_SIZE +
pool->pool_start_addr; pool->pool_start_addr;
return (void *)addr; return (void *) addr;
} }
struct page *virt_to_page(struct phys_mem_pool *pool, void *addr) struct page *virt_to_page(struct phys_mem_pool *pool, void *addr) {
{ struct page *page;
struct page *page;
page = pool->page_metadata + page = pool->page_metadata +
(((u64) addr - pool->pool_start_addr) / BUDDY_PAGE_SIZE); (((u64) addr - pool->pool_start_addr) / BUDDY_PAGE_SIZE);
return page; return page;
} }
u64 get_free_mem_size_from_buddy(struct phys_mem_pool * pool) u64 get_free_mem_size_from_buddy(struct phys_mem_pool *pool) {
{ int order;
int order; struct free_list *list;
struct free_list *list; u64 current_order_size;
u64 current_order_size; u64 total_size = 0;
u64 total_size = 0;
for (order = 0; order < BUDDY_MAX_ORDER; order++) { for (order = 0; order < BUDDY_MAX_ORDER; order++) {
/* 2^order * 4K */ /* 2^order * 4K */
current_order_size = BUDDY_PAGE_SIZE * (1 << order); current_order_size = BUDDY_PAGE_SIZE * (1 << order);
list = pool->free_lists + order; list = pool->free_lists + order;
total_size += list->nr_free * current_order_size; total_size += list->nr_free * current_order_size;
/* debug : print info about current order */ /* debug : print info about current order */
kdebug("buddy memory chunk order: %d, size: 0x%lx, num: %d\n", kdebug("buddy memory chunk order: %d, size: 0x%lx, num: %d\n",
order, current_order_size, list->nr_free); order, current_order_size, list->nr_free);
} }
return total_size; return total_size;
} }
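
A quick self-check of the allocate/split/free/merge path implemented above, in the spirit of the lab's unit tests (a sketch only; it assumes a pool that has already been through init_buddy):

	u64 before = get_free_mem_size_from_buddy(pool);
	/* an order-1 request may walk up the free lists and split_page on the way back down */
	struct page *p = buddy_get_pages(pool, 1);
	BUG_ON(p == NULL || p->order != 1 || !p->allocated);
	/* freeing should merge the chunk back, restoring the free-memory total */
	buddy_free_pages(pool, p);
	BUG_ON(get_free_mem_size_from_buddy(pool) != before);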

View File

@ -16,6 +16,7 @@
#include "buddy.h" #include "buddy.h"
#include "slab.h" #include "slab.h"
#include "page_table.h"
extern unsigned long *img_end; extern unsigned long *img_end;
@ -51,6 +52,35 @@ unsigned long get_ttbr1(void)
void map_kernel_space(vaddr_t va, paddr_t pa, size_t len) void map_kernel_space(vaddr_t va, paddr_t pa, size_t len)
{ {
// <lab2> // <lab2>
#define IS_VALID (1UL << 0)
#define UXN (0x1UL << 54)
#define ACCESSED (0x1UL << 10)
#define INNER_SHARABLE (0x3UL << 8)
#define SIZE_2M (2UL*1024*1024)
#define PAGE_SHIFT (12)
#define GET_PADDR_IN_PTE(entry) \
(((u64)entry.table.next_table_addr) << PAGE_SHIFT)
paddr_t addr_l0, addr_l1, addr_l2;
pte_t pte_l0, pte_l1;
u64 *table_l2;
// Walk down to the L2 table that covers va
// (for simplicity, assume len is under 1 GiB and stays within a single L1 entry)
addr_l0 = get_ttbr1();
pte_l0.pte = *((u64 *) phys_to_virt(addr_l0) + GET_L0_INDEX(va));
addr_l1 = GET_PADDR_IN_PTE(pte_l0);
pte_l1.pte = *((u64 *) phys_to_virt(addr_l1) + GET_L1_INDEX(va));
addr_l2 = GET_PADDR_IN_PTE(pte_l1);
table_l2 = (u64 *) phys_to_virt(addr_l2);
// Fill the L2 entries with 2 MiB block descriptors
u32 start_entry_idx = GET_L2_INDEX(va);
u32 end_entry_idx = GET_L2_INDEX((va + len));
for (u32 idx = start_entry_idx; idx < end_entry_idx; ++idx) {
table_l2[idx] = (pa + idx * SIZE_2M) | UXN | ACCESSED | INNER_SHARABLE | NORMAL_MEMORY | IS_VALID;
}
// </lab2> // </lab2>
} }
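
To make the descriptor layout above concrete, the first iteration of an example mapping starting at pa = 0x40000000 would write (keeping NORMAL_MEMORY symbolic, since it is defined elsewhere in the lab):

	table_l2[start_entry_idx] = 0x40000000UL
	                          | UXN             /* (1UL << 54) not executable from EL0 */
	                          | ACCESSED        /* (1UL << 10) access flag set */
	                          | INNER_SHARABLE  /* (3UL << 8) inner shareable */
	                          | NORMAL_MEMORY   /* memory attribute index */
	                          | IS_VALID;       /* bit 0: valid 2 MiB block descriptor */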

View File

@ -162,7 +162,24 @@ static int get_next_ptp(ptp_t * cur_ptp, u32 level, vaddr_t va,
int query_in_pgtbl(vaddr_t * pgtbl, vaddr_t va, paddr_t * pa, pte_t ** entry) int query_in_pgtbl(vaddr_t * pgtbl, vaddr_t va, paddr_t * pa, pte_t ** entry)
{ {
// <lab2> // <lab2>
ptp_t * cur_ptp = (ptp_t *) pgtbl, *next_ptp;
int page_type;
// walk the page table from L0 down to L3
for (u32 cur_level = 0; cur_level < 4; cur_level++) {
page_type = get_next_ptp(cur_ptp, cur_level, va, &next_ptp, entry, false);
if (page_type < 0) {
// the address is not mapped
return page_type;
} else if (page_type == BLOCK_PTP) {
// hit a block entry, stop walking here
break;
} else {
// a table entry, keep walking down
cur_ptp = next_ptp;
}
}
*pa = GET_PADDR_IN_PTE((*entry)) | (va & PAGE_MASK);
// </lab2> // </lab2>
return 0; return 0;
} }
@ -186,7 +203,40 @@ int map_range_in_pgtbl(vaddr_t * pgtbl, vaddr_t va, paddr_t pa,
size_t len, vmr_prop_t flags) size_t len, vmr_prop_t flags)
{ {
// <lab2> // <lab2>
vaddr_t va_start = va, va_end = va + len, va_cur;
ptp_t * cur_ptp, *next_ptp;
int page_type;
u32 cur_level;
pte_t *entry;
u64 cur_pfn;
// iterate over every page in [va, va + len)
cur_pfn = pa >> PAGE_SHIFT;
va_start &= ~PAGE_MASK;
va_end &= ~PAGE_MASK;
if (va_end < va + len) {
va_end++; // a trailing partial page; +1 is enough since va_cur advances in PAGE_SIZE steps from a page-aligned start
}
for (va_cur = va_start; va_cur < va_end; va_cur += PAGE_SIZE) {
// walk (and create, if needed) the page-table levels for this page
cur_ptp = (ptp_t *) pgtbl;
for (cur_level = 0; cur_level < 4; cur_level++) {
page_type = get_next_ptp(cur_ptp, cur_level, va_cur, &next_ptp, &entry, true);
if (page_type < 0) {
// failed to get or create the next-level table
return page_type;
} else if (page_type == BLOCK_PTP) {
// only 4 KiB page mappings are supported here; bail out on a block entry
return -ENOMAPPING;
} else {
// a table entry, keep walking down
cur_ptp = next_ptp;
}
}
// fill in the L3 page entry
set_pte_flags(entry, flags, USER_PTE);
entry->l3_page.pfn = cur_pfn++;
}
flush_tlb();
// </lab2> // </lab2>
return 0; return 0;
} }
@ -207,7 +257,40 @@ int map_range_in_pgtbl(vaddr_t * pgtbl, vaddr_t va, paddr_t pa,
int unmap_range_in_pgtbl(vaddr_t * pgtbl, vaddr_t va, size_t len) int unmap_range_in_pgtbl(vaddr_t * pgtbl, vaddr_t va, size_t len)
{ {
// <lab2> // <lab2>
vaddr_t va_start = va, va_end = va + len, va_cur;
ptp_t * cur_ptp, *next_ptp;
int page_type;
u32 cur_level;
pte_t *entry;
// iterate over every page in [va, va + len)
va_start &= ~PAGE_MASK;
va_end &= ~PAGE_MASK;
if (va_end < va + len) {
va_end++; // a trailing partial page; +1 is enough since va_cur advances in PAGE_SIZE steps from a page-aligned start
}
for (va_cur = va_start; va_cur < va_end; va_cur += PAGE_SIZE) {
// walk down to the L3 entry for this page
cur_ptp = (ptp_t *) pgtbl;
for (cur_level = 0; cur_level < 4; cur_level++) {
page_type = get_next_ptp(cur_ptp, cur_level, va_cur, &next_ptp, &entry, false);
if (page_type < 0) {
// not mapped; nothing to clear for this page
break;
} else if (page_type == BLOCK_PTP) {
// only 4 KiB page mappings are supported here; bail out on a block entry
return -ENOMAPPING;
} else {
// a table entry, keep walking down
cur_ptp = next_ptp;
}
}
if (cur_level != 4) {
continue;
}
// clear the L3 entry
entry->pte = 0;
}
flush_tlb();
// </lab2> // </lab2>
return 0; return 0;
} }
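
A short sketch of how the three routines above compose (the addresses are made up; it assumes a root page table allocated elsewhere and the VMR_READ/VMR_WRITE flags used later in this commit):

	paddr_t pa;
	pte_t *entry;
	/* map one 4 KiB page read/write, look it up, then tear it down */
	map_range_in_pgtbl(pgtbl, 0x100000, 0x4000000, PAGE_SIZE, VMR_READ | VMR_WRITE);
	query_in_pgtbl(pgtbl, 0x100000, &pa, &entry);   /* pa should come back as 0x4000000 */
	unmap_range_in_pgtbl(pgtbl, 0x100000, PAGE_SIZE);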

View File

@ -348,6 +348,37 @@ u64 sys_handle_brk(u64 addr)
* *
*/ */
if (vmspace->heap_vmr != NULL) {
retval = vmspace->heap_vmr->start + vmspace->heap_vmr->size;
} else {
retval = vmspace->user_current_heap;
}
if (addr == 0) {
// initialize the heap
if (vmspace->heap_vmr != NULL) {
// already initialized
return -EINVAL;
}
// create the pmo that backs the heap
pmo = obj_alloc(TYPE_PMO, sizeof(*pmo));
if (!pmo) {
return -ENOMEM;
}
pmo_init(pmo, PMO_ANONYM, 0, 0);
// set up the heap vmr mapping
vmspace->heap_vmr = init_heap_vmr(vmspace, vmspace->user_current_heap, pmo);
} else if (vmspace->heap_vmr != NULL && addr > retval) {
// grow the heap
vmspace->heap_vmr->size = addr - vmspace->heap_vmr->start;
retval = addr;
} else if (vmspace->heap_vmr != NULL && addr < retval) {
// shrinking the heap is not supported
retval = -EINVAL;
}
/* /*
* return the original heap addr on failure; * return the original heap addr on failure;
* return new heap addr on success. * return new heap addr on success.
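
Seen from user space through the usys_handle_brk wrapper added later in this commit, the contract implemented above behaves roughly like this sketch:

	u64 base = usys_handle_brk(0);              /* first call: creates heap_vmr, returns the heap base */
	u64 top  = usys_handle_brk(base + 0x1000);  /* grows the heap by one page, returns the new top */
	/* asking for an address below the current top is rejected with -EINVAL */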

View File

@ -15,6 +15,9 @@
#include <common/printk.h> #include <common/printk.h>
#include <common/types.h> #include <common/types.h>
#include <common/machine.h>
extern char kernel_stack[PLAT_CPU_NUM][KERNEL_STACK_SIZE];
static inline __attribute__ ((always_inline)) static inline __attribute__ ((always_inline))
u64 read_fp() u64 read_fp()
@ -30,6 +33,21 @@ int stack_backtrace()
printk("Stack backtrace:\n"); printk("Stack backtrace:\n");
// Your code here. // Your code here.
u64 *fp, *prev_fp;
fp = (u64 *) read_fp();
// walk the frame records until the sentinel frame at the stack base
while (true) {
prev_fp = (u64 *) *fp;
// the entry frame stores a NULL frame pointer
if (prev_fp == NULL)
break;
printk("LR %lx FP %lx Args", *(prev_fp + 1), (u64) prev_fp);
for (int i = 0; i < 5; i++) {
printk(" %lx", *(fp + 2 + i));
}
printk("\n");
fp = prev_fp;
}
return 0; return 0;
} }

View File

@ -247,21 +247,26 @@ void process_create_root(char *bin_name)
char *binary = NULL; char *binary = NULL;
int ret; int ret;
// read the binary from the in-memory ramdisk (CPIO archive)
ret = ramdisk_read_file(bin_name, &binary); ret = ramdisk_read_file(bin_name, &binary);
BUG_ON(ret < 0); BUG_ON(ret < 0);
BUG_ON(binary == NULL); BUG_ON(binary == NULL);
// create the PCB and install its own capability
root_process = process_create(); root_process = process_create();
// create the main thread and its capability
thread_cap = thread_create_main(root_process, ROOT_THREAD_STACK_BASE, thread_cap = thread_create_main(root_process, ROOT_THREAD_STACK_BASE,
ROOT_THREAD_STACK_SIZE, ROOT_THREAD_STACK_SIZE,
ROOT_THREAD_PRIO, TYPE_ROOT, ROOT_THREAD_PRIO, TYPE_ROOT,
smp_get_cpu_id(), binary, bin_name); smp_get_cpu_id(), binary, bin_name);
// fetch the main thread object through its capability
root_thread = obj_get(root_process, thread_cap, TYPE_THREAD); root_thread = obj_get(root_process, thread_cap, TYPE_THREAD);
/* Enqueue: put init thread into the ready queue */ /* Enqueue: put init thread into the ready queue */
BUG_ON(sched_enqueue(root_thread)); BUG_ON(sched_enqueue(root_thread));
obj_put(root_thread); obj_put(root_thread);
current_thread = root_thread;
} }
/* syscalls */ /* syscalls */

View File

@ -175,6 +175,15 @@ static u64 load_binary(struct process *process,
* page aligned segment size. Take care of the page alignment when allocating * page aligned segment size. Take care of the page alignment when allocating
* and mapping physical memory. * and mapping physical memory.
*/ */
// size of the segment data in the ELF file
seg_sz = elf->p_headers[i].p_filesz;
// virtual address the segment is loaded at
p_vaddr = elf->p_headers[i].p_vaddr;
// size of the mapping; the ROUND_UP alone is not strictly needed, since the check below handles the alignment anyway
seg_map_sz = ROUND_UP(elf->p_headers[i].p_memsz, PAGE_SIZE);
if (ROUND_DOWN(p_vaddr, PAGE_SIZE) + seg_map_sz < p_vaddr + elf->p_headers[i].p_memsz) {
seg_map_sz += PAGE_SIZE;
}
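/*
 * Worked example with made-up numbers: p_vaddr = 0x401234, p_memsz = 0x2000.
 *   ROUND_UP(0x2000, PAGE_SIZE)     = 0x2000
 *   ROUND_DOWN(0x401234, PAGE_SIZE) = 0x401000
 *   0x401000 + 0x2000 = 0x403000 < 0x401234 + 0x2000 = 0x403234
 * so the segment spills into a third page and seg_map_sz grows to 0x3000.
 */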
pmo = obj_alloc(TYPE_PMO, sizeof(*pmo)); pmo = obj_alloc(TYPE_PMO, sizeof(*pmo));
if (!pmo) { if (!pmo) {
@ -193,9 +202,19 @@ static u64 load_binary(struct process *process,
* You should copy data from the elf into the physical memory in pmo. * You should copy data from the elf into the physical memory in pmo.
* The physical address of a pmo can be get from pmo->start. * The physical address of a pmo can be get from pmo->start.
*/ */
const char *section_data;
char *alloc_section;
// copy the segment contents into the pmo's physical memory
section_data = bin + elf->p_headers[i].p_offset;
alloc_section = (char *) phys_to_virt(pmo->start);
kdebug("Copy segment[%d] from addr %lx -> %lx, len = %d\n", i, section_data, alloc_section, seg_sz);
memcpy(alloc_section, section_data, seg_sz);
flags = PFLAGS2VMRFLAGS(elf->p_headers[i].p_flags); flags = PFLAGS2VMRFLAGS(elf->p_headers[i].p_flags);
kdebug("Map segment[%d] from paddr %lx -> %lx, len = %d\n",
i, pmo->start, ROUND_DOWN(p_vaddr, PAGE_SIZE), seg_map_sz);
ret = vmspace_map_range(vmspace, ret = vmspace_map_range(vmspace,
ROUND_DOWN(p_vaddr, PAGE_SIZE), ROUND_DOWN(p_vaddr, PAGE_SIZE),
seg_map_sz, flags, pmo); seg_map_sz, flags, pmo);
@ -251,10 +270,12 @@ int thread_create_main(struct process *process, u64 stack_base,
u64 stack; u64 stack;
u64 pc; u64 pc;
// get the process's virtual address space
init_vmspace = obj_get(process, VMSPACE_OBJ_ID, TYPE_VMSPACE); init_vmspace = obj_get(process, VMSPACE_OBJ_ID, TYPE_VMSPACE);
obj_put(init_vmspace); obj_put(init_vmspace);
/* Allocate and setup a user stack for the init thread */ /* Allocate and setup a user stack for the init thread */
// create the pmo backing the user stack and install its capability
stack_pmo = obj_alloc(TYPE_PMO, sizeof(*stack_pmo)); stack_pmo = obj_alloc(TYPE_PMO, sizeof(*stack_pmo));
if (!stack_pmo) { if (!stack_pmo) {
ret = -ENOMEM; ret = -ENOMEM;
@ -267,11 +288,13 @@ int thread_create_main(struct process *process, u64 stack_base,
goto out_free_obj_pmo; goto out_free_obj_pmo;
} }
// map the thread's user stack
ret = vmspace_map_range(init_vmspace, stack_base, stack_size, ret = vmspace_map_range(init_vmspace, stack_base, stack_size,
VMR_READ | VMR_WRITE, stack_pmo); VMR_READ | VMR_WRITE, stack_pmo);
BUG_ON(ret != 0); BUG_ON(ret != 0);
/* init thread */ /* init thread */
// allocate the thread object
thread = obj_alloc(TYPE_THREAD, sizeof(*thread)); thread = obj_alloc(TYPE_THREAD, sizeof(*thread));
if (!thread) { if (!thread) {
ret = -ENOMEM; ret = -ENOMEM;
@ -281,15 +304,19 @@ int thread_create_main(struct process *process, u64 stack_base,
/* Fill the parameter of the thread struct */ /* Fill the parameter of the thread struct */
stack = stack_base + stack_size; stack = stack_base + stack_size;
// load every ELF segment of the program
pc = load_binary(process, init_vmspace, bin_start, &meta); pc = load_binary(process, init_vmspace, bin_start, &meta);
// prepare the initial stack contents
prepare_env((char *)phys_to_virt(stack_pmo->start) + stack_size, prepare_env((char *)phys_to_virt(stack_pmo->start) + stack_size,
stack, &meta, bin_name); stack, &meta, bin_name);
stack -= ENV_SIZE_ON_STACK; stack -= ENV_SIZE_ON_STACK;
// initialize the thread context
ret = thread_init(thread, process, stack, pc, prio, type, aff); ret = thread_init(thread, process, stack, pc, prio, type, aff);
BUG_ON(ret != 0); BUG_ON(ret != 0);
// allocate a capability for the thread
thread_cap = cap_alloc(process, thread, 0); thread_cap = cap_alloc(process, thread, 0);
if (thread_cap < 0) { if (thread_cap < 0) {
ret = thread_cap; ret = thread_cap;
@ -297,6 +324,7 @@ int thread_create_main(struct process *process, u64 stack_base,
} }
/* L1 icache & dcache have no coherence */ /* L1 icache & dcache have no coherence */
// flush the L1 instruction and data caches
flush_idcache(); flush_idcache();
// return thread; // return thread;

View File

@ -57,6 +57,12 @@ void init_thread_ctx(struct thread *thread, u64 stack, u64 func, u32 prio,
*/ */
/* Fill the context of the thread */ /* Fill the context of the thread */
// stack pointer for EL0
thread->thread_ctx->ec.reg[SP_EL0] = stack;
// program counter (exception return address)
thread->thread_ctx->ec.reg[ELR_EL1] = func;
// saved program status: return to EL0t
thread->thread_ctx->ec.reg[SPSR_EL1] = SPSR_EL1_EL0t;
/* Set thread type */ /* Set thread type */
thread->thread_ctx->type = type; thread->thread_ctx->type = type;

View File

@ -129,13 +129,13 @@ u64 switch_context(void)
* Return the correct value in order to make eret_to_thread work correctly * Return the correct value in order to make eret_to_thread work correctly
* in main.c * in main.c
*/ */
return 0; return (u64) &target_ctx->ec;
} }
/* SYSCALL functions */ /* SYSCALL functions */
/** /**
* Lab4 * Lab4
* Finish the sys_yield function * Finish the sys_yield function
*/ */
void sys_yield(void) void sys_yield(void)
@ -144,9 +144,9 @@ void sys_yield(void)
int sched_init(struct sched_ops *sched_ops) int sched_init(struct sched_ops *sched_ops)
{ {
BUG_ON(sched_ops == NULL); BUG_ON(sched_ops == NULL);
cur_sched_ops = sched_ops; cur_sched_ops = sched_ops;
cur_sched_ops->sched_init(); cur_sched_ops->sched_init();
return 0; return 0;
} }
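
For context, the value returned by switch_context above is what the boot path hands to eret_to_thread; a sketch of the call site described in the comment (main.c is not part of this hunk):

	/* in main()/secondary_start(): pick the next thread and restore its register context */
	eret_to_thread(switch_context());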

View File

@ -31,22 +31,25 @@ void sys_putc(char ch)
* Lab3: Your code here * Lab3: Your code here
* Send ch to the screen in any way you like * Send ch to the screen in any way you like
*/ */
uart_send(ch);
} }
u32 sys_getc(void) u32 sys_getc(void)
{ {
return uart_recv(); return uart_recv();
} }
/* /*
* Lab4 * Lab4
* Finish the sys_get_cpu_id syscall * Finish the sys_get_cpu_id syscall
*/ */
u32 sys_get_cpu_id(void) u32 sys_get_cpu_id(void)
{ {
return -1; return -1;
} }
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Winitializer-overrides"
/* /*
* Lab3: Your code here * Lab3: Your code here
* Update the syscall table as you like to redirect syscalls * Update the syscall table as you like to redirect syscalls
@ -70,7 +73,7 @@ const void *syscall_table[NR_SYSCALL] = {
[SYS_cap_copy_from] = sys_cap_copy_from, [SYS_cap_copy_from] = sys_cap_copy_from,
[SYS_set_affinity] = sys_set_affinity, [SYS_set_affinity] = sys_set_affinity,
[SYS_get_affinity] = sys_get_affinity, [SYS_get_affinity] = sys_get_affinity,
/* /*
* Lab4 * Lab4
* Add syscall * Add syscall
*/ */
@ -87,3 +90,4 @@ const void *syscall_table[NR_SYSCALL] = {
[SYS_debug] = sys_debug [SYS_debug] = sys_debug
}; };
#pragma clang diagnostic pop
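
The Lab4 sys_get_cpu_id above is still the stub returning -1; the natural sketch simply reuses the smp_get_cpu_id() helper already used elsewhere in this commit:

	u32 sys_get_cpu_id(void)
	{
		return smp_get_cpu_id();
	}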

View File

@ -14,10 +14,10 @@
#define NR_SYSCALL 256 #define NR_SYSCALL 256
void sys_exit(void); void sys_exit(int ret);
void sys_create_pmo(void); int sys_create_pmo(u64 size, u64 type);
void sys_map_pmo(void); int sys_map_pmo(u64 target_process_cap, u64 pmo_cap, u64 addr, u64 perm);
void sys_handle_brk(void); u64 sys_handle_brk(u64 addr);
/* lab3 syscalls finished */ /* lab3 syscalls finished */
void sys_yield(void); void sys_yield(void);

View File

@ -13,5 +13,6 @@ void _start_c(long *p)
* Lab3: Your code here * Lab3: Your code here
* Complete the main function * Complete the main function
*/ */
usys_exit(ret);
return; return;
} }

View File

@ -13,7 +13,28 @@ u64 syscall(u64 sys_no, u64 arg0, u64 arg1, u64 arg2, u64 arg3, u64 arg4,
* And finally use svc to execute the system call. After the syscall returns, don't forget * And finally use svc to execute the system call. After the syscall returns, don't forget
* to move the return value from x0 to the ret variable of this function * to move the return value from x0 to the ret variable of this function
*/ */
return ret;
__asm __volatile("mov x9, %0"::"r"(sys_no));
// move the arguments into x0-x7
#define SET_ARG(n) __asm __volatile("mov x"#n", %0"::"r"(arg##n));
SET_ARG(0);
SET_ARG(1);
SET_ARG(2);
SET_ARG(3);
SET_ARG(4);
SET_ARG(5);
SET_ARG(6);
SET_ARG(7);
// issue the system call
__asm __volatile("mov x8, x9");
__asm ("svc #0");
// fetch the return value from x0
__asm __volatile("mov %0, x0":"=r"(ret));
return ret;
} }
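
For comparison, a more defensive shape for the same wrapper (a sketch, not what this commit does): pinning the values in register variables keeps the compiler from reusing x0-x8 between the separate mov statements above. Only the first few arguments are shown; the rest follow the same pattern.

	register u64 x8 __asm__("x8") = sys_no;
	register u64 x0 __asm__("x0") = arg0;
	register u64 x1 __asm__("x1") = arg1;
	register u64 x2 __asm__("x2") = arg2;
	__asm__ __volatile__("svc #0"
			     : "+r"(x0)
			     : "r"(x1), "r"(x2), "r"(x8)
			     : "memory");
	return x0;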
/* /*
@ -22,25 +43,27 @@ u64 syscall(u64 sys_no, u64 arg0, u64 arg1, u64 arg2, u64 arg3, u64 arg4,
*/ */
void usys_putc(char ch) void usys_putc(char ch)
{ {
syscall(SYS_putc, ch, 0, 0, 0, 0, 0, 0, 0, 0);
} }
void usys_exit(int ret) void usys_exit(int ret)
{ {
syscall(SYS_exit, ret, 0, 0, 0, 0, 0, 0, 0, 0);
} }
int usys_create_pmo(u64 size, u64 type) int usys_create_pmo(u64 size, u64 type)
{ {
return 0; return (int) syscall(SYS_create_pmo, size, type, 0, 0, 0, 0, 0, 0, 0);
} }
int usys_map_pmo(u64 process_cap, u64 pmo_cap, u64 addr, u64 rights) int usys_map_pmo(u64 process_cap, u64 pmo_cap, u64 addr, u64 rights)
{ {
return 0; return (int) syscall(SYS_map_pmo, process_cap, pmo_cap, addr, rights, 0, 0, 0, 0, 0);
} }
u64 usys_handle_brk(u64 addr) u64 usys_handle_brk(u64 addr)
{ {
return 0; return syscall(SYS_handle_brk, addr, 0, 0, 0, 0, 0, 0, 0, 0);
} }
/* Here finishes all syscalls need by lab3 */ /* Here finishes all syscalls need by lab3 */