UnnamedOS
vmm.c
#include <common.h>
#include <string.h>
#include <mem/vmm.h>
#include <mem/mmu.h>
#include <interrupts/isr.h>
#include <boot/multiboot.h>

#define ENTRIES 1024 // number of entries in page directories and tables
// The number of bytes per page directory "happens" to equal the size of a page.
#define PAGE_SIZE (ENTRIES * sizeof(page_directory_entry_t))
#define MEMORY_SIZE 0x100000000
#define PAGE_NUMBER (MEMORY_SIZE / PAGE_SIZE)
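// Worked example of these constants (assuming page_directory_entry_t is 4 bytes
// wide, as it is for 32-bit x86 paging structures):
//   PAGE_SIZE   = 1024 * 4           = 4096 bytes, i.e. one 4 KiB page
//   PAGE_NUMBER = 0x100000000 / 4096 = 1048576 pages in the 4 GiB address space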

// We use two domains, kernel and user memory.
typedef struct {
    void* start; // start address of the domain
    void* end;   // end address of the domain
} vmm_domain_t;

// A virtual address, split into its paging components.
typedef union {
    struct {
        uint16_t page_offset : 12; // location in the page
        uint16_t page        : 10; // index in the page table
        uint16_t page_table  : 10; // index in the page directory
    } __attribute__((packed)) bits; // bit field access
    void* ptr;                      // pointer access
} vmm_virtual_address_t;

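// Example decomposition (illustration only; the cast uses GCC's cast-to-union
// extension, just like the rest of this file): for the virtual address 0xC0ABC123,
//   vmm_virtual_address_t va = (vmm_virtual_address_t) (void*) 0xC0ABC123;
//   va.bits.page_table  == 0x302  (0xC0ABC123 >> 22, index into the page directory)
//   va.bits.page        == 0x2BC  ((0xC0ABC123 >> 12) & 0x3FF, index into the table)
//   va.bits.page_offset == 0x123  (0xC0ABC123 & 0xFFF, offset inside the 4 KiB page)
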
static page_directory_t* page_directory = 0; // the current page directory
static page_directory_t* old_directory  = 0; // for temporary modifications
static uint8_t old_interrupts = 0;           // saved interrupt state for temporary modifications

// We use 0-1GiB as kernel memory.
static vmm_domain_t kernel_domain = {.start = (void*) 0x400000,
                                     .end   = (void*) 0x3FFFFFFF};
// The memory 1GiB-4GiB is process-specific.
static vmm_domain_t user_domain = {.start = (void*) 0x40000000,
                                   .end   = (void*) 0xFFFFFFFF - ENTRIES * PAGE_SIZE};
static uint8_t domain_check_enabled = 0; // whether domain checking is performed

// Destroys a page table in the current page directory.
static void vmm_destroy_page_table(uint16_t page_table) {
    // free the page frame holding the table, then clear the directory entry
    pmm_free(pmm_get_address(page_directory[page_table].pt, 0), PAGE_SIZE);
    memset(page_directory + page_table, 0, sizeof(page_directory_entry_t));
}

// Creates an empty page directory.
page_directory_t* vmm_create_page_directory() {
    page_directory_t* dir_phys = pmm_alloc(PAGE_SIZE, PMM_KERNEL);
    logln("VMM", "Creating page directory at %08x", dir_phys);
    page_directory_t* dir = vmm_map_physical_memory(dir_phys, PAGE_SIZE, VMM_KERNEL);
    memset(dir, 0, PAGE_SIZE);
    // the last entry points to the directory itself (recursive mapping)
    page_directory_entry_t dir_entry = {
        .pr = 1, .rw = 0, .user = 0, .pt = pmm_get_page(dir_phys, 0)
    };
    dir[ENTRIES - 1] = dir_entry;
    vmm_unmap_physical_memory(dir, PAGE_SIZE);
    return dir_phys;
}

// Destroys a page directory.
void vmm_destroy_page_directory(page_directory_t* dir_phys) {
    logln("VMM", "Destroying page directory at %08x", dir_phys);
    page_directory_t* old_directory = page_directory; // remember the active directory
    page_directory_t* dir = vmm_map_physical_memory(dir_phys, PAGE_SIZE, VMM_KERNEL);
    page_directory = dir; // operate on the directory that is being destroyed
    vmm_virtual_address_t start = (vmm_virtual_address_t) user_domain.start,
            end = (vmm_virtual_address_t) user_domain.end;
    // destroy all page tables belonging to the user domain
    for (int i = start.bits.page_table; i <= end.bits.page_table; i++)
        if (page_directory[i].pr)
            vmm_destroy_page_table(i);
    pmm_free(dir_phys, PAGE_SIZE); // release the directory's own page frame
    page_directory = old_directory;             // change back so we can unmap the
    vmm_unmap_physical_memory(dir, PAGE_SIZE);  // destroyed directory
}

// Refreshes the page directory entries that are shared across page directories.
static void vmm_refresh_page_directory(page_directory_t* dir_phys) {
    page_directory_t* dir = vmm_map_physical_memory(dir_phys, PAGE_SIZE, VMM_KERNEL);
    // copy the entries below the user domain, i.e. the kernel mappings
    vmm_virtual_address_t start = (vmm_virtual_address_t) (void*) 0,
            end = (vmm_virtual_address_t) kernel_domain.end;
    uint32_t offset = start.bits.page_table * sizeof(page_directory_entry_t);
    memcpy((void*) ((uintptr_t) dir + offset),
           (void*) ((uintptr_t) page_directory + offset),
           (end.bits.page_table - start.bits.page_table + 1) *
           sizeof(page_directory_entry_t));
    vmm_unmap_physical_memory(dir, PAGE_SIZE);
}

// Loads a new page directory and returns the previously loaded one.
page_directory_t* vmm_load_page_directory(page_directory_t* new_directory) {
    if (new_directory != VMM_PAGEDIR) {
        logln("VMM", "Loading page directory at %08x", new_directory);
        // remember the physical address of the currently loaded directory
        page_directory_t* old_directory = vmm_get_physical_address(page_directory);
        if (mmu_get_paging()) {
            vmm_refresh_page_directory(new_directory);
            mmu_load_page_directory(new_directory);
        } else
            mmu_enable_paging(new_directory);
        // from now on, access the active directory through its recursive mapping
        page_directory = VMM_PAGEDIR;
        return old_directory;
    }
    return 0;
}

// Loads a page directory for temporary modification.
void vmm_modify_page_directory(page_directory_t* new_directory) {
    if (old_directory) {
        println("VMM: Already modifying a page directory at %08x", old_directory);
        return;
    }
    // we don't want to be interrupted when modifying kernel-relevant
    old_interrupts = isr_enable_interrupts(0); // page directories
    old_directory = vmm_load_page_directory(new_directory);
}

// Ends a page directory modification and returns to the previous page directory.
void vmm_modified_page_directory() {
    if (!old_directory) {
        println("VMM: Not yet modifying a page directory");
        return;
    }
    vmm_load_page_directory(old_directory); // return to the previous page directory
    old_directory = 0;
    isr_enable_interrupts(old_interrupts); // restore the previous interrupt state
}

// Returns a virtual or physical address to a page table in memory.
static page_table_entry_t* vmm_get_page_table(
        page_directory_entry_t* dir_entry, vmm_virtual_address_t vaddr) {
    // If paging is already enabled and we access the virtually mapped page
    // directory, we need to calculate the page table pointer in a different way
    // (see vmm_init for details).
    if (page_directory == VMM_PAGEDIR)
        return VMM_PAGETAB(vaddr.bits.page_table);
    else
        return (page_table_t*) pmm_get_address(dir_entry->pt, 0);
}
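// A sketch of the recursive-mapping idea behind VMM_PAGEDIR and VMM_PAGETAB (their
// actual definitions live in vmm.h; the constants below are the conventional x86
// choice and are an assumption here, not taken from that header): because
// vmm_create_page_directory points the last directory entry at the directory itself,
// the active directory shows up in the top 4 KiB of virtual memory and all of its
// page tables in the top 4 MiB once paging is enabled:
//   #define VMM_PAGEDIR    ((page_directory_t*) 0xFFFFF000)
//   #define VMM_PAGETAB(i) ((page_table_t*) (0xFFC00000 + (i) * PAGE_SIZE))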

// Returns a virtual or physical address to a page table entry in memory.
static page_table_entry_t* vmm_get_page_table_entry(
        page_directory_entry_t* dir_entry, vmm_virtual_address_t vaddr) {
    return vmm_get_page_table(dir_entry, vaddr) + vaddr.bits.page;
}

// Extracts a domain from the given flags.
static vmm_domain_t* vmm_get_domain(vmm_flags_t flags) {
    return flags & VMM_USER ? &user_domain : &kernel_domain;
}

// Returns whether a virtual address belongs to a given domain.
static uint8_t vmm_is_in_domain(void* vaddr, vmm_domain_t* domain) {
    return (uintptr_t) vaddr >= (uintptr_t) domain->start &&
           (uintptr_t) vaddr <= (uintptr_t) domain->end;
}

// Returns the domain a virtual address belongs to.
static vmm_domain_t* vmm_get_domain_from_address(void* vaddr) {
    return vmm_is_in_domain(vaddr, &kernel_domain) ? &kernel_domain :
           (vmm_is_in_domain(vaddr, &user_domain) ? &user_domain : 0);
}

// Checks whether a virtual address might be accessed with the given flags.
static uint8_t vmm_domain_check(void* vaddr, vmm_flags_t flags) {
    if (domain_check_enabled &&
            vmm_get_domain_from_address(vaddr) != vmm_get_domain(flags)) {
        println("%4aVMM: Domain mismatch%a");
        return 0;
    }
    return 1;
}

// Maps the given page into memory.
uint8_t vmm_map(void* _vaddr, void* paddr, vmm_flags_t flags) {
    if (!vmm_domain_check(_vaddr, flags)) return 0;
    vmm_virtual_address_t vaddr = (vmm_virtual_address_t) _vaddr;
    page_directory_entry_t* dir_entry = page_directory + vaddr.bits.page_table;
    if (!dir_entry->pr) { // the page table does not exist yet, create it
        // We assume the table's pages to be writable and in userspace for now,
        // this is overridden by individual pages below.
        dir_entry->pr = dir_entry->rw = dir_entry->user = 1;
        dir_entry->pt = pmm_get_page(pmm_alloc(PAGE_SIZE, PMM_KERNEL), 0);
        page_table_t* tab = vmm_get_page_table(dir_entry, vaddr);
        memset(tab, 0, PAGE_SIZE); // initialize with zeroes
    }
    page_table_entry_t* tab_entry = vmm_get_page_table_entry(dir_entry, vaddr);
    if (tab_entry->pr) {
        println("%4aVMM: %08x is already mapped%a", vaddr);
        return 0;
    }
    tab_entry->pr = 1;
    tab_entry->rw = !!(flags & VMM_WRITABLE);
    tab_entry->user = !!(flags & VMM_USER);
    tab_entry->page = pmm_get_page(paddr, 0);
    mmu_flush_tlb(_vaddr); // the mapping changed, so flush the TLB for this page
    return 1;
}

// Unmaps the given page from memory.
void vmm_unmap(void* _vaddr) {
    vmm_virtual_address_t vaddr = (vmm_virtual_address_t) _vaddr;
    page_directory_entry_t* dir_entry = page_directory + vaddr.bits.page_table;
    if (!dir_entry->pr)
        return;
    page_table_t* page_table = vmm_get_page_table(dir_entry, vaddr);
    page_table_entry_t* tab_entry = page_table + vaddr.bits.page;
    if (!tab_entry->pr)
        return;
    memset(tab_entry, 0, sizeof(page_table_entry_t));
    int i;
    for (i = 0; i < ENTRIES; i++)
        if (page_table[i].pr)
            break;
    if (i == ENTRIES) // no entries are present anymore, release the whole page table
        vmm_destroy_page_table(vaddr.bits.page_table);
    mmu_flush_tlb(_vaddr);
}

// Maps or unmaps the given page(s) into or from memory.
static void vmm_map_range_detailed(void* vaddr, void* paddr, size_t len,
        vmm_flags_t flags, uint8_t map) {
    if (map && !vmm_domain_check(vaddr, flags)) return;
    uint32_t virtual_page = pmm_get_page(vaddr, 0),
            physical_page = pmm_get_page(paddr, 0),
            pages = pmm_get_page(vaddr, len - 1) - virtual_page + 1;
    logln("VMM", map ? "Map virtual %08x-%08x (page %05x-%05x) "
            "to physical %08x-%08x (page %05x-%05x)" :
            "Unmap virtual %08x-%08x (page %05x-%05x)",
            vaddr, vaddr + len - 1, virtual_page, virtual_page + pages - 1,
            paddr, paddr + len - 1, physical_page, physical_page + pages - 1);
    if (map)
        for (int i = 0; i < pages; i++)
            vmm_map(pmm_get_address(virtual_page + i, 0),
                    pmm_get_address(physical_page + i, 0), flags);
    else
        for (int i = 0; i < pages; i++)
            vmm_unmap(pmm_get_address(virtual_page + i, 0));
}

// Maps the given page(s) into memory.
void vmm_map_range(void* vaddr, void* paddr, size_t len, vmm_flags_t flags) {
    if (len == 0) return;
    vmm_map_range_detailed(vaddr, paddr, len, flags, 1);
}

// Unmaps the given page(s) from memory.
void vmm_unmap_range(void* vaddr, size_t len) {
    if (len == 0) return;
    vmm_map_range_detailed(vaddr, 0, len, 0, 0);
}

// Translates a virtual address into a physical address.
void* vmm_get_physical_address(void* _vaddr) {
    if (!mmu_get_paging())
        return _vaddr; // without paging, virtual and physical addresses are identical
    vmm_virtual_address_t vaddr = (vmm_virtual_address_t) _vaddr;
    page_directory_entry_t* dir_entry = page_directory + vaddr.bits.page_table;
    if (!dir_entry->pr)
        return 0; // this virtual address is not currently mapped
    page_table_entry_t* tab_entry = vmm_get_page_table_entry(dir_entry, vaddr);
    if (!tab_entry->pr)
        return 0;
    return pmm_get_address(tab_entry->page, vaddr.bits.page_offset);
}

// Dumps the current page directory.
void vmm_dump() {
    log("VMM", "Page directory at %08x (physical %08x):",
            page_directory, vmm_get_physical_address(page_directory));
    uint32_t logged = 0;
    for (int i = 0; i < ENTRIES; i++) {
        page_directory_entry_t* dir_entry = page_directory + i;
        if (dir_entry->pr) {
            vmm_virtual_address_t vaddr = {.bits = {.page_table = i}};
            page_table_t* page_table = vmm_get_page_table(dir_entry, vaddr);
            for (int j = 0; j < ENTRIES; j++)
                if (page_table[j].pr) {
                    uint32_t vpage = i * ENTRIES + j, ppage = page_table[j].page;
                    if (logged % 8 == 0) // start a new line every 8 entries
                        logln(0, ""), log("VMM", "");
                    log(0, vpage == ppage ? "%s%05x to itself" : "%s%05x to %05x",
                            logged % 8 ? ", " : "", vpage, ppage);
                    logged++;
                }
        }
    }
    logln(0, "");
}

// Finds unmapped pages using a first-fit search.
static void* vmm_find_free(size_t len, vmm_domain_t* domain) {
    if (len == 0) return 0;
    uint32_t pages = len / PAGE_SIZE + (len % PAGE_SIZE ? 1 : 0),
            free_pages = 0, end_page = pmm_get_page(domain->end, 0);
    // first-fit as in PMM
    for (int i = pmm_get_page(domain->start, 0); i <= end_page; i++) {
        if (!vmm_get_physical_address(pmm_get_address(i, 0))) // page i is unmapped
            free_pages++;
        else
            free_pages = 0;
        if (free_pages >= pages)
            return pmm_get_address(i - free_pages + 1, 0);
    }
    println("%4aVMM: Not enough memory%a");
    return 0;
}
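// Worked example of the first-fit scan above: a request of len = 10000 bytes needs
// 10000 / 4096 = 2 remainder 1808, so pages = 3. While scanning the domain,
// free_pages counts consecutive unmapped pages and resets on every mapped page; as
// soon as the count reaches 3, the address of the first page of that run,
// pmm_get_address(i - free_pages + 1, 0), is returned.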

// Translates VMM into PMM flags.
static pmm_flags_t vmm_get_pmm_flags(vmm_flags_t flags) {
    return flags & VMM_USER ? PMM_USER : PMM_KERNEL;
}

// If necessary, maps the given page(s) somewhere into memory.
void* vmm_map_physical_memory(void* paddr, size_t len, vmm_flags_t flags) {
    if (!mmu_get_paging())
        return paddr; // without paging we can access physical memory directly
    void* vaddr = vmm_find_free(len, vmm_get_domain(flags));
    if (!vaddr)
        return 0;
    vmm_map_range(vaddr, paddr, len, flags);
    return vaddr;
}

// If necessary, unmaps the given page(s) from memory.
void vmm_unmap_physical_memory(void* vaddr, size_t len) {
    if (mmu_get_paging())
        vmm_unmap_range(vaddr, len);
}

// Marks the given page(s) as used and maps them into memory.
void vmm_use(void* vaddr, void* paddr, size_t len, vmm_flags_t flags) {
    if (len == 0 || !vmm_domain_check(vaddr, flags)) return;
    pmm_use(paddr, len, vmm_get_pmm_flags(flags), "vmm_use");
    vmm_map_range(vaddr, paddr, len, flags);
}

// Marks the given page(s) as used and maps them somewhere into memory.
void* vmm_use_physical_memory(void* paddr, size_t len, vmm_flags_t flags) {
    void* vaddr = vmm_find_free(len, vmm_get_domain(flags));
    if (!vaddr)
        return 0;
    vmm_use(vaddr, paddr, len, flags);
    return vaddr;
}

// Marks some page(s) as used and maps them into memory.
void* vmm_use_virtual_memory(void* vaddr, size_t len, vmm_flags_t flags) {
    if (!vmm_domain_check(vaddr, flags)) return 0;
    void* paddr = pmm_alloc(len, vmm_get_pmm_flags(flags));
    if (!paddr)
        return 0;
    vmm_map_range(vaddr, paddr, len, flags);
    return paddr;
}

// Marks some page(s) as used and maps them somewhere into memory.
void* vmm_alloc(size_t len, vmm_flags_t flags) {
    // Allocate some memory and find unmapped virtual space to map it into.
    // Note that this does not necessarily identity-map!
    void* paddr = pmm_alloc(len, vmm_get_pmm_flags(flags));
    void* vaddr = vmm_find_free(len, vmm_get_domain(flags));
    if (!paddr || !vaddr)
        return 0;
    vmm_map_range(vaddr, paddr, len, flags);
    return vaddr;
}

// Frees the given page(s) and unmaps them from memory.
void vmm_free(void* vaddr, size_t len) {
    if (len == 0) return;
    void* paddr = vmm_get_physical_address(vaddr);
    vmm_unmap_range(vaddr, len);
    pmm_free(paddr, len);
}

// Enables or disables domain checking.
void vmm_enable_domain_check(uint8_t enable) {
    domain_check_enabled = enable;
}

// Initializes the VMM.
void vmm_init() {
    print("VMM init ... ");
    mmu_init();
    // ... (set up the initial page directory)
    uint32_t highest_kernel_page = pmm_get_highest_kernel_page();
    // identity-map everything the kernel already uses
    for (int i = 0; i <= highest_kernel_page; i++) {
        void* addr = pmm_get_address(i, 0);
        if (pmm_check(addr) != PMM_UNUSED && pmm_check(addr) != PMM_RESERVED)
            vmm_map(addr, addr, VMM_KERNEL); // only accessible to the kernel
    }
    // ... (enable paging and map further regions)
    io_use_video_memory();
    // ...
    println("%2aok%a.");
}
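// Typical call sequence (an illustrative sketch, not taken from this file): once
// vmm_init() has set up paging, kernel code obtains and releases memory like this,
// assuming the vmm_flags_t values combine bitwise:
//   void* buf = vmm_alloc(3 * PAGE_SIZE, VMM_KERNEL | VMM_WRITABLE);
//   ...
//   vmm_free(buf, 3 * PAGE_SIZE);
// Memory-mapped hardware that already has a fixed physical address would instead go
// through vmm_map_physical_memory()/vmm_unmap_physical_memory(), and a new process
// would get its own address space via vmm_create_page_directory() plus
// vmm_modify_page_directory()/vmm_modified_page_directory() around the mappings.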