Diffstat (limited to 'kernel/mm')
-rw-r--r--  kernel/mm/memory_map.c (renamed from kernel/mm/memory_map.cc)                                           |  9
-rw-r--r--  kernel/mm/physical_mm/bitmap.c (renamed from kernel/mm/physical_mm/bitmap.cc)                           | 17
-rw-r--r--  kernel/mm/physical_mm/physical_mm.c (renamed from kernel/mm/physical_mm/physical_mm.cc)                 | 45
-rw-r--r--  kernel/mm/virtual_mm/page_table_allocator.c (renamed from kernel/mm/virtual_mm/page_table_allocator.cc) | 23
-rw-r--r--  kernel/mm/virtual_mm/pages.c (renamed from kernel/mm/virtual_mm/pages.cc)                               | 17
-rw-r--r--  kernel/mm/virtual_mm/virtual_mm.c (renamed from kernel/mm/virtual_mm/virtual_mm.cc)                     | 33
6 files changed, 58 insertions, 86 deletions
diff --git a/kernel/mm/memory_map.cc b/kernel/mm/memory_map.c
index 884f92f..797c0b0 100644
--- a/kernel/mm/memory_map.cc
+++ b/kernel/mm/memory_map.c
@@ -23,9 +23,6 @@
 #include <mm/multiboot.h>
 #include <stdint.h>
 
-namespace MemoryMap
-{
-
 static free_memory_regions_t l_free_memory_regions = { 0 };
 
 ALWAYS_INLINE static char *
@@ -48,7 +45,7 @@ fetch_type(multiboot_memory_map_t *mmap)
 }
 
 void
-load(multiboot_info_t *multiboot_info)
+mmap_load(multiboot_info_t *multiboot_info)
 {
   printk("mm", "Loading Memory Map:");
 
@@ -95,9 +92,7 @@ load(multiboot_info_t *multiboot_info)
 }
 
 free_memory_regions_t *
-get_free_regions(void)
+mmap_get_free_regions(void)
 {
   return &l_free_memory_regions;
 }
-
-}
diff --git a/kernel/mm/physical_mm/bitmap.cc b/kernel/mm/physical_mm/bitmap.c
index 1c1285d..1fea30c 100644
--- a/kernel/mm/physical_mm/bitmap.cc
+++ b/kernel/mm/physical_mm/bitmap.c
@@ -20,11 +20,10 @@
 #include <stdbool.h>
 #include <stdint.h>
 
-namespace PhysicalMM
-{
-
 void
-set_used(const uint32_t bit, uint32_t *total_free_blocks, uint32_t *memory_map)
+pmm_set_used(const uint32_t bit,
+             uint32_t *total_free_blocks,
+             uint32_t *memory_map)
 {
   uint32_t memory_map_index = bit / BITMAP_ENTRY_SIZE;
   uint32_t bitmask = 1 << (bit % BITMAP_ENTRY_SIZE);
@@ -33,9 +32,9 @@ set_used(const uint32_t bit, uint32_t *total_free_blocks, uint32_t *memory_map)
 }
 
 void
-set_usable(const uint32_t bit,
-           uint32_t *total_free_blocks,
-           uint32_t *memory_map)
+pmm_set_usable(const uint32_t bit,
+               uint32_t *total_free_blocks,
+               uint32_t *memory_map)
 {
   uint32_t memory_map_index = bit / BITMAP_ENTRY_SIZE;
   uint32_t bitmask = 1 << (bit % BITMAP_ENTRY_SIZE);
@@ -44,11 +43,9 @@ set_usable(const uint32_t bit,
 }
 
 bool
-test_bit(const uint32_t bit, uint32_t *memory_map)
+pmm_test_bit(const uint32_t bit, uint32_t *memory_map)
 {
   uint32_t memory_map_index = bit / BITMAP_ENTRY_SIZE;
   uint32_t bitmask = 1 << (bit % BITMAP_ENTRY_SIZE);
   return memory_map[memory_map_index] & bitmask;
 }
-
-}
diff --git a/kernel/mm/physical_mm/physical_mm.cc b/kernel/mm/physical_mm/physical_mm.c
index dc9bf0e..d7a5c31 100644
--- a/kernel/mm/physical_mm/physical_mm.cc
+++ b/kernel/mm/physical_mm/physical_mm.c
@@ -30,17 +30,14 @@
 #include <stdbool.h>
 #include <stdint.h>
 
-namespace PhysicalMM
-{
-
-extern "C" uint32_t kernel_start;
-extern "C" uint32_t kernel_end;
+uint32_t kernel_start;
+uint32_t kernel_end;
 
 uint32_t l_block_count = 0;
 uint32_t l_total_free_blocks = 0;
 uint32_t l_memory_map[MAX_BLOCKS / BITMAP_ENTRY_SIZE];
 
-Spinlock l_lock;
+spinlock_t l_lock;
 
 ALWAYS_INLINE static void
 log_memory_map(free_memory_regions_t *free_memory_regions)
@@ -68,12 +65,12 @@ initialize_region(uint32_t start, uint32_t length)
   uint32_t n_blocks = length / BLOCK_SIZE;
 
   for (; n_blocks > 0; n_blocks--)
-    if (test_bit(bit, l_memory_map))
-      set_usable(bit++, &l_total_free_blocks, l_memory_map);
+    if (pmm_test_bit(bit, l_memory_map))
+      pmm_set_usable(bit++, &l_total_free_blocks, l_memory_map);
 
   /* First block is always used (first 64KiB) */
-  if (!test_bit(0, l_memory_map))
-    set_used(0, &l_total_free_blocks, l_memory_map);
+  if (!pmm_test_bit(0, l_memory_map))
+    pmm_set_used(0, &l_total_free_blocks, l_memory_map);
 }
 
 ALWAYS_INLINE static void
@@ -86,7 +83,7 @@ deinitialize_region(uint32_t start, uint32_t length)
     n_blocks++;
 
   for (; n_blocks > 0; n_blocks--)
-    set_used(bit++, &l_total_free_blocks, l_memory_map);
+    pmm_set_used(bit++, &l_total_free_blocks, l_memory_map);
 }
 
 ALWAYS_INLINE static uint32_t
@@ -98,7 +95,7 @@ find_free_block(void)
 {
     if (l_memory_map[i] != 0xffffffff)
      /* Test each bit to see if it's zero */
      for (uint32_t j = 0; j < BITMAP_ENTRY_SIZE; j++)
-       if (!test_bit(i * BITMAP_ENTRY_SIZE + j, l_memory_map))
+       if (!pmm_test_bit(i * BITMAP_ENTRY_SIZE + j, l_memory_map))
          return i * BITMAP_ENTRY_SIZE + j;
 
   /* Shouldn't be reached, since we're keeping track of the number of free
@@ -108,12 +105,12 @@
 }
 
 void
-initialize(void)
+pmm_initialize(void)
 {
-  free_memory_regions_t *free_memory_regions = MemoryMap::get_free_regions();
+  free_memory_regions_t *free_memory_regions = mmap_get_free_regions();
   log_memory_map(free_memory_regions);
 
-  l_lock.acquire();
+  spinlock_acquire(&l_lock);
 
   /* All blocks are initially used */
   /* TODO: Move this block to a place after block_count is set. This is why
@@ -132,7 +129,7 @@ initialize(void)
   /* Deinitialize first 8MiB */
   deinitialize_region(0, 8 * MiB);
 
-  l_lock.release();
+  spinlock_release(&l_lock);
 
   /* Manually loop through and calculate the number of free blocks. */
   for (uint32_t i = 0; i < MAX_BLOCKS / BITMAP_ENTRY_SIZE; i++)
@@ -140,36 +137,34 @@
    if (l_memory_map[i] != 0xffffffff)
      /* Test each bit to see if it's zero */
      for (uint32_t j = 0; j < BITMAP_ENTRY_SIZE; j++)
-       if (!test_bit(i * BITMAP_ENTRY_SIZE + j, l_memory_map))
+       if (!pmm_test_bit(i * BITMAP_ENTRY_SIZE + j, l_memory_map))
          l_total_free_blocks++;
 
   printk("physical_mm", "Total free blocks: 0x%x", l_total_free_blocks);
 }
 
 void *
-allocate_block(void)
+pmm_allocate_block(void)
 {
   if (l_total_free_blocks == 0) {
     printk("physical_mm", "No more free blocks!");
     return NULL;
   }
 
-  l_lock.acquire();
+  spinlock_acquire(&l_lock);
 
   uint32_t block = find_free_block();
-  set_used(block, &l_total_free_blocks, l_memory_map);
+  pmm_set_used(block, &l_total_free_blocks, l_memory_map);
 
-  l_lock.release();
+  spinlock_release(&l_lock);
 
   uint32_t physical_address = block * BLOCK_SIZE;
   return (void *) physical_address;
 }
 
 void
-free_block(void *physical_address)
+pmm_free_block(void *physical_address)
 {
   uint32_t block = ((uint32_t) physical_address) / BLOCK_SIZE;
-  set_usable(block, &l_total_free_blocks, l_memory_map);
-}
-
+  pmm_set_usable(block, &l_total_free_blocks, l_memory_map);
 }
diff --git a/kernel/mm/virtual_mm/page_table_allocator.cc b/kernel/mm/virtual_mm/page_table_allocator.c
index 057724c..5c0bdcd 100644
--- a/kernel/mm/virtual_mm/page_table_allocator.cc
+++ b/kernel/mm/virtual_mm/page_table_allocator.c
@@ -24,10 +24,7 @@
 #include <mm/virtual_mm.h>
 #include <stddef.h>
 
-namespace PageTableAllocator
-{
-
-uint32_t *l_page_directory = 0;
+static uint32_t *l_page_directory = 0;
 uint32_t *l_heap = NULL;
 uint16_t l_table_index = 0;
 
@@ -36,10 +33,10 @@ make_table(uint32_t *table_address)
 {
   uint32_t *table = table_address;
   for (uint32_t i = 0; i < 1024; i++)
-    table[i] = PTE_FRAME((uint32_t) PhysicalMM::allocate_block())
-               | PTE_PRESENT(1) | PTE_WRITABLE(1);
+    table[i] = PTE_FRAME((uint32_t) pmm_allocate_block()) | PTE_PRESENT(1)
+               | PTE_WRITABLE(1);
 
-  void *starting_address = VirtualMM::find_free_pages(1);
+  void *starting_address = vmm_find_free_pages(1);
   uint32_t *pd_entry = &l_page_directory[GET_PD_INDEX(starting_address)];
 
   *pd_entry = PDE_FRAME((uint32_t) table) | PDE_PRESENT(1) | PDE_WRITABLE(1);
@@ -48,13 +45,13 @@
 }
 
 void
-initialize(void)
+pta_initialize(void)
 {
   /* We can't just do this in allocate() because make_table() depends on
-   * VirtualMM::find_free_pages() */
+   * find_free_pages() */
 
-  if (l_page_directory != VirtualMM::get_page_directory())
-    l_page_directory = VirtualMM::get_page_directory();
+  if (l_page_directory != vmm_get_page_directory())
+    l_page_directory = vmm_get_page_directory();
 
   /* Initial table */
   if (l_heap == NULL)
@@ -62,11 +59,9 @@
 }
 
 uint32_t *
-allocate(void)
+pta_allocate(void)
 {
   uint32_t *next_table = l_heap + (l_table_index * 4 * KiB);
   l_table_index++;
   return next_table;
 }
-
-}
diff --git a/kernel/mm/virtual_mm/pages.cc b/kernel/mm/virtual_mm/pages.c
index 6046dea..fb90f98 100644
--- a/kernel/mm/virtual_mm/pages.cc
+++ b/kernel/mm/virtual_mm/pages.c
@@ -24,30 +24,25 @@
 #include <stdbool.h>
 #include <stdint.h>
 
-namespace VirtualMM
-{
-
 void *
-alloc_pages(uint32_t n_pages)
+vmm_alloc_pages(uint32_t n_pages)
 {
-  uint32_t starting_address = (uint32_t) find_free_pages(n_pages);
+  uint32_t starting_address = (uint32_t) vmm_find_free_pages(n_pages);
   if (!starting_address)
     return NULL;
 
   for (uint32_t i = 0; i < n_pages; i++) {
-    void *physical_address = PhysicalMM::allocate_block();
+    void *physical_address = pmm_allocate_block();
     void *virtual_address = (void *) (starting_address + (i * PAGE_SIZE));
-    map_page(physical_address, virtual_address);
+    vmm_map_page(physical_address, virtual_address);
   }
 
   return (void *) starting_address;
 }
 
 void
-free_pages(void *starting_address, uint32_t n_pages)
+vmm_free_pages(void *starting_address, uint32_t n_pages)
 {
   for (uint32_t i = 0; i < n_pages; i++)
-    unmap_page((void *) (((uint32_t) starting_address) + (i * 4096)));
-}
-
+    vmm_unmap_page((void *) (((uint32_t) starting_address) + (i * 4096)));
 }
diff --git a/kernel/mm/virtual_mm/virtual_mm.cc b/kernel/mm/virtual_mm/virtual_mm.c
index 0b4ce18..953d23e 100644
--- a/kernel/mm/virtual_mm/virtual_mm.cc
+++ b/kernel/mm/virtual_mm/virtual_mm.c
@@ -26,40 +26,37 @@
 #include <stdbool.h>
 #include <stdint.h>
 
-namespace VirtualMM
-{
-
 extern uint32_t kernel_start;
 extern uint32_t kernel_end;
 
 uint32_t *l_current_page_directory = 0;
 /* Kernel's page directory */
-uint32_t l_page_directory[1024] ALIGNED(4096);
+static uint32_t l_page_directory[1024] ALIGNED(4096);
 
 /* Page table for the first 4 MiB */
-uint32_t l_fourMiB_page_table[1024] ALIGNED(4096);
+static uint32_t l_fourMiB_page_table[1024] ALIGNED(4096);
 
 /* Page table for the next 4 MiB */
-uint32_t l_eightMiB_page_table[1024] ALIGNED(4096);
+static uint32_t l_eightMiB_page_table[1024] ALIGNED(4096);
 
 uint32_t *
-get_page_directory(void)
+vmm_get_page_directory(void)
 {
   return l_current_page_directory;
 }
 
 ALWAYS_INLINE void
-load_page_directory(uint32_t *page_directory)
+vmm_load_page_directory(uint32_t *page_directory)
 {
   __asm__ volatile("movl %0, %%cr3" ::"r"(page_directory));
 }
 
 bool
-switch_page_directory(uint32_t *page_directory)
+vmm_switch_page_directory(uint32_t *page_directory)
 {
   if (!page_directory)
     return false;
   l_current_page_directory = page_directory;
-  load_page_directory(page_directory);
+  vmm_load_page_directory(page_directory);
   return true;
 }
@@ -74,7 +71,7 @@ enable_paging(void)
 }
 
 void
-initialize(void)
+vmm_initialize(void)
 {
   /* Zero out the page tables and directories */
   for (uint32_t i = 0; i < 1024; i++) {
@@ -103,16 +100,16 @@
   *eightMiB_pd_entry = PDE_FRAME((uint32_t) l_eightMiB_page_table)
                        | PDE_PRESENT(1) | PDE_WRITABLE(1);
 
-  switch_page_directory(l_page_directory);
+  vmm_switch_page_directory(l_page_directory);
   enable_paging();
 
-  PageTableAllocator::initialize();
+  pta_initialize();
 }
 
 uint32_t *
 make_table(uint32_t *pd_entry)
 {
-  uint32_t *table = PageTableAllocator::allocate();
+  uint32_t *table = pta_allocate();
 
   for (uint32_t i = 0; i < 1024; i++)
     table[i] = 0x0;
@@ -135,7 +132,7 @@ get_or_make_table(uint32_t *pd_entry)
 }
 
 void
-map_page(void *physical_address, void *virtual_address)
+vmm_map_page(void *physical_address, void *virtual_address)
 {
   uint32_t *pd_entry = &l_current_page_directory[GET_PD_INDEX(virtual_address)];
@@ -151,7 +148,7 @@ map_page(void *physical_address, void *virtual_address)
 }
 
 void
-unmap_page(void *virtual_address)
+vmm_unmap_page(void *virtual_address)
 {
   uint32_t *pd_entry = &l_current_page_directory[GET_PD_INDEX(virtual_address)];
@@ -169,7 +166,7 @@ unmap_page(void *virtual_address)
 }
 
 void *
-find_free_pages(uint32_t n_pages)
+vmm_find_free_pages(uint32_t n_pages)
 {
   /* Skip the first two page directory entries; we don't wanna touch the first
    * 8MiB. */
@@ -227,5 +224,3 @@ find_free_pages(uint32_t n_pages)
   ASSERT_NOT_REACHED();
   return 0;
 }
-
-}
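Note (not part of the commit): the renamed C entry points replace the old C++ namespaces one-for-one (MemoryMap:: becomes mmap_*, PhysicalMM:: becomes pmm_*, VirtualMM:: becomes vmm_*, PageTableAllocator:: becomes pta_*). The sketch below is only an assumed wiring of the new initialization calls during early boot; the wrapper name mm_init and the header paths other than <mm/virtual_mm.h> and <mm/multiboot.h> are hypothetical. The ordering itself follows from the diff: pmm_initialize() reads mmap_get_free_regions(), and vmm_initialize() ends by calling pta_initialize().

/* Hypothetical usage sketch -- not from this commit. Header paths other than
 * <mm/multiboot.h> and <mm/virtual_mm.h> are assumptions. */
#include <mm/multiboot.h>    /* multiboot_info_t, as included by memory_map.c */
#include <mm/memory_map.h>   /* assumed: mmap_load(), mmap_get_free_regions() */
#include <mm/physical_mm.h>  /* assumed: pmm_initialize() */
#include <mm/virtual_mm.h>   /* vmm_initialize() */

void
mm_init(multiboot_info_t *multiboot_info)
{
  /* Parse the multiboot memory map first; pmm_initialize() later pulls the
   * free regions out of mmap_get_free_regions(). */
  mmap_load(multiboot_info);

  /* Build the physical block bitmap; the first 8 MiB stays reserved. */
  pmm_initialize();

  /* Set up the kernel page directory, enable paging, and initialize the
   * page-table allocator (vmm_initialize() calls pta_initialize()). */
  vmm_initialize();
}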
