diff options
-rw-r--r-- | kernel/CMakeLists.txt | 10 | ||||
-rw-r--r-- | kernel/include/kernel/spinlock.h | 15 | ||||
-rw-r--r-- | kernel/include/libk/kmalloc.h | 8 | ||||
-rw-r--r-- | kernel/include/mm/physical_mm.h | 35 | ||||
-rw-r--r-- | kernel/include/mm/virtual_mm.h | 23 | ||||
-rw-r--r-- | kernel/kernel/kernel.cc | 6 | ||||
-rw-r--r-- | kernel/kernel/spinlock.cc (renamed from kernel/kernel/spinlock.c) | 20 | ||||
-rw-r--r-- | kernel/libk/kmalloc.cc (renamed from kernel/libk/kmalloc.c) | 4 | ||||
-rw-r--r-- | kernel/mm/physical_mm/memory_map.cc (renamed from kernel/mm/physical_mm/memory_map.c) | 25 | ||||
-rw-r--r-- | kernel/mm/physical_mm/physical_mm.cc (renamed from kernel/mm/physical_mm/physical_mm.c) | 62 | ||||
-rw-r--r-- | kernel/mm/virtual_mm/virtual_mm.cc (renamed from kernel/mm/virtual_mm/virtual_mm.c) | 41 |
11 files changed, 123 insertions, 126 deletions
diff --git a/kernel/CMakeLists.txt b/kernel/CMakeLists.txt index b1c9ce7..c80b225 100644 --- a/kernel/CMakeLists.txt +++ b/kernel/CMakeLists.txt @@ -12,18 +12,18 @@ set(SRC kernel/halt.c kernel/io.c kernel/kernel.cc - kernel/spinlock.c + kernel/spinlock.cc kernel/stack_smashing_protector.c libk/printf.c libk/printk.cc libk/strlen.c - libk/kmalloc.c + libk/kmalloc.cc mm/memory_map.c - mm/physical_mm/memory_map.c - mm/physical_mm/physical_mm.c - mm/virtual_mm/virtual_mm.c + mm/physical_mm/memory_map.cc + mm/physical_mm/physical_mm.cc + mm/virtual_mm/virtual_mm.cc ) add_executable(kernel ${SRC}) diff --git a/kernel/include/kernel/spinlock.h b/kernel/include/kernel/spinlock.h index e7b8228..ed6bb86 100644 --- a/kernel/include/kernel/spinlock.h +++ b/kernel/include/kernel/spinlock.h @@ -19,17 +19,16 @@ #ifndef __kernel_spinlock_h #define __kernel_spinlock_h -#include <stdatomic.h> +#include <stdint.h> -#ifdef __cplusplus -extern "C" { -#endif +typedef uint8_t spinlock_t; + +namespace Spinlock +{ -void spinlock_acquire(atomic_flag *lock); -void spinlock_release(atomic_flag *lock); +void acquire(spinlock_t volatile *lock); +void release(spinlock_t volatile *lock); -#ifdef __cplusplus } -#endif #endif diff --git a/kernel/include/libk/kmalloc.h b/kernel/include/libk/kmalloc.h index e124c70..85e247a 100644 --- a/kernel/include/libk/kmalloc.h +++ b/kernel/include/libk/kmalloc.h @@ -21,10 +21,6 @@ #include <stdint.h> -#ifdef __cplusplus -extern "C" { -#endif - #define MIN_PAGES 4 typedef struct memory_chunk_t { @@ -36,8 +32,4 @@ typedef struct memory_chunk_t { void *kmalloc(uint32_t size); -#ifdef __cplusplus -} -#endif - #endif diff --git a/kernel/include/mm/physical_mm.h b/kernel/include/mm/physical_mm.h index 0058cc5..b5631c5 100644 --- a/kernel/include/mm/physical_mm.h +++ b/kernel/include/mm/physical_mm.h @@ -31,27 +31,22 @@ /* This is the maximum number of blocks for a 4GiB system. 
*/ #define MAX_BLOCKS 1048576 -#ifdef __cplusplus -extern "C" { -#endif - -void physical_mm_init(void); - -uint32_t physical_mm_find_free_block(void); +namespace PhysicalMM +{ + +void init(void); +uint32_t find_free_block(void); +void *allocate_block(void); +void free_block(void *physical_address); + +void set_used(const uint32_t bit, + uint32_t *total_free_blocks, + uint32_t *memory_map); +void set_usable(const uint32_t bit, + uint32_t *total_free_blocks, + uint32_t *memory_map); +bool test_bit(const uint32_t bit, uint32_t *memory_map); -void *physical_mm_allocate_block(void); -void physical_mm_free_block(void *physical_address); - -void physical_mm_set_used(const uint32_t bit, - uint32_t *total_free_blocks, - uint32_t *memory_map); -void physical_mm_set_usable(const uint32_t bit, - uint32_t *total_free_blocks, - uint32_t *memory_map); -bool physical_mm_test_bit(const uint32_t bit, uint32_t *memory_map); - -#ifdef __cplusplus } -#endif #endif diff --git a/kernel/include/mm/virtual_mm.h b/kernel/include/mm/virtual_mm.h index 6246ce3..313b1f9 100644 --- a/kernel/include/mm/virtual_mm.h +++ b/kernel/include/mm/virtual_mm.h @@ -69,52 +69,49 @@ #define VIRTUAL_ADDRESS(pd_index, pt_index) \ (((pd_index) << 22) | ((pt_index) << 12)) -#ifdef __cplusplus -extern "C" { -#endif +namespace VirtualMM +{ /* * Loads a given page directory into CR0 */ -void virtual_mm_load_page_directory(uint32_t *page_directory); +void load_page_directory(uint32_t *page_directory); /* * Switches the current page directory to a given page directory */ -bool virtual_mm_switch_page_directory(uint32_t *page_directory); +bool switch_page_directory(uint32_t *page_directory); /* * Initialize the virtual memory manager */ -void virtual_mm_initialize(void); +void init(void); /* * Map a physical address to a virtual address */ -void virtual_mm_map_page(void *physical_address, void *virtual_address); +void map_page(void *physical_address, void *virtual_address); /* * Unmap a page starting at virtual 
address */ -void virtual_mm_unmap_page(void *virtual_address); +void unmap_page(void *virtual_address); /* * Find a virtual address with n consecutive free addresses. */ -void *virtual_mm_find_free_addresses(uint32_t n_pages); +void *find_free_addresses(uint32_t n_pages); /* * Allocate and map n pages. */ -void *virtual_mm_alloc_pages(uint32_t n_pages); +void *alloc_pages(uint32_t n_pages); /* * Free n pages from the starting address. */ -void virtual_mm_free_pages(void *starting_address, uint32_t n_pages); +void free_pages(void *starting_address, uint32_t n_pages); -#ifdef __cplusplus } -#endif #endif diff --git a/kernel/kernel/kernel.cc b/kernel/kernel/kernel.cc index ca41a6f..384244a 100644 --- a/kernel/kernel/kernel.cc +++ b/kernel/kernel/kernel.cc @@ -41,14 +41,14 @@ kernel_main(uint32_t magic, multiboot_info_t *multiboot_info) GDT::load(); memory_map_load(multiboot_info); - physical_mm_init(); - virtual_mm_initialize(); + PhysicalMM::init(); + VirtualMM::init(); // void *x = virtual_mm_find_free_addresses(1046999); // printk("debug", "x(0x%x)", x); #if 0 - int *x = physical_mm_allocate_block(); + int *x = PhysicalMM::allocate_block(); /* *x = 20; */ printk("debug", "x(0x%x)", x); diff --git a/kernel/kernel/spinlock.c b/kernel/kernel/spinlock.cc index 1688a12..c13a08f 100644 --- a/kernel/kernel/spinlock.c +++ b/kernel/kernel/spinlock.cc @@ -16,20 +16,26 @@ * along with this program. If not, see <http://www.gnu.org/licenses/>. 
*/ -#include <stdatomic.h> +#include <common.h> +#include <kernel/spinlock.h> + +namespace Spinlock +{ void -spinlock_acquire(atomic_flag *lock) +acquire(spinlock_t volatile *plock) { __asm__ volatile("cli"); - - while (atomic_flag_test_and_set_explicit(lock, memory_order_acquire)) - __asm__ volatile("rep; nop"); + while (!__sync_bool_compare_and_swap(plock, 0, 1)) + while (*plock) + __asm__ volatile("rep; nop"); } void -spinlock_release(atomic_flag *lock) +release(spinlock_t volatile *plock) { - atomic_flag_clear_explicit(lock, memory_order_release); + __sync_bool_compare_and_swap(plock, 1, 0); /* TODO: Enable interrupts here */ } + +} diff --git a/kernel/libk/kmalloc.c b/kernel/libk/kmalloc.cc index 23b6c2e..4afaa61 100644 --- a/kernel/libk/kmalloc.c +++ b/kernel/libk/kmalloc.cc @@ -28,7 +28,7 @@ memory_chunk_t *starting_mc = NULL; static memory_chunk_t * add_block(void *address, uint32_t size) { - memory_chunk_t *mc = address; + memory_chunk_t *mc = (memory_chunk_t *) address; mc->next = NULL; mc->prev = NULL; @@ -47,7 +47,7 @@ add_block(void *address, uint32_t size) static void kmalloc_init(void) { - int *initial_region = virtual_mm_alloc_pages(MIN_PAGES); + int *initial_region = (int *) VirtualMM::alloc_pages(MIN_PAGES); printk("debug", "%x", initial_region); /* *initial_region = 10; */ diff --git a/kernel/mm/physical_mm/memory_map.c b/kernel/mm/physical_mm/memory_map.cc index 8184a37..6284173 100644 --- a/kernel/mm/physical_mm/memory_map.c +++ b/kernel/mm/physical_mm/memory_map.cc @@ -16,17 +16,16 @@ * along with this program. If not, see <http://www.gnu.org/licenses/>. 
*/ +#include <mm/physical_mm.h> #include <stdbool.h> #include <stdint.h> -#include <mm/memory_map.h> -#include <mm/physical_mm.h> +namespace PhysicalMM +{ /* Marks the block as 'used' */ -ALWAYS_INLINE void -physical_mm_set_used(const uint32_t bit, - uint32_t *total_free_blocks, - uint32_t *memory_map) +void +set_used(const uint32_t bit, uint32_t *total_free_blocks, uint32_t *memory_map) { uint32_t memory_map_index = bit / BITMAP_ENTRY_SIZE; uint32_t bitmask = 1 << (bit % BITMAP_ENTRY_SIZE); @@ -35,10 +34,10 @@ physical_mm_set_used(const uint32_t bit, } /* Marks the block as 'unused' */ -ALWAYS_INLINE void -physical_mm_set_usable(const uint32_t bit, - uint32_t *total_free_blocks, - uint32_t *memory_map) +void +set_usable(const uint32_t bit, + uint32_t *total_free_blocks, + uint32_t *memory_map) { uint32_t memory_map_index = bit / BITMAP_ENTRY_SIZE; uint32_t bitmask = 1 << (bit % BITMAP_ENTRY_SIZE); @@ -50,10 +49,12 @@ physical_mm_set_usable(const uint32_t bit, * True if the bit is set (block is in use) * False if the bit is unset (block isn't in use) */ -ALWAYS_INLINE bool -physical_mm_test_bit(const uint32_t bit, uint32_t *memory_map) +bool +test_bit(const uint32_t bit, uint32_t *memory_map) { uint32_t memory_map_index = bit / BITMAP_ENTRY_SIZE; uint32_t bitmask = 1 << (bit % BITMAP_ENTRY_SIZE); return memory_map[memory_map_index] & bitmask; } + +} diff --git a/kernel/mm/physical_mm/physical_mm.c b/kernel/mm/physical_mm/physical_mm.cc index d0d859c..ae4c62b 100644 --- a/kernel/mm/physical_mm/physical_mm.c +++ b/kernel/mm/physical_mm/physical_mm.cc @@ -27,21 +27,23 @@ #include <libk/stdio.h> #include <mm/memory_map.h> #include <mm/physical_mm.h> -#include <stdatomic.h> #include <stdbool.h> #include <stdint.h> -extern uint32_t kernel_start; -extern uint32_t kernel_end; +namespace PhysicalMM +{ + +extern "C" uint32_t kernel_start; +extern "C" uint32_t kernel_end; uint32_t block_count = 0; uint32_t total_free_blocks = 0; uint32_t memory_map[MAX_BLOCKS / 
BITMAP_ENTRY_SIZE]; -atomic_flag memory_map_lock; +spinlock_t memory_map_lock; ALWAYS_INLINE static void -physical_mm_log_memory_map(free_memory_regions_t *free_memory_regions) +log_memory_map(free_memory_regions_t *free_memory_regions) { printk("\nphysical_mm", "memory_map is at 0x%x", memory_map); printk("\nphysical_mm", "Free Memory Regions:"); @@ -60,23 +62,23 @@ physical_mm_log_memory_map(free_memory_regions_t *free_memory_regions) } ALWAYS_INLINE static void -physical_mm_initialize_region(uint32_t start, uint32_t length) +initialize_region(uint32_t start, uint32_t length) { /* Get the location of the start address in the bitmap */ uint32_t bit = start / BLOCK_SIZE; uint32_t n_blocks = length / BLOCK_SIZE; for (; n_blocks > 0; n_blocks--) - if (physical_mm_test_bit(bit, memory_map)) - physical_mm_set_usable(bit++, &total_free_blocks, memory_map); + if (test_bit(bit, memory_map)) + set_usable(bit++, &total_free_blocks, memory_map); /* First block is always used (first 64KiB) */ - if (!physical_mm_test_bit(0, memory_map)) - physical_mm_set_used(0, &total_free_blocks, memory_map); + if (!test_bit(0, memory_map)) + set_used(0, &total_free_blocks, memory_map); } ALWAYS_INLINE static void -physical_mm_deinitialize_region(uint32_t start, uint32_t length) +deinitialize_region(uint32_t start, uint32_t length) { uint32_t bit = start / BLOCK_SIZE; uint32_t n_blocks = length / BLOCK_SIZE; @@ -85,16 +87,16 @@ physical_mm_deinitialize_region(uint32_t start, uint32_t length) n_blocks++; for (; n_blocks > 0; n_blocks--) - physical_mm_set_used(bit++, &total_free_blocks, memory_map); + set_used(bit++, &total_free_blocks, memory_map); } void -physical_mm_init(void) +init(void) { free_memory_regions_t *free_memory_regions = memory_map_get_free_regions(); - physical_mm_log_memory_map(free_memory_regions); + log_memory_map(free_memory_regions); - spinlock_acquire(&memory_map_lock); + Spinlock::acquire(&memory_map_lock); /* All blocks are initially used */ /* TODO: Move this block 
to a place after block_count is set. This is why @@ -104,16 +106,16 @@ physical_mm_init(void) for (int i = 0; i < free_memory_regions->n_regions; i++) { multiboot_memory_map_t *region = free_memory_regions->region_list[i]; - physical_mm_initialize_region(region->addr_low, region->len_low); + initialize_region(region->addr_low, region->len_low); } uint32_t kernel_size = ((uint32_t) &kernel_end) - ((uint32_t) &kernel_start); - physical_mm_deinitialize_region((uint32_t) &kernel_start, kernel_size); + deinitialize_region((uint32_t) &kernel_start, kernel_size); /* Deinitialize first 4MiB */ - physical_mm_deinitialize_region(0, 4 * MiB); + deinitialize_region(0, 4 * MiB); - spinlock_release(&memory_map_lock); + Spinlock::release(&memory_map_lock); /* Manually loop through and calculate the number of free blocks. */ for (uint32_t i = 0; i < MAX_BLOCKS / BITMAP_ENTRY_SIZE; i++) @@ -121,14 +123,14 @@ physical_mm_init(void) if (memory_map[i] != 0xffffffff) /* Test each bit to see if it's zero */ for (uint32_t j = 0; j < BITMAP_ENTRY_SIZE; j++) - if (!physical_mm_test_bit(i * BITMAP_ENTRY_SIZE + j, memory_map)) + if (!test_bit(i * BITMAP_ENTRY_SIZE + j, memory_map)) total_free_blocks++; printk("physical_mm", "Total free blocks: 0x%x", total_free_blocks); } uint32_t -physical_mm_find_free_block(void) +find_free_block(void) { /* TODO: Why doesn't using block_count instead of MAX_BLOCKS work? 
*/ for (uint32_t i = 0; i < MAX_BLOCKS / BITMAP_ENTRY_SIZE; i++) @@ -136,7 +138,7 @@ physical_mm_find_free_block(void) if (memory_map[i] != 0xffffffff) /* Test each bit to see if it's zero */ for (uint32_t j = 0; j < BITMAP_ENTRY_SIZE; j++) - if (!physical_mm_test_bit(i * BITMAP_ENTRY_SIZE + j, memory_map)) + if (!test_bit(i * BITMAP_ENTRY_SIZE + j, memory_map)) return i * BITMAP_ENTRY_SIZE + j; /* Shouldn't be reached, since we're keeping track of the number of free @@ -146,27 +148,29 @@ physical_mm_find_free_block(void) } void * -physical_mm_allocate_block(void) +allocate_block(void) { if (total_free_blocks == 0) { printk("physical_mm", "No more free blocks!"); return NULL; } - spinlock_acquire(&memory_map_lock); + Spinlock::acquire(&memory_map_lock); - uint32_t block = physical_mm_find_free_block(); - physical_mm_set_used(block, &total_free_blocks, memory_map); + uint32_t block = find_free_block(); + set_used(block, &total_free_blocks, memory_map); - spinlock_release(&memory_map_lock); + Spinlock::release(&memory_map_lock); uint32_t physical_address = block * BLOCK_SIZE; return (void *) physical_address; } void -physical_mm_free_block(void *physical_address) +free_block(void *physical_address) { uint32_t block = ((uint32_t) physical_address) / BLOCK_SIZE; - physical_mm_set_usable(block, &total_free_blocks, memory_map); + set_usable(block, &total_free_blocks, memory_map); +} + } diff --git a/kernel/mm/virtual_mm/virtual_mm.c b/kernel/mm/virtual_mm/virtual_mm.cc index f750d8d..2293083 100644 --- a/kernel/mm/virtual_mm/virtual_mm.c +++ b/kernel/mm/virtual_mm/virtual_mm.cc @@ -23,6 +23,9 @@ #include <stdbool.h> #include <stdint.h> +namespace VirtualMM +{ + extern uint32_t kernel_start; extern uint32_t kernel_end; @@ -34,24 +37,24 @@ uint32_t page_directory[1024] ALIGNED(4096); uint32_t page_table[1024] ALIGNED(4096); ALWAYS_INLINE void -virtual_mm_load_page_directory(uint32_t *page_directory) +load_page_directory(uint32_t *page_directory) { __asm__("movl %0, %%cr3" 
::"r"(page_directory)); } bool -virtual_mm_switch_page_directory(uint32_t *page_directory) +switch_page_directory(uint32_t *page_directory) { if (!page_directory) return false; current_page_directory = page_directory; - virtual_mm_load_page_directory(page_directory); + load_page_directory(page_directory); return true; } ALWAYS_INLINE static void -virtual_mm_enable_paging(void) +enable_paging(void) { __asm__("movl %%cr0, %%eax;" "orl $0x80000000, %%eax;" @@ -60,7 +63,7 @@ virtual_mm_enable_paging(void) } void -virtual_mm_initialize(void) +init(void) { for (uint32_t i = 0; i < 1024; i++) page_table[i] = 0; @@ -77,8 +80,8 @@ virtual_mm_initialize(void) *pd_entry = PDE_FRAME((uint32_t) page_table) | PDE_PRESENT(1) | PDE_WRITABLE(1); - virtual_mm_switch_page_directory(page_directory); - virtual_mm_enable_paging(); + switch_page_directory(page_directory); + enable_paging(); } ALWAYS_INLINE uint32_t * @@ -86,7 +89,7 @@ get_or_make_table(uint32_t *pd_entry) { uint32_t *table; if (!PDE_IS_PRESENT(pd_entry)) { - table = physical_mm_allocate_block(); + table = (uint32_t *) PhysicalMM::allocate_block(); if (!table) ASSERT_NOT_REACHED(); @@ -101,7 +104,7 @@ get_or_make_table(uint32_t *pd_entry) } void -virtual_mm_map_page(void *physical_address, void *virtual_address) +map_page(void *physical_address, void *virtual_address) { uint32_t *pd_entry = &current_page_directory[GET_PD_INDEX(virtual_address)]; uint32_t *table = get_or_make_table(pd_entry); @@ -117,7 +120,7 @@ virtual_mm_map_page(void *physical_address, void *virtual_address) } void -virtual_mm_unmap_page(void *virtual_address) +unmap_page(void *virtual_address) { uint32_t *pd_entry = &current_page_directory[GET_PD_INDEX(virtual_address)]; @@ -134,7 +137,7 @@ virtual_mm_unmap_page(void *virtual_address) } void * -virtual_mm_find_free_addresses(uint32_t n) +find_free_addresses(uint32_t n) { /* Skip the first page directory, we don't wanna touch the first 4MiB. 
*/ for (uint32_t pd_index = 1; pd_index < PAGE_DIRECTORY_SIZE; pd_index++) { @@ -192,26 +195,26 @@ virtual_mm_find_free_addresses(uint32_t n) } void * -virtual_mm_alloc_pages(uint32_t n_pages) +alloc_pages(uint32_t n_pages) { - uint32_t starting_address - = (uint32_t) virtual_mm_find_free_addresses(n_pages); + uint32_t starting_address = (uint32_t) find_free_addresses(n_pages); if (starting_address == 0) return 0; for (uint32_t i = 0; i < n_pages; i++) { void *virtual_address = (void *) (starting_address + (i * PAGE_SIZE)); - void *physical_address = physical_mm_allocate_block(); - virtual_mm_map_page(physical_address, virtual_address); + void *physical_address = PhysicalMM::allocate_block(); + map_page(physical_address, virtual_address); } return (void *) starting_address; } void -virtual_mm_free_pages(void *starting_address, uint32_t n_pages) +free_pages(void *starting_address, uint32_t n_pages) { for (uint32_t i = 0; i < n_pages; i++) - virtual_mm_unmap_page( - (void *) (((uint32_t) starting_address) + (i * 4096))); + unmap_page((void *) (((uint32_t) starting_address) + (i * 4096))); +} + } |