about summary refs log tree commit diff
diff options
context:
space:
mode:
-rw-r--r--kernel/include/mm/virtual_mm.h2
-rw-r--r--kernel/kernel/kernel.c9
-rw-r--r--kernel/mm/physical_mm/physical_mm.c16
-rw-r--r--kernel/mm/virtual_mm/virtual_mm.c55
4 files changed, 41 insertions, 41 deletions
diff --git a/kernel/include/mm/virtual_mm.h b/kernel/include/mm/virtual_mm.h
index 58b8692..f900c52 100644
--- a/kernel/include/mm/virtual_mm.h
+++ b/kernel/include/mm/virtual_mm.h
@@ -97,7 +97,7 @@ void virtual_mm_unmap_page(void *virtual_address);
/*
* Find a virtual address with n consecutive free addresses.
*/
-void *virtual_mm_find_free_virtual_addresses(uint32_t n_pages);
+void *virtual_mm_find_free_addresses(uint32_t n_pages);
/*
* Allocate and map n pages.
diff --git a/kernel/kernel/kernel.c b/kernel/kernel/kernel.c
index 9b5696b..c249cc2 100644
--- a/kernel/kernel/kernel.c
+++ b/kernel/kernel/kernel.c
@@ -44,15 +44,20 @@ kernel_main(uint32_t magic, multiboot_info_t *multiboot_info)
physical_mm_init();
virtual_mm_initialize();
- /* int *x = physical_mm_allocate_block(); */
+ void *x = virtual_mm_find_free_addresses(1046999);
+ printk("debug", "x(0x%x)", x);
+
+#if 0
+ int *x = physical_mm_allocate_block();
/* *x = 20; */
- /* printk("debug", "x(%lu)", *x); */
+ printk("debug", "x(0x%x)", x);
/* virtual_mm_alloc_pages(1); */
/* void *x = kmalloc(12); */
/* printk("debug", "x(0x%x)", x); */
printk("\nKernel", "Started.");
+#endif
exit();
halt(); /* If exit() fails (on real hardware) */
diff --git a/kernel/mm/physical_mm/physical_mm.c b/kernel/mm/physical_mm/physical_mm.c
index 81656ed..d0d859c 100644
--- a/kernel/mm/physical_mm/physical_mm.c
+++ b/kernel/mm/physical_mm/physical_mm.c
@@ -102,10 +102,8 @@ physical_mm_init(void)
for (uint32_t i = 0; i < MAX_BLOCKS / BITMAP_ENTRY_SIZE; i++)
memory_map[i] = 0xffffffff;
- uint32_t total_free_memory = 0;
for (int i = 0; i < free_memory_regions->n_regions; i++) {
multiboot_memory_map_t *region = free_memory_regions->region_list[i];
- total_free_memory += region->len_low;
physical_mm_initialize_region(region->addr_low, region->len_low);
}
@@ -117,26 +115,16 @@ physical_mm_init(void)
spinlock_release(&memory_map_lock);
- total_free_memory -= kernel_size;
- block_count = total_free_memory / BLOCK_SIZE;
- printk("\nphysical_mm", "Total blocks: 0x%x", block_count);
- printk("physical_mm", "Total free blocks: 0x%x", total_free_blocks);
-
-#if 0
/* Manually loop through and calculate the number of free blocks. */
- uint32_t free_blcks = 0;
for (uint32_t i = 0; i < MAX_BLOCKS / BITMAP_ENTRY_SIZE; i++)
/* At least one block in the entry isn't in use */
if (memory_map[i] != 0xffffffff)
/* Test each bit to see if it's zero */
for (uint32_t j = 0; j < BITMAP_ENTRY_SIZE; j++)
if (!physical_mm_test_bit(i * BITMAP_ENTRY_SIZE + j, memory_map))
- free_blcks++;
+ total_free_blocks++;
- printk("physical_mm",
- "Experimentally calculated free blocks: 0x%x",
- free_blcks);
-#endif
+ printk("physical_mm", "Total free blocks: 0x%x", total_free_blocks);
}
uint32_t
diff --git a/kernel/mm/virtual_mm/virtual_mm.c b/kernel/mm/virtual_mm/virtual_mm.c
index 3043686..f750d8d 100644
--- a/kernel/mm/virtual_mm/virtual_mm.c
+++ b/kernel/mm/virtual_mm/virtual_mm.c
@@ -20,6 +20,7 @@
#include <libk/stdio.h>
#include <mm/physical_mm.h>
#include <mm/virtual_mm.h>
+#include <stdbool.h>
#include <stdint.h>
extern uint32_t kernel_start;
@@ -30,7 +31,7 @@ uint32_t *current_page_directory = 0;
/* Kernel's page directory */
uint32_t page_directory[1024] ALIGNED(4096);
/* Page table for the first 4 MiB */
-uint32_t table[1024] ALIGNED(4096);
+uint32_t page_table[1024] ALIGNED(4096);
ALWAYS_INLINE void
virtual_mm_load_page_directory(uint32_t *page_directory)
@@ -62,18 +63,19 @@ void
virtual_mm_initialize(void)
{
for (uint32_t i = 0; i < 1024; i++)
- table[i] = 0;
+ page_table[i] = 0;
/* Identity map the first 4MiB, excluding the 4th MiB
* (maps 4KiB 1024 times) */
for (uint32_t i = 0; i < 1024; i++)
- table[i] = PTE_FRAME(i) | PTE_PRESENT(1) | PTE_WRITABLE(1);
+ page_table[i] = PTE_FRAME(i) | PTE_PRESENT(1) | PTE_WRITABLE(1);
for (uint32_t i = 0; i < 1024; i++)
page_directory[i] = 0;
uint32_t *pd_entry = &page_directory[0];
- *pd_entry = PDE_FRAME((uint32_t) table) | PDE_PRESENT(1) | PDE_WRITABLE(1);
+ *pd_entry
+ = PDE_FRAME((uint32_t) page_table) | PDE_PRESENT(1) | PDE_WRITABLE(1);
virtual_mm_switch_page_directory(page_directory);
virtual_mm_enable_paging();
@@ -132,23 +134,26 @@ virtual_mm_unmap_page(void *virtual_address)
}
void *
-virtual_mm_find_free_virtual_addresses(uint32_t n)
+virtual_mm_find_free_addresses(uint32_t n)
{
/* Skip the first page directory, we don't wanna touch the first 4MiB. */
for (uint32_t pd_index = 1; pd_index < PAGE_DIRECTORY_SIZE; pd_index++) {
uint32_t starting_pd_index = pd_index;
uint32_t *pd_entry = &current_page_directory[pd_index];
- /* Ideally, we shouldn't be allocating tables here */
- uint32_t *table = get_or_make_table(pd_entry);
+ uint32_t *table = 0;
+
+ bool table_is_present = PDE_IS_PRESENT(pd_entry);
+ if (!table_is_present)
+ table = (uint32_t *) PDE_GET_TABLE(pd_entry);
for (uint32_t starting_pt_index = 0; starting_pt_index < PAGE_TABLE_SIZE;
starting_pt_index++) {
- uint32_t *pt_entry = &table[starting_pt_index];
- if (PTE_IS_PRESENT(pt_entry))
- continue;
+ uint32_t count = 0;
+ if (table_is_present)
+ if (PTE_IS_PRESENT(&table[starting_pt_index]))
+ continue;
/* We found our starting pt_entry */
- uint32_t count = 0;
for (uint32_t pt_index = starting_pt_index; pt_index <= PAGE_TABLE_SIZE;
pt_index++) {
/* If we overflow, switch to the consecutive page directory entry */
@@ -158,22 +163,24 @@ virtual_mm_find_free_virtual_addresses(uint32_t n)
return 0; /* Ran out of pd_entries */
pd_entry = &current_page_directory[pd_index];
- table = get_or_make_table(pd_entry);
+ table_is_present = PDE_IS_PRESENT(pd_entry);
pt_index = 0;
}
- /* If page table entry is already used, break from the current loop
- */
- uint32_t *pt_entry = &table[pt_index];
- if (PTE_IS_PRESENT(pt_entry)) {
- /* Since we have some used address at some point between j and
- * count, we can't find n consecutive free addresses in between j
- * and the used block (j + count + 1) */
- starting_pt_index += count;
- break;
- }
+ if (table_is_present) {
+ if (PTE_IS_PRESENT(&table[pt_index])) {
+ /* Since we have some used address at some point between j and
+ * count, we can't find n consecutive free addresses in between j
+ * and the used block (j + count + 1) */
+ starting_pt_index += count;
+ break;
+ }
+
+ count++;
+ } else
+ count++;
- if (++count == n)
+ if (count == n)
return (void *) VIRTUAL_ADDRESS(starting_pd_index,
starting_pt_index);
}
@@ -188,7 +195,7 @@ void *
virtual_mm_alloc_pages(uint32_t n_pages)
{
uint32_t starting_address
- = (uint32_t) virtual_mm_find_free_virtual_addresses(n_pages);
+ = (uint32_t) virtual_mm_find_free_addresses(n_pages);
if (starting_address == 0)
return 0;