Diffstat (limited to 'kernel')
-rw-r--r--  kernel/include/libk/kmalloc.h        4
-rw-r--r--  kernel/kernel/kernel.cc              1
-rw-r--r--  kernel/libk/kmalloc.cc              24
-rw-r--r--  kernel/mm/virtual_mm/pages.cc        4
-rw-r--r--  kernel/mm/virtual_mm/virtual_mm.cc  42
5 files changed, 48 insertions(+), 27 deletions(-)
diff --git a/kernel/include/libk/kmalloc.h b/kernel/include/libk/kmalloc.h
index 85e247a..2817aff 100644
--- a/kernel/include/libk/kmalloc.h
+++ b/kernel/include/libk/kmalloc.h
@@ -21,8 +21,6 @@
#include <stdint.h>
-#define MIN_PAGES 4
-
typedef struct memory_chunk_t {
struct memory_chunk_t *next;
struct memory_chunk_t *prev;
@@ -30,6 +28,8 @@ typedef struct memory_chunk_t {
uint32_t size;
} memory_chunk_t;
+bool kmalloc_initialized(void);
+void kmalloc_initialize(void);
void *kmalloc(uint32_t size);
#endif
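
For context, memory_chunk_t is the header kmalloc threads through its heap as a doubly linked free list. A minimal sketch of a first-fit search over such a list, assuming `size` counts a chunk's usable bytes and that only free chunks are linked (neither assumption is confirmed by this diff):

    /* Hypothetical sketch, not this repository's implementation. */
    static memory_chunk_t *
    find_first_fit(memory_chunk_t *head, uint32_t size)
    {
      /* Take the first chunk large enough to satisfy the request. */
      for (memory_chunk_t *mc = head; mc; mc = mc->next)
        if (mc->size >= size)
          return mc;
      return NULL;
    }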
diff --git a/kernel/kernel/kernel.cc b/kernel/kernel/kernel.cc
index 72a182d..165956a 100644
--- a/kernel/kernel/kernel.cc
+++ b/kernel/kernel/kernel.cc
@@ -43,6 +43,7 @@ kernel_main(uint32_t magic, multiboot_info_t *multiboot_info)
MemoryMap::load(multiboot_info);
PhysicalMM::initialize();
VirtualMM::initialize();
+ kmalloc_initialize();
uint32_t *x = (uint32_t *) (5 * MiB);
*x = 8;
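
The ordering here matters: kmalloc_initialize() calls VirtualMM::alloc_pages(), and make_table() below consults kmalloc_initialized() to decide whether it may pull a fresh page table from the heap. The new query also lets later boot code guard against allocating too early; a minimal sketch of that pattern (the guard is an assumption, this diff adds no such caller):

    /* Sketch: defer heap use until the allocator reports ready. */
    if (kmalloc_initialized()) {
      uint32_t *scratch = (uint32_t *) kmalloc(sizeof(uint32_t) * 32);
      if (scratch)
        scratch[0] = 0; /* backed by pages mapped via alloc_pages */
    }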
diff --git a/kernel/libk/kmalloc.cc b/kernel/libk/kmalloc.cc
index 3c7dd91..03b7d86 100644
--- a/kernel/libk/kmalloc.cc
+++ b/kernel/libk/kmalloc.cc
@@ -23,7 +23,10 @@
#include <stddef.h>
#include <stdint.h>
+/* TODO: Kmalloc must have space for a page table *at all times*. */
+
memory_chunk_t *starting_mc = NULL;
+bool initialized = false;
memory_chunk_t *
add_block(void *address, uint32_t size)
@@ -44,25 +47,32 @@ add_block(void *address, uint32_t size)
return mc;
}
-static void
-kmalloc_init(void)
+void
+kmalloc_initialize(void)
{
- int *initial_region = (int *) VirtualMM::alloc_pages(MIN_PAGES);
- printk("debug", "%x", initial_region);
- /* *initial_region = 10; */
+ int *initial_region = (int *) VirtualMM::alloc_pages(1);
+ printk("debug", "Initial region: 0x%x", initial_region);
+ *initial_region = 10;
/* memory_chunk_t *mc = (memory_chunk_t *) initial_region; */
/* mc->size = 10; */
/* printk("kmalloc", "mc->size(0x%x)", mc->size); */
/* starting_mc = add_block(initial_region, 4 * PAGE_SIZE); */
+ initialized = true;
+}
+
+bool
+kmalloc_initialized(void)
+{
+ return initialized;
}
void *
kmalloc(uint32_t size)
{
- if (!starting_mc)
- kmalloc_init();
+ if (!initialized)
+ kmalloc_initialize();
/* printk("kmalloc", */
/* "Initialized region with starting_mc(0x%x) and size(0x%x)", */
diff --git a/kernel/mm/virtual_mm/pages.cc b/kernel/mm/virtual_mm/pages.cc
index 0f27087..5111ac7 100644
--- a/kernel/mm/virtual_mm/pages.cc
+++ b/kernel/mm/virtual_mm/pages.cc
@@ -30,8 +30,8 @@ void *
alloc_pages(uint32_t n_pages)
{
uint32_t starting_address = (uint32_t) find_free_addresses(n_pages);
- if (starting_address == 0)
- return 0;
+ if (!starting_address)
+ return NULL;
for (uint32_t i = 0; i < n_pages; i++) {
void *virtual_address = (void *) (starting_address + (i * PAGE_SIZE));
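
The hunk ends inside the mapping loop; the body presumably backs each virtual page with a fresh physical frame, roughly as in this sketch (map_page() and its signature are inferred from unmap_page() below, not shown in this diff):

    /* Sketch of the remainder of alloc_pages, under assumed helpers. */
    for (uint32_t i = 0; i < n_pages; i++) {
      void *virtual_address = (void *) (starting_address + (i * PAGE_SIZE));
      void *physical_block = PhysicalMM::allocate_block();
      if (!physical_block)
        return NULL; /* assumption: give up when physical memory runs out */
      map_page(virtual_address, physical_block); /* hypothetical helper */
    }
    return (void *) starting_address;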
diff --git a/kernel/mm/virtual_mm/virtual_mm.cc b/kernel/mm/virtual_mm/virtual_mm.cc
index e5cd5cc..5c766ea 100644
--- a/kernel/mm/virtual_mm/virtual_mm.cc
+++ b/kernel/mm/virtual_mm/virtual_mm.cc
@@ -17,6 +17,7 @@
*/
#include <kernel/halt.h>
+#include <libk/kmalloc.h>
#include <libk/stdio.h>
#include <mm/physical_mm.h>
#include <mm/virtual_mm.h>
@@ -38,11 +39,6 @@ uint32_t fourMiB_page_table[1024] ALIGNED(4096);
/* Page table for the next 4 MiB */
uint32_t eightMiB_page_table[1024] ALIGNED(4096);
-/* Let's reserve a page table at the end of each new page table we allocate, so
- * that we know that at any point in time, we will always have space for a new
- * page table */
-uint32_t *next_page_table;
-
ALWAYS_INLINE void
load_page_directory(uint32_t *page_directory)
{
@@ -103,19 +99,32 @@ initialize(void)
}
uint32_t *
-get_or_make_table(uint32_t *pd_entry)
+make_table(uint32_t *pd_entry)
{
- uint32_t *table;
- if (!PDE_IS_PRESENT(pd_entry)) {
- table = (uint32_t *) PhysicalMM::allocate_block();
- if (!table)
- ASSERT_NOT_REACHED();
+ uint32_t *table = 0;
+ if (!kmalloc_initialized())
+ /* If we don't have a dynamic memory allocator yet (this happens exactly
+ * once, while we are initializing the dynamic allocator), we hard-code
+ * the next page table to live at 7MiB. */
+ table = (uint32_t *) (7 * MiB);
+ else
+ table = (uint32_t *) kmalloc(sizeof(uint32_t) * 1024);
+
+ for (uint32_t i = 0; i < 1024; i++)
+ table[i] = 0x0;
+
+ *pd_entry = PDE_FRAME((uint32_t) table) | PDE_PRESENT(1) | PDE_WRITABLE(1);
+ return table;
+}
- for (uint32_t i = 0; i < 1024; i++)
- table[i] = 0x0;
+ALWAYS_INLINE static uint32_t *
+get_or_make_table(uint32_t *pd_entry)
+{
+ uint32_t *table = 0;
- *pd_entry = PDE_FRAME((uint32_t) table) | PDE_PRESENT(1) | PDE_WRITABLE(1);
- } else
+ if (!PDE_IS_PRESENT(pd_entry))
+ table = make_table(pd_entry);
+ else
table = (uint32_t *) PDE_GET_TABLE(pd_entry);
return table;
@@ -157,7 +166,8 @@ unmap_page(void *virtual_address)
void *
find_free_addresses(uint32_t n)
{
- /* Skip the first page directory, we don't wanna touch the first 8MiB. */
+ /* Skip the first two page directory entries; we don't wanna touch the first
+ * 8MiB. */
for (uint32_t pd_index = 2; pd_index < PAGE_DIRECTORY_SIZE; pd_index++) {
uint32_t starting_pd_index = pd_index;
uint32_t *pd_entry = &current_page_directory[pd_index];
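
Two details worth spelling out. First, the 7MiB fallback in make_table() works because the first 8MiB are already mapped by the static fourMiB/eightMiB page tables (which is also why kernel_main can write through a pointer at 5MiB); after initialization, page tables come from kmalloc(), and keeping enough heap slack for one at all times is what the TODO in kmalloc.cc appears to be about. Second, the PDE_* macros are not part of this diff; on 32-bit x86 their plausible expansions look like the following sketch (names taken from the diff, bit layout from the standard page-directory-entry format: bit 0 present, bit 1 writable, bits 12-31 the table's frame address):

    /* Hypothetical definitions, consistent with how the diff uses them. */
    #define PDE_PRESENT(x)    ((x) ? 0x1 : 0x0)     /* bit 0 */
    #define PDE_WRITABLE(x)   ((x) ? 0x2 : 0x0)     /* bit 1 */
    #define PDE_FRAME(addr)   ((addr) & 0xFFFFF000) /* bits 12-31 */
    #define PDE_IS_PRESENT(e) (*(e) & 0x1)          /* e is a uint32_t* */
    #define PDE_GET_TABLE(e)  (*(e) & 0xFFFFF000)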