author     Raghuram Subramani <raghus2247@gmail.com>  2025-02-01 09:17:19 -0500
committer  Raghuram Subramani <raghus2247@gmail.com>  2025-02-01 09:17:19 -0500
commit     4111fee72bcde6db229fc45b0076db8bc7407937 (patch)
tree       18778d7ce8ce1b1d8f68087b8c41dc3e643d41b5
parent     f8942052f2a71c2603fbc4b91303137d07151e72 (diff)
libk: Working (afaik) kmalloc implementation
-rw-r--r--  kernel/include/libk/kmalloc.h       |  4
-rw-r--r--  kernel/kernel/kernel.cc             | 13
-rw-r--r--  kernel/libk/kmalloc.cc              | 59
-rw-r--r--  kernel/mm/virtual_mm/pages.cc       |  3
-rw-r--r--  kernel/mm/virtual_mm/virtual_mm.cc  |  9
5 files changed, 57 insertions, 31 deletions
diff --git a/kernel/include/libk/kmalloc.h b/kernel/include/libk/kmalloc.h
index 7a9ff81..fac3a95 100644
--- a/kernel/include/libk/kmalloc.h
+++ b/kernel/include/libk/kmalloc.h
@@ -41,10 +41,8 @@ struct boundary_tag {
struct boundary_tag *prev; //< Linked list info.
};
-#define liballoc_alloc VirtualMM::alloc_pages
-#define liballoc_free VirtualMM::free_pages
-
bool kmalloc_initialized(void);
+void kmalloc_initialize(void);
void *kmalloc(size_t);
#endif
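
A minimal usage sketch of the API this header now exposes, assuming only what the kernel.cc hunk below demonstrates (the 128-int buffer size is illustrative):

    #include <libk/kmalloc.h>

    /* Sketch: kmalloc_initialize() must run after the virtual memory
     * manager is up; it primes the allocator with a throwaway kmalloc(1)
     * and sets the flag that kmalloc_initialized() reports. */
    if (!kmalloc_initialized())
        kmalloc_initialize();

    int *buf = (int *) kmalloc(sizeof(int) * 128);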
diff --git a/kernel/kernel/kernel.cc b/kernel/kernel/kernel.cc
index a229bbf..5b02baf 100644
--- a/kernel/kernel/kernel.cc
+++ b/kernel/kernel/kernel.cc
@@ -43,10 +43,17 @@ kernel_main(uint32_t magic, multiboot_info_t *multiboot_info)
MemoryMap::load(multiboot_info);
PhysicalMM::initialize();
VirtualMM::initialize();
+ kmalloc_initialize();
- int *x = (int *) kmalloc(12);
- *x = 132;
- printk("debug", "x(0x%x) *x(0x%x)", x, *x);
+ int *x = (int *) kmalloc(sizeof(int) * 8192);
+ for (uint32_t i = 0; i < 8192; i++)
+ x[i] = i;
+ printk("debug", "x(0x%x) *x(0x%x)", x, x[12]);
+
+ int *y = (int *) kmalloc(sizeof(int) * 8192);
+ for (uint32_t i = 0; i < 8192; i++)
+ y[i] = i;
+ printk("debug", "y(0x%x) *x(0x%x)", y, y[14]);
printk("\nKernel", "Started.");
diff --git a/kernel/libk/kmalloc.cc b/kernel/libk/kmalloc.cc
index f1147a6..2354139 100644
--- a/kernel/libk/kmalloc.cc
+++ b/kernel/libk/kmalloc.cc
@@ -17,6 +17,7 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <kernel/spinlock.h>
#include <libk/kmalloc.h>
#include <libk/stdio.h>
#include <mm/virtual_mm.h>
@@ -25,14 +26,6 @@
/* TODO: Kmalloc must have space for a page table *at all times*. */
-bool initialized = false;
-
-bool
-kmalloc_initialized(void)
-{
- return initialized;
-}
-
#define LIBALLOC_MAGIC 0xc001c0de
#define MAXCOMPLETE 5
#define MAXEXP 32
@@ -43,22 +36,48 @@ kmalloc_initialized(void)
#define MODE MODE_BEST
-#ifdef DEBUG
-#include <stdio.h>
-#endif
-
struct boundary_tag *l_freePages[MAXEXP]; //< Allowing for 2^MAXEXP blocks
int l_completePages[MAXEXP]; //< Allowing for 2^MAXEXP blocks
-#ifdef DEBUG
-unsigned int l_allocated = 0; //< The real amount of memory allocated.
-unsigned int l_inuse = 0; //< The amount of memory in use (malloc'ed).
-#endif
-
static unsigned int l_initialized = 0; //< Flag to indicate initialization.
static unsigned int l_pageSize = 4096; //< Individual page size
static unsigned int l_pageCount = 1; //< Minimum number of pages to allocate.
+spinlock_t lock;
+bool initialized;
+
+#define liballoc_alloc VirtualMM::alloc_pages
+#define liballoc_free VirtualMM::free_pages
+
+inline int
+liballoc_lock(void)
+{
+ Spinlock::acquire(&lock);
+ return 0;
+}
+
+inline int
+liballoc_unlock(void)
+{
+ Spinlock::release(&lock);
+ return 0;
+}
+
+bool
+kmalloc_initialized(void)
+{
+ return initialized;
+}
+
+void
+kmalloc_initialize(void)
+{
+ // void *x =
+ kmalloc(1);
+ initialized = true;
+ // kfree(x);
+}
+
static inline int
getexp(unsigned int size)
{
@@ -189,7 +208,7 @@ kmalloc(size_t size)
void *ptr;
struct boundary_tag *tag = NULL;
- // liballoc_lock();
+ liballoc_lock();
if (l_initialized == 0) {
for (index = 0; index < MAXEXP; index++) {
@@ -218,7 +237,7 @@ kmalloc(size_t size)
// No page found. Make one.
if (tag == NULL) {
if ((tag = allocate_new_tag(size)) == NULL) {
- // liballoc_unlock();
+ liballoc_unlock();
return NULL;
}
@@ -251,6 +270,6 @@ kmalloc(size_t size)
}
ptr = (void *) ((unsigned int) tag + sizeof(struct boundary_tag));
- // liballoc_unlock();
+ liballoc_unlock();
return ptr;
}
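
kernel/spinlock.h itself is not part of this diff, so the spinlock_t layout and the Spinlock::acquire/release routines that liballoc_lock/unlock rely on are not shown. A minimal sketch of what those hooks assume, written with GCC/Clang atomic builtins (the layout and namespace contents here are guesses, not the tree's actual implementation):

    #include <stdint.h>

    struct spinlock_t {
        volatile uint32_t locked; /* 0 = free, 1 = held (assumed layout) */
    };

    namespace Spinlock
    {
    void
    acquire(spinlock_t *l)
    {
        /* Spin until we are the one that flips locked from 0 to 1. */
        while (__atomic_exchange_n(&l->locked, 1, __ATOMIC_ACQUIRE))
            ;
    }

    void
    release(spinlock_t *l)
    {
        __atomic_store_n(&l->locked, 0, __ATOMIC_RELEASE);
    }
    } // namespace Spinlock

On a single-CPU bring-up kernel this mainly guards the allocator's bookkeeping against re-entrancy; whatever kernel/spinlock.h actually provides, liballoc_lock/unlock above only need mutual exclusion around kmalloc's internal lists.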
diff --git a/kernel/mm/virtual_mm/pages.cc b/kernel/mm/virtual_mm/pages.cc
index 5111ac7..1cf5ded 100644
--- a/kernel/mm/virtual_mm/pages.cc
+++ b/kernel/mm/virtual_mm/pages.cc
@@ -29,13 +29,14 @@ namespace VirtualMM
void *
alloc_pages(uint32_t n_pages)
{
+ printk("virtual_mm", "Allocating 0x%x pages", n_pages);
uint32_t starting_address = (uint32_t) find_free_addresses(n_pages);
if (!starting_address)
return NULL;
for (uint32_t i = 0; i < n_pages; i++) {
- void *virtual_address = (void *) (starting_address + (i * PAGE_SIZE));
void *physical_address = PhysicalMM::allocate_block();
+ void *virtual_address = (void *) (starting_address + (i * PAGE_SIZE));
map_page(physical_address, virtual_address);
}
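
These are the functions that the liballoc_alloc and liballoc_free macros in kmalloc.cc now resolve to. A sketch of a request as liballoc would issue it, assuming only the signature shown above (the 4-page count is illustrative):

    /* Sketch: the returned region is virtually contiguous; the backing
     * physical blocks come one at a time from PhysicalMM::allocate_block()
     * and need not be physically contiguous. */
    void *region = VirtualMM::alloc_pages(4);
    if (region == NULL) {
        /* find_free_addresses() found no free 4-page run */
    }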
diff --git a/kernel/mm/virtual_mm/virtual_mm.cc b/kernel/mm/virtual_mm/virtual_mm.cc
index 401113b..ee2e78a 100644
--- a/kernel/mm/virtual_mm/virtual_mm.cc
+++ b/kernel/mm/virtual_mm/virtual_mm.cc
@@ -102,14 +102,15 @@ uint32_t *
make_table(uint32_t *pd_entry)
{
uint32_t *table = 0;
- if (!kmalloc_initialized())
+ if (!kmalloc_initialized()) {
/* If we don't have a dynamic memory allocator yet (this will happen only
* once, when we initialize the dynamic allocator), then we hard code the
* next page table to be at 7MiB */
table = (uint32_t *) (7 * MiB);
- else
- // table = (uint32_t *) kmalloc(sizeof(uint32_t) * 1024);
- ASSERT_NOT_REACHED();
+ printk("virtual_mm",
+ "Using our hard coded table; this should happen only once.");
+ } else
+ table = (uint32_t *) kmalloc(sizeof(uint32_t) * 1024);
for (uint32_t i = 0; i < 1024; i++)
table[i] = 0x0;
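
The ordering this relies on mirrors kernel.cc above: the only allocation that can reach the hard-coded 7 MiB table is the kmalloc(1) inside kmalloc_initialize(), because kmalloc_initialized() is still false while it runs. A sketch of that bring-up sequence, compressed from the hunks above rather than new code:

    MemoryMap::load(multiboot_info);
    PhysicalMM::initialize();
    VirtualMM::initialize();

    /* The kmalloc(1) inside kmalloc_initialize() is the one call that can
     * hit make_table()'s hard-coded 7 MiB table, since the initialized
     * flag is still false at that point. */
    kmalloc_initialize();

    /* Every later make_table() call takes the else branch above: */
    uint32_t *table = (uint32_t *) kmalloc(sizeof(uint32_t) * 1024);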