-rw-r--r--  kernel/include/libk/kmalloc.h        29
-rw-r--r--  kernel/kernel/kernel.cc              22
-rw-r--r--  kernel/libk/kmalloc.cc              250
-rw-r--r--  kernel/mm/virtual_mm/virtual_mm.cc    3
4 files changed, 239 insertions, 65 deletions
diff --git a/kernel/include/libk/kmalloc.h b/kernel/include/libk/kmalloc.h
index 2817aff..7a9ff81 100644
--- a/kernel/include/libk/kmalloc.h
+++ b/kernel/include/libk/kmalloc.h
@@ -19,17 +19,32 @@
#ifndef __libk_kmalloc_h
#define __libk_kmalloc_h
+#include <mm/virtual_mm.h>
+#include <stddef.h>
#include <stdint.h>
-typedef struct memory_chunk_t {
- struct memory_chunk_t *next;
- struct memory_chunk_t *prev;
+/** A boundary tag prepended to every allocated page (or section of a
+ * page). It lets the allocator recognise valid memory blocks when the
+ * application later tries to free them.
+ */
+struct boundary_tag {
+ unsigned int magic; //< Magic number marking a valid tag.
+ unsigned int size; //< Requested size.
+ unsigned int real_size; //< Actual size.
+ int index; //< Free-list bucket this tag is filed under (-1 if none).
+
+ struct boundary_tag *split_left; //< Physically adjacent tag to the left when a page has been split.
+ struct boundary_tag *split_right; //< Physically adjacent tag to the right.
+
+ struct boundary_tag *next; //< Linked list info.
+ struct boundary_tag *prev; //< Linked list info.
+};
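+/* Layout of a single allocation, as implied by the fields above (sketch):
+ *
+ *   [ struct boundary_tag | size bytes of user data | unused remainder ]
+ *   ^ tag                  ^ pointer returned by kmalloc()
+ *
+ * A matching kfree() (not part of this change) would step back by
+ * sizeof(struct boundary_tag) from the user pointer and check `magic`
+ * before trusting the tag.
+ */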
- uint32_t size;
-} memory_chunk_t;
+#define liballoc_alloc VirtualMM::alloc_pages
+#define liballoc_free VirtualMM::free_pages
bool kmalloc_initialized(void);
-void kmalloc_initialize(void);
-void *kmalloc(uint32_t size);
+void *kmalloc(size_t);
#endif
diff --git a/kernel/kernel/kernel.cc b/kernel/kernel/kernel.cc
index 165956a..a229bbf 100644
--- a/kernel/kernel/kernel.cc
+++ b/kernel/kernel/kernel.cc
@@ -43,26 +43,12 @@ kernel_main(uint32_t magic, multiboot_info_t *multiboot_info)
MemoryMap::load(multiboot_info);
PhysicalMM::initialize();
VirtualMM::initialize();
- kmalloc_initialize();
- uint32_t *x = (uint32_t *) (5 * MiB);
- *x = 8;
- printk("debug", "x(0x%x)", *x);
+ int *x = (int *) kmalloc(12);
+ *x = 132;
+ printk("debug", "x(0x%x) *x(0x%x)", x, *x);
- // void *x = VirtualMM::find_free_addresses(1046999);
- // printk("debug", "x(0x%x)", x);
-
- // #if 0
- // int *x = PhysicalMM::allocate_block();
- // /* *x = 20; */
- // printk("debug", "x(0x%x)", x);
-
- // /* virtual_mm_alloc_pages(1); */
- // /* void *x = kmalloc(12); */
- // /* printk("debug", "x(0x%x)", x); */
-
- // printk("\nKernel", "Started.");
- // #endif
+ printk("\nKernel", "Started.");
exit();
halt(); /* If exit() fails (on real hardware) */
diff --git a/kernel/libk/kmalloc.cc b/kernel/libk/kmalloc.cc
index 03b7d86..f1147a6 100644
--- a/kernel/libk/kmalloc.cc
+++ b/kernel/libk/kmalloc.cc
@@ -25,60 +25,232 @@
/* TODO: Kmalloc must have space for a page table *at all times*. */
-memory_chunk_t *starting_mc = NULL;
bool initialized = false;
-memory_chunk_t *
-add_block(void *address, uint32_t size)
+bool
+kmalloc_initialized(void)
{
- memory_chunk_t *mc = (memory_chunk_t *) address;
- mc->next = NULL;
- mc->prev = NULL;
-
- uint32_t chunk_size = size - sizeof(memory_chunk_t);
- printk("kmalloc", "size(0x%x) chunk_size(0x%x)");
-
- mc->size = chunk_size;
- printk("kmalloc",
- "mc(0x%x) mc->size(0x%x) chunk_size(0x%x)",
- mc,
- mc->size,
- chunk_size);
- return mc;
+ return initialized;
}
-void
-kmalloc_initialize(void)
+#define LIBALLOC_MAGIC 0xc001c0de
+#define MAXCOMPLETE 5
+#define MAXEXP 32
+#define MINEXP 8
+
+#define MODE_BEST 0
+#define MODE_INSTANT 1
+
+#define MODE MODE_BEST
+
+#ifdef DEBUG
+#include <stdio.h>
+#endif
+
+struct boundary_tag *l_freePages[MAXEXP]; //< One free list per power-of-two size class, up to 2^MAXEXP bytes.
+int l_completePages[MAXEXP]; //< Count of whole (unsplit) free pages held in each size class.
+
+#ifdef DEBUG
+unsigned int l_allocated = 0; //< The real amount of memory allocated.
+unsigned int l_inuse = 0; //< The amount of memory in use (malloc'ed).
+#endif
+
+static unsigned int l_initialized = 0; //< Flag to indicate initialization.
+static unsigned int l_pageSize = 4096; //< Individual page size
+static unsigned int l_pageCount = 1; //< Minimum number of pages to allocate.
+
+static inline int
+getexp(unsigned int size)
{
- int *initial_region = (int *) VirtualMM::alloc_pages(1);
- printk("debug", "Initial region: 0x%x", initial_region);
- *initial_region = 10;
+ if (size < (1 << MINEXP)) {
+ return -1; // Smaller than the quantum.
+ }
- /* memory_chunk_t *mc = (memory_chunk_t *) initial_region; */
- /* mc->size = 10; */
- /* printk("kmalloc", "mc->size(0x%x)", mc->size); */
+ int shift = MINEXP;
- /* starting_mc = add_block(initial_region, 4 * PAGE_SIZE); */
- initialized = true;
+ while (shift < MAXEXP) {
+ if ((1 << shift) > size)
+ break;
+ shift += 1;
+ }
+
+ return shift - 1;
}
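+/* Worked examples for getexp() (derived directly from the loop above):
+ *   getexp(12)   == -1 (below the 2^MINEXP = 256 byte quantum)
+ *   getexp(300)  ==  8 (256 <= 300 < 512)
+ *   getexp(4096) == 12 (4096 <= 4096 < 8192)
+ * i.e. it returns the largest e with 2^e <= size, for sizes >= 256. */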
-bool
-kmalloc_initialized(void)
+static inline void
+insert_tag(struct boundary_tag *tag, int index)
{
- return initialized;
+ int realIndex;
+
+ if (index < 0) {
+ realIndex = getexp(tag->real_size - sizeof(struct boundary_tag));
+ if (realIndex < MINEXP)
+ realIndex = MINEXP;
+ } else
+ realIndex = index;
+
+ tag->index = realIndex;
+
+ if (l_freePages[realIndex] != NULL) {
+ l_freePages[realIndex]->prev = tag;
+ tag->next = l_freePages[realIndex];
+ }
+
+ l_freePages[realIndex] = tag;
+}
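+/* Example (following insert_tag() with index < 0): a remainder tag whose
+ * usable space (real_size minus the tag header) comes to 4020 bytes gets
+ * getexp(4020) == 11 and is therefore pushed onto l_freePages[11]. */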
+
+static inline void
+remove_tag(struct boundary_tag *tag)
+{
+ if (l_freePages[tag->index] == tag)
+ l_freePages[tag->index] = tag->next;
+
+ if (tag->prev != NULL)
+ tag->prev->next = tag->next;
+ if (tag->next != NULL)
+ tag->next->prev = tag->prev;
+
+ tag->next = NULL;
+ tag->prev = NULL;
+ tag->index = -1;
+}
+
+static inline struct boundary_tag *
+split_tag(struct boundary_tag *tag)
+{
+ unsigned int remainder
+ = tag->real_size - sizeof(struct boundary_tag) - tag->size;
+
+ struct boundary_tag *new_tag
+ = (struct boundary_tag *) ((unsigned int) tag
+ + sizeof(struct boundary_tag) + tag->size);
+
+ new_tag->magic = LIBALLOC_MAGIC;
+ new_tag->real_size = remainder;
+
+ new_tag->next = NULL;
+ new_tag->prev = NULL;
+
+ new_tag->split_left = tag;
+ new_tag->split_right = tag->split_right;
+
+ if (new_tag->split_right != NULL)
+ new_tag->split_right->split_left = new_tag;
+ tag->split_right = new_tag;
+
+ tag->real_size -= new_tag->real_size;
+
+ insert_tag(new_tag, -1);
+
+ return new_tag;
+}
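+/* After split_tag(), the original tag keeps just enough room for its own
+ * header plus the bytes the caller asked for; the new tag owns everything
+ * that was left over, is linked in as the split_right neighbour, and is
+ * filed on the free list for its own size class via insert_tag(). */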
+
+static struct boundary_tag *
+allocate_new_tag(unsigned int size)
+{
+ unsigned int pages;
+ unsigned int usage;
+ struct boundary_tag *tag;
+
+ // This is how much space is required.
+ usage = size + sizeof(struct boundary_tag);
+
+ // Round the required space up to whole pages.
+ pages = usage / l_pageSize;
+ if ((usage % l_pageSize) != 0)
+ pages += 1;
+
+ // Make sure it's >= the minimum size.
+ if (pages < l_pageCount)
+ pages = l_pageCount;
+
+ tag = (struct boundary_tag *) liballoc_alloc(pages);
+
+ if (tag == NULL)
+ return NULL; // uh oh, we ran out of memory.
+
+ tag->magic = LIBALLOC_MAGIC;
+ tag->size = size;
+ tag->real_size = pages * l_pageSize;
+ tag->index = -1;
+
+ tag->next = NULL;
+ tag->prev = NULL;
+ tag->split_left = NULL;
+ tag->split_right = NULL;
+
+ return tag;
}
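+/* Example (assuming 4 KiB pages and, on this 32-bit target, a 32-byte
+ * struct boundary_tag): allocate_new_tag(12) needs 12 + 32 = 44 bytes,
+ * which rounds up to a single page, so the new tag reports
+ * real_size == 4096 and size == 12. */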
void *
-kmalloc(uint32_t size)
+kmalloc(size_t size)
{
- if (!initialized)
- kmalloc_initialize();
+ int index;
+ void *ptr;
+ struct boundary_tag *tag = NULL;
+
+ // liballoc_lock();
+
+ if (l_initialized == 0) {
+ for (index = 0; index < MAXEXP; index++) {
+ l_freePages[index] = NULL;
+ l_completePages[index] = 0;
+ }
+ l_initialized = 1;
+ }
+
+ index = getexp(size) + MODE;
+ if (index < MINEXP)
+ index = MINEXP;
+
+ // Find one big enough.
+ tag = l_freePages[index]; // Start at the front of the list.
+ while (tag != NULL) {
+ // If there's enough space in this tag.
+ if ((tag->real_size - sizeof(struct boundary_tag))
+ >= (size + sizeof(struct boundary_tag))) {
+ break;
+ }
+
+ tag = tag->next;
+ }
+
+ // No page found. Make one.
+ if (tag == NULL) {
+ if ((tag = allocate_new_tag(size)) == NULL) {
+ // liballoc_unlock();
+ return NULL;
+ }
+
+ index = getexp(tag->real_size - sizeof(struct boundary_tag));
+ } else {
+ remove_tag(tag);
+
+ if ((tag->split_left == NULL) && (tag->split_right == NULL))
+ l_completePages[index] -= 1;
+ }
+
+ // At this point the tag is off every free list.
+
+ tag->size = size;
+
+ // See whether the excess space is large enough to re-use as a new block.
+
+ unsigned int remainder
+ = tag->real_size - size
+ - sizeof(struct boundary_tag) * 2; // Support a new tag + remainder
+
+ if (((int) remainder > 0) /* && ((tag->real_size - remainder) >= (1 << MINEXP)) */) {
+ int childIndex = getexp(remainder);
- /* printk("kmalloc", */
- /* "Initialized region with starting_mc(0x%x) and size(0x%x)", */
- /* starting_mc, */
- /* starting_mc->size); */
+ if (childIndex >= 0) {
+ struct boundary_tag *new_tag = split_tag(tag);
+ (void) new_tag;
+ }
+ }
- (void) size;
- return NULL;
+ ptr = (void *) ((unsigned int) tag + sizeof(struct boundary_tag));
+ // liballoc_unlock();
+ return ptr;
}
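+/* Sketch of the very first kmalloc(12) call (the smoke test in
+ * kernel_main): the free lists start empty; getexp(12) is -1, so the
+ * search index clamps to MINEXP; nothing is found, so allocate_new_tag(12)
+ * grabs one 4 KiB page; the excess (4096 - 12 - 2 * 32 = 4020 bytes,
+ * again assuming a 32-byte tag) is split off and filed on l_freePages[11];
+ * the caller receives the address just past the tag header. */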
diff --git a/kernel/mm/virtual_mm/virtual_mm.cc b/kernel/mm/virtual_mm/virtual_mm.cc
index 5c766ea..25a353f 100644
--- a/kernel/mm/virtual_mm/virtual_mm.cc
+++ b/kernel/mm/virtual_mm/virtual_mm.cc
@@ -108,7 +108,8 @@ make_table(uint32_t *pd_entry)
* next page table to be at 7MiB */
table = (uint32_t *) (7 * MiB);
else
- table = (uint32_t *) kmalloc(sizeof(uint32_t) * 1024);
+ // table = (uint32_t *) kmalloc(sizeof(uint32_t) * 1024);
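+ // NOTE: kmalloc() is now backed by VirtualMM::alloc_pages(), so calling
+ // it while building a page table could re-enter this code; assert for
+ // now (see the TODO in kmalloc.cc about always reserving table space).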
+ ASSERT_NOT_REACHED();
for (uint32_t i = 0; i < 1024; i++)
table[i] = 0x0;