author    Raghuram Subramani <raghus2247@gmail.com>  2025-02-01 08:49:14 -0500
committer Raghuram Subramani <raghus2247@gmail.com>  2025-02-01 08:49:14 -0500
commit    24ac72db9322277e25df56839963f19f0796fdb7 (patch)
tree      e6cbb5f58da79391aa787d5e11e3d969089c63a4 /kernel/libk/kmalloc.cc
parent    ed46197e51cb55a36ca81b5fd086eaf0df2da6fd (diff)
libk: Minimal (barely) working implementation of kmalloc
Diffstat (limited to 'kernel/libk/kmalloc.cc')
-rw-r--r--  kernel/libk/kmalloc.cc | 250
1 file changed, 211 insertions(+), 39 deletions(-)
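
Note: the new implementation follows the liballoc boundary-tag design. It references struct boundary_tag and a liballoc_alloc() page hook, neither of which is defined in this diff; they presumably live in a header elsewhere in the tree. Purely as a reading aid, a sketch of what those declarations might look like, modelled on liballoc rather than on this repository:

/* Hypothetical declarations, modelled on liballoc 1.0; the actual header in
 * this tree may differ. */
struct boundary_tag {
  unsigned int magic;     /* Set to LIBALLOC_MAGIC to sanity-check a tag. */
  unsigned int size;      /* Bytes requested by the caller. */
  unsigned int real_size; /* Bytes actually reserved, header included. */
  int index;              /* Bucket in l_freePages[], or -1 when not free. */

  struct boundary_tag *split_left;  /* Physically adjacent tag to the left. */
  struct boundary_tag *split_right; /* Physically adjacent tag to the right. */

  struct boundary_tag *next; /* Free-list links within a size bucket. */
  struct boundary_tag *prev;
};

/* Page-allocation hook expected by the allocator: returns `pages` contiguous
 * kernel pages or NULL; in this tree it would plausibly wrap
 * VirtualMM::alloc_pages(). */
void *liballoc_alloc(unsigned int pages);
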
diff --git a/kernel/libk/kmalloc.cc b/kernel/libk/kmalloc.cc
index 03b7d86..f1147a6 100644
--- a/kernel/libk/kmalloc.cc
+++ b/kernel/libk/kmalloc.cc
@@ -25,60 +25,232 @@
/* TODO: Kmalloc must have space for a page table *at all times*. */
-memory_chunk_t *starting_mc = NULL;
bool initialized = false;
-memory_chunk_t *
-add_block(void *address, uint32_t size)
+bool
+kmalloc_initialized(void)
{
- memory_chunk_t *mc = (memory_chunk_t *) address;
- mc->next = NULL;
- mc->prev = NULL;
-
- uint32_t chunk_size = size - sizeof(memory_chunk_t);
- printk("kmalloc", "size(0x%x) chunk_size(0x%x)");
-
- mc->size = chunk_size;
- printk("kmalloc",
- "mc(0x%x) mc->size(0x%x) chunk_size(0x%x)",
- mc,
- mc->size,
- chunk_size);
- return mc;
+ return initialized;
}
-void
-kmalloc_initialize(void)
+#define LIBALLOC_MAGIC 0xc001c0de
+#define MAXCOMPLETE 5
+#define MAXEXP 32
+#define MINEXP 8
+
+#define MODE_BEST 0
+#define MODE_INSTANT 1
+
+#define MODE MODE_BEST
+
+#ifdef DEBUG
+#include <stdio.h>
+#endif
+
+struct boundary_tag *l_freePages[MAXEXP]; //< Allowing for 2^MAXEXP blocks
+int l_completePages[MAXEXP]; //< Allowing for 2^MAXEXP blocks
+
+#ifdef DEBUG
+unsigned int l_allocated = 0; //< The real amount of memory allocated.
+unsigned int l_inuse = 0; //< The amount of memory in use (malloc'ed).
+#endif
+
+static unsigned int l_initialized = 0; //< Flag to indicate initialization.
+static unsigned int l_pageSize = 4096; //< Individual page size
+static unsigned int l_pageCount = 1; //< Minimum number of pages to allocate.
+
+static inline int
+getexp(unsigned int size)
{
- int *initial_region = (int *) VirtualMM::alloc_pages(1);
- printk("debug", "Initial region: 0x%x", initial_region);
- *initial_region = 10;
+ if (size < (1 << MINEXP)) {
+ return -1; // Smaller than the quantum.
+ }
- /* memory_chunk_t *mc = (memory_chunk_t *) initial_region; */
- /* mc->size = 10; */
- /* printk("kmalloc", "mc->size(0x%x)", mc->size); */
+ int shift = MINEXP;
- /* starting_mc = add_block(initial_region, 4 * PAGE_SIZE); */
- initialized = true;
+ while (shift < MAXEXP) {
+ if ((1 << shift) > size)
+ break;
+ shift += 1;
+ }
+
+ return shift - 1;
}
-bool
-kmalloc_initialized(void)
+static inline void
+insert_tag(struct boundary_tag *tag, int index)
{
- return initialized;
+ int realIndex;
+
+ if (index < 0) {
+ realIndex = getexp(tag->real_size - sizeof(struct boundary_tag));
+ if (realIndex < MINEXP)
+ realIndex = MINEXP;
+ } else
+ realIndex = index;
+
+ tag->index = realIndex;
+
+ if (l_freePages[realIndex] != NULL) {
+ l_freePages[realIndex]->prev = tag;
+ tag->next = l_freePages[realIndex];
+ }
+
+ l_freePages[realIndex] = tag;
+}
+
+static inline void
+remove_tag(struct boundary_tag *tag)
+{
+ if (l_freePages[tag->index] == tag)
+ l_freePages[tag->index] = tag->next;
+
+ if (tag->prev != NULL)
+ tag->prev->next = tag->next;
+ if (tag->next != NULL)
+ tag->next->prev = tag->prev;
+
+ tag->next = NULL;
+ tag->prev = NULL;
+ tag->index = -1;
+}
+
+static inline struct boundary_tag *
+split_tag(struct boundary_tag *tag)
+{
+ unsigned int remainder
+ = tag->real_size - sizeof(struct boundary_tag) - tag->size;
+
+ struct boundary_tag *new_tag
+ = (struct boundary_tag *) ((unsigned int) tag
+ + sizeof(struct boundary_tag) + tag->size);
+
+ new_tag->magic = LIBALLOC_MAGIC;
+ new_tag->real_size = remainder;
+
+ new_tag->next = NULL;
+ new_tag->prev = NULL;
+
+ new_tag->split_left = tag;
+ new_tag->split_right = tag->split_right;
+
+ if (new_tag->split_right != NULL)
+ new_tag->split_right->split_left = new_tag;
+ tag->split_right = new_tag;
+
+ tag->real_size -= new_tag->real_size;
+
+ insert_tag(new_tag, -1);
+
+ return new_tag;
+}
+
+static struct boundary_tag *
+allocate_new_tag(unsigned int size)
+{
+ unsigned int pages;
+ unsigned int usage;
+ struct boundary_tag *tag;
+
+ // This is how much space is required.
+ usage = size + sizeof(struct boundary_tag);
+
+ // Perfect amount of space
+ pages = usage / l_pageSize;
+ if ((usage % l_pageSize) != 0)
+ pages += 1;
+
+ // Make sure it's >= the minimum size.
+ if (pages < l_pageCount)
+ pages = l_pageCount;
+
+ tag = (struct boundary_tag *) liballoc_alloc(pages);
+
+ if (tag == NULL)
+ return NULL; // uh oh, we ran out of memory.
+
+ tag->magic = LIBALLOC_MAGIC;
+ tag->size = size;
+ tag->real_size = pages * l_pageSize;
+ tag->index = -1;
+
+ tag->next = NULL;
+ tag->prev = NULL;
+ tag->split_left = NULL;
+ tag->split_right = NULL;
+
+ return tag;
}
void *
-kmalloc(uint32_t size)
+kmalloc(size_t size)
{
- if (!initialized)
- kmalloc_initialize();
+ int index;
+ void *ptr;
+ struct boundary_tag *tag = NULL;
+
+ // liballoc_lock();
+
+ if (l_initialized == 0) {
+ for (index = 0; index < MAXEXP; index++) {
+ l_freePages[index] = NULL;
+ l_completePages[index] = 0;
+ }
+ l_initialized = 1;
+ }
+
+ index = getexp(size) + MODE;
+ if (index < MINEXP)
+ index = MINEXP;
+
+ // Find one big enough.
+ tag = l_freePages[index]; // Start at the front of the list.
+ while (tag != NULL) {
+ // If there's enough space in this tag.
+ if ((tag->real_size - sizeof(struct boundary_tag))
+ >= (size + sizeof(struct boundary_tag))) {
+ break;
+ }
+
+ tag = tag->next;
+ }
+
+ // No page found. Make one.
+ if (tag == NULL) {
+ if ((tag = allocate_new_tag(size)) == NULL) {
+ // liballoc_unlock();
+ return NULL;
+ }
+
+ index = getexp(tag->real_size - sizeof(struct boundary_tag));
+ } else {
+ remove_tag(tag);
+
+ if ((tag->split_left == NULL) && (tag->split_right == NULL))
+ l_completePages[index] -= 1;
+ }
+
+ // We have a free page. Remove it from the free pages list.
+
+ tag->size = size;
+
+ // Removed... see if we can re-use the excess space.
+
+ unsigned int remainder
+ = tag->real_size - size
+ - sizeof(struct boundary_tag) * 2; // Support a new tag + remainder
+
+ if (((int) (remainder)
+ > 0) /*&& ( (tag->real_size - remainder) >= (1<<MINEXP))*/) {
+ int childIndex = getexp(remainder);
- /* printk("kmalloc", */
- /* "Initialized region with starting_mc(0x%x) and size(0x%x)", */
- /* starting_mc, */
- /* starting_mc->size); */
+ if (childIndex >= 0) {
+ struct boundary_tag *new_tag = split_tag(tag);
+ (void) new_tag;
+ }
+ }
- (void) size;
- return NULL;
+ ptr = (void *) ((unsigned int) tag + sizeof(struct boundary_tag));
+ // liballoc_unlock();
+ return ptr;
}
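
For context on how a request is bucketed by the code above, here is a hypothetical call site; none of this is code from the tree, and no kfree() exists yet in this commit:

/* Hypothetical call site, not part of this commit. */
void *
example(void)
{
  /* getexp(512) returns 9, so the 2^9 free list is searched first
   * (MODE_BEST adds 0). On the very first call no free tags exist,
   * so allocate_new_tag(512) pulls one page from liballoc_alloc(),
   * and the excess space is split off and re-inserted as a free tag. */
  void *buf = kmalloc(512);

  if (buf == NULL) {
    /* liballoc_alloc() could not supply pages. */
    return NULL;
  }

  /* No kfree() yet: the block cannot be returned to the allocator. */
  return buf;
}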