Skip to content

Commit c924289

Browse files
committed
feat: hartlocks
Signed-off-by: Autumn <auctumnus@pm.me>
1 parent 5cb8cec commit c924289

File tree

19 files changed

+182
-65
lines changed

19 files changed

+182
-65
lines changed

src/kernel/arch/riscv64/hart_locals.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -27,7 +27,7 @@ static u64 boothart_hart_id;
2727
*/
2828
void heap_change_boothart_hart(struct mm_alloc_heap *heap, struct hart *hart);
2929

30-
struct hart_locals *get_hart_locals(void) {
30+
struct hart_locals *get_hart_locals(struct hartlock *_hartlock) {
3131
return (struct hart_locals *)csrr(RISCV64_CSR_SSCRATCH);
3232
}
3333

src/kernel/hartlock.c

Lines changed: 35 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,35 @@
1+
/*
2+
* SPDX-FileCopyrightText: 2026 ukoOS Contributors
3+
*
4+
* SPDX-License-Identifier: GPL-3.0-or-later
5+
*/
6+
7+
#include <stdatomic.h>
8+
9+
#include <hart_locals.h>
10+
#include <panic.h>
11+
#include <hartlock.h>
12+
#include <types.h>
13+
// TODO: make generic across architectures
14+
#include <arch/riscv64/irq.h>
15+
16+
// None of these functions should be called manually.
17+
// See doc/kernel/containers/guards.md for usage.
18+
19+
void __hartlock_init(struct hartlock *lock) {
20+
if(lock == nullptr) {
21+
panic("hartlock init called with null lock");
22+
}
23+
24+
u64 flags = local_irq_save();
25+
26+
lock->flags = flags;
27+
}
28+
29+
/*
 * Releases a hartlock: restores the IRQ state saved by __hartlock_init.
 *
 * Never call directly — invoked automatically via [[gnu::cleanup]] when a
 * WITH_HARTLOCK / HARTLOCK_GUARD scope exits.
 */
void __hartlock_free(struct hartlock *lock) {
	if (lock == nullptr) {
		panic("hartlock free called with null lock");
	}

	local_irq_restore(lock->flags);
}

src/kernel/include.mak

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -35,6 +35,7 @@ kernel-objs-c += device
3535
kernel-objs-c += devices/hart
3636
kernel-objs-c += devices/uart
3737
kernel-objs-c += devicetree
38+
kernel-objs-c += hartlock
3839
kernel-objs-c += init
3940
kernel-objs-c += main
4041
kernel-objs-c += mm/alloc

src/kernel/include/arch/riscv64/irq.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -11,7 +11,7 @@
1111
* IRQ handling.
1212
*/
1313

14-
#include <insns.h>
14+
#include <arch/riscv64/insns.h>
1515
#include <types.h>
1616

1717
// TODO: move this over to insns.h?

src/kernel/include/hart_locals.h

Lines changed: 16 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,7 @@
99

1010
#include <crypto/subtle/random_internals.h>
1111
#include <devices/hart.h>
12+
#include <hartlock.h>
1213

1314
/**
1415
* The data that is local to a hart.
@@ -41,10 +42,22 @@ static_assert(offsetof(struct hart_locals, task) == 8);
4142
/**
4243
* Returns a pointer to the current hart's locals.
4344
*
44-
* TODO: Make sure this is called under some lock that ensures the current
45-
* thread cannot be rescheduled. Otherwise, it'd be super-easy to get a race.
45+
* Requires a hartlock to be held. You can acquire a hartlock by using
46+
* `WITH_HARTLOCK` in a new scope; for example:
47+
*
48+
* ```c
49+
* void foo() {
50+
* u8 blah = 2 + 2;
51+
* {
52+
* WITH_HARTLOCK(lock);
53+
* struct hart_locals *locals = get_hart_locals(lock);
54+
*
55+
* // access `locals->rng`, etc ...
56+
* }
57+
* }
58+
* ```
4659
*/
47-
struct hart_locals *get_hart_locals(void);
60+
struct hart_locals *get_hart_locals(struct hartlock *);
4861

4962
/**
5063
* Called during early boot to initialize the boothart's hart-local storage

src/kernel/include/hartlock.h

Lines changed: 39 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,39 @@
1+
/*
2+
* SPDX-FileCopyrightText: 2026 ukoOS Contributors
3+
*
4+
* SPDX-License-Identifier: GPL-3.0-or-later
5+
*/
6+
7+
#ifndef UKO_OS_KERNEL__HARTLOCK_H
#define UKO_OS_KERNEL__HARTLOCK_H 1

#include <types.h>

/*
 * A hartlock keeps the current thread pinned to its hart by disabling
 * local interrupts for the duration of a scope.
 */
struct hartlock {
	// IRQ state saved by __hartlock_init; restored by __hartlock_free.
	u64 flags;
};

// None of these functions should be called manually.
// See doc/kernel/containers/guards.md for usage.
// NOTE(review): double-underscore names are reserved to the implementation
// (C17 7.1.3); kept as-is since callers already depend on them.

void __hartlock_init(struct hartlock *);

void __hartlock_free(struct hartlock *);

/*
 * Declares the hartlock-guarded member of a containing struct.
 * No trailing backslash: the macro must not splice the following
 * source line into its expansion.
 */
#define HARTLOCK_GUARDED(VARS) \
	struct VARS __guarded_by_hartlock;

/*
 * Acquires a hartlock for the rest of the enclosing scope and binds
 * GUARD_NAME to OBJECT's guarded member. The backing lock's storage is
 * named after GUARD_NAME so multiple guards can coexist in one scope.
 */
#define HARTLOCK_GUARD(GUARD_NAME, OBJECT) \
	typeof((OBJECT)->__guarded_by_hartlock) *GUARD_NAME; \
	[[gnu::cleanup(__hartlock_free)]] \
	struct hartlock __hartlock_of_##GUARD_NAME = { 0 }; \
	__hartlock_init(&__hartlock_of_##GUARD_NAME); \
	GUARD_NAME = &((OBJECT)->__guarded_by_hartlock)

/*
 * Acquires a hartlock for the rest of the enclosing scope and binds
 * LOCK_NAME to it; released automatically via [[gnu::cleanup]] when the
 * scope exits. Storage is named after LOCK_NAME to avoid collisions.
 */
#define WITH_HARTLOCK(LOCK_NAME) \
	[[gnu::cleanup(__hartlock_free)]] \
	struct hartlock __hartlock_of_##LOCK_NAME = { 0 }; \
	__hartlock_init(&__hartlock_of_##LOCK_NAME); \
	struct hartlock *LOCK_NAME = &__hartlock_of_##LOCK_NAME

#endif // UKO_OS_KERNEL__HARTLOCK_H

src/kernel/include/mm/alloc.h

Lines changed: 8 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,7 @@
88
#define UKO_OS_KERNEL__MM_ALLOC_H 1
99

1010
#include <hart_locals.h>
11+
#include <hartlock.h>
1112

1213
/**
1314
* Frees memory returned by a previous call to `alloc`.
@@ -21,13 +22,13 @@ void free(void *ptr);
2122
* Like `alloc`, but only for allocations with `0 < size && size <= 1024`.
2223
*/
2324
[[gnu::alloc_size(1), gnu::malloc, gnu::malloc(free, 1), nodiscard]] void *
24-
alloc_small(usize size, struct mm_alloc_heap *heap);
25+
alloc_small(usize size, struct hartlock *hartlock, struct mm_alloc_heap *heap);
2526

2627
/**
2728
* The slow path of `alloc`.
2829
*/
2930
[[gnu::alloc_size(1), gnu::malloc, gnu::malloc(free, 1), nodiscard]] void *
30-
alloc_generic(usize size, struct mm_alloc_heap *heap);
31+
alloc_generic(usize size, struct hartlock *hartlock, struct mm_alloc_heap *heap);
3132

3233
/**
3334
* Allocates `size` bytes of memory and returns a pointer to it. On OOM, returns
@@ -40,7 +41,8 @@ alloc_generic(usize size, struct mm_alloc_heap *heap);
4041
*/
4142
[[gnu::alloc_size(1), gnu::malloc, nodiscard]] static inline void *
4243
alloc(usize size) {
43-
struct mm_alloc_heap *heap = get_hart_locals()->heap;
44+
WITH_HARTLOCK(hartlock);
45+
struct mm_alloc_heap *heap = get_hart_locals(hartlock)->heap;
4446
// Bump up the size if it's zero, since zero isn't a valid input to
4547
// alloc_small.
4648
//
@@ -50,11 +52,11 @@ alloc(usize size) {
5052
// constant (or at least provably non-zero) in the caller most of the time, so
5153
// the branch should be eliminated.
5254
if (size == 0)
53-
return alloc_small(1, heap);
55+
return alloc_small(1, hartlock, heap);
5456
else if (size <= 1024)
55-
return alloc_small(size, heap);
57+
return alloc_small(size, hartlock, heap);
5658
else
57-
return alloc_generic(size, heap);
59+
return alloc_generic(size, hartlock, heap);
5860
}
5961

6062
/**

src/kernel/include/spinlock.h

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -8,10 +8,11 @@
88
#define UKO_OS_KERNEL__SPINLOCK_H 1
99

1010
#include <types.h>
11+
#include <hartlock.h>
1112

1213
struct spinlock {
1314
atomic bool locked;
14-
u64 flags;
15+
struct hartlock hartlock;
1516

1617
// these only have meaning while `locked` is true
1718
atomic u64 owner_hart_id;

src/kernel/main.c

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -16,6 +16,7 @@
1616
#include <random.h>
1717
#include <selftest.h>
1818
#include <symbolicate.h>
19+
#include <hartlock.h>
1920

2021
[[noreturn]]
2122
void main(u64 hart_id, paddr devicetree_start, paddr kernel_start,
@@ -64,7 +65,10 @@ void main(u64 hart_id, paddr devicetree_start, paddr kernel_start,
6465
print("Running self-tests...");
6566
run_selftests();
6667

67-
struct hart_locals *hart_locals = get_hart_locals();
68-
print("{uptr}", hart_locals->hart);
68+
{
69+
WITH_HARTLOCK(hartlock);
70+
struct hart_locals *hart_locals = get_hart_locals(hartlock);
71+
print("{uptr}", hart_locals->hart);
72+
}
6973
TODO();
7074
}

src/kernel/mm/alloc.c

Lines changed: 16 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,7 @@
99
#include <mm/alloc.h>
1010
#include <panic.h>
1111
#include <stdatomic.h>
12+
#include "hartlock.h"
1213

1314
void free(void *ptr) {
1415
if (!ptr)
@@ -17,9 +18,12 @@ void free(void *ptr) {
1718
struct mm_alloc_segment *segment = segment_of_ptr((uptr)ptr);
1819
struct mm_alloc_page *page = page_of_ptr((uptr)ptr);
1920
struct mm_alloc_block *block = ptr;
20-
if (segment->hart == get_hart_locals()->hart) {
21+
22+
WITH_HARTLOCK(hartlock);
23+
24+
if (segment->hart == get_hart_locals(hartlock)->hart) {
2125
// This is a local free; i.e., we're running on the hart that owns the page.
22-
struct mm_alloc_heap *heap = get_hart_locals()->heap;
26+
struct mm_alloc_heap *heap = get_hart_locals(hartlock)->heap;
2327

2428
page_local_free_push(page, block);
2529
if (page_is_empty(page)) {
@@ -42,8 +46,8 @@ void free(void *ptr) {
4246
}
4347
}
4448

45-
void *alloc_small(usize size, struct mm_alloc_heap *heap) {
46-
assert(heap->hart == get_hart_locals()->hart);
49+
void *alloc_small(usize size, struct hartlock *hartlock, struct mm_alloc_heap *heap) {
50+
assert(heap->hart == get_hart_locals(hartlock)->hart);
4751
assert(0 < size && size <= 1024);
4852

4953
// Compute the index into pages_direct.
@@ -57,7 +61,7 @@ void *alloc_small(usize size, struct mm_alloc_heap *heap) {
5761
// generic routine.
5862
struct mm_alloc_block *block = page_free_pop(page);
5963
if (!block)
60-
return alloc_generic(size, heap);
64+
return alloc_generic(size, hartlock, heap);
6165

6266
// Increment the counter of used objects.
6367
page->used_blocks++;
@@ -82,19 +86,19 @@ static void *alloc_generic_from_page(struct mm_alloc_page *page,
8286
return block;
8387
}
8488

85-
static void *alloc_huge(usize size, struct mm_alloc_heap *heap) {
89+
static void *alloc_huge(usize size, struct hartlock *hartlock, struct mm_alloc_heap *heap) {
8690
assert(size_is_huge(size));
8791

8892
// Every huge object goes in its own page, so allocate one and use it
8993
// directly.
90-
struct mm_alloc_page *page = page_new_huge(heap, size);
94+
struct mm_alloc_page *page = page_new_huge(hartlock, heap, size);
9195
if (!page)
9296
return nullptr;
9397
return alloc_generic_from_page(page, heap);
9498
}
9599

96-
void *alloc_generic(usize size, struct mm_alloc_heap *heap) {
97-
assert(heap->hart == get_hart_locals()->hart);
100+
void *alloc_generic(usize size, struct hartlock *hartlock, struct mm_alloc_heap *heap) {
101+
assert(heap->hart == get_hart_locals(hartlock)->hart);
98102
assert(size);
99103

100104
// Go through the delayed free list to free everything.
@@ -107,7 +111,7 @@ void *alloc_generic(usize size, struct mm_alloc_heap *heap) {
107111

108112
// Huge objects get handled separately.
109113
if (size_is_huge(size))
110-
return alloc_huge(size, heap);
114+
return alloc_huge(size, hartlock, heap);
111115

112116
// Check every page of the size class for free objects.
113117
usize size_class = size_class_of_size(size);
@@ -147,9 +151,9 @@ void *alloc_generic(usize size, struct mm_alloc_heap *heap) {
147151
// new one.
148152
assert(list_is_empty(&heap->pages[size_class]));
149153
if (size_is_small(size))
150-
page = page_new_small(heap, size_class);
154+
page = page_new_small(hartlock, heap, size_class);
151155
else
152-
page = page_new_large(heap, size_class);
156+
page = page_new_large(hartlock, heap, size_class);
153157

154158
// If we couldn't allocate a new page, we're out of memory.
155159
if (!page)

0 commit comments

Comments
 (0)