// Buddy memory allocator

#include "types.h"
#include "defs.h"
#include "param.h"
#include "memlayout.h"
#include "mmu.h"
#include "spinlock.h"
#include "arm.h"
#include "ln.h"
10
|
|
11
|
|
12 // this file implement the buddy memory allocator. Each order divides
|
|
13 // the memory pool into equal-sized blocks (2^n). We use bitmap to record
|
|
14 // allocation status for each block. This allows for efficient merging
|
|
15 // when blocks are freed. We also use double-linked list to chain together
|
|
16 // free blocks (for each order), thus allowing fast allocation. There is
|
|
17 // about 8% overhead (maximum) for this structure.
|
|
18
|
|
// block sizes are powers of two between 1<<MIN_ORD and 1<<MAX_ORD bytes
#define MAX_ORD      12
#define MIN_ORD      6
#define N_ORD        (MAX_ORD - MIN_ORD +1)     // number of buddy orders

// one mark tracks up to 32 blocks of a single order
struct mark {
    uint32 lnks;        // double links (actually indexes)
    uint32 bitmap;      // bitmap, whether the block is available (1=available)
};

// lnks is a combination of previous link (index) and next link (index)
#define PRE_LNK(lnks)   ((lnks) >> 16)
#define NEXT_LNK(lnks)  ((lnks) & 0xFFFF)
#define LNKS(pre, next) (((pre) << 16) | ((next) & 0xFFFF))
#define NIL             ((uint16)0xFFFF)        // end-of-list sentinel for links

// per-order free list: marks whose bitmap is non-zero are chained from head
struct order {
    uint32 head;        // the first non-empty mark
    uint32 offset;      // the first mark
};

// allocator state; all fields are protected by lock after kmem_init2()
struct kmem {
    struct spinlock lock;
    uint start;                 // start of memory for marks
    uint start_heap;            // start of allocatable memory
    uint end;
    struct order orders[N_ORD]; // orders used for buddy systems
};

static struct kmem kmem;
|
|
48
|
|
49 // coversion between block id to mark and memory address
|
|
50 static inline struct mark* get_mark (int order, int idx)
|
|
51 {
|
|
52 return (struct mark*)kmem.start + (kmem.orders[order - MIN_ORD].offset + idx);
|
|
53 }
|
|
54
|
|
55 static inline void* blkid2mem (int order, int blkid)
|
|
56 {
|
|
57 return (void*)(kmem.start_heap + (1 << order) * blkid);
|
|
58 }
|
|
59
|
|
60 static inline int mem2blkid (int order, void *mem)
|
|
61 {
|
|
62 return ((uint)mem - kmem.start_heap) >> order;
|
|
63 }
|
|
64
|
|
65 static inline int available (uint bitmap, int blk_id)
|
|
66 {
|
|
67 return bitmap & (1 << (blk_id & 0x1F));
|
|
68 }
|
|
69
|
|
// first-stage initialization: set up the allocator's spinlock only.
// the actual memory range is handed over later by kmem_init2().
void kmem_init (void)
{
    initlock(&kmem.lock, "kmem");
}
|
|
74
|
|
75 void kmem_init2(void *vstart, void *vend)
|
|
76 {
|
|
77 int i, j;
|
|
78 uint32 total, n;
|
|
79 uint len;
|
|
80 struct order *ord;
|
|
81 struct mark *mk;
|
|
82
|
|
83 kmem.start = (uint)vstart;
|
|
84 kmem.end = (uint)vend;
|
|
85 len = kmem.end - kmem.start;
|
|
86
|
|
87 // reserved memory at vstart for an array of marks (for all the orders)
|
|
88 n = (len >> (MAX_ORD + 5)) + 1; // estimated # of marks for max order
|
|
89 total = 0;
|
|
90
|
|
91 for (i = N_ORD - 1; i >= 0; i--) {
|
|
92 ord = kmem.orders + i;
|
|
93 ord->offset = total;
|
|
94 ord->head = NIL;
|
|
95
|
|
96 // set the bitmaps to mark all blocks not available
|
|
97 for (j = 0; j < n; j++) {
|
|
98 mk = get_mark(i + MIN_ORD, j);
|
|
99 mk->lnks = LNKS(NIL, NIL);
|
|
100 mk->bitmap = 0;
|
|
101 }
|
|
102
|
|
103 total += n;
|
|
104 n <<= 1; // each order doubles required marks
|
|
105 }
|
|
106
|
|
107 // add all available memory to the highest order bucket
|
|
108 kmem.start_heap = align_up(kmem.start + total * sizeof(*mk), 1 << MAX_ORD);
|
|
109
|
|
110 for (i = kmem.start_heap; i < kmem.end; i += (1 << MAX_ORD)){
|
|
111 kfree ((void*)i, MAX_ORD);
|
|
112 }
|
|
113 }
|
|
114
|
|
// mark a block as unavailable (allocated). if its 32-block mark word
// becomes completely empty, unlink that mark from the order's free list.
// blk_id is the block index within `order`; caller must hold kmem.lock.
static void unmark_blk (int order, int blk_id)
{
    struct mark *mk, *p;
    struct order *ord;
    int prev, next;

    ord = &kmem.orders[order - MIN_ORD];
    mk = get_mark (order, blk_id >> 5);     // 32 blocks per mark word

    // the block must currently be free, otherwise this is a double alloc
    if (!available(mk->bitmap, blk_id)) {
        panic ("double alloc\n");
    }

    // clear the bit in the bitmap
    mk->bitmap &= ~(1 << (blk_id & 0x1F));

    // if it's the last free block in the bitmap, delete the mark from
    // the order's doubly-linked free list
    if (mk->bitmap == 0) {
        blk_id >>= 5;   // from here on blk_id is the mark index

        prev = PRE_LNK(mk->lnks);
        next = NEXT_LNK(mk->lnks);

        if (prev != NIL) {
            // splice: point the previous mark's next link past us
            p = get_mark(order, prev);
            p->lnks = LNKS(PRE_LNK(p->lnks), next);

        } else if (ord->head == blk_id) {
            // if we are the first in the link, advance the head
            ord->head = next;
        }

        if (next != NIL) {
            // point the next mark's prev link back past us
            p = get_mark(order, next);
            p->lnks = LNKS(prev, NEXT_LNK(p->lnks));
        }

        mk->lnks = LNKS(NIL, NIL);
    }
}
|
|
156
|
|
// mark a block as available (free). if its 32-block mark word was
// previously empty, push that mark onto the head of the order's free
// list. blk_id is the block index within `order`; caller holds kmem.lock.
static void mark_blk (int order, int blk_id)
{
    struct mark *mk, *p;
    struct order *ord;
    int insert;

    ord = &kmem.orders[order - MIN_ORD];
    mk = get_mark (order, blk_id >> 5);     // 32 blocks per mark word

    // whether we need to insert it into the list (word had no free blocks)
    insert = (mk->bitmap == 0);

    // the block must not already be free
    if (available(mk->bitmap, blk_id)) {
        panic ("double free\n");
    }

    // set the bit in the bitmap
    mk->bitmap |= (1 << (blk_id & 0x1F));

    // just insert it to the head, no need to keep the list ordered
    if (insert) {
        blk_id >>= 5;   // from here on blk_id is the mark index
        mk->lnks = LNKS(NIL, ord->head);

        // fix the pre pointer of the next mark
        if (ord->head != NIL) {
            p = get_mark(order, ord->head);
            p->lnks = LNKS(blk_id, NEXT_LNK(p->lnks));
        }

        ord->head = blk_id;
    }
}
|
|
191
|
|
192 // get a block
|
|
193 static void* get_blk (int order)
|
|
194 {
|
|
195 struct mark *mk;
|
|
196 int blk_id;
|
|
197 int i;
|
|
198 struct order *ord;
|
|
199
|
|
200 ord = &kmem.orders[order - MIN_ORD];
|
|
201 mk = get_mark(order, ord->head);
|
|
202
|
|
203 if (mk->bitmap == 0) {
|
|
204 panic ("empty mark in the list\n");
|
|
205 }
|
|
206
|
|
207 for (i = 0; i < 32; i++) {
|
|
208 if (mk->bitmap & (1 << i)) {
|
|
209 blk_id = ord->head * 32 + i;
|
|
210 unmark_blk(order, blk_id);
|
|
211
|
|
212 return blkid2mem(order, blk_id);
|
|
213 }
|
|
214 }
|
|
215
|
|
216 return NULL;
|
|
217 }
|
|
218
|
|
219 void _kfree (void *mem, int order);
|
|
220
|
|
221
|
|
222 static void *_kmalloc (int order)
|
|
223 {
|
|
224 struct order *ord;
|
|
225 uint8 *up;
|
|
226
|
|
227 ord = &kmem.orders[order - MIN_ORD];
|
|
228 up = NULL;
|
|
229
|
|
230 if (ord->head != NIL) {
|
|
231 up = get_blk(order);
|
|
232
|
|
233 } else if (order < MAX_ORD){
|
|
234 // if currently no block available, try to split a parent
|
|
235 up = _kmalloc (order + 1);
|
|
236
|
|
237 if (up != NULL) {
|
|
238 _kfree (up + (1 << order), order);
|
|
239 }
|
|
240 }
|
|
241
|
|
242 return up;
|
|
243 }
|
|
244
|
|
245 // allocate memory that has the size of (1 << order)
|
|
246 void *kmalloc (int order)
|
|
247 {
|
|
248 uint8 *up;
|
|
249
|
|
250 if ((order > MAX_ORD) || (order < MIN_ORD)) {
|
|
251 panic("kmalloc: order out of range\n");
|
|
252 }
|
|
253
|
|
254 acquire(&kmem.lock);
|
|
255 up = _kmalloc(order);
|
|
256 release(&kmem.lock);
|
|
257
|
|
258 return up;
|
|
259 }
|
|
260
|
|
261 void _kfree (void *mem, int order)
|
|
262 {
|
|
263 int blk_id, buddy_id;
|
|
264 struct mark *mk;
|
|
265
|
|
266 blk_id = mem2blkid(order, mem);
|
|
267 mk = get_mark(order, blk_id >> 5);
|
|
268
|
|
269 if (available(mk->bitmap, blk_id)) {
|
|
270 panic ("kfree: double free");
|
|
271 }
|
|
272
|
|
273 buddy_id = blk_id ^ 0x0001; // blk_id and buddy_id differs in the last bit
|
|
274 // buddy must be in the same bit map
|
|
275 if (!available(mk->bitmap, buddy_id) || (order == MAX_ORD)) {
|
|
276 mark_blk(order, blk_id);
|
|
277 } else {
|
|
278 // our buddy is also free, merge it
|
|
279 unmark_blk (order, buddy_id);
|
|
280 _kfree (blkid2mem(order, blk_id & ~0x0001), order+1);
|
|
281 }
|
|
282 }
|
|
283
|
|
284 // free kernel memory, we require order parameter here to avoid
|
|
285 // storing size info somewhere which might break the alignment
|
|
286 void kfree (void *mem, int order)
|
|
287 {
|
|
288 if ((order > MAX_ORD) || (order < MIN_ORD) || (uint)mem & ((1<<order) -1)) {
|
|
289 panic("kfree: order out of range or memory unaligned\n");
|
|
290 }
|
|
291
|
|
292 acquire(&kmem.lock);
|
|
293 _kfree(mem, order);
|
|
294 release(&kmem.lock);
|
|
295 }
|
|
296
|
|
// free a page: a block of order PTE_SHIFT. PTE_SHIFT is presumably
// log2 of the MMU page size (defined in mmu.h) — confirm there.
void free_page(void *v)
{
    kfree (v, PTE_SHIFT);
}
|
|
302
|
|
// allocate a page: a block of order PTE_SHIFT (see free_page).
// returns NULL when out of memory.
void* alloc_page (void)
{
    return kmalloc (PTE_SHIFT);
}
|
|
308
|
|
309 // round up power of 2, then get the order
|
|
310 // http://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2
|
|
311 int get_order (uint32 v)
|
|
312 {
|
|
313 uint32 ord;
|
|
314
|
|
315 v--;
|
|
316 v |= v >> 1;
|
|
317 v |= v >> 2;
|
|
318 v |= v >> 4;
|
|
319 v |= v >> 8;
|
|
320 v |= v >> 16;
|
|
321 v++;
|
|
322
|
|
323 for (ord = 0; ord < 32; ord++) {
|
|
324 if (v & (1 << ord)) {
|
|
325 break;
|
|
326 }
|
|
327 }
|
|
328
|
|
329 if (ord < MIN_ORD) {
|
|
330 ord = MIN_ORD;
|
|
331 } else if (ord > MAX_ORD) {
|
|
332 panic ("order too big!");
|
|
333 }
|
|
334
|
|
335 return ord;
|
|
336
|
|
337 }
|
|
338
|