CbC_xv6: src/buddy.c @ changeset 0:83c23a36980d ("Init")

author:   Tatsuki IHA <e125716@ie.u-ryukyu.ac.jp>
date:     Fri, 26 May 2017 23:11:05 +0900
parents:  (none)
children: f9169495d476

// Buddy memory allocator
#include "types.h"
#include "defs.h"
#include "param.h"
#include "memlayout.h"
#include "mmu.h"
#include "spinlock.h"
#include "arm.h"


// This file implements the buddy memory allocator. Each order divides
// the memory pool into equal-sized blocks of 2^n bytes. We use a bitmap
// to record the allocation status of each block, which allows efficient
// merging when blocks are freed. We also use a doubly linked list to
// chain together the free blocks of each order, allowing fast
// allocation. There is at most about 8% overhead for this structure.

#define MAX_ORD  12
#define MIN_ORD  6
#define N_ORD    (MAX_ORD - MIN_ORD + 1)
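
// Concrete sizes (for illustration, derived from the constants above):
// with MIN_ORD = 6 and MAX_ORD = 12 there are N_ORD = 7 orders managing
// blocks of 64, 128, 256, 512, 1024, 2048 and 4096 bytes; a request is
// served from the smallest order whose block size can hold it (see
// get_order() below).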

struct mark {
    uint32 lnks;   // double links (actually indexes)
    uint32 bitmap; // bitmap, whether the block is available (1=available)
};

// lnks is a combination of previous link (index) and next link (index)
#define PRE_LNK(lnks)    ((lnks) >> 16)
#define NEXT_LNK(lnks)   ((lnks) & 0xFFFF)
#define LNKS(pre, next)  (((pre) << 16) | ((next) & 0xFFFF))
#define NIL              ((uint16)0xFFFF)
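
// Example (illustrative only, based on the macros above): packing the
// previous index 3 and the next index 7 gives LNKS(3, 7) == 0x00030007,
// and unpacking recovers both halves:
//
//     PRE_LNK(0x00030007)  == 3
//     NEXT_LNK(0x00030007) == 7
//
// NIL (0xFFFF) in either half marks the end of the list.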

struct order {
    uint32 head;   // the first non-empty mark
    uint32 offset; // the first mark
};

struct kmem {
    struct spinlock lock;
    uint start;                 // start of memory for marks
    uint start_heap;            // start of allocatable memory
    uint end;
    struct order orders[N_ORD]; // orders used for buddy systems
};

static struct kmem kmem;

// conversion between block id, mark, and memory address
static inline struct mark* get_mark (int order, int idx)
{
    return (struct mark*)kmem.start + (kmem.orders[order - MIN_ORD].offset + idx);
}

static inline void* blkid2mem (int order, int blkid)
{
    return (void*)(kmem.start_heap + (1 << order) * blkid);
}

static inline int mem2blkid (int order, void *mem)
{
    return ((uint)mem - kmem.start_heap) >> order;
}

static inline int available (uint bitmap, int blk_id)
{
    return bitmap & (1 << (blk_id & 0x1F));
}
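
// Example (illustrative only, using the helpers above): with MIN_ORD = 6,
// order-6 block id 5 lives at kmem.start_heap + 5 * 64, and mem2blkid()
// is the exact inverse of blkid2mem() for the same order:
//
//     mem2blkid(6, blkid2mem(6, 5)) == 5
//
// available() tests bit (blk_id % 32) of a mark's 32-bit bitmap, so each
// struct mark tracks 32 consecutive blocks of its order.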

void kmem_init (void)
{
    initlock(&kmem.lock, "kmem");
}

void kmem_init2(void *vstart, void *vend)
{
    int i, j;
    uint32 total, n;
    uint len;
    struct order *ord;
    struct mark *mk;

    kmem.start = (uint)vstart;
    kmem.end = (uint)vend;
    len = kmem.end - kmem.start;

    // reserve memory at vstart for an array of marks (for all the orders)
    n = (len >> (MAX_ORD + 5)) + 1; // estimated # of marks for the max order
    total = 0;

    for (i = N_ORD - 1; i >= 0; i--) {
        ord = kmem.orders + i;
        ord->offset = total;
        ord->head = NIL;

        // set the bitmaps to mark all blocks not available
        for (j = 0; j < n; j++) {
            mk = get_mark(i + MIN_ORD, j);
            mk->lnks = LNKS(NIL, NIL);
            mk->bitmap = 0;
        }

        total += n;
        n <<= 1; // each smaller order needs twice as many marks
    }

    // add all available memory to the highest order bucket
    kmem.start_heap = align_up(kmem.start + total * sizeof(*mk), 1 << MAX_ORD);

    for (i = kmem.start_heap; i < kmem.end; i += (1 << MAX_ORD)) {
        kfree ((void*)i, MAX_ORD);
    }
}
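
// Worked example for the mark estimate above (illustrative numbers only):
// one order-MAX_ORD block is 1 << 12 = 4096 bytes and one mark covers 32
// blocks, so n = (len >> (MAX_ORD + 5)) + 1 reserves roughly
// len / (4096 * 32) marks for the top order. For a 64 MB range that is
// (2^26 >> 17) + 1 = 513 marks at order 12, doubling for every smaller
// order down to MIN_ORD.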

// mark a block as unavailable
static void unmark_blk (int order, int blk_id)
{
    struct mark *mk, *p;
    struct order *ord;
    int prev, next;

    ord = &kmem.orders[order - MIN_ORD];
    mk = get_mark (order, blk_id >> 5);

    // the block must currently be marked available
    if (!available(mk->bitmap, blk_id)) {
        panic ("double alloc\n");
    }

    // clear the bit in the bitmap
    mk->bitmap &= ~(1 << (blk_id & 0x1F));

    // if it was the last available block in this mark, unlink the mark
    // from this order's free list
    if (mk->bitmap == 0) {
        blk_id >>= 5;

        prev = PRE_LNK(mk->lnks);
        next = NEXT_LNK(mk->lnks);

        if (prev != NIL) {
            p = get_mark(order, prev);
            p->lnks = LNKS(PRE_LNK(p->lnks), next);

        } else if (ord->head == blk_id) {
            // if we are the first in the list
            ord->head = next;
        }

        if (next != NIL) {
            p = get_mark(order, next);
            p->lnks = LNKS(prev, NEXT_LNK(p->lnks));
        }

        mk->lnks = LNKS(NIL, NIL);
    }
}

// mark a block as available
static void mark_blk (int order, int blk_id)
{
    struct mark *mk, *p;
    struct order *ord;
    int insert;

    ord = &kmem.orders[order - MIN_ORD];
    mk = get_mark (order, blk_id >> 5);

    // whether we need to insert the mark into the free list
    insert = (mk->bitmap == 0);

    // the block must not already be marked available
    if (available(mk->bitmap, blk_id)) {
        panic ("double free\n");
    }

    // set the bit in the bitmap
    mk->bitmap |= (1 << (blk_id & 0x1F));

    // just insert it at the head, no need to keep the list ordered
    if (insert) {
        blk_id >>= 5;
        mk->lnks = LNKS(NIL, ord->head);

        // fix the pre pointer of the next mark
        if (ord->head != NIL) {
            p = get_mark(order, ord->head);
            p->lnks = LNKS(blk_id, NEXT_LNK(p->lnks));
        }

        ord->head = blk_id;
    }
}

// take a free block from the head of the given order's free list
static void* get_blk (int order)
{
    struct mark *mk;
    int blk_id;
    int i;
    struct order *ord;

    ord = &kmem.orders[order - MIN_ORD];
    mk = get_mark(order, ord->head);

    if (mk->bitmap == 0) {
        panic ("empty mark in the list\n");
    }

    for (i = 0; i < 32; i++) {
        if (mk->bitmap & (1 << i)) {
            blk_id = ord->head * 32 + i;
            unmark_blk(order, blk_id);

            return blkid2mem(order, blk_id);
        }
    }

    return NULL;
}

void _kfree (void *mem, int order);


static void *_kmalloc (int order)
{
    struct order *ord;
    uint8 *up;

    ord = &kmem.orders[order - MIN_ORD];
    up = NULL;

    if (ord->head != NIL) {
        up = get_blk(order);

    } else if (order < MAX_ORD) {
        // if no block is currently available, try to split a parent
        up = _kmalloc (order + 1);

        if (up != NULL) {
            _kfree (up + (1 << order), order);
        }
    }

    return up;
}
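
// Illustration of the split path above (numbers assume MIN_ORD = 6 and
// MAX_ORD = 12): a call to _kmalloc(6) while only an order-12 block is
// free recurses up to order 12, takes that 4096-byte block, and on the
// way back down hands the upper half (up + (1 << order)) to _kfree() at
// each order 11..6. The caller receives one 64-byte block and the other
// 4032 bytes remain free as progressively smaller buddies.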

// allocate memory that has the size of (1 << order)
void *kmalloc (int order)
{
    uint8 *up;

    if ((order > MAX_ORD) || (order < MIN_ORD)) {
        panic("kmalloc: order out of range\n");
    }

    acquire(&kmem.lock);
    up = _kmalloc(order);
    release(&kmem.lock);

    return up;
}

void _kfree (void *mem, int order)
{
    int blk_id, buddy_id;
    struct mark *mk;

    blk_id = mem2blkid(order, mem);
    mk = get_mark(order, blk_id >> 5);

    if (available(mk->bitmap, blk_id)) {
        panic ("kfree: double free");
    }

    buddy_id = blk_id ^ 0x0001; // blk_id and buddy_id differ only in the last bit
    // the buddy must be in the same bitmap
    if (!available(mk->bitmap, buddy_id) || (order == MAX_ORD)) {
        mark_blk(order, blk_id);
    } else {
        // our buddy is also free, merge with it
        unmark_blk (order, buddy_id);
        _kfree (blkid2mem(order, blk_id & ~0x0001), order + 1);
    }
}
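
// Example of the merge above (illustrative numbers only): freeing order-6
// block id 7 while its buddy, id 6 == (7 ^ 1), is already free unmarks
// id 6 at order 6 and re-frees the pair as a single order-7 block at the
// address of block id 6 (7 & ~1). The merge cascades upward as long as
// each larger buddy is free, stopping at MAX_ORD.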

// free kernel memory; we require the order parameter here to avoid
// storing size info somewhere, which might break the alignment
void kfree (void *mem, int order)
{
    if ((order > MAX_ORD) || (order < MIN_ORD) || ((uint)mem & ((1 << order) - 1))) {
        panic("kfree: order out of range or memory unaligned\n");
    }

    acquire(&kmem.lock);
    _kfree(mem, order);
    release(&kmem.lock);
}
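
// Usage sketch (illustrative only, not part of this file): the caller
// must pass kfree() the same order it used for kmalloc(), since the
// allocator keeps no per-allocation size header:
//
//     void *p = kmalloc(MIN_ORD);   // smallest block: 1 << 6 = 64 bytes
//     if (p != NULL) {
//         /* ... use up to 64 bytes ... */
//         kfree(p, MIN_ORD);        // same order as the allocation
//     }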

// free a page
void free_page(void *v)
{
    kfree (v, PTE_SHIFT);
}

// allocate a page
void* alloc_page (void)
{
    return kmalloc (PTE_SHIFT);
}

// round the size up to a power of 2, then return the order
// http://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2
int get_order (uint32 v)
{
    uint32 ord;

    v--;
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v++;

    for (ord = 0; ord < 32; ord++) {
        if (v & (1 << ord)) {
            break;
        }
    }

    if (ord < MIN_ORD) {
        ord = MIN_ORD;
    } else if (ord > MAX_ORD) {
        panic ("order too big!");
    }

    return ord;
}
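
// Worked examples (illustrative only): get_order(100) rounds 100 up to
// 128 = 2^7 and returns 7; get_order(20) rounds up to 32 = 2^5, below
// MIN_ORD, so it returns 6; get_order(5000) rounds up to 8192 = 2^13,
// above MAX_ORD, so it panics.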