1 /*
2 * fsw_btrfs.c:
3 * btrfs UEFI driver
4 * by Samuel Liao
5 * Copyright (c) 2013 Tencent, Inc.
6 *
7  * This driver is based on the grub 2.0 btrfs implementation.
8 */
9
10 /* btrfs.c - B-tree file system. */
11 /*
12 * GRUB -- GRand Unified Bootloader
13 * Copyright (C) 2010 Free Software Foundation, Inc.
14 *
15 * GRUB is free software: you can redistribute it and/or modify
16 * it under the terms of the GNU General Public License as published by
17 * the Free Software Foundation, either version 3 of the License, or
18 * (at your option) any later version.
19 *
20 * GRUB is distributed in the hope that it will be useful,
21 * but WITHOUT ANY WARRANTY; without even the implied warranty of
22 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
23 * GNU General Public License for more details.
24 *
25 * You should have received a copy of the GNU General Public License
26 * along with GRUB. If not, see <http://www.gnu.org/licenses/>.
27 */
28
29 //#define DPRINT(x...) Print(x)
30
31 #include "fsw_core.h"
32 #define uint8_t fsw_u8
33 #define uint16_t fsw_u16
34 #define uint32_t fsw_u32
35 #define uint64_t fsw_u64
36 #define int64_t fsw_s64
37 #define int32_t fsw_s32
38
39 #ifndef DPRINT
40 #define DPRINT(x...) /* */
41 #endif
42
43 /* no single I/O or element size exceeds 2 GiB */
44 #define fsw_size_t int
45 #define fsw_ssize_t int
46 /* compressed data never exceeds 2 GiB, so 32 bits are enough */
47 #define grub_off_t int32_t
48 #define grub_size_t int32_t
49 #define grub_ssize_t int32_t
50 #include "crc32c.c"
51 #include "gzio.c"
52 #define MINILZO_CFG_SKIP_LZO_PTR 1
53 #define MINILZO_CFG_SKIP_LZO_UTIL 1
54 #define MINILZO_CFG_SKIP_LZO_STRING 1
55 #define MINILZO_CFG_SKIP_LZO_INIT 1
56 #define MINILZO_CFG_SKIP_LZO1X_DECOMPRESS 1
57 #define MINILZO_CFG_SKIP_LZO1X_1_COMPRESS 1
58 #define MINILZO_CFG_SKIP_LZO_STRING 1
59 #include "minilzo.c"
60 #include "scandisk.c"
61
62 #define BTRFS_DEFAULT_BLOCK_SIZE 4096
63 #define BTRFS_INITIAL_BCACHE_SIZE 1024
64 #define GRUB_BTRFS_SIGNATURE "_BHRfS_M"
65
66 /* From http://www.oberhumer.com/opensource/lzo/lzofaq.php
67 * LZO will expand incompressible data by a little amount. I still haven't
68 * computed the exact values, but I suggest using these formulas for
69 * a worst-case expansion calculation:
70 *
71 * output_block_size = input_block_size + (input_block_size / 16) + 64 + 3
72 * */
73 #define GRUB_BTRFS_LZO_BLOCK_SIZE 4096
74 #define GRUB_BTRFS_LZO_BLOCK_MAX_CSIZE (GRUB_BTRFS_LZO_BLOCK_SIZE + \
75 (GRUB_BTRFS_LZO_BLOCK_SIZE / 16) + 64 + 3)
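/*
 * Worked example (informative only): with the 4 KiB block size used here the
 * worst case is 4096 + 4096/16 + 64 + 3 = 4419 bytes, which is the value
 * GRUB_BTRFS_LZO_BLOCK_MAX_CSIZE expands to.
 */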
76
77 /*
78  * on-disk structs carry the prefix 'btrfs_' and are little-endian;
79  * in-memory structs carry the prefix 'fsw_btrfs_'
80 */
81 typedef uint8_t btrfs_checksum_t[0x20];
82 typedef uint32_t btrfs_uuid_t[4];
83
84 struct btrfs_device
85 {
86 uint64_t device_id;
87 uint64_t size;
88 uint8_t dummy[0x62 - 0x10];
89 } __attribute__ ((__packed__));
90
91 struct btrfs_superblock
92 {
93 btrfs_checksum_t checksum;
94 btrfs_uuid_t uuid;
95 uint8_t dummy[0x10];
96 uint8_t signature[sizeof (GRUB_BTRFS_SIGNATURE) - 1];
97 uint64_t generation;
98 uint64_t root_tree;
99 uint64_t chunk_tree;
100 uint8_t dummy2[0x10];
101 uint64_t total_bytes;
102 uint64_t bytes_used;
103 uint64_t root_dir_objectid;
104 #define BTRFS_MAX_NUM_DEVICES 0x10000
105 uint64_t num_devices;
106 uint32_t sectorsize;
107 uint32_t nodesize;
108
109 uint8_t dummy3[0x31];
110 struct btrfs_device this_device;
111 char label[0x100];
112 uint8_t dummy4[0x100];
113 uint8_t bootstrap_mapping[0x800];
114 } __attribute__ ((__packed__));
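/* Only the fields this driver actually consumes are named; the dummy[] members
 * are padding that keeps the remaining fields at their on-disk offsets. */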
115
116 struct btrfs_header
117 {
118 btrfs_checksum_t checksum;
119 btrfs_uuid_t uuid;
120 uint8_t dummy[0x30];
121 uint32_t nitems;
122 uint8_t level;
123 } __attribute__ ((__packed__));
124
125 struct fsw_btrfs_device_desc
126 {
127 struct fsw_volume * dev;
128 uint64_t id;
129 };
130
131 struct fsw_btrfs_volume
132 {
133 struct fsw_volume g; //!< Generic volume structure
134
135 /* superblock shadows */
136 uint8_t bootstrap_mapping[0x800];
137 btrfs_uuid_t uuid;
138 uint64_t total_bytes;
139 uint64_t bytes_used;
140 uint64_t chunk_tree;
141 uint64_t root_tree;
142 uint64_t top_tree; /* top volume tree */
143 unsigned num_devices;
144 unsigned sectorshift;
145 unsigned sectorsize;
146 int is_master;
147
148 struct fsw_btrfs_device_desc *devices_attached;
149 unsigned n_devices_attached;
150 unsigned n_devices_allocated;
151
152 /* Cached extent data. */
153 uint64_t extstart;
154 uint64_t extend;
155 uint64_t extino;
156 uint64_t exttree;
157 uint32_t extsize;
158 struct btrfs_extent_data *extent;
159 };
160
161 enum
162 {
163 GRUB_BTRFS_ITEM_TYPE_INODE_ITEM = 0x01,
164 GRUB_BTRFS_ITEM_TYPE_INODE_REF = 0x0c,
165 GRUB_BTRFS_ITEM_TYPE_DIR_ITEM = 0x54,
166 GRUB_BTRFS_ITEM_TYPE_EXTENT_ITEM = 0x6c,
167 GRUB_BTRFS_ITEM_TYPE_ROOT_ITEM = 0x84,
168 GRUB_BTRFS_ITEM_TYPE_DEVICE = 0xd8,
169 GRUB_BTRFS_ITEM_TYPE_CHUNK = 0xe4
170 };
171
172 struct btrfs_key
173 {
174 uint64_t object_id;
175 uint8_t type;
176 uint64_t offset;
177 } __attribute__ ((__packed__));
178
179 struct btrfs_chunk_item
180 {
181 uint64_t size;
182 uint64_t dummy;
183 uint64_t stripe_length;
184 uint64_t type;
185 #define GRUB_BTRFS_CHUNK_TYPE_BITS_DONTCARE 0x07
186 #define GRUB_BTRFS_CHUNK_TYPE_SINGLE 0x00
187 #define GRUB_BTRFS_CHUNK_TYPE_RAID0 0x08
188 #define GRUB_BTRFS_CHUNK_TYPE_RAID1 0x10
189 #define GRUB_BTRFS_CHUNK_TYPE_DUPLICATED 0x20
190 #define GRUB_BTRFS_CHUNK_TYPE_RAID10 0x40
191 uint8_t dummy2[0xc];
192 uint16_t nstripes;
193 uint16_t nsubstripes;
194 } __attribute__ ((__packed__));
195
196 struct btrfs_chunk_stripe
197 {
198 uint64_t device_id;
199 uint64_t offset;
200 btrfs_uuid_t device_uuid;
201 } __attribute__ ((__packed__));
202
203 struct btrfs_leaf_node
204 {
205 struct btrfs_key key;
206 uint32_t offset;
207 uint32_t size;
208 } __attribute__ ((__packed__));
209
210 struct btrfs_internal_node
211 {
212 struct btrfs_key key;
213 uint64_t addr;
214 uint64_t dummy;
215 } __attribute__ ((__packed__));
216
217 struct btrfs_dir_item
218 {
219 struct btrfs_key key;
220 uint64_t transid;
221 uint16_t m;
222 uint16_t n;
223 #define GRUB_BTRFS_DIR_ITEM_TYPE_REGULAR 1
224 #define GRUB_BTRFS_DIR_ITEM_TYPE_DIRECTORY 2
225 #define GRUB_BTRFS_DIR_ITEM_TYPE_SYMLINK 7
226 uint8_t type;
227 char name[0];
228 } __attribute__ ((__packed__));
229
230 struct fsw_btrfs_leaf_descriptor
231 {
232 unsigned depth;
233 unsigned allocated;
234 struct
235 {
236 uint64_t addr;
237 unsigned iter;
238 unsigned maxiter;
239 int leaf;
240 } *data;
241 };
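/*
 * Informal note: this descriptor is an explicit stack of (node address, child
 * index, child count) entries that lower_bound() records on its way down the
 * B-tree, so that next() can later resume the walk and return further leaf
 * items in key order without restarting from the root.
 */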
242
243 struct btrfs_root_item
244 {
245 uint8_t dummy[0xb0];
246 uint64_t tree;
247 uint64_t inode;
248 } __attribute__ ((__packed__));
249
250 struct btrfs_time
251 {
252 int64_t sec;
253 uint32_t nanosec;
254 } __attribute__ ((__packed__));
255
256 struct btrfs_inode
257 {
258 uint64_t gen_id;
259 uint64_t trans_id;
260 uint64_t size;
261 uint64_t nbytes;
262 uint64_t block_group;
263 uint32_t nlink;
264 uint32_t uid;
265 uint32_t gid;
266 uint32_t mode;
267 uint64_t rdev;
268 uint64_t flags;
269
270 uint64_t seq;
271
272 uint64_t reserved[4];
273 struct btrfs_time atime;
274 struct btrfs_time ctime;
275 struct btrfs_time mtime;
276 struct btrfs_time otime;
277 } __attribute__ ((__packed__));
278
279 struct fsw_btrfs_dnode {
280 struct fsw_dnode g; //!< Generic dnode structure
281 struct btrfs_inode *raw; //!< Full raw inode structure
282 };
283
284 struct btrfs_extent_data
285 {
286 uint64_t dummy;
287 uint64_t size;
288 uint8_t compression;
289 uint8_t encryption;
290 uint16_t encoding;
291 uint8_t type;
292 union
293 {
294 char inl[0];
295 struct
296 {
297 uint64_t laddr;
298 uint64_t compressed_size;
299 uint64_t offset;
300 uint64_t filled;
301 };
302 };
303 } __attribute__ ((__packed__));
304
305 #define GRUB_BTRFS_EXTENT_INLINE 0
306 #define GRUB_BTRFS_EXTENT_REGULAR 1
307
308 #define GRUB_BTRFS_COMPRESSION_NONE 0
309 #define GRUB_BTRFS_COMPRESSION_ZLIB 1
310 #define GRUB_BTRFS_COMPRESSION_LZO 2
311
312 #define GRUB_BTRFS_OBJECT_ID_CHUNK 0x100
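/* 0x100 (256) corresponds to btrfs' first regular object id (the kernel calls
 * it BTRFS_FIRST_FREE_OBJECTID); it is the inode number of a subvolume's root
 * directory, which is why it is used below as the root dnode id of a tree. */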
313
314 struct fsw_btrfs_uuid_list {
315 struct fsw_btrfs_volume *master;
316 struct fsw_btrfs_uuid_list *next;
317 };
318
319 static int uuid_eq(btrfs_uuid_t u1, btrfs_uuid_t u2) {
320 return u1[0]==u2[0] && u1[1]==u2[1] && u1[2]==u2[2] && u1[3]==u2[3];
321 }
322
323 static struct fsw_btrfs_uuid_list *master_uuid_list = NULL;
324
325 static int master_uuid_add(struct fsw_btrfs_volume *vol, struct fsw_btrfs_volume **master_out) {
326 struct fsw_btrfs_uuid_list *l;
327
328 for (l = master_uuid_list; l; l=l->next)
329 if(uuid_eq(l->master->uuid, vol->uuid)) {
330 if(master_out)
331 *master_out = l->master;
332 return 0;
333 }
334
335     l = AllocatePool(sizeof(struct fsw_btrfs_uuid_list));
        if (!l)
            return 1;   /* allocation failed: behave as a master without registering the UUID */
336     l->master = vol;
337 l->next = master_uuid_list;
338 master_uuid_list = l;
339 return 1;
340 }
341
342 static void master_uuid_remove(struct fsw_btrfs_volume *vol) {
343 struct fsw_btrfs_uuid_list **lp;
344
345 for (lp = &master_uuid_list; *lp; lp=&(*lp)->next)
346 if((*lp)->master == vol) {
347 struct fsw_btrfs_uuid_list *n = *lp;
348 *lp = n->next;
349 FreePool(n);
350 break;
351 }
352 }
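/*
 * Multi-device handling in a nutshell (descriptive summary of this file): the
 * first volume seen with a given filesystem UUID registers itself here and
 * becomes the "master"; any further volume with the same UUID mounts as a
 * "slave" that only gets a fake label and an empty root, while its underlying
 * device is attached (via a cloned dummy volume) to the master's
 * devices_attached[] list so fsw_btrfs_read_logical() can read stripes that
 * live on it.
 */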
353
354 static fsw_status_t btrfs_set_superblock_info(struct fsw_btrfs_volume *vol, struct btrfs_superblock *sb)
355 {
356 int i;
357 vol->uuid[0] = sb->uuid[0];
358 vol->uuid[1] = sb->uuid[1];
359 vol->uuid[2] = sb->uuid[2];
360 vol->uuid[3] = sb->uuid[3];
361 vol->chunk_tree = sb->chunk_tree;
362 vol->root_tree = sb->root_tree;
363 vol->total_bytes = fsw_u64_le_swap(sb->total_bytes);
364 vol->bytes_used = fsw_u64_le_swap(sb->bytes_used);
365
366 vol->sectorshift = 0;
367 vol->sectorsize = fsw_u32_le_swap(sb->sectorsize);
368 for(i=9; i<20; i++) {
369 if((1UL<<i) == vol->sectorsize) {
370 vol->sectorshift = i;
371 break;
372 }
373 }
374 if(fsw_u64_le_swap(sb->num_devices) > BTRFS_MAX_NUM_DEVICES)
375 vol->num_devices = BTRFS_MAX_NUM_DEVICES;
376 else
377 vol->num_devices = fsw_u64_le_swap(sb->num_devices);
378 fsw_memcpy(vol->bootstrap_mapping, sb->bootstrap_mapping, sizeof(vol->bootstrap_mapping));
379 return FSW_SUCCESS;
380 }
381
382 static uint64_t superblock_pos[4] = {
383 64 / 4,
384 64 * 1024 / 4,
385 256 * 1048576 / 4,
386 1048576ULL * 1048576ULL / 4
387 };
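/*
 * These positions are expressed in BTRFS_DEFAULT_BLOCK_SIZE (4 KiB) units and
 * correspond to the usual btrfs superblock mirror offsets of 64 KiB, 64 MiB,
 * 256 GiB and 1 PiB; btrfs_read_superblock() below keeps the copy with the
 * highest generation among those that fit on the device.
 */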
388
389 static fsw_status_t fsw_btrfs_read_logical(struct fsw_btrfs_volume *vol,
390 uint64_t addr, void *buf, fsw_size_t size, int rdepth, int cache_level);
391
392 static fsw_status_t btrfs_read_superblock (struct fsw_volume *vol, struct btrfs_superblock *sb_out)
393 {
394 unsigned i;
395 uint64_t total_blocks = 1024;
396 fsw_status_t err = FSW_SUCCESS;
397
398 fsw_set_blocksize(vol, BTRFS_DEFAULT_BLOCK_SIZE, BTRFS_DEFAULT_BLOCK_SIZE);
399 for (i = 0; i < 4; i++)
400 {
401 uint8_t *buffer;
402 struct btrfs_superblock *sb;
403
404 /* Don't try additional superblocks beyond device size. */
405 if (total_blocks <= superblock_pos[i])
406 break;
407
408 err = fsw_block_get(vol, superblock_pos[i], 0, (void **)&buffer);
409 if (err) {
410 fsw_block_release(vol, superblock_pos[i], buffer);
411 break;
412 }
413
414 sb = (struct btrfs_superblock *)buffer;
415 if (!fsw_memeq (sb->signature, GRUB_BTRFS_SIGNATURE,
416 sizeof (GRUB_BTRFS_SIGNATURE) - 1))
417 {
418 fsw_block_release(vol, superblock_pos[i], buffer);
419 break;
420 }
421 if (i == 0 || fsw_u64_le_swap (sb->generation) > fsw_u64_le_swap (sb_out->generation))
422 {
423 fsw_memcpy (sb_out, sb, sizeof (*sb));
424 total_blocks = fsw_u64_le_swap (sb->this_device.size) >> 12;
425 }
426 fsw_block_release(vol, superblock_pos[i], buffer);
427 }
428
429 if ((err == FSW_UNSUPPORTED || !err) && i == 0)
430 return FSW_UNSUPPORTED;
431
432 if (err == FSW_UNSUPPORTED)
433 err = FSW_SUCCESS;
434
435 if(err == 0)
436 DPRINT(L"btrfs: UUID: %08x-%08x-%08x-%08x device id: %d\n",
437 sb_out->uuid[0], sb_out->uuid[1], sb_out->uuid[2], sb_out->uuid[3],
438 sb_out->this_device.device_id);
439 return err;
440 }
441
442 static int key_cmp (const struct btrfs_key *a, const struct btrfs_key *b)
443 {
444 if (fsw_u64_le_swap (a->object_id) < fsw_u64_le_swap (b->object_id))
445 return -1;
446 if (fsw_u64_le_swap (a->object_id) > fsw_u64_le_swap (b->object_id))
447 return +1;
448
449 if (a->type < b->type)
450 return -1;
451 if (a->type > b->type)
452 return +1;
453
454 if (fsw_u64_le_swap (a->offset) < fsw_u64_le_swap (b->offset))
455 return -1;
456 if (fsw_u64_le_swap (a->offset) > fsw_u64_le_swap (b->offset))
457 return +1;
458 return 0;
459 }
460
461 static void free_iterator (struct fsw_btrfs_leaf_descriptor *desc)
462 {
463 fsw_free (desc->data);
464 }
465
466 static fsw_status_t save_ref (struct fsw_btrfs_leaf_descriptor *desc,
467 uint64_t addr, unsigned i, unsigned m, int l)
468 {
469 desc->depth++;
470 if (desc->allocated < desc->depth)
471 {
472 void *newdata;
473 int oldsize = sizeof (desc->data[0]) * desc->allocated;
474 desc->allocated *= 2;
475 newdata = AllocatePool (sizeof (desc->data[0]) * desc->allocated);
476 if (!newdata)
477 return FSW_OUT_OF_MEMORY;
478 fsw_memcpy(newdata, desc->data, oldsize);
479 FreePool(desc->data);
480 desc->data = newdata;
481 }
482 desc->data[desc->depth - 1].addr = addr;
483 desc->data[desc->depth - 1].iter = i;
484 desc->data[desc->depth - 1].maxiter = m;
485 desc->data[desc->depth - 1].leaf = l;
486 return FSW_SUCCESS;
487 }
488
489 static int next (struct fsw_btrfs_volume *vol,
490 struct fsw_btrfs_leaf_descriptor *desc,
491 uint64_t * outaddr, fsw_size_t * outsize,
492 struct btrfs_key *key_out)
493 {
494 fsw_status_t err;
495 struct btrfs_leaf_node leaf;
496
497 for (; desc->depth > 0; desc->depth--)
498 {
499 desc->data[desc->depth - 1].iter++;
500 if (desc->data[desc->depth - 1].iter
501 < desc->data[desc->depth - 1].maxiter)
502 break;
503 }
504 if (desc->depth == 0)
505 return 0;
506 while (!desc->data[desc->depth - 1].leaf)
507 {
508 struct btrfs_internal_node node;
509 struct btrfs_header head;
510 fsw_memzero(&node, sizeof(node));
511
512 err = fsw_btrfs_read_logical (vol, desc->data[desc->depth - 1].iter
513 * sizeof (node)
514 + sizeof (struct btrfs_header)
515 + desc->data[desc->depth - 1].addr,
516 &node, sizeof (node), 0, 1);
517 if (err)
518 return -err;
519
520 err = fsw_btrfs_read_logical (vol, fsw_u64_le_swap (node.addr),
521 &head, sizeof (head), 0, 1);
522 if (err)
523 return -err;
524
525 save_ref (desc, fsw_u64_le_swap (node.addr), 0,
526 fsw_u32_le_swap (head.nitems), !head.level);
527 }
528 err = fsw_btrfs_read_logical (vol, desc->data[desc->depth - 1].iter
529 * sizeof (leaf)
530 + sizeof (struct btrfs_header)
531 + desc->data[desc->depth - 1].addr, &leaf,
532 sizeof (leaf), 0, 1);
533 if (err)
534 return -err;
535 *outsize = fsw_u32_le_swap (leaf.size);
536 *outaddr = desc->data[desc->depth - 1].addr + sizeof (struct btrfs_header)
537 + fsw_u32_le_swap (leaf.offset);
538 *key_out = leaf.key;
539 return 1;
540 }
541
542 #define depth2cache(x) ((x) >= 4 ? 1 : 5-(x))
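/* Heuristic, as I read it: blocks fetched near the top of the tree are hit
 * most often, so a shallow recursion depth maps to a higher cache level
 * (rdepth 0 -> 5, 1 -> 4, ..., >= 4 -> 1), presumably keeping them cached longer. */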
543 static fsw_status_t lower_bound (struct fsw_btrfs_volume *vol,
544 const struct btrfs_key *key_in,
545 struct btrfs_key *key_out,
546 uint64_t root,
547 uint64_t *outaddr, fsw_size_t *outsize,
548 struct fsw_btrfs_leaf_descriptor *desc,
549 int rdepth)
550 {
551 uint64_t addr = fsw_u64_le_swap (root);
552 int depth = -1;
553
554 if (desc)
555 {
556 desc->allocated = 16;
557 desc->depth = 0;
558 desc->data = AllocatePool (sizeof (desc->data[0]) * desc->allocated);
559 if (!desc->data)
560 return FSW_OUT_OF_MEMORY;
561 }
562
563     /* A limit of 2 would work as well, but be robust and allow a bit more just in case.
564      */
565 if (rdepth > 10)
566 return FSW_VOLUME_CORRUPTED;
567
568 DPRINT (L"btrfs: retrieving %lx %x %lx\n",
569 key_in->object_id, key_in->type, key_in->offset);
570
571 while (1)
572 {
573 fsw_status_t err;
574 struct btrfs_header head;
575 fsw_memzero(&head, sizeof(head));
576
577 reiter:
578 depth++;
579 /* FIXME: preread few nodes into buffer. */
580 err = fsw_btrfs_read_logical (vol, addr, &head, sizeof (head),
581 rdepth + 1, depth2cache(rdepth));
582 if (err)
583 return err;
584 addr += sizeof (head);
585 if (head.level)
586 {
587 unsigned i;
588 struct btrfs_internal_node node, node_last;
589 int have_last = 0;
590 fsw_memzero (&node_last, sizeof (node_last));
591 for (i = 0; i < fsw_u32_le_swap (head.nitems); i++)
592 {
593 err = fsw_btrfs_read_logical (vol, addr + i * sizeof (node),
594 &node, sizeof (node), rdepth + 1, depth2cache(rdepth));
595 if (err)
596 return err;
597
598 DPRINT (L"btrfs: internal node (depth %d) %lx %x %lx\n", depth,
599 node.key.object_id, node.key.type,
600 node.key.offset);
601
602 if (key_cmp (&node.key, key_in) == 0)
603 {
604 err = FSW_SUCCESS;
605 if (desc)
606 err = save_ref (desc, addr - sizeof (head), i,
607 fsw_u32_le_swap (head.nitems), 0);
608 if (err)
609 return err;
610 addr = fsw_u64_le_swap (node.addr);
611 goto reiter;
612 }
613 if (key_cmp (&node.key, key_in) > 0)
614 break;
615 node_last = node;
616 have_last = 1;
617 }
618 if (have_last)
619 {
620 err = FSW_SUCCESS;
621 if (desc)
622 err = save_ref (desc, addr - sizeof (head), i - 1,
623 fsw_u32_le_swap (head.nitems), 0);
624 if (err)
625 return err;
626 addr = fsw_u64_le_swap (node_last.addr);
627 goto reiter;
628 }
629 *outsize = 0;
630 *outaddr = 0;
631 fsw_memzero (key_out, sizeof (*key_out));
632 if (desc)
633 return save_ref (desc, addr - sizeof (head), -1,
634 fsw_u32_le_swap (head.nitems), 0);
635 return FSW_SUCCESS;
636 }
637 {
638 unsigned i;
639 struct btrfs_leaf_node leaf, leaf_last;
640 int have_last = 0;
641 for (i = 0; i < fsw_u32_le_swap (head.nitems); i++)
642 {
643 err = fsw_btrfs_read_logical (vol, addr + i * sizeof (leaf),
644 &leaf, sizeof (leaf), rdepth + 1, depth2cache(rdepth));
645 if (err)
646 return err;
647
648 DPRINT (L"btrfs: leaf (depth %d) %lx %x %lx\n", depth,
649 leaf.key.object_id, leaf.key.type, leaf.key.offset);
650
651 if (key_cmp (&leaf.key, key_in) == 0)
652 {
653 fsw_memcpy (key_out, &leaf.key, sizeof (*key_out));
654 *outsize = fsw_u32_le_swap (leaf.size);
655 *outaddr = addr + fsw_u32_le_swap (leaf.offset);
656 if (desc)
657 return save_ref (desc, addr - sizeof (head), i,
658 fsw_u32_le_swap (head.nitems), 1);
659 return FSW_SUCCESS;
660 }
661
662 if (key_cmp (&leaf.key, key_in) > 0)
663 break;
664
665 have_last = 1;
666 leaf_last = leaf;
667 }
668
669 if (have_last)
670 {
671 fsw_memcpy (key_out, &leaf_last.key, sizeof (*key_out));
672 *outsize = fsw_u32_le_swap (leaf_last.size);
673 *outaddr = addr + fsw_u32_le_swap (leaf_last.offset);
674 if (desc)
675 return save_ref (desc, addr - sizeof (head), i - 1,
676 fsw_u32_le_swap (head.nitems), 1);
677 return FSW_SUCCESS;
678 }
679 *outsize = 0;
680 *outaddr = 0;
681 fsw_memzero (key_out, sizeof (*key_out));
682 if (desc)
683 return save_ref (desc, addr - sizeof (head), -1,
684 fsw_u32_le_swap (head.nitems), 1);
685 return FSW_SUCCESS;
686 }
687 }
688 }
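/*
 * Contract of lower_bound(), as implemented above: it returns the last item
 * whose key is <= *key_in, or a zeroed key_out with *outaddr == *outsize == 0
 * when every item in the tree is greater; when desc is non-NULL the traversal
 * stack is saved so that next() can continue from that position.
 */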
689
690 static int btrfs_add_multi_device(struct fsw_btrfs_volume *master, struct fsw_volume *slave, struct btrfs_superblock *sb)
691 {
692 int i;
693 for( i = 0; i < master->n_devices_attached; i++)
694 if(sb->this_device.device_id == master->devices_attached[i].id)
695 return FSW_UNSUPPORTED;
696
697 slave = clone_dummy_volume(slave);
698 if(slave == NULL)
699 return FSW_OUT_OF_MEMORY;
700 fsw_set_blocksize(slave, master->sectorsize, master->sectorsize);
701 slave->bcache_size = BTRFS_INITIAL_BCACHE_SIZE;
702
703 master->devices_attached[i].id = sb->this_device.device_id;
704 master->devices_attached[i].dev = slave;
705 master->n_devices_attached++;
706
707 DPRINT(L"Found slave %d\n", sb->this_device.device_id);
708 return FSW_SUCCESS;
709 }
710
711 static int scan_disks_hook(struct fsw_volume *volg, struct fsw_volume *slave) {
712 struct fsw_btrfs_volume *vol = (struct fsw_btrfs_volume *)volg;
713 struct btrfs_superblock sb;
714 fsw_status_t err;
715
716 if(vol->n_devices_attached >= vol->n_devices_allocated)
717 return FSW_UNSUPPORTED;
718
719 err = btrfs_read_superblock(slave, &sb);
720 if(err)
721 return FSW_UNSUPPORTED;
722
723 if(!uuid_eq(vol->uuid, sb.uuid))
724 return FSW_UNSUPPORTED;
725
726 return btrfs_add_multi_device(vol, slave, &sb);
727 }
728
729 static struct fsw_volume *
730 find_device (struct fsw_btrfs_volume *vol, uint64_t id, int do_rescan) {
731 int i;
732
733 do {
734 for (i = 0; i < vol->n_devices_attached; i++)
735 if (id == vol->devices_attached[i].id)
736 return vol->devices_attached[i].dev;
737 } while(vol->n_devices_attached < vol->n_devices_allocated &&
738 do_rescan-- > 0 &&
739 scan_disks(scan_disks_hook, &vol->g) > 0);
740 DPRINT(L"sub device %d not found\n", id);
741 return NULL;
742 }
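/*
 * Read-path summary: fsw_btrfs_read_logical() below resolves a logical address
 * to a chunk (from the superblock's bootstrap mapping or from the chunk tree),
 * picks a stripe according to the chunk's RAID profile, and reads the physical
 * sectors from that stripe's device; find_device() above may trigger a rescan
 * of the remaining disks (scan_disks_hook) on the retry pass when a referenced
 * device id is not attached yet.
 */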
743
744 static fsw_status_t fsw_btrfs_read_logical (struct fsw_btrfs_volume *vol, uint64_t addr,
745 void *buf, fsw_size_t size, int rdepth, int cache_level)
746 {
747 while (size > 0)
748 {
749 uint8_t *ptr;
750 struct btrfs_key *key;
751 struct btrfs_chunk_item *chunk;
752 uint64_t csize;
753 fsw_status_t err = 0;
754 struct btrfs_key key_out;
755 int challoc = 0;
756 struct btrfs_key key_in;
757 fsw_size_t chsize;
758 uint64_t chaddr;
759
760 for (ptr = vol->bootstrap_mapping; ptr < vol->bootstrap_mapping + sizeof (vol->bootstrap_mapping) - sizeof (struct btrfs_key);)
761 {
762 key = (struct btrfs_key *) ptr;
763 if (key->type != GRUB_BTRFS_ITEM_TYPE_CHUNK)
764 break;
765 chunk = (struct btrfs_chunk_item *) (key + 1);
766 if (fsw_u64_le_swap (key->offset) <= addr
767 && addr < fsw_u64_le_swap (key->offset)
768 + fsw_u64_le_swap (chunk->size))
769 {
770 goto chunk_found;
771 }
772 ptr += sizeof (*key) + sizeof (*chunk)
773 + sizeof (struct btrfs_chunk_stripe)
774 * fsw_u16_le_swap (chunk->nstripes);
775 }
776
777 key_in.object_id = fsw_u64_le_swap (GRUB_BTRFS_OBJECT_ID_CHUNK);
778 key_in.type = GRUB_BTRFS_ITEM_TYPE_CHUNK;
779 key_in.offset = fsw_u64_le_swap (addr);
780 err = lower_bound (vol, &key_in, &key_out, vol->chunk_tree, &chaddr, &chsize, NULL, rdepth);
781 if (err)
782 return err;
783 key = &key_out;
784 if (key->type != GRUB_BTRFS_ITEM_TYPE_CHUNK
785 || !(fsw_u64_le_swap (key->offset) <= addr))
786 {
787 return FSW_VOLUME_CORRUPTED;
788 }
789         /* i.e. "couldn't find the chunk descriptor" */
790
791 chunk = AllocatePool (chsize);
792 if (!chunk) {
793 return FSW_OUT_OF_MEMORY;
794 }
795
796 challoc = 1;
797 err = fsw_btrfs_read_logical (vol, chaddr, chunk, chsize, rdepth, cache_level < 5 ? cache_level+1 : 5);
798 if (err)
799 {
800 if(chunk)
801 FreePool (chunk);
802 return err;
803 }
804
805 chunk_found:
806 {
807 #ifdef __MAKEWITH_GNUEFI
808 #define UINTREM UINTN
809 #else
810 #undef DivU64x32
811 #define DivU64x32 DivU64x32Remainder
812 #define UINTREM UINT32
813 #endif
814 UINTREM stripen;
815 UINTREM stripe_offset;
816 uint64_t off = addr - fsw_u64_le_swap (key->offset);
817 unsigned redundancy = 1;
818 unsigned i, j;
819
820 if (fsw_u64_le_swap (chunk->size) <= off)
821 {
822 return FSW_VOLUME_CORRUPTED;
823               /* i.e. "couldn't find the chunk descriptor" */
824 }
825
826             DPRINT(L"btrfs chunk 0x%lx+0x%lx %d stripes (%d substripes) of %lx\n",
827 fsw_u64_le_swap (key->offset),
828 fsw_u64_le_swap (chunk->size),
829 fsw_u16_le_swap (chunk->nstripes),
830 fsw_u16_le_swap (chunk->nsubstripes),
831 fsw_u64_le_swap (chunk->stripe_length));
832
833 /* gnu-efi has no DivU64x64Remainder, limited to DivU64x32 */
834 switch (fsw_u64_le_swap (chunk->type)
835 & ~GRUB_BTRFS_CHUNK_TYPE_BITS_DONTCARE)
836 {
837 case GRUB_BTRFS_CHUNK_TYPE_SINGLE:
838 {
839 uint64_t stripe_length;
840
841 stripe_length = DivU64x32 (fsw_u64_le_swap (chunk->size),
842 fsw_u16_le_swap (chunk->nstripes), NULL);
843
844 if(stripe_length > 1UL<<30)
845 return FSW_VOLUME_CORRUPTED;
846
847 stripen = DivU64x32 (off, (uint32_t)stripe_length, &stripe_offset);
848 csize = (stripen + 1) * stripe_length - off;
849 DPRINT(L"read_logical %d chunk_found single csize=%d\n", __LINE__, csize);
850 break;
851 }
852 case GRUB_BTRFS_CHUNK_TYPE_DUPLICATED:
853 case GRUB_BTRFS_CHUNK_TYPE_RAID1:
854 {
855 stripen = 0;
856 stripe_offset = off;
857 csize = fsw_u64_le_swap (chunk->size) - off;
858 redundancy = 2;
859 DPRINT(L"read_logical %d chunk_found dup/raid1 off=%lx csize=%d\n", __LINE__, stripe_offset, csize);
860 break;
861 }
862 case GRUB_BTRFS_CHUNK_TYPE_RAID0:
863 {
864 uint64_t stripe_length = fsw_u64_le_swap (chunk->stripe_length);
865 uint64_t middle, high;
866 UINTREM low;
867
868 if(stripe_length > 1UL<<30)
869 return FSW_VOLUME_CORRUPTED;
870
871 middle = DivU64x32 (off, (uint32_t)stripe_length, &low);
872
873 high = DivU64x32 (middle, fsw_u16_le_swap (chunk->nstripes), &stripen);
874 stripe_offset =
875 low + fsw_u64_le_swap (chunk->stripe_length) * high;
876 csize = fsw_u64_le_swap (chunk->stripe_length) - low;
877 DPRINT(L"read_logical %d chunk_found raid0 csize=%d\n", __LINE__, csize);
878 break;
879 }
880 case GRUB_BTRFS_CHUNK_TYPE_RAID10:
881 {
882 uint64_t stripe_length = fsw_u64_le_swap (chunk->stripe_length);
883 uint64_t middle, high;
884 UINTREM low;
885
886 if(stripe_length > 1UL<<30)
887 return FSW_VOLUME_CORRUPTED;
888
889 middle = DivU64x32 (off, stripe_length, &low);
890
891 high = DivU64x32 (middle,
892 fsw_u16_le_swap (chunk->nstripes)
893 / fsw_u16_le_swap (chunk->nsubstripes),
894 &stripen);
895 stripen *= fsw_u16_le_swap (chunk->nsubstripes);
896 redundancy = fsw_u16_le_swap (chunk->nsubstripes);
897 stripe_offset = low + fsw_u64_le_swap (chunk->stripe_length)
898 * high;
899 csize = fsw_u64_le_swap (chunk->stripe_length) - low;
900 DPRINT(L"read_logical %d chunk_found raid01 csize=%d\n", __LINE__, csize);
901 break;
902 }
903 default:
904 DPRINT (L"btrfs: unsupported RAID\n");
905 return FSW_UNSUPPORTED;
906 }
907 if (csize == 0)
908               /* i.e. "couldn't find the chunk descriptor" */
909 return FSW_VOLUME_CORRUPTED;
910
911 if (csize > (uint64_t) size)
912 csize = size;
913
914 for (j = 0; j < 2; j++)
915 {
916 for (i = 0; i < redundancy; i++)
917 {
918 struct btrfs_chunk_stripe *stripe;
919 uint64_t paddr;
920 struct fsw_volume *dev;
921
922 stripe = (struct btrfs_chunk_stripe *) (chunk + 1);
923 /* Right now the redundancy handling is easy.
924 With RAID5-like it will be more difficult. */
925 stripe += stripen + i;
926
927 paddr = fsw_u64_le_swap (stripe->offset) + stripe_offset;
928
929 DPRINT (L"btrfs: chunk 0x%lx+0x%lx (%d stripes (%d substripes) of %lx) stripe %lx maps to 0x%lx\n",
930 fsw_u64_le_swap (key->offset),
931 fsw_u64_le_swap (chunk->size),
932 fsw_u16_le_swap (chunk->nstripes),
933 fsw_u16_le_swap (chunk->nsubstripes),
934 fsw_u64_le_swap (chunk->stripe_length),
935 stripen, stripe->offset);
936 DPRINT (L"btrfs: reading paddr 0x%lx for laddr 0x%lx\n", paddr, addr);
937
938 dev = find_device (vol, stripe->device_id, j);
939 if (!dev)
940 {
941 err = FSW_VOLUME_CORRUPTED;
942 continue;
943 }
944
945 uint32_t off = paddr & (vol->sectorsize - 1);
946 paddr >>= vol->sectorshift;
947 uint64_t n = 0;
948 while(n < csize) {
949 char *buffer;
950 err = fsw_block_get(dev, paddr, cache_level, (void **)&buffer);
951 if(err)
952 break;
953 int s = vol->sectorsize - off;
954 if(s > csize - n)
955 s = csize - n;
956 fsw_memcpy(buf+n, buffer+off, s);
957 fsw_block_release(dev, paddr, (void *)buffer);
958
959 n += s;
960 off = 0;
961 paddr++;
962 }
963 DPRINT (L"read logical: err %d csize %d got %d\n",
964 err, csize, n);
965 if(n>=csize)
966 break;
967 }
968 if (i != redundancy)
969 break;
970 }
971 if (err)
972 return err;
973 }
974 size -= csize;
975 buf = (uint8_t *) buf + csize;
976 addr += csize;
977 if (challoc && chunk)
978 FreePool (chunk);
979 }
980 return FSW_SUCCESS;
981 }
982
983 static fsw_status_t fsw_btrfs_get_default_root(struct fsw_btrfs_volume *vol, uint64_t root_dir_objectid);
984 static fsw_status_t fsw_btrfs_volume_mount(struct fsw_volume *volg) {
985 struct btrfs_superblock sblock;
986 struct fsw_btrfs_volume *vol = (struct fsw_btrfs_volume *)volg;
987 struct fsw_btrfs_volume *master_out = NULL;
988 struct fsw_string s;
989 fsw_status_t err;
990 int i;
991
992 init_crc32c_table();
993 fsw_memzero((char *)vol+sizeof(*volg), sizeof(*vol)-sizeof(*volg));
994
995 err = btrfs_read_superblock (volg, &sblock);
996 if (err)
997 return err;
998
999 btrfs_set_superblock_info(vol, &sblock);
1000
1001 if(vol->sectorshift == 0)
1002 return FSW_UNSUPPORTED;
1003
1004 if(vol->num_devices >= BTRFS_MAX_NUM_DEVICES)
1005 return FSW_UNSUPPORTED;
1006
1007 vol->is_master = master_uuid_add(vol, &master_out);
1008 /* already mounted via other device */
1009 if(vol->is_master == 0) {
1010 #define FAKE_LABEL "btrfs.multi.device"
1011 s.type = FSW_STRING_TYPE_UTF8;
1012 s.size = s.len = sizeof(FAKE_LABEL)-1;
1013 s.data = FAKE_LABEL;
1014 err = fsw_strdup_coerce(&volg->label, volg->host_string_type, &s);
1015 if (err)
1016 return err;
1017 btrfs_add_multi_device(master_out, volg, &sblock);
1018 /* create fake root */
1019 return fsw_dnode_create_root_with_tree(volg, 0, 0, &volg->root);
1020 }
1021
1022 fsw_set_blocksize(volg, vol->sectorsize, vol->sectorsize);
1023 vol->g.bcache_size = BTRFS_INITIAL_BCACHE_SIZE;
1024 vol->n_devices_allocated = vol->num_devices;
1025 vol->devices_attached = AllocatePool (sizeof (vol->devices_attached[0])
1026 * vol->n_devices_allocated);
1027 if (!vol->devices_attached)
1028 return FSW_OUT_OF_MEMORY;
1029
1030 vol->n_devices_attached = 1;
1031 vol->devices_attached[0].dev = volg;
1032 vol->devices_attached[0].id = sblock.this_device.device_id;
1033
1034 for (i = 0; i < 0x100; i++)
1035 if (sblock.label[i] == 0)
1036 break;
1037
1038 s.type = FSW_STRING_TYPE_UTF8;
1039 s.size = s.len = i;
1040 s.data = sblock.label;
1041 err = fsw_strdup_coerce(&volg->label, volg->host_string_type, &s);
1042 if (err) {
1043 FreePool (vol->devices_attached);
1044 vol->devices_attached = NULL;
1045 return err;
1046 }
1047
1048 err = fsw_btrfs_get_default_root(vol, sblock.root_dir_objectid);
1049 if (err) {
1050 DPRINT(L"root not found\n");
1051 FreePool (vol->devices_attached);
1052 vol->devices_attached = NULL;
1053 return err;
1054 }
1055
1056 return FSW_SUCCESS;
1057 }
1058
1059 static void fsw_btrfs_volume_free(struct fsw_volume *volg)
1060 {
1061 unsigned i;
1062 struct fsw_btrfs_volume *vol = (struct fsw_btrfs_volume *)volg;
1063
1064 if (vol==NULL)
1065 return;
1066
1067 if (vol->is_master)
1068 master_uuid_remove(vol);
1069
1070     /* Device 0 is closed one layer up. */
1071 for (i = 1; i < vol->n_devices_attached; i++)
1072 fsw_unmount (vol->devices_attached[i].dev);
1073 if(vol->devices_attached)
1074 FreePool (vol->devices_attached);
1075 if(vol->extent)
1076 FreePool (vol->extent);
1077 }
1078
1079 static fsw_status_t fsw_btrfs_volume_stat(struct fsw_volume *volg, struct fsw_volume_stat *sb)
1080 {
1081 struct fsw_btrfs_volume *vol = (struct fsw_btrfs_volume *)volg;
1082 sb->total_bytes = vol->total_bytes;
1083     sb->free_bytes = vol->total_bytes - vol->bytes_used;   /* report free space, not used space */
1084 return FSW_SUCCESS;
1085 }
1086
1087 static fsw_status_t fsw_btrfs_read_inode (struct fsw_btrfs_volume *vol,
1088 struct btrfs_inode *inode, uint64_t num,
1089 uint64_t tree)
1090 {
1091 struct btrfs_key key_in, key_out;
1092 uint64_t elemaddr;
1093 fsw_size_t elemsize;
1094 fsw_status_t err;
1095
1096 key_in.object_id = num;
1097 key_in.type = GRUB_BTRFS_ITEM_TYPE_INODE_ITEM;
1098 key_in.offset = 0;
1099
1100 err = lower_bound (vol, &key_in, &key_out, tree, &elemaddr, &elemsize, NULL, 0);
1101 if (err)
1102 return err;
1103 if (num != key_out.object_id
1104 || key_out.type != GRUB_BTRFS_ITEM_TYPE_INODE_ITEM)
1105 return FSW_NOT_FOUND;
1106
1107 return fsw_btrfs_read_logical (vol, elemaddr, inode, sizeof (*inode), 0, 2);
1108 }
1109
1110 static fsw_status_t fsw_btrfs_dnode_fill(struct fsw_volume *volg, struct fsw_dnode *dnog)
1111 {
1112 struct fsw_btrfs_volume *vol = (struct fsw_btrfs_volume *)volg;
1113 struct fsw_btrfs_dnode *dno = (struct fsw_btrfs_dnode *)dnog;
1114 fsw_status_t err;
1115 uint32_t mode;
1116
1117     /* a slave device gets an empty root */
1118 if (!vol->is_master) {
1119 dno->g.size = 0;
1120 dno->g.type = FSW_DNODE_TYPE_DIR;
1121 return FSW_SUCCESS;
1122 }
1123
1124 if (dno->raw)
1125 return FSW_SUCCESS;
1126
1127 dno->raw = AllocatePool(sizeof(struct btrfs_inode));
1128 if(dno->raw == NULL)
1129 return FSW_OUT_OF_MEMORY;
1130
1131 err = fsw_btrfs_read_inode(vol, dno->raw, dno->g.dnode_id, dno->g.tree_id);
1132 if (err) {
1133 FreePool(dno->raw);
1134 dno->raw = NULL;
1135 return err;
1136 }
1137
1138 // get info from the inode
1139 dno->g.size = fsw_u64_le_swap(dno->raw->size);
1140 // TODO: check docs for 64-bit sized files
1141 mode = fsw_u32_le_swap(dno->raw->mode);
1142 if (S_ISREG(mode))
1143 dno->g.type = FSW_DNODE_TYPE_FILE;
1144 else if (S_ISDIR(mode))
1145 dno->g.type = FSW_DNODE_TYPE_DIR;
1146 else if (S_ISLNK(mode))
1147 dno->g.type = FSW_DNODE_TYPE_SYMLINK;
1148 else
1149 dno->g.type = FSW_DNODE_TYPE_SPECIAL;
1150
1151 return FSW_SUCCESS;
1152 }
1153
1154 static void fsw_btrfs_dnode_free(struct fsw_volume *volg, struct fsw_dnode *dnog)
1155 {
1156 struct fsw_btrfs_dnode *dno = (struct fsw_btrfs_dnode *)dnog;
1157 if (dno->raw)
1158 FreePool(dno->raw);
1159 }
1160
1161 static fsw_status_t fsw_btrfs_dnode_stat(struct fsw_volume *volg, struct fsw_dnode *dnog, struct fsw_dnode_stat *sb)
1162 {
1163 struct fsw_btrfs_dnode *dno = (struct fsw_btrfs_dnode *)dnog;
1164
1165     /* a slave device gets an empty root */
1166 if(dno->raw == NULL) {
1167 sb->used_bytes = 0;
1168 fsw_store_time_posix(sb, FSW_DNODE_STAT_CTIME, 0);
1169 fsw_store_time_posix(sb, FSW_DNODE_STAT_ATIME, 0);
1170 fsw_store_time_posix(sb, FSW_DNODE_STAT_MTIME, 0);
1171 return FSW_SUCCESS;
1172 }
1173 sb->used_bytes = fsw_u64_le_swap(dno->raw->nbytes);
1174 fsw_store_time_posix(sb, FSW_DNODE_STAT_ATIME,
1175 fsw_u64_le_swap(dno->raw->atime.sec));
1176 fsw_store_time_posix(sb, FSW_DNODE_STAT_CTIME,
1177 fsw_u64_le_swap(dno->raw->ctime.sec));
1178 fsw_store_time_posix(sb, FSW_DNODE_STAT_MTIME,
1179 fsw_u64_le_swap(dno->raw->mtime.sec));
1180 fsw_store_attr_posix(sb, fsw_u32_le_swap(dno->raw->mode));
1181
1182 return FSW_SUCCESS;
1183 }
1184
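/*
 * Layout of an lzo-compressed btrfs extent as consumed below: a little-endian
 * 32-bit total size, followed by blocks of (32-bit compressed length,
 * compressed bytes), each decompressing to at most GRUB_BTRFS_LZO_BLOCK_SIZE
 * bytes; a length word is padded forward so it never straddles a 4 KiB page
 * boundary.
 */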
1185 static fsw_ssize_t grub_btrfs_lzo_decompress(char *ibuf, fsw_size_t isize, grub_off_t off,
1186 char *obuf, fsw_size_t osize)
1187 {
1188 uint32_t total_size, cblock_size;
1189 fsw_size_t ret = 0;
1190 unsigned char buf[GRUB_BTRFS_LZO_BLOCK_SIZE];
1191 char *ibuf0 = ibuf;
1192
1193 #define fsw_get_unaligned32(x) (*(uint32_t *)(x))
1194 total_size = fsw_u32_le_swap (fsw_get_unaligned32(ibuf));
1195 ibuf += sizeof (total_size);
1196
1197 if (isize < total_size)
1198 return -1;
1199
1200 /* Jump forward to first block with requested data. */
1201 while (off >= GRUB_BTRFS_LZO_BLOCK_SIZE)
1202 {
1203 /* Don't let following uint32_t cross the page boundary. */
1204 if (((ibuf - ibuf0) & 0xffc) == 0xffc)
1205 ibuf = ((ibuf - ibuf0 + 3) & ~3) + ibuf0;
1206
1207 cblock_size = fsw_u32_le_swap (fsw_get_unaligned32 (ibuf));
1208 ibuf += sizeof (cblock_size);
1209
1210 if (cblock_size > GRUB_BTRFS_LZO_BLOCK_MAX_CSIZE)
1211 return -1;
1212
1213 off -= GRUB_BTRFS_LZO_BLOCK_SIZE;
1214 ibuf += cblock_size;
1215 }
1216
1217 while (osize > 0)
1218 {
1219 lzo_uint usize = GRUB_BTRFS_LZO_BLOCK_SIZE;
1220
1221 /* Don't let following uint32_t cross the page boundary. */
1222 if (((ibuf - ibuf0) & 0xffc) == 0xffc)
1223 ibuf = ((ibuf - ibuf0 + 3) & ~3) + ibuf0;
1224
1225 cblock_size = fsw_u32_le_swap (fsw_get_unaligned32 (ibuf));
1226 ibuf += sizeof (cblock_size);
1227
1228 if (cblock_size > GRUB_BTRFS_LZO_BLOCK_MAX_CSIZE)
1229 return -1;
1230
1231 /* Block partially filled with requested data. */
1232 if (off > 0 || osize < GRUB_BTRFS_LZO_BLOCK_SIZE)
1233 {
1234 fsw_size_t to_copy = GRUB_BTRFS_LZO_BLOCK_SIZE - off;
1235
1236 if (to_copy > osize)
1237 to_copy = osize;
1238
1239 if (lzo1x_decompress_safe ((lzo_bytep)ibuf, cblock_size, (lzo_bytep)buf, &usize, NULL) != 0)
1240 return -1;
1241
1242 if (to_copy > usize)
1243 to_copy = usize;
1244 fsw_memcpy(obuf, buf + off, to_copy);
1245
1246 osize -= to_copy;
1247 ret += to_copy;
1248 obuf += to_copy;
1249 ibuf += cblock_size;
1250 off = 0;
1251 continue;
1252 }
1253
1254 /* Decompress whole block directly to output buffer. */
1255 if (lzo1x_decompress_safe ((lzo_bytep)ibuf, cblock_size, (lzo_bytep)obuf, &usize, NULL) != 0)
1256 return -1;
1257
1258 osize -= usize;
1259 ret += usize;
1260 obuf += usize;
1261 ibuf += cblock_size;
1262 }
1263
1264 return ret;
1265 }
1266
1267 static fsw_status_t fsw_btrfs_get_extent(struct fsw_volume *volg, struct fsw_dnode *dnog,
1268 struct fsw_extent *extent)
1269 {
1270 struct fsw_btrfs_volume *vol = (struct fsw_btrfs_volume *)volg;
1271 uint64_t ino = dnog->dnode_id;
1272 uint64_t tree = dnog->tree_id;
1273 uint64_t pos0 = extent->log_start << vol->sectorshift;
1274 extent->type = FSW_EXTENT_TYPE_INVALID;
1275 extent->log_count = 1;
1276 uint64_t pos = pos0;
1277 fsw_size_t csize;
1278 fsw_status_t err;
1279 uint64_t extoff;
1280 char *buf = NULL;
1281 uint64_t count;
1282
1283     /* a slave device gets an empty root */
1284 if (!vol->is_master)
1285 return FSW_NOT_FOUND;
1286
1287 if (!vol->extent || vol->extstart > pos || vol->extino != ino
1288 || vol->exttree != tree || vol->extend <= pos)
1289 {
1290 struct btrfs_key key_in, key_out;
1291 uint64_t elemaddr;
1292 fsw_size_t elemsize;
1293
1294 if(vol->extent) {
1295 FreePool (vol->extent);
1296 vol->extent = NULL;
1297 }
1298 key_in.object_id = ino;
1299 key_in.type = GRUB_BTRFS_ITEM_TYPE_EXTENT_ITEM;
1300 key_in.offset = fsw_u64_le_swap (pos);
1301 err = lower_bound (vol, &key_in, &key_out, tree, &elemaddr, &elemsize, NULL, 0);
1302 if (err)
1303 return FSW_VOLUME_CORRUPTED;
1304 if (key_out.object_id != ino
1305 || key_out.type != GRUB_BTRFS_ITEM_TYPE_EXTENT_ITEM)
1306 {
1307 return FSW_VOLUME_CORRUPTED;
1308 }
1309 if ((fsw_ssize_t) elemsize < ((char *) &vol->extent->inl
1310 - (char *) vol->extent))
1311 {
1312 return FSW_VOLUME_CORRUPTED;
1313 }
1314 vol->extstart = fsw_u64_le_swap (key_out.offset);
1315 vol->extsize = elemsize;
1316 vol->extent = AllocatePool (elemsize);
1317 vol->extino = ino;
1318 vol->exttree = tree;
1319 if (!vol->extent)
1320 return FSW_OUT_OF_MEMORY;
1321
1322 err = fsw_btrfs_read_logical (vol, elemaddr, vol->extent, elemsize, 0, 1);
1323 if (err)
1324 return err;
1325
1326 vol->extend = vol->extstart + fsw_u64_le_swap (vol->extent->size);
1327 if (vol->extent->type == GRUB_BTRFS_EXTENT_REGULAR
1328 && (char *) vol->extent + elemsize
1329 >= (char *) &vol->extent->filled + sizeof (vol->extent->filled))
1330 vol->extend =
1331 vol->extstart + fsw_u64_le_swap (vol->extent->filled);
1332
1333 DPRINT (L"btrfs: %lx +0x%lx\n", fsw_u64_le_swap (key_out.offset), fsw_u64_le_swap (vol->extent->size));
1334 if (vol->extend <= pos)
1335 {
1336 return FSW_VOLUME_CORRUPTED;
1337 }
1338 }
1339
1340 csize = vol->extend - pos;
1341 extoff = pos - vol->extstart;
1342
1343 if (vol->extent->encryption ||vol->extent->encoding)
1344 {
1345 return FSW_UNSUPPORTED;
1346 }
1347
1348 switch(vol->extent->compression) {
1349 case GRUB_BTRFS_COMPRESSION_LZO:
1350 case GRUB_BTRFS_COMPRESSION_ZLIB:
1351 case GRUB_BTRFS_COMPRESSION_NONE:
1352 break;
1353 default:
1354 return FSW_UNSUPPORTED;
1355 }
1356
1357 count = ( csize + vol->sectorsize - 1) >> vol->sectorshift;
1358 switch (vol->extent->type)
1359 {
1360 case GRUB_BTRFS_EXTENT_INLINE:
1361 buf = AllocatePool( count << vol->sectorshift);
1362 if(!buf)
1363 return FSW_OUT_OF_MEMORY;
1364 if (vol->extent->compression == GRUB_BTRFS_COMPRESSION_ZLIB)
1365 {
1366 if (grub_zlib_decompress (vol->extent->inl, vol->extsize -
1367 ((uint8_t *) vol->extent->inl
1368 - (uint8_t *) vol->extent),
1369 extoff, buf, csize)
1370 != (fsw_ssize_t) csize)
1371 {
1372 FreePool(buf);
1373 return FSW_VOLUME_CORRUPTED;
1374 }
1375 }
1376 else if (vol->extent->compression == GRUB_BTRFS_COMPRESSION_LZO)
1377 {
1378 if (grub_btrfs_lzo_decompress(vol->extent->inl, vol->extsize -
1379 ((uint8_t *) vol->extent->inl
1380 - (uint8_t *) vol->extent),
1381 extoff, buf, csize)
1382 != (fsw_ssize_t) csize)
1383 {
1384 FreePool(buf);
1385                     return FSW_VOLUME_CORRUPTED;
1386 }
1387 }
1388 else
1389 fsw_memcpy (buf, vol->extent->inl + extoff, csize);
1390 break;
1391
1392 case GRUB_BTRFS_EXTENT_REGULAR:
1393 if (!vol->extent->laddr)
1394 break;
1395
1396 if (vol->extent->compression == GRUB_BTRFS_COMPRESSION_NONE)
1397 {
1398 if( count > 64 ) {
1399 count = 64;
1400 csize = count << vol->sectorshift;
1401 }
1402 buf = AllocatePool( count << vol->sectorshift);
1403 if(!buf)
1404 return FSW_OUT_OF_MEMORY;
1405 err = fsw_btrfs_read_logical (vol,
1406 fsw_u64_le_swap (vol->extent->laddr)
1407 + fsw_u64_le_swap (vol->extent->offset)
1408 + extoff, buf, csize, 0, 0);
1409 if (err) {
1410 FreePool(buf);
1411 return err;
1412 }
1413 break;
1414 }
1415 if (vol->extent->compression != GRUB_BTRFS_COMPRESSION_NONE)
1416 {
1417 char *tmp;
1418 uint64_t zsize;
1419 fsw_ssize_t ret;
1420
1421 zsize = fsw_u64_le_swap (vol->extent->compressed_size);
1422 tmp = AllocatePool (zsize);
1423 if (!tmp)
1424                 return FSW_OUT_OF_MEMORY;
1425 err = fsw_btrfs_read_logical (vol, fsw_u64_le_swap (vol->extent->laddr), tmp, zsize, 0, 0);
1426 if (err)
1427 {
1428 FreePool (tmp);
1429                 return FSW_VOLUME_CORRUPTED;
1430 }
1431
1432 buf = AllocatePool( count << vol->sectorshift);
1433 if(!buf) {
1434 FreePool(tmp);
1435 return FSW_OUT_OF_MEMORY;
1436 }
1437
1438 if (vol->extent->compression == GRUB_BTRFS_COMPRESSION_ZLIB)
1439 {
1440 ret = grub_zlib_decompress (tmp, zsize, extoff
1441 + fsw_u64_le_swap (vol->extent->offset),
1442 buf, csize);
1443 }
1444 else if (vol->extent->compression == GRUB_BTRFS_COMPRESSION_LZO)
1445 ret = grub_btrfs_lzo_decompress (tmp, zsize, extoff
1446 + fsw_u64_le_swap (vol->extent->offset),
1447 buf, csize);
1448 else
1449 ret = -1;
1450
1451 FreePool (tmp);
1452
1453 if (ret != (fsw_ssize_t) csize) {
1454                 FreePool(buf);   /* tmp was already freed above; release the output buffer instead */
1455                 return FSW_VOLUME_CORRUPTED;
1456 }
1457
1458 break;
1459 }
1460 break;
1461 default:
1462         return FSW_VOLUME_CORRUPTED;
1463 }
1464
1465 extent->log_count = count;
1466 if(buf) {
1467 if(csize < (count << vol->sectorshift))
1468 fsw_memzero( buf + csize, (count << vol->sectorshift) - csize);
1469 extent->buffer = buf;
1470 extent->type = FSW_EXTENT_TYPE_BUFFER;
1471 } else {
1472 extent->buffer = NULL;
1473 extent->type = FSW_EXTENT_TYPE_SPARSE;
1474 }
1475 return FSW_SUCCESS;
1476 }
1477
1478 static fsw_status_t fsw_btrfs_readlink(struct fsw_volume *volg, struct fsw_dnode *dnog,
1479 struct fsw_string *link_target)
1480 {
1481 struct fsw_btrfs_volume *vol = (struct fsw_btrfs_volume *)volg;
1482 struct fsw_btrfs_dnode *dno = (struct fsw_btrfs_dnode *)dnog;
1483 int i;
1484 fsw_status_t status;
1485 struct fsw_string s;
1486 char *tmp;
1487
1488 if (dno->g.size > FSW_PATH_MAX)
1489 return FSW_VOLUME_CORRUPTED;
1490
1491 tmp = AllocatePool(dno->g.size);
1492 if(!tmp)
1493 return FSW_OUT_OF_MEMORY;
1494
1495 i = 0;
1496 do {
1497 struct fsw_extent extent;
1498 int size;
1499         extent.log_start = i;
            extent.buffer = NULL;   /* stays NULL if fsw_btrfs_get_extent bails out before setting it */
1500 status = fsw_btrfs_get_extent(volg, dnog, &extent);
1501 if(status || extent.type != FSW_EXTENT_TYPE_BUFFER) {
1502 FreePool(tmp);
1503 if(extent.buffer)
1504 FreePool(extent.buffer);
1505 return FSW_VOLUME_CORRUPTED;
1506 }
1507 size = extent.log_count << vol->sectorshift;
1508 if(size > (dno->g.size - (i<<vol->sectorshift)))
1509 size = dno->g.size - (i<<vol->sectorshift);
1510 fsw_memcpy(tmp + (i<<vol->sectorshift), extent.buffer, size);
1511 FreePool(extent.buffer);
1512 i += extent.log_count;
1513 } while( (i << vol->sectorshift) < dno->g.size);
1514
1515 s.type = FSW_STRING_TYPE_UTF8;
1516 s.size = s.len = (int)dno->g.size;
1517 s.data = tmp;
1518 status = fsw_strdup_coerce(link_target, volg->host_string_type, &s);
1519 FreePool(tmp);
1520
1521     return status;
1522 }
1523
1524 static fsw_status_t fsw_btrfs_lookup_dir_item(struct fsw_btrfs_volume *vol,
1525 uint64_t tree_id, uint64_t object_id,
1526 struct fsw_string *lookup_name,
1527 struct btrfs_dir_item **direl_buf,
1528 struct btrfs_dir_item **direl_out
1529 )
1530 {
1531 uint64_t elemaddr;
1532 fsw_size_t elemsize;
1533 fsw_size_t allocated = 0;
1534 struct btrfs_key key;
1535 struct btrfs_key key_out;
1536 struct btrfs_dir_item *cdirel;
1537 fsw_status_t err;
1538
1539 *direl_buf = NULL;
1540
1541 key.object_id = object_id;
1542 key.type = GRUB_BTRFS_ITEM_TYPE_DIR_ITEM;
1543 key.offset = fsw_u64_le_swap (~grub_getcrc32c (1, lookup_name->data, lookup_name->size));
1544
1545 err = lower_bound (vol, &key, &key_out, tree_id, &elemaddr, &elemsize, NULL, 0);
1546 if (err)
1547 return err;
1548
1549 if (key_cmp (&key, &key_out) != 0)
1550 return FSW_NOT_FOUND;
1551
1552 if (elemsize > allocated)
1553 {
1554 allocated = 2 * elemsize;
1555 if(*direl_buf)
1556 FreePool (*direl_buf);
1557 *direl_buf = AllocatePool (allocated + 1);
1558 if (!*direl_buf)
1559 return FSW_OUT_OF_MEMORY;
1560 }
1561
1562 err = fsw_btrfs_read_logical (vol, elemaddr, *direl_buf, elemsize, 0, 1);
1563 if (err)
1564 return err;
1565
1566 for (cdirel = *direl_buf;
1567 (uint8_t *) cdirel - (uint8_t *) *direl_buf < (fsw_ssize_t) elemsize;
1568 cdirel = (void *) ((uint8_t *) (*direl_buf + 1)
1569 + fsw_u16_le_swap (cdirel->n)
1570 + fsw_u16_le_swap (cdirel->m)))
1571 {
1572 if (lookup_name->size == fsw_u16_le_swap (cdirel->n)
1573 && fsw_memeq (cdirel->name, lookup_name->data, lookup_name->size))
1574 break;
1575 }
1576 if ((uint8_t *) cdirel - (uint8_t *) *direl_buf >= (fsw_ssize_t) elemsize)
1577 return FSW_NOT_FOUND;
1578
1579 *direl_out = cdirel;
1580 return FSW_SUCCESS;
1581 }
1582
1583 static fsw_status_t fsw_btrfs_get_root_tree(
1584 struct fsw_btrfs_volume *vol,
1585 struct btrfs_key *key_in,
1586 uint64_t *tree_out)
1587 {
1588 fsw_status_t err;
1589 struct btrfs_root_item ri;
1590 struct btrfs_key key_out;
1591 uint64_t elemaddr;
1592 fsw_size_t elemsize;
1593
1594 err = lower_bound (vol, key_in, &key_out, vol->root_tree, &elemaddr, &elemsize, NULL, 0);
1595 if (err)
1596 return err;
1597
1598 if (key_in->object_id != key_out.object_id || key_in->type != key_out.type)
1599 return FSW_NOT_FOUND;
1600
1601 err = fsw_btrfs_read_logical (vol, elemaddr, &ri, sizeof (ri), 0, 1);
1602 if (err)
1603 return err;
1604
1605 *tree_out = ri.tree;
1606 return FSW_SUCCESS;
1607 }
1608
1609 static fsw_status_t fsw_btrfs_get_sub_dnode(
1610 struct fsw_btrfs_volume *vol,
1611 struct fsw_btrfs_dnode *dno,
1612 struct btrfs_dir_item *cdirel,
1613 struct fsw_string *name,
1614 struct fsw_dnode **child_dno_out)
1615 {
1616 fsw_status_t err;
1617 int child_type;
1618 uint64_t tree_id = dno->g.tree_id;
1619 uint64_t child_id;
1620
1621 switch (cdirel->key.type)
1622 {
1623 case GRUB_BTRFS_ITEM_TYPE_ROOT_ITEM:
1624 err = fsw_btrfs_get_root_tree (vol, &cdirel->key, &tree_id);
1625 if (err)
1626 return err;
1627
1628 child_type = GRUB_BTRFS_DIR_ITEM_TYPE_DIRECTORY;
1629 child_id = fsw_u64_le_swap(GRUB_BTRFS_OBJECT_ID_CHUNK);
1630 break;
1631 case GRUB_BTRFS_ITEM_TYPE_INODE_ITEM:
1632 child_type = cdirel->type;
1633 child_id = cdirel->key.object_id;
1634 break;
1635
1636 default:
1637 DPRINT (L"btrfs: unrecognised object type 0x%x", cdirel->key.type);
1638 return FSW_VOLUME_CORRUPTED;
1639 }
1640
1641 switch(child_type) {
1642 case GRUB_BTRFS_DIR_ITEM_TYPE_REGULAR:
1643 child_type = FSW_DNODE_TYPE_FILE;
1644 break;
1645 case GRUB_BTRFS_DIR_ITEM_TYPE_DIRECTORY:
1646 child_type = FSW_DNODE_TYPE_DIR;
1647 break;
1648 case GRUB_BTRFS_DIR_ITEM_TYPE_SYMLINK:
1649 child_type = FSW_DNODE_TYPE_SYMLINK;
1650 break;
1651 default:
1652 child_type = FSW_DNODE_TYPE_SPECIAL;
1653 break;
1654 }
1655 return fsw_dnode_create_with_tree(&dno->g, tree_id, child_id, child_type, name, child_dno_out);
1656 }
1657
1658 static fsw_status_t fsw_btrfs_dir_lookup(struct fsw_volume *volg, struct fsw_dnode *dnog,
1659 struct fsw_string *lookup_name, struct fsw_dnode **child_dno_out)
1660 {
1661 struct fsw_btrfs_volume *vol = (struct fsw_btrfs_volume *)volg;
1662 struct fsw_btrfs_dnode *dno = (struct fsw_btrfs_dnode *)dnog;
1663 fsw_status_t err;
1664 struct fsw_string s;
1665
1666 *child_dno_out = NULL;
1667
1668     /* a slave device gets an empty root */
1669 if (!vol->is_master)
1670 return FSW_NOT_FOUND;
1671
1672 err = fsw_strdup_coerce(&s, FSW_STRING_TYPE_UTF8, lookup_name);
1673 if(err)
1674 return err;
1675
1676 /* treat '...' under root as top root */
1677 if(dnog == volg->root && s.size == 3 && ((char *)s.data)[0]=='.' && ((char *)s.data)[1]=='.' && ((char *)s.data)[2]=='.')
1678 {
1679 fsw_strfree (&s);
1680 if(dnog->tree_id == vol->top_tree) {
1681 fsw_dnode_retain(dnog);
1682 *child_dno_out = dnog;
1683 return FSW_SUCCESS;
1684 }
1685 return fsw_dnode_create_with_tree(dnog,
1686 vol->top_tree, fsw_u64_le_swap(GRUB_BTRFS_OBJECT_ID_CHUNK),
1687 FSW_DNODE_TYPE_DIR, lookup_name, child_dno_out);
1688 }
1689 struct btrfs_dir_item *direl=NULL, *cdirel;
1690 err = fsw_btrfs_lookup_dir_item(vol, dnog->tree_id, dnog->dnode_id, &s, &direl, &cdirel);
1691 if(!err)
1692 err = fsw_btrfs_get_sub_dnode(vol, dno, cdirel, lookup_name, child_dno_out);
1693 if(direl)
1694 FreePool (direl);
1695 fsw_strfree (&s);
1696 return err;
1697 }
1698
1699 static fsw_status_t fsw_btrfs_get_default_root(struct fsw_btrfs_volume *vol, uint64_t root_dir_objectid)
1700 {
1701 fsw_status_t err;
1702 struct fsw_string s;
1703 struct btrfs_dir_item *direl=NULL, *cdirel;
1704 uint64_t default_tree_id = 0;
1705 struct btrfs_key top_root_key;
1706
1707 /* Get to top tree id */
1708 top_root_key.object_id = fsw_u64_le_swap(5UL);
1709 top_root_key.type = GRUB_BTRFS_ITEM_TYPE_ROOT_ITEM;
1710 top_root_key.offset = -1LL;
1711 err = fsw_btrfs_get_root_tree (vol, &top_root_key, &vol->top_tree);
1712 if (err)
1713 return err;
1714
1715 s.type = FSW_STRING_TYPE_UTF8;
1716 s.data = "default";
1717 s.size = 7;
1718 err = fsw_btrfs_lookup_dir_item(vol, vol->root_tree, root_dir_objectid, &s, &direl, &cdirel);
1719
1720     /* if the "default" lookup failed or is invalid, fall back to the top tree */
1721     if (err || /* failed */
1722         cdirel->type != GRUB_BTRFS_DIR_ITEM_TYPE_DIRECTORY || /* not dir */
1723         cdirel->key.type != GRUB_BTRFS_ITEM_TYPE_ROOT_ITEM || /* not tree */
1724         cdirel->key.object_id == fsw_u64_le_swap(5UL) || /* same as top */
1725         (err = fsw_btrfs_get_root_tree (vol, &cdirel->key, &default_tree_id)))
1726         { err = 0; default_tree_id = vol->top_tree; } /* the fallback itself is not an error */
1727
1728 if (!err)
1729 err = fsw_dnode_create_root_with_tree(&vol->g, default_tree_id,
1730 fsw_u64_le_swap (GRUB_BTRFS_OBJECT_ID_CHUNK), &vol->g.root);
1731 if (direl)
1732 FreePool (direl);
1733 return err;
1734 }
1735
1736 static fsw_status_t fsw_btrfs_dir_read(struct fsw_volume *volg, struct fsw_dnode *dnog,
1737 struct fsw_shandle *shand, struct fsw_dnode **child_dno_out)
1738 {
1739 struct fsw_btrfs_volume *vol = (struct fsw_btrfs_volume *)volg;
1740 struct fsw_btrfs_dnode *dno = (struct fsw_btrfs_dnode *)dnog;
1741 fsw_status_t err;
1742
1743 struct btrfs_key key_in, key_out;
1744 uint64_t elemaddr;
1745 fsw_size_t elemsize;
1746 fsw_size_t allocated = 0;
1747 struct btrfs_dir_item *direl = NULL;
1748 struct fsw_btrfs_leaf_descriptor desc;
1749 int r = 0;
1750 uint64_t tree = dnog->tree_id;
1751
1752     /* a slave device gets an empty root */
1753 if (!vol->is_master)
1754 return FSW_NOT_FOUND;
1755
1756 key_in.object_id = dnog->dnode_id;
1757 key_in.type = GRUB_BTRFS_ITEM_TYPE_DIR_ITEM;
1758 key_in.offset = shand->pos;
1759
1760 if((int64_t)key_in.offset == -1LL)
1761 {
1762 return FSW_NOT_FOUND;
1763 }
1764
1765 err = lower_bound (vol, &key_in, &key_out, tree, &elemaddr, &elemsize, &desc, 0);
1766 if (err) {
1767 return err;
1768 }
1769
1770 DPRINT(L"key_in %lx:%x:%lx out %lx:%x:%lx elem %lx+%lx\n",
1771 key_in.object_id, key_in.type, key_in.offset,
1772 key_out.object_id, key_out.type, key_out.offset,
1773 elemaddr, elemsize);
1774 if (key_out.type != GRUB_BTRFS_ITEM_TYPE_DIR_ITEM ||
1775 key_out.object_id != key_in.object_id)
1776 {
1777 r = next (vol, &desc, &elemaddr, &elemsize, &key_out);
1778 if (r <= 0)
1779 goto out;
1780 DPRINT(L"next out %lx:%x:%lx\n",
1781 key_out.object_id, key_out.type, key_out.offset, elemaddr, elemsize);
1782 }
1783 if (key_out.type == GRUB_BTRFS_ITEM_TYPE_DIR_ITEM &&
1784 key_out.object_id == key_in.object_id &&
1785 fsw_u64_le_swap(key_out.offset) <= fsw_u64_le_swap(key_in.offset))
1786 {
1787 r = next (vol, &desc, &elemaddr, &elemsize, &key_out);
1788 if (r <= 0)
1789 goto out;
1790 DPRINT(L"next out %lx:%x:%lx\n",
1791 key_out.object_id, key_out.type, key_out.offset, elemaddr, elemsize);
1792 }
1793
1794 do
1795 {
1796 struct btrfs_dir_item *cdirel;
1797 if (key_out.type != GRUB_BTRFS_ITEM_TYPE_DIR_ITEM ||
1798 key_out.object_id != key_in.object_id)
1799 {
1800 r = 0;
1801 break;
1802 }
1803 if (elemsize > allocated)
1804 {
1805 allocated = 2 * elemsize;
1806 if(direl)
1807 FreePool (direl);
1808 direl = AllocatePool (allocated + 1);
1809 if (!direl)
1810 {
1811 r = -FSW_OUT_OF_MEMORY;
1812 break;
1813 }
1814 }
1815
1816 err = fsw_btrfs_read_logical (vol, elemaddr, direl, elemsize, 0, 1);
1817 if (err)
1818 {
1819 r = -err;
1820 break;
1821 }
1822
1823 for (cdirel = direl;
1824 (uint8_t *) cdirel - (uint8_t *) direl
1825 < (fsw_ssize_t) elemsize;
1826 cdirel = (void *) ((uint8_t *) (direl + 1)
1827 + fsw_u16_le_swap (cdirel->n)
1828 + fsw_u16_le_swap (cdirel->m)))
1829 {
1830 struct fsw_string s;
1831 s.type = FSW_STRING_TYPE_UTF8;
1832 s.size = s.len = fsw_u16_le_swap (cdirel->n);
1833 s.data = cdirel->name;
1834 DPRINT(L"item key %lx:%x%lx, type %lx, namelen=%lx\n",
1835 cdirel->key.object_id, cdirel->key.type, cdirel->key.offset, cdirel->type, s.size);
1836 if(!err) {
1837 err = fsw_btrfs_get_sub_dnode(vol, dno, cdirel, &s, child_dno_out);
1838 if(direl)
1839 FreePool (direl);
1840 free_iterator (&desc);
1841 shand->pos = key_out.offset;
1842 return FSW_SUCCESS;
1843 }
1844 }
1845 r = next (vol, &desc, &elemaddr, &elemsize, &key_out);
1846 DPRINT(L"next2 out %lx:%x:%lx\n",
1847 key_out.object_id, key_out.type, key_out.offset, elemaddr, elemsize);
1848 }
1849 while (r > 0);
1850
1851 out:
1852 if(direl)
1853 FreePool (direl);
1854 free_iterator (&desc);
1855
1856 r = r < 0 ? -r : FSW_NOT_FOUND;
1857 return r;
1858 }
1859
1860 //
1861 // Dispatch Table
1862 //
1863
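// Note: the initializers below are positional and must stay in the member
// order of struct fsw_fstype_table as declared in fsw_core.h.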
1864 struct fsw_fstype_table FSW_FSTYPE_TABLE_NAME(btrfs) = {
1865 { FSW_STRING_TYPE_UTF8, 5, 5, "btrfs" },
1866 sizeof(struct fsw_btrfs_volume),
1867 sizeof(struct fsw_btrfs_dnode),
1868
1869 fsw_btrfs_volume_mount,
1870 fsw_btrfs_volume_free,
1871 fsw_btrfs_volume_stat,
1872 fsw_btrfs_dnode_fill,
1873 fsw_btrfs_dnode_free,
1874 fsw_btrfs_dnode_stat,
1875 fsw_btrfs_get_extent,
1876 fsw_btrfs_dir_lookup,
1877 fsw_btrfs_dir_read,
1878 fsw_btrfs_readlink,
1879 };
1880