1 /*
2 * fsw_btrfs.c:
3 * btrfs UEFI driver
4 * by Samuel Liao
5 * Copyright (c) 2013 Tencent, Inc.
6 *
7  * This driver is based on the GRUB 2.0 btrfs implementation.
8 */
9
10 /* btrfs.c - B-tree file system. */
11 /*
12 * GRUB -- GRand Unified Bootloader
13 * Copyright (C) 2010 Free Software Foundation, Inc.
14 *
15 * GRUB is free software: you can redistribute it and/or modify
16 * it under the terms of the GNU General Public License as published by
17 * the Free Software Foundation, either version 3 of the License, or
18 * (at your option) any later version.
19 *
20 * GRUB is distributed in the hope that it will be useful,
21 * but WITHOUT ANY WARRANTY; without even the implied warranty of
22 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
23 * GNU General Public License for more details.
24 *
25 * You should have received a copy of the GNU General Public License
26 * along with GRUB. If not, see <http://www.gnu.org/licenses/>.
27 */
28
29 //#define DPRINT(x...) Print(x)
30
31 #include "fsw_core.h"
32 #define uint8_t fsw_u8
33 #define uint16_t fsw_u16
34 #define uint32_t fsw_u32
35 #define uint64_t fsw_u64
36 #define int64_t fsw_s64
37 #define int32_t fsw_s32
38
39 #ifndef DPRINT
40 #define DPRINT(x...) /* */
41 #endif
42
43 /* no single I/O or element size exceeds 2G */
44 #define fsw_size_t int
45 #define fsw_ssize_t int
46 /* decompression offsets and sizes never exceed 2G, so 32 bits are enough */
47 #define grub_off_t int32_t
48 #define grub_size_t int32_t
49 #define grub_ssize_t int32_t
50 #include "crc32c.c"
51 #include "gzio.c"
52 #define MINILZO_CFG_SKIP_LZO_PTR 1
53 #define MINILZO_CFG_SKIP_LZO_UTIL 1
54 #define MINILZO_CFG_SKIP_LZO_STRING 1
55 #define MINILZO_CFG_SKIP_LZO_INIT 1
56 #define MINILZO_CFG_SKIP_LZO1X_DECOMPRESS 1
57 #define MINILZO_CFG_SKIP_LZO1X_1_COMPRESS 1
58 #define MINILZO_CFG_SKIP_LZO_STRING 1
59 #include "minilzo.c"
60 #include "scandisk.c"
61
62 #define BTRFS_DEFAULT_BLOCK_SIZE 4096
63 //#define BTRFS_DEFAULT_BLOCK_SIZE 8192
64 #define BTRFS_INITIAL_BCACHE_SIZE 1024
65 #define GRUB_BTRFS_SIGNATURE "_BHRfS_M"
66
67 /* From http://www.oberhumer.com/opensource/lzo/lzofaq.php
68 * LZO will expand incompressible data by a little amount. I still haven't
69 * computed the exact values, but I suggest using these formulas for
70 * a worst-case expansion calculation:
71 *
72 * output_block_size = input_block_size + (input_block_size / 16) + 64 + 3
73 * */
74 #define GRUB_BTRFS_LZO_BLOCK_SIZE 4096
75 #define GRUB_BTRFS_LZO_BLOCK_MAX_CSIZE (GRUB_BTRFS_LZO_BLOCK_SIZE + \
76 (GRUB_BTRFS_LZO_BLOCK_SIZE / 16) + 64 + 3)
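/* With the 4 KiB block size used here this works out to
 * 4096 + 4096/16 + 64 + 3 = 4419 bytes per compressed block. */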
77
78 /*
79  * on-disk structures have the 'btrfs_' prefix and are little endian;
80  * in-memory structures have the 'fsw_btrfs_' prefix
81 */
82 typedef uint8_t btrfs_checksum_t[0x20];
83 typedef uint32_t btrfs_uuid_t[4];
84
85 struct btrfs_device
86 {
87 uint64_t device_id;
88 uint64_t size;
89 uint8_t dummy[0x62 - 0x10];
90 } __attribute__ ((__packed__));
91
92 struct btrfs_superblock
93 {
94 btrfs_checksum_t checksum;
95 btrfs_uuid_t uuid;
96 uint8_t dummy[0x10];
97 uint8_t signature[sizeof (GRUB_BTRFS_SIGNATURE) - 1];
98 uint64_t generation;
99 uint64_t root_tree;
100 uint64_t chunk_tree;
101 uint8_t dummy2[0x10];
102 uint64_t total_bytes;
103 uint64_t bytes_used;
104 uint64_t root_dir_objectid;
105 #define BTRFS_MAX_NUM_DEVICES 0x10000
106 uint64_t num_devices;
107 uint32_t sectorsize;
108 uint32_t nodesize;
109
110 uint8_t dummy3[0x31];
111 struct btrfs_device this_device;
112 char label[0x100];
113 uint8_t dummy4[0x100];
114 uint8_t bootstrap_mapping[0x800];
115 } __attribute__ ((__packed__));
116
117 struct btrfs_header
118 {
119 btrfs_checksum_t checksum;
120 btrfs_uuid_t uuid;
121 uint8_t dummy[0x30];
122 uint32_t nitems;
123 uint8_t level;
124 } __attribute__ ((__packed__));
125
126 struct fsw_btrfs_device_desc
127 {
128 struct fsw_volume * dev;
129 uint64_t id;
130 };
131
132 struct fsw_btrfs_volume
133 {
134 struct fsw_volume g; //!< Generic volume structure
135
136 /* superblock shadows */
137 uint8_t bootstrap_mapping[0x800];
138 btrfs_uuid_t uuid;
139 uint64_t total_bytes;
140 uint64_t bytes_used;
141 uint64_t chunk_tree;
142 uint64_t root_tree;
143 uint64_t top_tree; /* top volume tree */
144 unsigned num_devices;
145 unsigned sectorshift;
146 unsigned sectorsize;
147 int is_master;
148
149 struct fsw_btrfs_device_desc *devices_attached;
150 unsigned n_devices_attached;
151 unsigned n_devices_allocated;
152
153 /* Cached extent data. */
154 uint64_t extstart;
155 uint64_t extend;
156 uint64_t extino;
157 uint64_t exttree;
158 uint32_t extsize;
159 struct btrfs_extent_data *extent;
160 };
161
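/* These values match the on-disk btrfs key types (BTRFS_*_KEY):
 * 1 = INODE_ITEM, 12 = INODE_REF, 84 = DIR_ITEM, 108 = EXTENT_DATA,
 * 132 = ROOT_ITEM, 216 = DEV_ITEM, 228 = CHUNK_ITEM. */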
162 enum
163 {
164 GRUB_BTRFS_ITEM_TYPE_INODE_ITEM = 0x01,
165 GRUB_BTRFS_ITEM_TYPE_INODE_REF = 0x0c,
166 GRUB_BTRFS_ITEM_TYPE_DIR_ITEM = 0x54,
167 GRUB_BTRFS_ITEM_TYPE_EXTENT_ITEM = 0x6c,
168 GRUB_BTRFS_ITEM_TYPE_ROOT_ITEM = 0x84,
169 GRUB_BTRFS_ITEM_TYPE_DEVICE = 0xd8,
170 GRUB_BTRFS_ITEM_TYPE_CHUNK = 0xe4
171 };
172
173 struct btrfs_key
174 {
175 uint64_t object_id;
176 uint8_t type;
177 uint64_t offset;
178 } __attribute__ ((__packed__));
179
180 struct btrfs_chunk_item
181 {
182 uint64_t size;
183 uint64_t dummy;
184 uint64_t stripe_length;
185 uint64_t type;
186 #define GRUB_BTRFS_CHUNK_TYPE_BITS_DONTCARE 0x07
187 #define GRUB_BTRFS_CHUNK_TYPE_SINGLE 0x00
188 #define GRUB_BTRFS_CHUNK_TYPE_RAID0 0x08
189 #define GRUB_BTRFS_CHUNK_TYPE_RAID1 0x10
190 #define GRUB_BTRFS_CHUNK_TYPE_DUPLICATED 0x20
191 #define GRUB_BTRFS_CHUNK_TYPE_RAID10 0x40
192 uint8_t dummy2[0xc];
193 uint16_t nstripes;
194 uint16_t nsubstripes;
195 } __attribute__ ((__packed__));
196
197 struct btrfs_chunk_stripe
198 {
199 uint64_t device_id;
200 uint64_t offset;
201 btrfs_uuid_t device_uuid;
202 } __attribute__ ((__packed__));
203
204 struct btrfs_leaf_node
205 {
206 struct btrfs_key key;
207 uint32_t offset;
208 uint32_t size;
209 } __attribute__ ((__packed__));
210
211 struct btrfs_internal_node
212 {
213 struct btrfs_key key;
214 uint64_t addr;
215 uint64_t dummy;
216 } __attribute__ ((__packed__));
217
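/* Directory item as stored on disk: 'm' is the length of any attached data,
 * 'n' is the name length; the name bytes follow the fixed header, which is
 * why the directory-iteration code below advances by n + m. */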
218 struct btrfs_dir_item
219 {
220 struct btrfs_key key;
221 uint64_t transid;
222 uint16_t m;
223 uint16_t n;
224 #define GRUB_BTRFS_DIR_ITEM_TYPE_REGULAR 1
225 #define GRUB_BTRFS_DIR_ITEM_TYPE_DIRECTORY 2
226 #define GRUB_BTRFS_DIR_ITEM_TYPE_SYMLINK 7
227 uint8_t type;
228 char name[0];
229 } __attribute__ ((__packed__));
230
231 struct fsw_btrfs_leaf_descriptor
232 {
233 unsigned depth;
234 unsigned allocated;
235 struct
236 {
237 uint64_t addr;
238 unsigned iter;
239 unsigned maxiter;
240 int leaf;
241 } *data;
242 };
243
244 struct btrfs_root_item
245 {
246 uint8_t dummy[0xb0];
247 uint64_t tree;
248 uint64_t inode;
249 } __attribute__ ((__packed__));
250
251 struct btrfs_time
252 {
253 int64_t sec;
254 uint32_t nanosec;
255 } __attribute__ ((__packed__));
256
257 struct btrfs_inode
258 {
259 uint64_t gen_id;
260 uint64_t trans_id;
261 uint64_t size;
262 uint64_t nbytes;
263 uint64_t block_group;
264 uint32_t nlink;
265 uint32_t uid;
266 uint32_t gid;
267 uint32_t mode;
268 uint64_t rdev;
269 uint64_t flags;
270
271 uint64_t seq;
272
273 uint64_t reserved[4];
274 struct btrfs_time atime;
275 struct btrfs_time ctime;
276 struct btrfs_time mtime;
277 struct btrfs_time otime;
278 } __attribute__ ((__packed__));
279
280 struct fsw_btrfs_dnode {
281 struct fsw_dnode g; //!< Generic dnode structure
282 struct btrfs_inode *raw; //!< Full raw inode structure
283 };
284
285 struct btrfs_extent_data
286 {
287 uint64_t dummy;
288 uint64_t size;
289 uint8_t compression;
290 uint8_t encryption;
291 uint16_t encoding;
292 uint8_t type;
293 union
294 {
295 char inl[0];
296 struct
297 {
298 uint64_t laddr;
299 uint64_t compressed_size;
300 uint64_t offset;
301 uint64_t filled;
302 };
303 };
304 } __attribute__ ((__packed__));
305
306 #define GRUB_BTRFS_EXTENT_INLINE 0
307 #define GRUB_BTRFS_EXTENT_REGULAR 1
308
309 #define GRUB_BTRFS_COMPRESSION_NONE 0
310 #define GRUB_BTRFS_COMPRESSION_ZLIB 1
311 #define GRUB_BTRFS_COMPRESSION_LZO 2
312
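/* 0x100 (256) is the first free btrfs object id and the inode number of every
 * subvolume's root directory; it is used below as the root dnode id. */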
313 #define GRUB_BTRFS_OBJECT_ID_CHUNK 0x100
314
315 struct fsw_btrfs_uuid_list {
316 struct fsw_btrfs_volume *master;
317 struct fsw_btrfs_uuid_list *next;
318 };
319
320 static int uuid_eq(btrfs_uuid_t u1, btrfs_uuid_t u2) {
321 return u1[0]==u2[0] && u1[1]==u2[1] && u1[2]==u2[2] && u1[3]==u2[3];
322 }
323
324 static struct fsw_btrfs_uuid_list *master_uuid_list = NULL;
325
326 static int master_uuid_add(struct fsw_btrfs_volume *vol, struct fsw_btrfs_volume **master_out) {
327 struct fsw_btrfs_uuid_list *l;
328
329 for (l = master_uuid_list; l; l=l->next)
330 if(uuid_eq(l->master->uuid, vol->uuid)) {
331 if(master_out)
332 *master_out = l->master;
333 return 0;
334 }
335
336 l = AllocatePool(sizeof(struct fsw_btrfs_uuid_list));
337 l->master = vol;
338 l->next = master_uuid_list;
339 master_uuid_list = l;
340 return 1;
341 }
342
343 static void master_uuid_remove(struct fsw_btrfs_volume *vol) {
344 struct fsw_btrfs_uuid_list **lp;
345
346 for (lp = &master_uuid_list; *lp; lp=&(*lp)->next)
347 if((*lp)->master == vol) {
348 struct fsw_btrfs_uuid_list *n = *lp;
349 *lp = n->next;
350 FreePool(n);
351 break;
352 }
353 }
354
355 static fsw_status_t btrfs_set_superblock_info(struct fsw_btrfs_volume *vol, struct btrfs_superblock *sb)
356 {
357 int i;
358 vol->uuid[0] = sb->uuid[0];
359 vol->uuid[1] = sb->uuid[1];
360 vol->uuid[2] = sb->uuid[2];
361 vol->uuid[3] = sb->uuid[3];
362 vol->chunk_tree = sb->chunk_tree;
363 vol->root_tree = sb->root_tree;
364 vol->total_bytes = fsw_u64_le_swap(sb->total_bytes);
365 vol->bytes_used = fsw_u64_le_swap(sb->bytes_used);
366
367 vol->sectorshift = 0;
368 vol->sectorsize = fsw_u32_le_swap(sb->sectorsize);
369 for(i=9; i<20; i++) {
370 if((1UL<<i) == vol->sectorsize) {
371 vol->sectorshift = i;
372 break;
373 }
374 }
375 if(fsw_u64_le_swap(sb->num_devices) > BTRFS_MAX_NUM_DEVICES)
376 vol->num_devices = BTRFS_MAX_NUM_DEVICES;
377 else
378 vol->num_devices = fsw_u64_le_swap(sb->num_devices);
379 fsw_memcpy(vol->bootstrap_mapping, sb->bootstrap_mapping, sizeof(vol->bootstrap_mapping));
380 return FSW_SUCCESS;
381 }
382
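/* Superblock mirror locations, in units of 4 KiB blocks: 64 KiB, 64 MiB,
 * 256 GiB and 1 PiB into the device. */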
383 static uint64_t superblock_pos[4] = {
384 64 / 4,
385 64 * 1024 / 4,
386 256 * 1048576 / 4,
387 1048576ULL * 1048576ULL / 4
388 };
389
390 static fsw_status_t fsw_btrfs_read_logical(struct fsw_btrfs_volume *vol,
391 uint64_t addr, void *buf, fsw_size_t size, int rdepth, int cache_level);
392
393 static fsw_status_t btrfs_read_superblock (struct fsw_volume *vol, struct btrfs_superblock *sb_out)
394 {
395 unsigned i;
396 uint64_t total_blocks = 1024;
397 fsw_status_t err = FSW_SUCCESS;
398
399 fsw_set_blocksize(vol, BTRFS_DEFAULT_BLOCK_SIZE, BTRFS_DEFAULT_BLOCK_SIZE);
400 for (i = 0; i < 4; i++)
401 {
402 uint8_t *buffer;
403 struct btrfs_superblock *sb;
404
405 /* Don't try additional superblocks beyond device size. */
406 if (total_blocks <= superblock_pos[i])
407 break;
408
409 err = fsw_block_get(vol, superblock_pos[i], 0, (void **)&buffer);
410 if (err == FSW_UNSUPPORTED) {
411 fsw_block_release(vol, superblock_pos[i], buffer);
412 break;
413 }
414
415 sb = (struct btrfs_superblock *)buffer;
416 if (!fsw_memeq (sb->signature, GRUB_BTRFS_SIGNATURE,
417 sizeof (GRUB_BTRFS_SIGNATURE) - 1))
418 {
419 fsw_block_release(vol, superblock_pos[i], buffer);
420 break;
421 }
422 if (i == 0 || fsw_u64_le_swap (sb->generation) > fsw_u64_le_swap (sb_out->generation))
423 {
424 fsw_memcpy (sb_out, sb, sizeof (*sb));
425 total_blocks = fsw_u64_le_swap (sb->this_device.size) >> 12;
426 }
427 fsw_block_release(vol, superblock_pos[i], buffer);
428 }
429
430 if ((err == FSW_UNSUPPORTED || !err) && i == 0)
431 return FSW_UNSUPPORTED;
432
433 if (err == FSW_UNSUPPORTED)
434 err = FSW_SUCCESS;
435
436 if(err == 0)
437 DPRINT(L"btrfs: UUID: %08x-%08x-%08x-%08x device id: %d\n",
438 sb_out->uuid[0], sb_out->uuid[1], sb_out->uuid[2], sb_out->uuid[3],
439 sb_out->this_device.device_id);
440 return err;
441 }
442
443 static int key_cmp (const struct btrfs_key *a, const struct btrfs_key *b)
444 {
445 if (fsw_u64_le_swap (a->object_id) < fsw_u64_le_swap (b->object_id))
446 return -1;
447 if (fsw_u64_le_swap (a->object_id) > fsw_u64_le_swap (b->object_id))
448 return +1;
449
450 if (a->type < b->type)
451 return -1;
452 if (a->type > b->type)
453 return +1;
454
455 if (fsw_u64_le_swap (a->offset) < fsw_u64_le_swap (b->offset))
456 return -1;
457 if (fsw_u64_le_swap (a->offset) > fsw_u64_le_swap (b->offset))
458 return +1;
459 return 0;
460 }
461
462 static void free_iterator (struct fsw_btrfs_leaf_descriptor *desc)
463 {
464 fsw_free (desc->data);
465 }
466
467 static fsw_status_t save_ref (struct fsw_btrfs_leaf_descriptor *desc,
468 uint64_t addr, unsigned i, unsigned m, int l)
469 {
470 desc->depth++;
471 if (desc->allocated < desc->depth)
472 {
473 void *newdata;
474 int oldsize = sizeof (desc->data[0]) * desc->allocated;
475 desc->allocated *= 2;
476 newdata = AllocatePool (sizeof (desc->data[0]) * desc->allocated);
477 if (!newdata)
478 return FSW_OUT_OF_MEMORY;
479 fsw_memcpy(newdata, desc->data, oldsize);
480 FreePool(desc->data);
481 desc->data = newdata;
482 }
483 desc->data[desc->depth - 1].addr = addr;
484 desc->data[desc->depth - 1].iter = i;
485 desc->data[desc->depth - 1].maxiter = m;
486 desc->data[desc->depth - 1].leaf = l;
487 return FSW_SUCCESS;
488 }
489
490 static int next (struct fsw_btrfs_volume *vol,
491 struct fsw_btrfs_leaf_descriptor *desc,
492 uint64_t * outaddr, fsw_size_t * outsize,
493 struct btrfs_key *key_out)
494 {
495 fsw_status_t err;
496 struct btrfs_leaf_node leaf;
497
498 for (; desc->depth > 0; desc->depth--)
499 {
500 desc->data[desc->depth - 1].iter++;
501 if (desc->data[desc->depth - 1].iter
502 < desc->data[desc->depth - 1].maxiter)
503 break;
504 }
505 if (desc->depth == 0)
506 return 0;
507 while (!desc->data[desc->depth - 1].leaf)
508 {
509 struct btrfs_internal_node node;
510 struct btrfs_header head;
511 fsw_memzero(&node, sizeof(node));
512
513 err = fsw_btrfs_read_logical (vol, desc->data[desc->depth - 1].iter
514 * sizeof (node)
515 + sizeof (struct btrfs_header)
516 + desc->data[desc->depth - 1].addr,
517 &node, sizeof (node), 0, 1);
518 if (err)
519 return -err;
520
521 err = fsw_btrfs_read_logical (vol, fsw_u64_le_swap (node.addr),
522 &head, sizeof (head), 0, 1);
523 if (err)
524 return -err;
525
526 save_ref (desc, fsw_u64_le_swap (node.addr), 0,
527 fsw_u32_le_swap (head.nitems), !head.level);
528 }
529 err = fsw_btrfs_read_logical (vol, desc->data[desc->depth - 1].iter
530 * sizeof (leaf)
531 + sizeof (struct btrfs_header)
532 + desc->data[desc->depth - 1].addr, &leaf,
533 sizeof (leaf), 0, 1);
534 if (err)
535 return -err;
536 *outsize = fsw_u32_le_swap (leaf.size);
537 *outaddr = desc->data[desc->depth - 1].addr + sizeof (struct btrfs_header)
538 + fsw_u32_le_swap (leaf.offset);
539 *key_out = leaf.key;
540 return 1;
541 }
542
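/* Map the B-tree recursion depth to the cache level passed down to
 * fsw_btrfs_read_logical(): depth 0 gets level 5, each deeper level drops the
 * cache level by one, bottoming out at 1 for depth 4 and beyond. */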
543 #define depth2cache(x) ((x) >= 4 ? 1 : 5-(x))
544 static fsw_status_t lower_bound (struct fsw_btrfs_volume *vol,
545 const struct btrfs_key *key_in,
546 struct btrfs_key *key_out,
547 uint64_t root,
548 uint64_t *outaddr, fsw_size_t *outsize,
549 struct fsw_btrfs_leaf_descriptor *desc,
550 int rdepth)
551 {
552 uint64_t addr = fsw_u64_le_swap (root);
553 int depth = -1;
554
555 if (desc)
556 {
557 desc->allocated = 16;
558 desc->depth = 0;
559 desc->data = AllocatePool (sizeof (desc->data[0]) * desc->allocated);
560 if (!desc->data)
561 return FSW_OUT_OF_MEMORY;
562 }
563
564     /* A limit of 2 would work as well, but be robust and allow a bit more just in case.
565 */
566 if (rdepth > 10)
567 return FSW_VOLUME_CORRUPTED;
568
569 DPRINT (L"btrfs: retrieving %lx %x %lx\n",
570 key_in->object_id, key_in->type, key_in->offset);
571
572 while (1)
573 {
574 fsw_status_t err;
575 struct btrfs_header head;
576 fsw_memzero(&head, sizeof(head));
577
578 reiter:
579 depth++;
580 /* FIXME: preread few nodes into buffer. */
581 err = fsw_btrfs_read_logical (vol, addr, &head, sizeof (head),
582 rdepth + 1, depth2cache(rdepth));
583 if (err)
584 return err;
585 addr += sizeof (head);
586 if (head.level)
587 {
588 unsigned i;
589 struct btrfs_internal_node node, node_last;
590 int have_last = 0;
591 fsw_memzero (&node_last, sizeof (node_last));
592 for (i = 0; i < fsw_u32_le_swap (head.nitems); i++)
593 {
594 err = fsw_btrfs_read_logical (vol, addr + i * sizeof (node),
595 &node, sizeof (node), rdepth + 1, depth2cache(rdepth));
596 if (err)
597 return err;
598
599 DPRINT (L"btrfs: internal node (depth %d) %lx %x %lx\n", depth,
600 node.key.object_id, node.key.type,
601 node.key.offset);
602
603 if (key_cmp (&node.key, key_in) == 0)
604 {
605 err = FSW_SUCCESS;
606 if (desc)
607 err = save_ref (desc, addr - sizeof (head), i,
608 fsw_u32_le_swap (head.nitems), 0);
609 if (err)
610 return err;
611 addr = fsw_u64_le_swap (node.addr);
612 goto reiter;
613 }
614 if (key_cmp (&node.key, key_in) > 0)
615 break;
616 node_last = node;
617 have_last = 1;
618 }
619 if (have_last)
620 {
621 err = FSW_SUCCESS;
622 if (desc)
623 err = save_ref (desc, addr - sizeof (head), i - 1,
624 fsw_u32_le_swap (head.nitems), 0);
625 if (err)
626 return err;
627 addr = fsw_u64_le_swap (node_last.addr);
628 goto reiter;
629 }
630 *outsize = 0;
631 *outaddr = 0;
632 fsw_memzero (key_out, sizeof (*key_out));
633 if (desc)
634 return save_ref (desc, addr - sizeof (head), -1,
635 fsw_u32_le_swap (head.nitems), 0);
636 return FSW_SUCCESS;
637 }
638 {
639 unsigned i;
640 struct btrfs_leaf_node leaf, leaf_last;
641 int have_last = 0;
642 for (i = 0; i < fsw_u32_le_swap (head.nitems); i++)
643 {
644 err = fsw_btrfs_read_logical (vol, addr + i * sizeof (leaf),
645 &leaf, sizeof (leaf), rdepth + 1, depth2cache(rdepth));
646 if (err)
647 return err;
648
649 DPRINT (L"btrfs: leaf (depth %d) %lx %x %lx\n", depth,
650 leaf.key.object_id, leaf.key.type, leaf.key.offset);
651
652 if (key_cmp (&leaf.key, key_in) == 0)
653 {
654 fsw_memcpy (key_out, &leaf.key, sizeof (*key_out));
655 *outsize = fsw_u32_le_swap (leaf.size);
656 *outaddr = addr + fsw_u32_le_swap (leaf.offset);
657 if (desc)
658 return save_ref (desc, addr - sizeof (head), i,
659 fsw_u32_le_swap (head.nitems), 1);
660 return FSW_SUCCESS;
661 }
662
663 if (key_cmp (&leaf.key, key_in) > 0)
664 break;
665
666 have_last = 1;
667 leaf_last = leaf;
668 }
669
670 if (have_last)
671 {
672 fsw_memcpy (key_out, &leaf_last.key, sizeof (*key_out));
673 *outsize = fsw_u32_le_swap (leaf_last.size);
674 *outaddr = addr + fsw_u32_le_swap (leaf_last.offset);
675 if (desc)
676 return save_ref (desc, addr - sizeof (head), i - 1,
677 fsw_u32_le_swap (head.nitems), 1);
678 return FSW_SUCCESS;
679 }
680 *outsize = 0;
681 *outaddr = 0;
682 fsw_memzero (key_out, sizeof (*key_out));
683 if (desc)
684 return save_ref (desc, addr - sizeof (head), -1,
685 fsw_u32_le_swap (head.nitems), 1);
686 return FSW_SUCCESS;
687 }
688 }
689 }
690
691 static int btrfs_add_multi_device(struct fsw_btrfs_volume *master, struct fsw_volume *slave, struct btrfs_superblock *sb)
692 {
693 int i;
694 for( i = 0; i < master->n_devices_attached; i++)
695 if(sb->this_device.device_id == master->devices_attached[i].id)
696 return FSW_UNSUPPORTED;
697
698 slave = clone_dummy_volume(slave);
699 if(slave == NULL)
700 return FSW_OUT_OF_MEMORY;
701 fsw_set_blocksize(slave, master->sectorsize, master->sectorsize);
702 slave->bcache_size = BTRFS_INITIAL_BCACHE_SIZE;
703
704 master->devices_attached[i].id = sb->this_device.device_id;
705 master->devices_attached[i].dev = slave;
706 master->n_devices_attached++;
707
708 DPRINT(L"Found slave %d\n", sb->this_device.device_id);
709 return FSW_SUCCESS;
710 }
711
712 static int scan_disks_hook(struct fsw_volume *volg, struct fsw_volume *slave) {
713 struct fsw_btrfs_volume *vol = (struct fsw_btrfs_volume *)volg;
714 struct btrfs_superblock sb;
715 fsw_status_t err;
716
717 if(vol->n_devices_attached >= vol->n_devices_allocated)
718 return FSW_UNSUPPORTED;
719
720 err = btrfs_read_superblock(slave, &sb);
721 if(err)
722 return FSW_UNSUPPORTED;
723
724 if(!uuid_eq(vol->uuid, sb.uuid))
725 return FSW_UNSUPPORTED;
726
727 return btrfs_add_multi_device(vol, slave, &sb);
728 }
729
730 static struct fsw_volume *
731 find_device (struct fsw_btrfs_volume *vol, uint64_t id, int do_rescan) {
732 int i;
733
734 do {
735 for (i = 0; i < vol->n_devices_attached; i++)
736 if (id == vol->devices_attached[i].id)
737 return vol->devices_attached[i].dev;
738 } while(vol->n_devices_attached < vol->n_devices_allocated &&
739 do_rescan-- > 0 &&
740 scan_disks(scan_disks_hook, &vol->g) > 0);
741 DPRINT(L"sub device %d not found\n", id);
742 return NULL;
743 }
744
745 static fsw_status_t fsw_btrfs_read_logical (struct fsw_btrfs_volume *vol, uint64_t addr,
746 void *buf, fsw_size_t size, int rdepth, int cache_level)
747 {
748 while (size > 0)
749 {
750 uint8_t *ptr;
751 struct btrfs_key *key;
752 struct btrfs_chunk_item *chunk;
753 uint64_t csize;
754 fsw_status_t err = 0;
755 struct btrfs_key key_out;
756 int challoc = 0;
757 struct btrfs_key key_in;
758 fsw_size_t chsize;
759 uint64_t chaddr;
760
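        /* First consult the bootstrap chunk mapping copied from the superblock
         * (btrfs' sys_chunk_array): a packed sequence of (key, chunk item,
         * stripe array) records. */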
761 for (ptr = vol->bootstrap_mapping; ptr < vol->bootstrap_mapping + sizeof (vol->bootstrap_mapping) - sizeof (struct btrfs_key);)
762 {
763 key = (struct btrfs_key *) ptr;
764 if (key->type != GRUB_BTRFS_ITEM_TYPE_CHUNK)
765 break;
766 chunk = (struct btrfs_chunk_item *) (key + 1);
767 if (fsw_u64_le_swap (key->offset) <= addr
768 && addr < fsw_u64_le_swap (key->offset)
769 + fsw_u64_le_swap (chunk->size))
770 {
771 goto chunk_found;
772 }
773 ptr += sizeof (*key) + sizeof (*chunk)
774 + sizeof (struct btrfs_chunk_stripe)
775 * fsw_u16_le_swap (chunk->nstripes);
776 }
777
778 key_in.object_id = fsw_u64_le_swap (GRUB_BTRFS_OBJECT_ID_CHUNK);
779 key_in.type = GRUB_BTRFS_ITEM_TYPE_CHUNK;
780 key_in.offset = fsw_u64_le_swap (addr);
781 err = lower_bound (vol, &key_in, &key_out, vol->chunk_tree, &chaddr, &chsize, NULL, rdepth);
782 if (err)
783 return err;
784 key = &key_out;
785 if (key->type != GRUB_BTRFS_ITEM_TYPE_CHUNK
786 || !(fsw_u64_le_swap (key->offset) <= addr))
787 {
788 return FSW_VOLUME_CORRUPTED;
789 }
790             /* couldn't find the chunk descriptor */
791
792 chunk = AllocatePool (chsize);
793 if (!chunk) {
794 return FSW_OUT_OF_MEMORY;
795 }
796
797 challoc = 1;
798 err = fsw_btrfs_read_logical (vol, chaddr, chunk, chsize, rdepth, cache_level < 5 ? cache_level+1 : 5);
799 if (err)
800 {
801 if(chunk)
802 FreePool (chunk);
803 return err;
804 }
805
806 chunk_found:
807 {
808 #ifdef __MAKEWITH_GNUEFI
809 #define UINTREM UINTN
810 #else
811 #undef DivU64x32
812 #define DivU64x32 DivU64x32Remainder
813 #define UINTREM UINT32
814 #endif
815 UINTREM stripen;
816 UINTREM stripe_offset;
817 uint64_t off = addr - fsw_u64_le_swap (key->offset);
818 unsigned redundancy = 1;
819 unsigned i, j;
820
821 if (fsw_u64_le_swap (chunk->size) <= off)
822 {
823 return FSW_VOLUME_CORRUPTED;
824                     /* couldn't find the chunk descriptor */
825 }
826
827             DPRINT(L"btrfs chunk 0x%lx+0x%lx %d stripes (%d substripes) of %lx\n",
828 fsw_u64_le_swap (key->offset),
829 fsw_u64_le_swap (chunk->size),
830 fsw_u16_le_swap (chunk->nstripes),
831 fsw_u16_le_swap (chunk->nsubstripes),
832 fsw_u64_le_swap (chunk->stripe_length));
833
834 /* gnu-efi has no DivU64x64Remainder, limited to DivU64x32 */
835 switch (fsw_u64_le_swap (chunk->type)
836 & ~GRUB_BTRFS_CHUNK_TYPE_BITS_DONTCARE)
837 {
838 case GRUB_BTRFS_CHUNK_TYPE_SINGLE:
839 {
840 uint64_t stripe_length;
841
842 stripe_length = DivU64x32 (fsw_u64_le_swap (chunk->size),
843 fsw_u16_le_swap (chunk->nstripes), NULL);
844
845 if(stripe_length > 1UL<<30)
846 return FSW_VOLUME_CORRUPTED;
847
848 stripen = DivU64x32 (off, (uint32_t)stripe_length, &stripe_offset);
849 csize = (stripen + 1) * stripe_length - off;
850 DPRINT(L"read_logical %d chunk_found single csize=%d\n", __LINE__, csize);
851 break;
852 }
853 case GRUB_BTRFS_CHUNK_TYPE_DUPLICATED:
854 case GRUB_BTRFS_CHUNK_TYPE_RAID1:
855 {
856 stripen = 0;
857 stripe_offset = off;
858 csize = fsw_u64_le_swap (chunk->size) - off;
859 redundancy = 2;
860 DPRINT(L"read_logical %d chunk_found dup/raid1 off=%lx csize=%d\n", __LINE__, stripe_offset, csize);
861 break;
862 }
863 case GRUB_BTRFS_CHUNK_TYPE_RAID0:
864 {
865 uint64_t stripe_length = fsw_u64_le_swap (chunk->stripe_length);
866 uint64_t middle, high;
867 UINTREM low;
868
869 if(stripe_length > 1UL<<30)
870 return FSW_VOLUME_CORRUPTED;
871
872 middle = DivU64x32 (off, (uint32_t)stripe_length, &low);
873
874 high = DivU64x32 (middle, fsw_u16_le_swap (chunk->nstripes), &stripen);
875 stripe_offset =
876 low + fsw_u64_le_swap (chunk->stripe_length) * high;
877 csize = fsw_u64_le_swap (chunk->stripe_length) - low;
878 DPRINT(L"read_logical %d chunk_found raid0 csize=%d\n", __LINE__, csize);
879 break;
880 }
881 case GRUB_BTRFS_CHUNK_TYPE_RAID10:
882 {
883 uint64_t stripe_length = fsw_u64_le_swap (chunk->stripe_length);
884 uint64_t middle, high;
885 UINTREM low;
886
887 if(stripe_length > 1UL<<30)
888 return FSW_VOLUME_CORRUPTED;
889
890 middle = DivU64x32 (off, stripe_length, &low);
891
892 high = DivU64x32 (middle,
893 fsw_u16_le_swap (chunk->nstripes)
894 / fsw_u16_le_swap (chunk->nsubstripes),
895 &stripen);
896 stripen *= fsw_u16_le_swap (chunk->nsubstripes);
897 redundancy = fsw_u16_le_swap (chunk->nsubstripes);
898 stripe_offset = low + fsw_u64_le_swap (chunk->stripe_length)
899 * high;
900 csize = fsw_u64_le_swap (chunk->stripe_length) - low;
901                         DPRINT(L"read_logical %d chunk_found raid10 csize=%d\n", __LINE__, csize);
902 break;
903 }
904 default:
905 DPRINT (L"btrfs: unsupported RAID\n");
906 return FSW_UNSUPPORTED;
907 }
908 if (csize == 0)
909                 /* couldn't find the chunk descriptor */
910 return FSW_VOLUME_CORRUPTED;
911
912 if (csize > (uint64_t) size)
913 csize = size;
914
915 for (j = 0; j < 2; j++)
916 {
917 for (i = 0; i < redundancy; i++)
918 {
919 struct btrfs_chunk_stripe *stripe;
920 uint64_t paddr;
921 struct fsw_volume *dev;
922
923 stripe = (struct btrfs_chunk_stripe *) (chunk + 1);
924 /* Right now the redundancy handling is easy.
925 With RAID5-like it will be more difficult. */
926 stripe += stripen + i;
927
928 paddr = fsw_u64_le_swap (stripe->offset) + stripe_offset;
929
930 DPRINT (L"btrfs: chunk 0x%lx+0x%lx (%d stripes (%d substripes) of %lx) stripe %lx maps to 0x%lx\n",
931 fsw_u64_le_swap (key->offset),
932 fsw_u64_le_swap (chunk->size),
933 fsw_u16_le_swap (chunk->nstripes),
934 fsw_u16_le_swap (chunk->nsubstripes),
935 fsw_u64_le_swap (chunk->stripe_length),
936 stripen, stripe->offset);
937 DPRINT (L"btrfs: reading paddr 0x%lx for laddr 0x%lx\n", paddr, addr);
938
939 dev = find_device (vol, stripe->device_id, j);
940 if (!dev)
941 {
942 err = FSW_VOLUME_CORRUPTED;
943 continue;
944 }
945
946 uint32_t off = paddr & (vol->sectorsize - 1);
947 paddr >>= vol->sectorshift;
948 uint64_t n = 0;
949 while(n < csize) {
950 char *buffer;
951 err = fsw_block_get(dev, paddr, cache_level, (void **)&buffer);
952 if(err)
953 break;
954 int s = vol->sectorsize - off;
955 if(s > csize - n)
956 s = csize - n;
957 fsw_memcpy(buf+n, buffer+off, s);
958 fsw_block_release(dev, paddr, (void *)buffer);
959
960 n += s;
961 off = 0;
962 paddr++;
963 }
964 DPRINT (L"read logical: err %d csize %d got %d\n",
965 err, csize, n);
966 if(n>=csize)
967 break;
968 }
969 if (i != redundancy)
970 break;
971 }
972 if (err)
973 return err;
974 }
975 size -= csize;
976 buf = (uint8_t *) buf + csize;
977 addr += csize;
978 if (challoc && chunk)
979 FreePool (chunk);
980 }
981 return FSW_SUCCESS;
982 }
983
984 static fsw_status_t fsw_btrfs_get_default_root(struct fsw_btrfs_volume *vol, uint64_t root_dir_objectid);
985 static fsw_status_t fsw_btrfs_volume_mount(struct fsw_volume *volg) {
986 struct btrfs_superblock sblock;
987 struct fsw_btrfs_volume *vol = (struct fsw_btrfs_volume *)volg;
988 struct fsw_btrfs_volume *master_out = NULL;
989 struct fsw_string s;
990 fsw_status_t err;
991 int i;
992
993 init_crc32c_table();
994 fsw_memzero((char *)vol+sizeof(*volg), sizeof(*vol)-sizeof(*volg));
995
996 err = btrfs_read_superblock (volg, &sblock);
997 if (err)
998 return err;
999
1000 btrfs_set_superblock_info(vol, &sblock);
1001
1002 if(vol->sectorshift == 0)
1003 return FSW_UNSUPPORTED;
1004
1005 if(vol->num_devices >= BTRFS_MAX_NUM_DEVICES)
1006 return FSW_UNSUPPORTED;
1007
1008 vol->is_master = master_uuid_add(vol, &master_out);
1009 /* already mounted via other device */
1010 if(vol->is_master == 0) {
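        /* Secondary devices of a multi-device filesystem only get a placeholder
         * label and an empty fake root; all real file access goes through the
         * master volume that owns this UUID. */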
1011 #define FAKE_LABEL "btrfs.multi.device"
1012 s.type = FSW_STRING_TYPE_UTF8;
1013 s.size = s.len = sizeof(FAKE_LABEL)-1;
1014 s.data = FAKE_LABEL;
1015 err = fsw_strdup_coerce(&volg->label, volg->host_string_type, &s);
1016 if (err)
1017 return err;
1018 btrfs_add_multi_device(master_out, volg, &sblock);
1019 /* create fake root */
1020 return fsw_dnode_create_root_with_tree(volg, 0, 0, &volg->root);
1021 }
1022
1023 fsw_set_blocksize(volg, vol->sectorsize, vol->sectorsize);
1024 vol->g.bcache_size = BTRFS_INITIAL_BCACHE_SIZE;
1025 vol->n_devices_allocated = vol->num_devices;
1026 vol->devices_attached = AllocatePool (sizeof (vol->devices_attached[0])
1027 * vol->n_devices_allocated);
1028 if (!vol->devices_attached)
1029 return FSW_OUT_OF_MEMORY;
1030
1031 vol->n_devices_attached = 1;
1032 vol->devices_attached[0].dev = volg;
1033 vol->devices_attached[0].id = sblock.this_device.device_id;
1034
1035 for (i = 0; i < 0x100; i++)
1036 if (sblock.label[i] == 0)
1037 break;
1038
1039 s.type = FSW_STRING_TYPE_UTF8;
1040 s.size = s.len = i;
1041 s.data = sblock.label;
1042 err = fsw_strdup_coerce(&volg->label, volg->host_string_type, &s);
1043 if (err) {
1044 FreePool (vol->devices_attached);
1045 vol->devices_attached = NULL;
1046 return err;
1047 }
1048
1049 err = fsw_btrfs_get_default_root(vol, sblock.root_dir_objectid);
1050 if (err) {
1051 DPRINT(L"root not found\n");
1052 FreePool (vol->devices_attached);
1053 vol->devices_attached = NULL;
1054 return err;
1055 }
1056
1057 return FSW_SUCCESS;
1058 }
1059
1060 static void fsw_btrfs_volume_free(struct fsw_volume *volg)
1061 {
1062 unsigned i;
1063 struct fsw_btrfs_volume *vol = (struct fsw_btrfs_volume *)volg;
1064
1065 if (vol==NULL)
1066 return;
1067
1068 if (vol->is_master)
1069 master_uuid_remove(vol);
1070
1071     /* Device 0 is closed by the layer above. */
1072 for (i = 1; i < vol->n_devices_attached; i++)
1073 fsw_unmount (vol->devices_attached[i].dev);
1074 if(vol->devices_attached)
1075 FreePool (vol->devices_attached);
1076 if(vol->extent)
1077 FreePool (vol->extent);
1078 }
1079
1080 static fsw_status_t fsw_btrfs_volume_stat(struct fsw_volume *volg, struct fsw_volume_stat *sb)
1081 {
1082 struct fsw_btrfs_volume *vol = (struct fsw_btrfs_volume *)volg;
1083 sb->total_bytes = vol->total_bytes;
1084     sb->free_bytes = vol->total_bytes - vol->bytes_used;
1085 return FSW_SUCCESS;
1086 }
1087
1088 static fsw_status_t fsw_btrfs_read_inode (struct fsw_btrfs_volume *vol,
1089 struct btrfs_inode *inode, uint64_t num,
1090 uint64_t tree)
1091 {
1092 struct btrfs_key key_in, key_out;
1093 uint64_t elemaddr;
1094 fsw_size_t elemsize;
1095 fsw_status_t err;
1096
1097 key_in.object_id = num;
1098 key_in.type = GRUB_BTRFS_ITEM_TYPE_INODE_ITEM;
1099 key_in.offset = 0;
1100
1101 err = lower_bound (vol, &key_in, &key_out, tree, &elemaddr, &elemsize, NULL, 0);
1102 if (err)
1103 return err;
1104 if (num != key_out.object_id
1105 || key_out.type != GRUB_BTRFS_ITEM_TYPE_INODE_ITEM)
1106 return FSW_NOT_FOUND;
1107
1108 return fsw_btrfs_read_logical (vol, elemaddr, inode, sizeof (*inode), 0, 2);
1109 }
1110
1111 static fsw_status_t fsw_btrfs_dnode_fill(struct fsw_volume *volg, struct fsw_dnode *dnog)
1112 {
1113 struct fsw_btrfs_volume *vol = (struct fsw_btrfs_volume *)volg;
1114 struct fsw_btrfs_dnode *dno = (struct fsw_btrfs_dnode *)dnog;
1115 fsw_status_t err;
1116 uint32_t mode;
1117
1118     /* slave devices expose only an empty root */
1119 if (!vol->is_master) {
1120 dno->g.size = 0;
1121 dno->g.type = FSW_DNODE_TYPE_DIR;
1122 return FSW_SUCCESS;
1123 }
1124
1125 if (dno->raw)
1126 return FSW_SUCCESS;
1127
1128 dno->raw = AllocatePool(sizeof(struct btrfs_inode));
1129 if(dno->raw == NULL)
1130 return FSW_OUT_OF_MEMORY;
1131
1132 err = fsw_btrfs_read_inode(vol, dno->raw, dno->g.dnode_id, dno->g.tree_id);
1133 if (err) {
1134 FreePool(dno->raw);
1135 dno->raw = NULL;
1136 return err;
1137 }
1138
1139 // get info from the inode
1140 dno->g.size = fsw_u64_le_swap(dno->raw->size);
1141 // TODO: check docs for 64-bit sized files
1142 mode = fsw_u32_le_swap(dno->raw->mode);
1143 if (S_ISREG(mode))
1144 dno->g.type = FSW_DNODE_TYPE_FILE;
1145 else if (S_ISDIR(mode))
1146 dno->g.type = FSW_DNODE_TYPE_DIR;
1147 else if (S_ISLNK(mode))
1148 dno->g.type = FSW_DNODE_TYPE_SYMLINK;
1149 else
1150 dno->g.type = FSW_DNODE_TYPE_SPECIAL;
1151
1152 return FSW_SUCCESS;
1153 }
1154
1155 static void fsw_btrfs_dnode_free(struct fsw_volume *volg, struct fsw_dnode *dnog)
1156 {
1157 struct fsw_btrfs_dnode *dno = (struct fsw_btrfs_dnode *)dnog;
1158 if (dno->raw)
1159 FreePool(dno->raw);
1160 }
1161
1162 static fsw_status_t fsw_btrfs_dnode_stat(struct fsw_volume *volg, struct fsw_dnode *dnog, struct fsw_dnode_stat *sb)
1163 {
1164 struct fsw_btrfs_dnode *dno = (struct fsw_btrfs_dnode *)dnog;
1165
1166     /* slave devices expose only an empty root */
1167 if(dno->raw == NULL) {
1168 sb->used_bytes = 0;
1169 sb->store_time_posix(sb, FSW_DNODE_STAT_CTIME, 0);
1170 sb->store_time_posix(sb, FSW_DNODE_STAT_ATIME, 0);
1171 sb->store_time_posix(sb, FSW_DNODE_STAT_MTIME, 0);
1172 return FSW_SUCCESS;
1173 }
1174 sb->used_bytes = fsw_u64_le_swap(dno->raw->nbytes);
1175 sb->store_time_posix(sb, FSW_DNODE_STAT_ATIME,
1176 fsw_u64_le_swap(dno->raw->atime.sec));
1177 sb->store_time_posix(sb, FSW_DNODE_STAT_CTIME,
1178 fsw_u64_le_swap(dno->raw->ctime.sec));
1179 sb->store_time_posix(sb, FSW_DNODE_STAT_MTIME,
1180 fsw_u64_le_swap(dno->raw->mtime.sec));
1181 sb->store_attr_posix(sb, fsw_u32_le_swap(dno->raw->mode));
1182
1183 return FSW_SUCCESS;
1184 }
1185
1186 static fsw_ssize_t grub_btrfs_lzo_decompress(char *ibuf, fsw_size_t isize, grub_off_t off,
1187 char *obuf, fsw_size_t osize)
1188 {
1189 uint32_t total_size, cblock_size;
1190 fsw_size_t ret = 0;
1191 unsigned char buf[GRUB_BTRFS_LZO_BLOCK_SIZE];
1192 char *ibuf0 = ibuf;
1193
1194 #define fsw_get_unaligned32(x) (*(uint32_t *)(x))
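/* NOTE: this plain dereference assumes the target CPU tolerates unaligned
 * 32-bit loads; a byte-wise load would be needed on stricter architectures. */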
1195 total_size = fsw_u32_le_swap (fsw_get_unaligned32(ibuf));
1196 ibuf += sizeof (total_size);
1197
1198 if (isize < total_size)
1199 return -1;
1200
1201 /* Jump forward to first block with requested data. */
1202 while (off >= GRUB_BTRFS_LZO_BLOCK_SIZE)
1203 {
1204 /* Don't let following uint32_t cross the page boundary. */
1205 if (((ibuf - ibuf0) & 0xffc) == 0xffc)
1206 ibuf = ((ibuf - ibuf0 + 3) & ~3) + ibuf0;
1207
1208 cblock_size = fsw_u32_le_swap (fsw_get_unaligned32 (ibuf));
1209 ibuf += sizeof (cblock_size);
1210
1211 if (cblock_size > GRUB_BTRFS_LZO_BLOCK_MAX_CSIZE)
1212 return -1;
1213
1214 off -= GRUB_BTRFS_LZO_BLOCK_SIZE;
1215 ibuf += cblock_size;
1216 }
1217
1218 while (osize > 0)
1219 {
1220 lzo_uint usize = GRUB_BTRFS_LZO_BLOCK_SIZE;
1221
1222 /* Don't let following uint32_t cross the page boundary. */
1223 if (((ibuf - ibuf0) & 0xffc) == 0xffc)
1224 ibuf = ((ibuf - ibuf0 + 3) & ~3) + ibuf0;
1225
1226 cblock_size = fsw_u32_le_swap (fsw_get_unaligned32 (ibuf));
1227 ibuf += sizeof (cblock_size);
1228
1229 if (cblock_size > GRUB_BTRFS_LZO_BLOCK_MAX_CSIZE)
1230 return -1;
1231
1232 /* Block partially filled with requested data. */
1233 if (off > 0 || osize < GRUB_BTRFS_LZO_BLOCK_SIZE)
1234 {
1235 fsw_size_t to_copy = GRUB_BTRFS_LZO_BLOCK_SIZE - off;
1236
1237 if (to_copy > osize)
1238 to_copy = osize;
1239
1240 if (lzo1x_decompress_safe ((lzo_bytep)ibuf, cblock_size, (lzo_bytep)buf, &usize, NULL) != 0)
1241 return -1;
1242
1243 if (to_copy > usize)
1244 to_copy = usize;
1245 fsw_memcpy(obuf, buf + off, to_copy);
1246
1247 osize -= to_copy;
1248 ret += to_copy;
1249 obuf += to_copy;
1250 ibuf += cblock_size;
1251 off = 0;
1252 continue;
1253 }
1254
1255 /* Decompress whole block directly to output buffer. */
1256 if (lzo1x_decompress_safe ((lzo_bytep)ibuf, cblock_size, (lzo_bytep)obuf, &usize, NULL) != 0)
1257 return -1;
1258
1259 osize -= usize;
1260 ret += usize;
1261 obuf += usize;
1262 ibuf += cblock_size;
1263 }
1264
1265 return ret;
1266 }
1267
1268 static fsw_status_t fsw_btrfs_get_extent(struct fsw_volume *volg, struct fsw_dnode *dnog,
1269 struct fsw_extent *extent)
1270 {
1271 struct fsw_btrfs_volume *vol = (struct fsw_btrfs_volume *)volg;
1272 uint64_t ino = dnog->dnode_id;
1273 uint64_t tree = dnog->tree_id;
1274 uint64_t pos0 = extent->log_start << vol->sectorshift;
1275 extent->type = FSW_EXTENT_TYPE_INVALID;
1276 extent->log_count = 1;
1277 uint64_t pos = pos0;
1278 fsw_size_t csize;
1279 fsw_status_t err;
1280 uint64_t extoff;
1281 char *buf = NULL;
1282 uint64_t count;
1283
1284     /* slave devices expose only an empty root */
1285 if (!vol->is_master)
1286 return FSW_NOT_FOUND;
1287
1288 if (!vol->extent || vol->extstart > pos || vol->extino != ino
1289 || vol->exttree != tree || vol->extend <= pos)
1290 {
1291 struct btrfs_key key_in, key_out;
1292 uint64_t elemaddr;
1293 fsw_size_t elemsize;
1294
1295 if(vol->extent) {
1296 FreePool (vol->extent);
1297 vol->extent = NULL;
1298 }
1299 key_in.object_id = ino;
1300 key_in.type = GRUB_BTRFS_ITEM_TYPE_EXTENT_ITEM;
1301 key_in.offset = fsw_u64_le_swap (pos);
1302 err = lower_bound (vol, &key_in, &key_out, tree, &elemaddr, &elemsize, NULL, 0);
1303 if (err)
1304 return FSW_VOLUME_CORRUPTED;
1305 if (key_out.object_id != ino
1306 || key_out.type != GRUB_BTRFS_ITEM_TYPE_EXTENT_ITEM)
1307 {
1308 return FSW_VOLUME_CORRUPTED;
1309 }
1310 if ((fsw_ssize_t) elemsize < ((char *) &vol->extent->inl
1311 - (char *) vol->extent))
1312 {
1313 return FSW_VOLUME_CORRUPTED;
1314 }
1315 vol->extstart = fsw_u64_le_swap (key_out.offset);
1316 vol->extsize = elemsize;
1317 vol->extent = AllocatePool (elemsize);
1318 vol->extino = ino;
1319 vol->exttree = tree;
1320 if (!vol->extent)
1321 return FSW_OUT_OF_MEMORY;
1322
1323 err = fsw_btrfs_read_logical (vol, elemaddr, vol->extent, elemsize, 0, 1);
1324 if (err)
1325 return err;
1326
1327 vol->extend = vol->extstart + fsw_u64_le_swap (vol->extent->size);
1328 if (vol->extent->type == GRUB_BTRFS_EXTENT_REGULAR
1329             && (char *) vol->extent + elemsize
1330 >= (char *) &vol->extent->filled + sizeof (vol->extent->filled))
1331 vol->extend =
1332 vol->extstart + fsw_u64_le_swap (vol->extent->filled);
1333
1334 DPRINT (L"btrfs: %lx +0x%lx\n", fsw_u64_le_swap (key_out.offset), fsw_u64_le_swap (vol->extent->size));
1335 if (vol->extend <= pos)
1336 {
1337 return FSW_VOLUME_CORRUPTED;
1338 }
1339 }
1340
1341 csize = vol->extend - pos;
1342 extoff = pos - vol->extstart;
1343
1344 if (vol->extent->encryption ||vol->extent->encoding)
1345 {
1346 return FSW_UNSUPPORTED;
1347 }
1348
1349 switch(vol->extent->compression) {
1350 case GRUB_BTRFS_COMPRESSION_LZO:
1351 case GRUB_BTRFS_COMPRESSION_ZLIB:
1352 case GRUB_BTRFS_COMPRESSION_NONE:
1353 break;
1354 default:
1355 return FSW_UNSUPPORTED;
1356 }
1357
1358 count = ( csize + vol->sectorsize - 1) >> vol->sectorshift;
1359 switch (vol->extent->type)
1360 {
1361 case GRUB_BTRFS_EXTENT_INLINE:
1362 buf = AllocatePool( count << vol->sectorshift);
1363 if(!buf)
1364 return FSW_OUT_OF_MEMORY;
1365 if (vol->extent->compression == GRUB_BTRFS_COMPRESSION_ZLIB)
1366 {
1367 if (grub_zlib_decompress (vol->extent->inl, vol->extsize -
1368 ((uint8_t *) vol->extent->inl
1369 - (uint8_t *) vol->extent),
1370 extoff, buf, csize)
1371 != (fsw_ssize_t) csize)
1372 {
1373 FreePool(buf);
1374 return FSW_VOLUME_CORRUPTED;
1375 }
1376 }
1377 else if (vol->extent->compression == GRUB_BTRFS_COMPRESSION_LZO)
1378 {
1379 if (grub_btrfs_lzo_decompress(vol->extent->inl, vol->extsize -
1380 ((uint8_t *) vol->extent->inl
1381 - (uint8_t *) vol->extent),
1382 extoff, buf, csize)
1383 != (fsw_ssize_t) csize)
1384 {
1385 FreePool(buf);
1386                     return FSW_VOLUME_CORRUPTED;
1387 }
1388 }
1389 else
1390 fsw_memcpy (buf, vol->extent->inl + extoff, csize);
1391 break;
1392
1393 case GRUB_BTRFS_EXTENT_REGULAR:
1394 if (!vol->extent->laddr)
1395 break;
1396
1397 if (vol->extent->compression == GRUB_BTRFS_COMPRESSION_NONE)
1398 {
1399 if( count > 64 ) {
1400 count = 64;
1401 csize = count << vol->sectorshift;
1402 }
1403 buf = AllocatePool( count << vol->sectorshift);
1404 if(!buf)
1405 return FSW_OUT_OF_MEMORY;
1406 err = fsw_btrfs_read_logical (vol,
1407 fsw_u64_le_swap (vol->extent->laddr)
1408 + fsw_u64_le_swap (vol->extent->offset)
1409 + extoff, buf, csize, 0, 0);
1410 if (err) {
1411 FreePool(buf);
1412 return err;
1413 }
1414 break;
1415 }
1416 if (vol->extent->compression != GRUB_BTRFS_COMPRESSION_NONE)
1417 {
1418 char *tmp;
1419 uint64_t zsize;
1420 fsw_ssize_t ret;
1421
1422 zsize = fsw_u64_le_swap (vol->extent->compressed_size);
1423 tmp = AllocatePool (zsize);
1424 if (!tmp)
1425                 return FSW_OUT_OF_MEMORY;
1426 err = fsw_btrfs_read_logical (vol, fsw_u64_le_swap (vol->extent->laddr), tmp, zsize, 0, 0);
1427 if (err)
1428 {
1429 FreePool (tmp);
1430                     return FSW_VOLUME_CORRUPTED;
1431 }
1432
1433 buf = AllocatePool( count << vol->sectorshift);
1434 if(!buf) {
1435 FreePool(tmp);
1436 return FSW_OUT_OF_MEMORY;
1437 }
1438
1439 if (vol->extent->compression == GRUB_BTRFS_COMPRESSION_ZLIB)
1440 {
1441 ret = grub_zlib_decompress (tmp, zsize, extoff
1442 + fsw_u64_le_swap (vol->extent->offset),
1443 buf, csize);
1444 }
1445 else if (vol->extent->compression == GRUB_BTRFS_COMPRESSION_LZO)
1446 ret = grub_btrfs_lzo_decompress (tmp, zsize, extoff
1447 + fsw_u64_le_swap (vol->extent->offset),
1448 buf, csize);
1449 else
1450 ret = -1;
1451
1452 FreePool (tmp);
1453
1454 if (ret != (fsw_ssize_t) csize) {
1455                 FreePool(buf); /* tmp was already freed above; release the output buffer instead */
1456                 return FSW_VOLUME_CORRUPTED;
1457 }
1458
1459 break;
1460 }
1461 break;
1462 default:
1463             return FSW_VOLUME_CORRUPTED;
1464 }
1465
1466 extent->log_count = count;
1467 if(buf) {
1468 if(csize < (count << vol->sectorshift))
1469 fsw_memzero( buf + csize, (count << vol->sectorshift) - csize);
1470 extent->buffer = buf;
1471 extent->type = FSW_EXTENT_TYPE_BUFFER;
1472 } else {
1473 extent->buffer = NULL;
1474 extent->type = FSW_EXTENT_TYPE_SPARSE;
1475 }
1476 return FSW_SUCCESS;
1477 }
1478
1479 static fsw_status_t fsw_btrfs_readlink(struct fsw_volume *volg, struct fsw_dnode *dnog,
1480 struct fsw_string *link_target)
1481 {
1482 struct fsw_btrfs_volume *vol = (struct fsw_btrfs_volume *)volg;
1483 struct fsw_btrfs_dnode *dno = (struct fsw_btrfs_dnode *)dnog;
1484 int i;
1485 fsw_status_t status;
1486 struct fsw_string s;
1487 char *tmp;
1488
1489 if (dno->g.size > FSW_PATH_MAX)
1490 return FSW_VOLUME_CORRUPTED;
1491
1492 tmp = AllocatePool(dno->g.size);
1493 if(!tmp)
1494 return FSW_OUT_OF_MEMORY;
1495
1496 i = 0;
1497 do {
1498 struct fsw_extent extent;
1499 int size;
1500         extent.log_start = i; extent.buffer = NULL; /* buffer is not set by get_extent on error */
1501 status = fsw_btrfs_get_extent(volg, dnog, &extent);
1502 if(status || extent.type != FSW_EXTENT_TYPE_BUFFER) {
1503 FreePool(tmp);
1504 if(extent.buffer)
1505 FreePool(extent.buffer);
1506 return FSW_VOLUME_CORRUPTED;
1507 }
1508 size = extent.log_count << vol->sectorshift;
1509 if(size > (dno->g.size - (i<<vol->sectorshift)))
1510 size = dno->g.size - (i<<vol->sectorshift);
1511 fsw_memcpy(tmp + (i<<vol->sectorshift), extent.buffer, size);
1512 FreePool(extent.buffer);
1513 i += extent.log_count;
1514 } while( (i << vol->sectorshift) < dno->g.size);
1515
1516 s.type = FSW_STRING_TYPE_UTF8;
1517 s.size = s.len = (int)dno->g.size;
1518 s.data = tmp;
1519 status = fsw_strdup_coerce(link_target, volg->host_string_type, &s);
1520 FreePool(tmp);
1521
1522     return status;
1523 }
1524
1525 static fsw_status_t fsw_btrfs_lookup_dir_item(struct fsw_btrfs_volume *vol,
1526 uint64_t tree_id, uint64_t object_id,
1527 struct fsw_string *lookup_name,
1528 struct btrfs_dir_item **direl_buf,
1529 struct btrfs_dir_item **direl_out
1530 )
1531 {
1532 uint64_t elemaddr;
1533 fsw_size_t elemsize;
1534 fsw_size_t allocated = 0;
1535 struct btrfs_key key;
1536 struct btrfs_key key_out;
1537 struct btrfs_dir_item *cdirel;
1538 fsw_status_t err;
1539
1540 *direl_buf = NULL;
1541
1542 key.object_id = object_id;
1543 key.type = GRUB_BTRFS_ITEM_TYPE_DIR_ITEM;
1544 key.offset = fsw_u64_le_swap (~grub_getcrc32c (1, lookup_name->data, lookup_name->size));
1545
1546 err = lower_bound (vol, &key, &key_out, tree_id, &elemaddr, &elemsize, NULL, 0);
1547 if (err)
1548 return err;
1549
1550 if (key_cmp (&key, &key_out) != 0)
1551 return FSW_NOT_FOUND;
1552
1553 if (elemsize > allocated)
1554 {
1555 allocated = 2 * elemsize;
1556 if(*direl_buf)
1557 FreePool (*direl_buf);
1558 *direl_buf = AllocatePool (allocated + 1);
1559 if (!*direl_buf)
1560 return FSW_OUT_OF_MEMORY;
1561 }
1562
1563 err = fsw_btrfs_read_logical (vol, elemaddr, *direl_buf, elemsize, 0, 1);
1564 if (err)
1565 return err;
1566
1567 for (cdirel = *direl_buf;
1568 (uint8_t *) cdirel - (uint8_t *) *direl_buf < (fsw_ssize_t) elemsize;
1569 cdirel = (void *) ((uint8_t *) (*direl_buf + 1)
1570 + fsw_u16_le_swap (cdirel->n)
1571 + fsw_u16_le_swap (cdirel->m)))
1572 {
1573 if (lookup_name->size == fsw_u16_le_swap (cdirel->n)
1574 && fsw_memeq (cdirel->name, lookup_name->data, lookup_name->size))
1575 break;
1576 }
1577 if ((uint8_t *) cdirel - (uint8_t *) *direl_buf >= (fsw_ssize_t) elemsize)
1578 return FSW_NOT_FOUND;
1579
1580 *direl_out = cdirel;
1581 return FSW_SUCCESS;
1582 }
1583
1584 static fsw_status_t fsw_btrfs_get_root_tree(
1585 struct fsw_btrfs_volume *vol,
1586 struct btrfs_key *key_in,
1587 uint64_t *tree_out)
1588 {
1589 fsw_status_t err;
1590 struct btrfs_root_item ri;
1591 struct btrfs_key key_out;
1592 uint64_t elemaddr;
1593 fsw_size_t elemsize;
1594
1595 err = lower_bound (vol, key_in, &key_out, vol->root_tree, &elemaddr, &elemsize, NULL, 0);
1596 if (err)
1597 return err;
1598
1599 if (key_in->object_id != key_out.object_id || key_in->type != key_out.type)
1600 return FSW_NOT_FOUND;
1601
1602 err = fsw_btrfs_read_logical (vol, elemaddr, &ri, sizeof (ri), 0, 1);
1603 if (err)
1604 return err;
1605
1606 *tree_out = ri.tree;
1607 return FSW_SUCCESS;
1608 }
1609
1610 static fsw_status_t fsw_btrfs_get_sub_dnode(
1611 struct fsw_btrfs_volume *vol,
1612 struct fsw_btrfs_dnode *dno,
1613 struct btrfs_dir_item *cdirel,
1614 struct fsw_string *name,
1615 struct fsw_dnode **child_dno_out)
1616 {
1617 fsw_status_t err;
1618 int child_type;
1619 uint64_t tree_id = dno->g.tree_id;
1620 uint64_t child_id;
1621
1622 switch (cdirel->key.type)
1623 {
1624 case GRUB_BTRFS_ITEM_TYPE_ROOT_ITEM:
1625 err = fsw_btrfs_get_root_tree (vol, &cdirel->key, &tree_id);
1626 if (err)
1627 return err;
1628
1629 child_type = GRUB_BTRFS_DIR_ITEM_TYPE_DIRECTORY;
1630 child_id = fsw_u64_le_swap(GRUB_BTRFS_OBJECT_ID_CHUNK);
1631 break;
1632 case GRUB_BTRFS_ITEM_TYPE_INODE_ITEM:
1633 child_type = cdirel->type;
1634 child_id = cdirel->key.object_id;
1635 break;
1636
1637 default:
1638 DPRINT (L"btrfs: unrecognised object type 0x%x", cdirel->key.type);
1639 return FSW_VOLUME_CORRUPTED;
1640 }
1641
1642 switch(child_type) {
1643 case GRUB_BTRFS_DIR_ITEM_TYPE_REGULAR:
1644 child_type = FSW_DNODE_TYPE_FILE;
1645 break;
1646 case GRUB_BTRFS_DIR_ITEM_TYPE_DIRECTORY:
1647 child_type = FSW_DNODE_TYPE_DIR;
1648 break;
1649 case GRUB_BTRFS_DIR_ITEM_TYPE_SYMLINK:
1650 child_type = FSW_DNODE_TYPE_SYMLINK;
1651 break;
1652 default:
1653 child_type = FSW_DNODE_TYPE_SPECIAL;
1654 break;
1655 }
1656 return fsw_dnode_create_with_tree(&dno->g, tree_id, child_id, child_type, name, child_dno_out);
1657 }
1658
1659 static fsw_status_t fsw_btrfs_dir_lookup(struct fsw_volume *volg, struct fsw_dnode *dnog,
1660 struct fsw_string *lookup_name, struct fsw_dnode **child_dno_out)
1661 {
1662 struct fsw_btrfs_volume *vol = (struct fsw_btrfs_volume *)volg;
1663 struct fsw_btrfs_dnode *dno = (struct fsw_btrfs_dnode *)dnog;
1664 fsw_status_t err;
1665 struct fsw_string s;
1666
1667 *child_dno_out = NULL;
1668
1669     /* slave devices expose only an empty root */
1670 if (!vol->is_master)
1671 return FSW_NOT_FOUND;
1672
1673 err = fsw_strdup_coerce(&s, FSW_STRING_TYPE_UTF8, lookup_name);
1674 if(err)
1675 return err;
1676
1677 /* treat '...' under root as top root */
1678 if(dnog == volg->root && s.size == 3 && ((char *)s.data)[0]=='.' && ((char *)s.data)[1]=='.' && ((char *)s.data)[2]=='.')
1679 {
1680 fsw_strfree (&s);
1681 if(dnog->tree_id == vol->top_tree) {
1682 fsw_dnode_retain(dnog);
1683 *child_dno_out = dnog;
1684 return FSW_SUCCESS;
1685 }
1686 return fsw_dnode_create_with_tree(dnog,
1687 vol->top_tree, fsw_u64_le_swap(GRUB_BTRFS_OBJECT_ID_CHUNK),
1688 FSW_DNODE_TYPE_DIR, lookup_name, child_dno_out);
1689 }
1690 struct btrfs_dir_item *direl=NULL, *cdirel;
1691 err = fsw_btrfs_lookup_dir_item(vol, dnog->tree_id, dnog->dnode_id, &s, &direl, &cdirel);
1692 if(!err)
1693 err = fsw_btrfs_get_sub_dnode(vol, dno, cdirel, lookup_name, child_dno_out);
1694 if(direl)
1695 FreePool (direl);
1696 fsw_strfree (&s);
1697 return err;
1698 }
1699
1700 static fsw_status_t fsw_btrfs_get_default_root(struct fsw_btrfs_volume *vol, uint64_t root_dir_objectid)
1701 {
1702 fsw_status_t err;
1703 struct fsw_string s;
1704 struct btrfs_dir_item *direl=NULL, *cdirel;
1705 uint64_t default_tree_id = 0;
1706 struct btrfs_key top_root_key;
1707
1708     /* Get the id of the top-level FS tree (root item with object id 5) */
1709 top_root_key.object_id = fsw_u64_le_swap(5UL);
1710 top_root_key.type = GRUB_BTRFS_ITEM_TYPE_ROOT_ITEM;
1711 top_root_key.offset = -1LL;
1712 err = fsw_btrfs_get_root_tree (vol, &top_root_key, &vol->top_tree);
1713 if (err)
1714 return err;
1715
1716 s.type = FSW_STRING_TYPE_UTF8;
1717 s.data = "default";
1718 s.size = 7;
1719 err = fsw_btrfs_lookup_dir_item(vol, vol->root_tree, root_dir_objectid, &s, &direl, &cdirel);
1720
1721     /* if looking up "default" fails or the entry is invalid, fall back to the top tree */
1722 if (err || /* failed */
1723 cdirel->type != GRUB_BTRFS_DIR_ITEM_TYPE_DIRECTORY || /* not dir */
1724 cdirel->key.type != GRUB_BTRFS_ITEM_TYPE_ROOT_ITEM || /* not tree */
1725 cdirel->key.object_id == fsw_u64_le_swap(5UL) || /* same as top */
1726 (err = fsw_btrfs_get_root_tree (vol, &cdirel->key, &default_tree_id)))
1727         { default_tree_id = vol->top_tree; err = FSW_SUCCESS; /* the fallback is not an error */ }
1728
1729 if (!err)
1730 err = fsw_dnode_create_root_with_tree(&vol->g, default_tree_id,
1731 fsw_u64_le_swap (GRUB_BTRFS_OBJECT_ID_CHUNK), &vol->g.root);
1732 if (direl)
1733 FreePool (direl);
1734 return err;
1735 }
1736
1737 static fsw_status_t fsw_btrfs_dir_read(struct fsw_volume *volg, struct fsw_dnode *dnog,
1738 struct fsw_shandle *shand, struct fsw_dnode **child_dno_out)
1739 {
1740 struct fsw_btrfs_volume *vol = (struct fsw_btrfs_volume *)volg;
1741 struct fsw_btrfs_dnode *dno = (struct fsw_btrfs_dnode *)dnog;
1742 fsw_status_t err;
1743
1744 struct btrfs_key key_in, key_out;
1745 uint64_t elemaddr;
1746 fsw_size_t elemsize;
1747 fsw_size_t allocated = 0;
1748 struct btrfs_dir_item *direl = NULL;
1749 struct fsw_btrfs_leaf_descriptor desc;
1750 int r = 0;
1751 uint64_t tree = dnog->tree_id;
1752
1753     /* slave devices expose only an empty root */
1754 if (!vol->is_master)
1755 return FSW_NOT_FOUND;
1756
1757 key_in.object_id = dnog->dnode_id;
1758 key_in.type = GRUB_BTRFS_ITEM_TYPE_DIR_ITEM;
1759 key_in.offset = shand->pos;
1760
1761 if((int64_t)key_in.offset == -1LL)
1762 {
1763 return FSW_NOT_FOUND;
1764 }
1765
1766 err = lower_bound (vol, &key_in, &key_out, tree, &elemaddr, &elemsize, &desc, 0);
1767 if (err) {
1768 return err;
1769 }
1770
1771 DPRINT(L"key_in %lx:%x:%lx out %lx:%x:%lx elem %lx+%lx\n",
1772 key_in.object_id, key_in.type, key_in.offset,
1773 key_out.object_id, key_out.type, key_out.offset,
1774 elemaddr, elemsize);
1775 if (key_out.type != GRUB_BTRFS_ITEM_TYPE_DIR_ITEM ||
1776 key_out.object_id != key_in.object_id)
1777 {
1778 r = next (vol, &desc, &elemaddr, &elemsize, &key_out);
1779 if (r <= 0)
1780 goto out;
1781 DPRINT(L"next out %lx:%x:%lx\n",
1782 key_out.object_id, key_out.type, key_out.offset, elemaddr, elemsize);
1783 }
1784 if (key_out.type == GRUB_BTRFS_ITEM_TYPE_DIR_ITEM &&
1785 key_out.object_id == key_in.object_id &&
1786 fsw_u64_le_swap(key_out.offset) <= fsw_u64_le_swap(key_in.offset))
1787 {
1788 r = next (vol, &desc, &elemaddr, &elemsize, &key_out);
1789 if (r <= 0)
1790 goto out;
1791 DPRINT(L"next out %lx:%x:%lx\n",
1792 key_out.object_id, key_out.type, key_out.offset, elemaddr, elemsize);
1793 }
1794
1795 do
1796 {
1797 struct btrfs_dir_item *cdirel;
1798 if (key_out.type != GRUB_BTRFS_ITEM_TYPE_DIR_ITEM ||
1799 key_out.object_id != key_in.object_id)
1800 {
1801 r = 0;
1802 break;
1803 }
1804 if (elemsize > allocated)
1805 {
1806 allocated = 2 * elemsize;
1807 if(direl)
1808 FreePool (direl);
1809 direl = AllocatePool (allocated + 1);
1810 if (!direl)
1811 {
1812 r = -FSW_OUT_OF_MEMORY;
1813 break;
1814 }
1815 }
1816
1817 err = fsw_btrfs_read_logical (vol, elemaddr, direl, elemsize, 0, 1);
1818 if (err)
1819 {
1820 r = -err;
1821 break;
1822 }
1823
1824 for (cdirel = direl;
1825 (uint8_t *) cdirel - (uint8_t *) direl
1826 < (fsw_ssize_t) elemsize;
1827 cdirel = (void *) ((uint8_t *) (direl + 1)
1828 + fsw_u16_le_swap (cdirel->n)
1829 + fsw_u16_le_swap (cdirel->m)))
1830 {
1831 struct fsw_string s;
1832 s.type = FSW_STRING_TYPE_UTF8;
1833 s.size = s.len = fsw_u16_le_swap (cdirel->n);
1834 s.data = cdirel->name;
1835                 DPRINT(L"item key %lx:%x:%lx, type %lx, namelen=%lx\n",
1836 cdirel->key.object_id, cdirel->key.type, cdirel->key.offset, cdirel->type, s.size);
1837 if(!err) {
1838 err = fsw_btrfs_get_sub_dnode(vol, dno, cdirel, &s, child_dno_out);
1839 if(direl)
1840 FreePool (direl);
1841 free_iterator (&desc);
1842 shand->pos = key_out.offset;
1843 return FSW_SUCCESS;
1844 }
1845 }
1846 r = next (vol, &desc, &elemaddr, &elemsize, &key_out);
1847 DPRINT(L"next2 out %lx:%x:%lx\n",
1848 key_out.object_id, key_out.type, key_out.offset, elemaddr, elemsize);
1849 }
1850 while (r > 0);
1851
1852 out:
1853 if(direl)
1854 FreePool (direl);
1855 free_iterator (&desc);
1856
1857 r = r < 0 ? -r : FSW_NOT_FOUND;
1858 return r;
1859 }
1860
1861 //
1862 // Dispatch Table
1863 //
1864
1865 struct fsw_fstype_table FSW_FSTYPE_TABLE_NAME(btrfs) = {
1866     { FSW_STRING_TYPE_UTF8, 5, 5, "btrfs" },
1867 sizeof(struct fsw_btrfs_volume),
1868 sizeof(struct fsw_btrfs_dnode),
1869
1870 fsw_btrfs_volume_mount,
1871 fsw_btrfs_volume_free,
1872 fsw_btrfs_volume_stat,
1873 fsw_btrfs_dnode_fill,
1874 fsw_btrfs_dnode_free,
1875 fsw_btrfs_dnode_stat,
1876 fsw_btrfs_get_extent,
1877 fsw_btrfs_dir_lookup,
1878 fsw_btrfs_dir_read,
1879 fsw_btrfs_readlink,
1880 };
1881