/*
 * Copyright (c) 2013 Tencent, Inc.
 *
 * This driver is based on the GRUB 2.0 btrfs implementation.
 */

/* btrfs.c - B-tree file system. */
/*
 * GRUB -- GRand Unified Bootloader
 * Copyright (C) 2010 Free Software Foundation, Inc.
 *
 * GRUB is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * GRUB is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GRUB. If not, see <http://www.gnu.org/licenses/>.
 */
29 //#define DPRINT(x...) Print(x)
32 #define uint8_t fsw_u8
33 #define uint16_t fsw_u16
34 #define uint32_t fsw_u32
35 #define uint64_t fsw_u64
36 #define int64_t fsw_s64
37 #define int32_t fsw_s32
40 #define DPRINT(x...) /* */
43 /* no single io/element size over 2G */
44 #define fsw_size_t int
45 #define fsw_ssize_t int
46 /* never zip over 2G, 32bit is enough */
47 #define grub_off_t int32_t
48 #define grub_size_t int32_t
49 #define grub_ssize_t int32_t
52 #define MINILZO_CFG_SKIP_LZO_PTR 1
53 #define MINILZO_CFG_SKIP_LZO_UTIL 1
54 #define MINILZO_CFG_SKIP_LZO_STRING 1
55 #define MINILZO_CFG_SKIP_LZO_INIT 1
56 #define MINILZO_CFG_SKIP_LZO1X_DECOMPRESS 1
57 #define MINILZO_CFG_SKIP_LZO1X_1_COMPRESS 1
58 #define MINILZO_CFG_SKIP_LZO_STRING 1
62 #define BTRFS_DEFAULT_BLOCK_SIZE 4096
63 //#define BTRFS_DEFAULT_BLOCK_SIZE 8192
64 #define BTRFS_INITIAL_BCACHE_SIZE 1024
65 #define GRUB_BTRFS_SIGNATURE "_BHRfS_M"
67 /* From http://www.oberhumer.com/opensource/lzo/lzofaq.php
68 * LZO will expand incompressible data by a little amount. I still haven't
69 * computed the exact values, but I suggest using these formulas for
70 * a worst-case expansion calculation:
72 * output_block_size = input_block_size + (input_block_size / 16) + 64 + 3
74 #define GRUB_BTRFS_LZO_BLOCK_SIZE 4096
75 #define GRUB_BTRFS_LZO_BLOCK_MAX_CSIZE (GRUB_BTRFS_LZO_BLOCK_SIZE + \
76 (GRUB_BTRFS_LZO_BLOCK_SIZE / 16) + 64 + 3)
79 * on disk struct has prefix 'btrfs_', little endian
80 * on memory struct has prefix 'fsw_btrfs_'
82 typedef uint8_t btrfs_checksum_t
[0x20];
83 typedef uint32_t btrfs_uuid_t
[4];
89 uint8_t dummy
[0x62 - 0x10];
90 } __attribute__ ((__packed__
));
92 struct btrfs_superblock
94 btrfs_checksum_t checksum
;
97 uint8_t signature
[sizeof (GRUB_BTRFS_SIGNATURE
) - 1];
101 uint8_t dummy2
[0x10];
102 uint64_t total_bytes
;
104 uint64_t root_dir_objectid
;
105 #define BTRFS_MAX_NUM_DEVICES 0x10000
106 uint64_t num_devices
;
110 uint8_t dummy3
[0x31];
111 struct btrfs_device this_device
;
113 uint8_t dummy4
[0x100];
114 uint8_t bootstrap_mapping
[0x800];
115 } __attribute__ ((__packed__
));
119 btrfs_checksum_t checksum
;
124 } __attribute__ ((__packed__
));
126 struct fsw_btrfs_device_desc
128 struct fsw_volume
* dev
;
132 struct fsw_btrfs_volume
134 struct fsw_volume g
; //!< Generic volume structure
136 /* superblock shadows */
137 uint8_t bootstrap_mapping
[0x800];
139 uint64_t total_bytes
;
143 uint64_t top_tree
; /* top volume tree */
144 unsigned num_devices
;
145 unsigned sectorshift
;
149 struct fsw_btrfs_device_desc
*devices_attached
;
150 unsigned n_devices_attached
;
151 unsigned n_devices_allocated
;
153 /* Cached extent data. */
159 struct btrfs_extent_data
*extent
;
164 GRUB_BTRFS_ITEM_TYPE_INODE_ITEM
= 0x01,
165 GRUB_BTRFS_ITEM_TYPE_INODE_REF
= 0x0c,
166 GRUB_BTRFS_ITEM_TYPE_DIR_ITEM
= 0x54,
167 GRUB_BTRFS_ITEM_TYPE_EXTENT_ITEM
= 0x6c,
168 GRUB_BTRFS_ITEM_TYPE_ROOT_ITEM
= 0x84,
169 GRUB_BTRFS_ITEM_TYPE_DEVICE
= 0xd8,
170 GRUB_BTRFS_ITEM_TYPE_CHUNK
= 0xe4
178 } __attribute__ ((__packed__
));
180 struct btrfs_chunk_item
184 uint64_t stripe_length
;
186 #define GRUB_BTRFS_CHUNK_TYPE_BITS_DONTCARE 0x07
187 #define GRUB_BTRFS_CHUNK_TYPE_SINGLE 0x00
188 #define GRUB_BTRFS_CHUNK_TYPE_RAID0 0x08
189 #define GRUB_BTRFS_CHUNK_TYPE_RAID1 0x10
190 #define GRUB_BTRFS_CHUNK_TYPE_DUPLICATED 0x20
191 #define GRUB_BTRFS_CHUNK_TYPE_RAID10 0x40
194 uint16_t nsubstripes
;
195 } __attribute__ ((__packed__
));
197 struct btrfs_chunk_stripe
201 btrfs_uuid_t device_uuid
;
202 } __attribute__ ((__packed__
));
204 struct btrfs_leaf_node
206 struct btrfs_key key
;
209 } __attribute__ ((__packed__
));
211 struct btrfs_internal_node
213 struct btrfs_key key
;
216 } __attribute__ ((__packed__
));
218 struct btrfs_dir_item
220 struct btrfs_key key
;
224 #define GRUB_BTRFS_DIR_ITEM_TYPE_REGULAR 1
225 #define GRUB_BTRFS_DIR_ITEM_TYPE_DIRECTORY 2
226 #define GRUB_BTRFS_DIR_ITEM_TYPE_SYMLINK 7
229 } __attribute__ ((__packed__
));
231 struct fsw_btrfs_leaf_descriptor
244 struct btrfs_root_item
249 } __attribute__ ((__packed__
));
255 } __attribute__ ((__packed__
));
263 uint64_t block_group
;
273 uint64_t reserved
[4];
274 struct btrfs_time atime
;
275 struct btrfs_time ctime
;
276 struct btrfs_time mtime
;
277 struct btrfs_time otime
;
278 } __attribute__ ((__packed__
));
280 struct fsw_btrfs_dnode
{
281 struct fsw_dnode g
; //!< Generic dnode structure
282 struct btrfs_inode
*raw
; //!< Full raw inode structure
285 struct btrfs_extent_data
299 uint64_t compressed_size
;
304 } __attribute__ ((__packed__
));
306 #define GRUB_BTRFS_EXTENT_INLINE 0
307 #define GRUB_BTRFS_EXTENT_REGULAR 1
309 #define GRUB_BTRFS_COMPRESSION_NONE 0
310 #define GRUB_BTRFS_COMPRESSION_ZLIB 1
311 #define GRUB_BTRFS_COMPRESSION_LZO 2
313 #define GRUB_BTRFS_OBJECT_ID_CHUNK 0x100
315 struct fsw_btrfs_uuid_list
{
316 struct fsw_btrfs_volume
*master
;
317 struct fsw_btrfs_uuid_list
*next
;
320 static int uuid_eq(btrfs_uuid_t u1
, btrfs_uuid_t u2
) {
321 return u1
[0]==u2
[0] && u1
[1]==u2
[1] && u1
[2]==u2
[2] && u1
[3]==u2
[3];
324 static struct fsw_btrfs_uuid_list
*master_uuid_list
= NULL
;
326 static int master_uuid_add(struct fsw_btrfs_volume
*vol
, struct fsw_btrfs_volume
**master_out
) {
327 struct fsw_btrfs_uuid_list
*l
;
329 for (l
= master_uuid_list
; l
; l
=l
->next
)
330 if(uuid_eq(l
->master
->uuid
, vol
->uuid
)) {
332 *master_out
= l
->master
;
336 l
= AllocatePool(sizeof(struct fsw_btrfs_uuid_list
));
338 l
->next
= master_uuid_list
;
339 master_uuid_list
= l
;
343 static void master_uuid_remove(struct fsw_btrfs_volume
*vol
) {
344 struct fsw_btrfs_uuid_list
**lp
;
346 for (lp
= &master_uuid_list
; *lp
; lp
=&(*lp
)->next
)
347 if((*lp
)->master
== vol
) {
348 struct fsw_btrfs_uuid_list
*n
= *lp
;
355 static fsw_status_t
btrfs_set_superblock_info(struct fsw_btrfs_volume
*vol
, struct btrfs_superblock
*sb
)
358 vol
->uuid
[0] = sb
->uuid
[0];
359 vol
->uuid
[1] = sb
->uuid
[1];
360 vol
->uuid
[2] = sb
->uuid
[2];
361 vol
->uuid
[3] = sb
->uuid
[3];
362 vol
->chunk_tree
= sb
->chunk_tree
;
363 vol
->root_tree
= sb
->root_tree
;
364 vol
->total_bytes
= fsw_u64_le_swap(sb
->total_bytes
);
365 vol
->bytes_used
= fsw_u64_le_swap(sb
->bytes_used
);
367 vol
->sectorshift
= 0;
368 vol
->sectorsize
= fsw_u32_le_swap(sb
->sectorsize
);
369 for(i
=9; i
<20; i
++) {
370 if((1UL<<i
) == vol
->sectorsize
) {
371 vol
->sectorshift
= i
;
375 if(fsw_u64_le_swap(sb
->num_devices
) > BTRFS_MAX_NUM_DEVICES
)
376 vol
->num_devices
= BTRFS_MAX_NUM_DEVICES
;
378 vol
->num_devices
= fsw_u64_le_swap(sb
->num_devices
);
379 fsw_memcpy(vol
->bootstrap_mapping
, sb
->bootstrap_mapping
, sizeof(vol
->bootstrap_mapping
));
383 static uint64_t superblock_pos
[4] = {
387 1048576ULL * 1048576ULL / 4
390 static fsw_status_t
fsw_btrfs_read_logical(struct fsw_btrfs_volume
*vol
,
391 uint64_t addr
, void *buf
, fsw_size_t size
, int rdepth
, int cache_level
);
393 static fsw_status_t
btrfs_read_superblock (struct fsw_volume
*vol
, struct btrfs_superblock
*sb_out
)
396 uint64_t total_blocks
= 1024;
397 fsw_status_t err
= FSW_SUCCESS
;
399 fsw_set_blocksize(vol
, BTRFS_DEFAULT_BLOCK_SIZE
, BTRFS_DEFAULT_BLOCK_SIZE
);
400 for (i
= 0; i
< 4; i
++)
403 struct btrfs_superblock
*sb
;
405 /* Don't try additional superblocks beyond device size. */
406 if (total_blocks
<= superblock_pos
[i
])
409 err
= fsw_block_get(vol
, superblock_pos
[i
], 0, (void **)&buffer
);
410 if (err
== FSW_UNSUPPORTED
) {
411 fsw_block_release(vol
, superblock_pos
[i
], buffer
);
415 sb
= (struct btrfs_superblock
*)buffer
;
416 if (!fsw_memeq (sb
->signature
, GRUB_BTRFS_SIGNATURE
,
417 sizeof (GRUB_BTRFS_SIGNATURE
) - 1))
419 fsw_block_release(vol
, superblock_pos
[i
], buffer
);
422 if (i
== 0 || fsw_u64_le_swap (sb
->generation
) > fsw_u64_le_swap (sb_out
->generation
))
424 fsw_memcpy (sb_out
, sb
, sizeof (*sb
));
425 total_blocks
= fsw_u64_le_swap (sb
->this_device
.size
) >> 12;
427 fsw_block_release(vol
, superblock_pos
[i
], buffer
);
430 if ((err
== FSW_UNSUPPORTED
|| !err
) && i
== 0)
431 return FSW_UNSUPPORTED
;
433 if (err
== FSW_UNSUPPORTED
)
437 DPRINT(L
"btrfs: UUID: %08x-%08x-%08x-%08x device id: %d\n",
438 sb_out
->uuid
[0], sb_out
->uuid
[1], sb_out
->uuid
[2], sb_out
->uuid
[3],
439 sb_out
->this_device
.device_id
);
443 static int key_cmp (const struct btrfs_key
*a
, const struct btrfs_key
*b
)
445 if (fsw_u64_le_swap (a
->object_id
) < fsw_u64_le_swap (b
->object_id
))
447 if (fsw_u64_le_swap (a
->object_id
) > fsw_u64_le_swap (b
->object_id
))
450 if (a
->type
< b
->type
)
452 if (a
->type
> b
->type
)
455 if (fsw_u64_le_swap (a
->offset
) < fsw_u64_le_swap (b
->offset
))
457 if (fsw_u64_le_swap (a
->offset
) > fsw_u64_le_swap (b
->offset
))
462 static void free_iterator (struct fsw_btrfs_leaf_descriptor
*desc
)
464 fsw_free (desc
->data
);
467 static fsw_status_t
save_ref (struct fsw_btrfs_leaf_descriptor
*desc
,
468 uint64_t addr
, unsigned i
, unsigned m
, int l
)
471 if (desc
->allocated
< desc
->depth
)
474 int oldsize
= sizeof (desc
->data
[0]) * desc
->allocated
;
475 desc
->allocated
*= 2;
476 newdata
= AllocatePool (sizeof (desc
->data
[0]) * desc
->allocated
);
478 return FSW_OUT_OF_MEMORY
;
479 fsw_memcpy(newdata
, desc
->data
, oldsize
);
480 FreePool(desc
->data
);
481 desc
->data
= newdata
;
483 desc
->data
[desc
->depth
- 1].addr
= addr
;
484 desc
->data
[desc
->depth
- 1].iter
= i
;
485 desc
->data
[desc
->depth
- 1].maxiter
= m
;
486 desc
->data
[desc
->depth
- 1].leaf
= l
;
490 static int next (struct fsw_btrfs_volume
*vol
,
491 struct fsw_btrfs_leaf_descriptor
*desc
,
492 uint64_t * outaddr
, fsw_size_t
* outsize
,
493 struct btrfs_key
*key_out
)
496 struct btrfs_leaf_node leaf
;
498 for (; desc
->depth
> 0; desc
->depth
--)
500 desc
->data
[desc
->depth
- 1].iter
++;
501 if (desc
->data
[desc
->depth
- 1].iter
502 < desc
->data
[desc
->depth
- 1].maxiter
)
505 if (desc
->depth
== 0)
507 while (!desc
->data
[desc
->depth
- 1].leaf
)
509 struct btrfs_internal_node node
;
510 struct btrfs_header head
;
511 fsw_memzero(&node
, sizeof(node
));
513 err
= fsw_btrfs_read_logical (vol
, desc
->data
[desc
->depth
- 1].iter
515 + sizeof (struct btrfs_header
)
516 + desc
->data
[desc
->depth
- 1].addr
,
517 &node
, sizeof (node
), 0, 1);
521 err
= fsw_btrfs_read_logical (vol
, fsw_u64_le_swap (node
.addr
),
522 &head
, sizeof (head
), 0, 1);
526 save_ref (desc
, fsw_u64_le_swap (node
.addr
), 0,
527 fsw_u32_le_swap (head
.nitems
), !head
.level
);
529 err
= fsw_btrfs_read_logical (vol
, desc
->data
[desc
->depth
- 1].iter
531 + sizeof (struct btrfs_header
)
532 + desc
->data
[desc
->depth
- 1].addr
, &leaf
,
533 sizeof (leaf
), 0, 1);
536 *outsize
= fsw_u32_le_swap (leaf
.size
);
537 *outaddr
= desc
->data
[desc
->depth
- 1].addr
+ sizeof (struct btrfs_header
)
538 + fsw_u32_le_swap (leaf
.offset
);
543 #define depth2cache(x) ((x) >= 4 ? 1 : 5-(x))
544 static fsw_status_t
lower_bound (struct fsw_btrfs_volume
*vol
,
545 const struct btrfs_key
*key_in
,
546 struct btrfs_key
*key_out
,
548 uint64_t *outaddr
, fsw_size_t
*outsize
,
549 struct fsw_btrfs_leaf_descriptor
*desc
,
552 uint64_t addr
= fsw_u64_le_swap (root
);
557 desc
->allocated
= 16;
559 desc
->data
= AllocatePool (sizeof (desc
->data
[0]) * desc
->allocated
);
561 return FSW_OUT_OF_MEMORY
;
564 /* > 2 would work as well but be robust and allow a bit more just in case.
567 return FSW_VOLUME_CORRUPTED
;
569 DPRINT (L
"btrfs: retrieving %lx %x %lx\n",
570 key_in
->object_id
, key_in
->type
, key_in
->offset
);
575 struct btrfs_header head
;
576 fsw_memzero(&head
, sizeof(head
));
580 /* FIXME: preread few nodes into buffer. */
581 err
= fsw_btrfs_read_logical (vol
, addr
, &head
, sizeof (head
),
582 rdepth
+ 1, depth2cache(rdepth
));
585 addr
+= sizeof (head
);
589 struct btrfs_internal_node node
, node_last
;
591 fsw_memzero (&node_last
, sizeof (node_last
));
592 for (i
= 0; i
< fsw_u32_le_swap (head
.nitems
); i
++)
594 err
= fsw_btrfs_read_logical (vol
, addr
+ i
* sizeof (node
),
595 &node
, sizeof (node
), rdepth
+ 1, depth2cache(rdepth
));
599 DPRINT (L
"btrfs: internal node (depth %d) %lx %x %lx\n", depth
,
600 node
.key
.object_id
, node
.key
.type
,
603 if (key_cmp (&node
.key
, key_in
) == 0)
607 err
= save_ref (desc
, addr
- sizeof (head
), i
,
608 fsw_u32_le_swap (head
.nitems
), 0);
611 addr
= fsw_u64_le_swap (node
.addr
);
614 if (key_cmp (&node
.key
, key_in
) > 0)
623 err
= save_ref (desc
, addr
- sizeof (head
), i
- 1,
624 fsw_u32_le_swap (head
.nitems
), 0);
627 addr
= fsw_u64_le_swap (node_last
.addr
);
632 fsw_memzero (key_out
, sizeof (*key_out
));
634 return save_ref (desc
, addr
- sizeof (head
), -1,
635 fsw_u32_le_swap (head
.nitems
), 0);
640 struct btrfs_leaf_node leaf
, leaf_last
;
642 for (i
= 0; i
< fsw_u32_le_swap (head
.nitems
); i
++)
644 err
= fsw_btrfs_read_logical (vol
, addr
+ i
* sizeof (leaf
),
645 &leaf
, sizeof (leaf
), rdepth
+ 1, depth2cache(rdepth
));
649 DPRINT (L
"btrfs: leaf (depth %d) %lx %x %lx\n", depth
,
650 leaf
.key
.object_id
, leaf
.key
.type
, leaf
.key
.offset
);
652 if (key_cmp (&leaf
.key
, key_in
) == 0)
654 fsw_memcpy (key_out
, &leaf
.key
, sizeof (*key_out
));
655 *outsize
= fsw_u32_le_swap (leaf
.size
);
656 *outaddr
= addr
+ fsw_u32_le_swap (leaf
.offset
);
658 return save_ref (desc
, addr
- sizeof (head
), i
,
659 fsw_u32_le_swap (head
.nitems
), 1);
663 if (key_cmp (&leaf
.key
, key_in
) > 0)
672 fsw_memcpy (key_out
, &leaf_last
.key
, sizeof (*key_out
));
673 *outsize
= fsw_u32_le_swap (leaf_last
.size
);
674 *outaddr
= addr
+ fsw_u32_le_swap (leaf_last
.offset
);
676 return save_ref (desc
, addr
- sizeof (head
), i
- 1,
677 fsw_u32_le_swap (head
.nitems
), 1);
682 fsw_memzero (key_out
, sizeof (*key_out
));
684 return save_ref (desc
, addr
- sizeof (head
), -1,
685 fsw_u32_le_swap (head
.nitems
), 1);
691 static int btrfs_add_multi_device(struct fsw_btrfs_volume
*master
, struct fsw_volume
*slave
, struct btrfs_superblock
*sb
)
694 for( i
= 0; i
< master
->n_devices_attached
; i
++)
695 if(sb
->this_device
.device_id
== master
->devices_attached
[i
].id
)
696 return FSW_UNSUPPORTED
;
698 slave
= clone_dummy_volume(slave
);
700 return FSW_OUT_OF_MEMORY
;
701 fsw_set_blocksize(slave
, master
->sectorsize
, master
->sectorsize
);
702 slave
->bcache_size
= BTRFS_INITIAL_BCACHE_SIZE
;
704 master
->devices_attached
[i
].id
= sb
->this_device
.device_id
;
705 master
->devices_attached
[i
].dev
= slave
;
706 master
->n_devices_attached
++;
708 DPRINT(L
"Found slave %d\n", sb
->this_device
.device_id
);
712 static int scan_disks_hook(struct fsw_volume
*volg
, struct fsw_volume
*slave
) {
713 struct fsw_btrfs_volume
*vol
= (struct fsw_btrfs_volume
*)volg
;
714 struct btrfs_superblock sb
;
717 if(vol
->n_devices_attached
>= vol
->n_devices_allocated
)
718 return FSW_UNSUPPORTED
;
720 err
= btrfs_read_superblock(slave
, &sb
);
722 return FSW_UNSUPPORTED
;
724 if(!uuid_eq(vol
->uuid
, sb
.uuid
))
725 return FSW_UNSUPPORTED
;
727 return btrfs_add_multi_device(vol
, slave
, &sb
);
730 static struct fsw_volume
*
731 find_device (struct fsw_btrfs_volume
*vol
, uint64_t id
, int do_rescan
) {
735 for (i
= 0; i
< vol
->n_devices_attached
; i
++)
736 if (id
== vol
->devices_attached
[i
].id
)
737 return vol
->devices_attached
[i
].dev
;
738 } while(vol
->n_devices_attached
< vol
->n_devices_allocated
&&
740 scan_disks(scan_disks_hook
, &vol
->g
) > 0);
741 DPRINT(L
"sub device %d not found\n", id
);
745 static fsw_status_t
fsw_btrfs_read_logical (struct fsw_btrfs_volume
*vol
, uint64_t addr
,
746 void *buf
, fsw_size_t size
, int rdepth
, int cache_level
)
751 struct btrfs_key
*key
;
752 struct btrfs_chunk_item
*chunk
;
754 fsw_status_t err
= 0;
755 struct btrfs_key key_out
;
757 struct btrfs_key key_in
;
761 for (ptr
= vol
->bootstrap_mapping
; ptr
< vol
->bootstrap_mapping
+ sizeof (vol
->bootstrap_mapping
) - sizeof (struct btrfs_key
);)
763 key
= (struct btrfs_key
*) ptr
;
764 if (key
->type
!= GRUB_BTRFS_ITEM_TYPE_CHUNK
)
766 chunk
= (struct btrfs_chunk_item
*) (key
+ 1);
767 if (fsw_u64_le_swap (key
->offset
) <= addr
768 && addr
< fsw_u64_le_swap (key
->offset
)
769 + fsw_u64_le_swap (chunk
->size
))
773 ptr
+= sizeof (*key
) + sizeof (*chunk
)
774 + sizeof (struct btrfs_chunk_stripe
)
775 * fsw_u16_le_swap (chunk
->nstripes
);
778 key_in
.object_id
= fsw_u64_le_swap (GRUB_BTRFS_OBJECT_ID_CHUNK
);
779 key_in
.type
= GRUB_BTRFS_ITEM_TYPE_CHUNK
;
780 key_in
.offset
= fsw_u64_le_swap (addr
);
781 err
= lower_bound (vol
, &key_in
, &key_out
, vol
->chunk_tree
, &chaddr
, &chsize
, NULL
, rdepth
);
785 if (key
->type
!= GRUB_BTRFS_ITEM_TYPE_CHUNK
786 || !(fsw_u64_le_swap (key
->offset
) <= addr
))
788 return FSW_VOLUME_CORRUPTED
;
790 // "couldn't find the chunk descriptor");
792 chunk
= AllocatePool (chsize
);
794 return FSW_OUT_OF_MEMORY
;
798 err
= fsw_btrfs_read_logical (vol
, chaddr
, chunk
, chsize
, rdepth
, cache_level
< 5 ? cache_level
+1 : 5);
808 #ifdef __MAKEWITH_GNUEFI
809 #define UINTREM UINTN
812 #define DivU64x32 DivU64x32Remainder
813 #define UINTREM UINT32
816 UINTREM stripe_offset
;
817 uint64_t off
= addr
- fsw_u64_le_swap (key
->offset
);
818 unsigned redundancy
= 1;
821 if (fsw_u64_le_swap (chunk
->size
) <= off
)
823 return FSW_VOLUME_CORRUPTED
;
824 //"couldn't find the chunk descriptor");
827 DPRINT(L
"btrfs chunk 0x%lx+0xlx %d stripes (%d substripes) of %lx\n",
828 fsw_u64_le_swap (key
->offset
),
829 fsw_u64_le_swap (chunk
->size
),
830 fsw_u16_le_swap (chunk
->nstripes
),
831 fsw_u16_le_swap (chunk
->nsubstripes
),
832 fsw_u64_le_swap (chunk
->stripe_length
));
834 /* gnu-efi has no DivU64x64Remainder, limited to DivU64x32 */
835 switch (fsw_u64_le_swap (chunk
->type
)
836 & ~GRUB_BTRFS_CHUNK_TYPE_BITS_DONTCARE
)
838 case GRUB_BTRFS_CHUNK_TYPE_SINGLE
:
840 uint64_t stripe_length
;
842 stripe_length
= DivU64x32 (fsw_u64_le_swap (chunk
->size
),
843 fsw_u16_le_swap (chunk
->nstripes
), NULL
);
845 if(stripe_length
> 1UL<<30)
846 return FSW_VOLUME_CORRUPTED
;
848 stripen
= DivU64x32 (off
, (uint32_t)stripe_length
, &stripe_offset
);
849 csize
= (stripen
+ 1) * stripe_length
- off
;
850 DPRINT(L
"read_logical %d chunk_found single csize=%d\n", __LINE__
, csize
);
853 case GRUB_BTRFS_CHUNK_TYPE_DUPLICATED
:
854 case GRUB_BTRFS_CHUNK_TYPE_RAID1
:
858 csize
= fsw_u64_le_swap (chunk
->size
) - off
;
860 DPRINT(L
"read_logical %d chunk_found dup/raid1 off=%lx csize=%d\n", __LINE__
, stripe_offset
, csize
);
863 case GRUB_BTRFS_CHUNK_TYPE_RAID0
:
865 uint64_t stripe_length
= fsw_u64_le_swap (chunk
->stripe_length
);
866 uint64_t middle
, high
;
869 if(stripe_length
> 1UL<<30)
870 return FSW_VOLUME_CORRUPTED
;
872 middle
= DivU64x32 (off
, (uint32_t)stripe_length
, &low
);
874 high
= DivU64x32 (middle
, fsw_u16_le_swap (chunk
->nstripes
), &stripen
);
876 low
+ fsw_u64_le_swap (chunk
->stripe_length
) * high
;
877 csize
= fsw_u64_le_swap (chunk
->stripe_length
) - low
;
878 DPRINT(L
"read_logical %d chunk_found raid0 csize=%d\n", __LINE__
, csize
);
881 case GRUB_BTRFS_CHUNK_TYPE_RAID10
:
883 uint64_t stripe_length
= fsw_u64_le_swap (chunk
->stripe_length
);
884 uint64_t middle
, high
;
887 if(stripe_length
> 1UL<<30)
888 return FSW_VOLUME_CORRUPTED
;
890 middle
= DivU64x32 (off
, stripe_length
, &low
);
892 high
= DivU64x32 (middle
,
893 fsw_u16_le_swap (chunk
->nstripes
)
894 / fsw_u16_le_swap (chunk
->nsubstripes
),
896 stripen
*= fsw_u16_le_swap (chunk
->nsubstripes
);
897 redundancy
= fsw_u16_le_swap (chunk
->nsubstripes
);
898 stripe_offset
= low
+ fsw_u64_le_swap (chunk
->stripe_length
)
900 csize
= fsw_u64_le_swap (chunk
->stripe_length
) - low
;
901 DPRINT(L
"read_logical %d chunk_found raid01 csize=%d\n", __LINE__
, csize
);
905 DPRINT (L
"btrfs: unsupported RAID\n");
906 return FSW_UNSUPPORTED
;
909 //"couldn't find the chunk descriptor");
910 return FSW_VOLUME_CORRUPTED
;
912 if (csize
> (uint64_t) size
)
915 for (j
= 0; j
< 2; j
++)
917 for (i
= 0; i
< redundancy
; i
++)
919 struct btrfs_chunk_stripe
*stripe
;
921 struct fsw_volume
*dev
;
923 stripe
= (struct btrfs_chunk_stripe
*) (chunk
+ 1);
924 /* Right now the redundancy handling is easy.
925 With RAID5-like it will be more difficult. */
926 stripe
+= stripen
+ i
;
928 paddr
= fsw_u64_le_swap (stripe
->offset
) + stripe_offset
;
930 DPRINT (L
"btrfs: chunk 0x%lx+0x%lx (%d stripes (%d substripes) of %lx) stripe %lx maps to 0x%lx\n",
931 fsw_u64_le_swap (key
->offset
),
932 fsw_u64_le_swap (chunk
->size
),
933 fsw_u16_le_swap (chunk
->nstripes
),
934 fsw_u16_le_swap (chunk
->nsubstripes
),
935 fsw_u64_le_swap (chunk
->stripe_length
),
936 stripen
, stripe
->offset
);
937 DPRINT (L
"btrfs: reading paddr 0x%lx for laddr 0x%lx\n", paddr
, addr
);
939 dev
= find_device (vol
, stripe
->device_id
, j
);
942 err
= FSW_VOLUME_CORRUPTED
;
946 uint32_t off
= paddr
& (vol
->sectorsize
- 1);
947 paddr
>>= vol
->sectorshift
;
951 err
= fsw_block_get(dev
, paddr
, cache_level
, (void **)&buffer
);
954 int s
= vol
->sectorsize
- off
;
957 fsw_memcpy(buf
+n
, buffer
+off
, s
);
958 fsw_block_release(dev
, paddr
, (void *)buffer
);
964 DPRINT (L
"read logical: err %d csize %d got %d\n",
976 buf
= (uint8_t *) buf
+ csize
;
978 if (challoc
&& chunk
)
984 static fsw_status_t
fsw_btrfs_get_default_root(struct fsw_btrfs_volume
*vol
, uint64_t root_dir_objectid
);
985 static fsw_status_t
fsw_btrfs_volume_mount(struct fsw_volume
*volg
) {
986 struct btrfs_superblock sblock
;
987 struct fsw_btrfs_volume
*vol
= (struct fsw_btrfs_volume
*)volg
;
988 struct fsw_btrfs_volume
*master_out
= NULL
;
994 fsw_memzero((char *)vol
+sizeof(*volg
), sizeof(*vol
)-sizeof(*volg
));
996 err
= btrfs_read_superblock (volg
, &sblock
);
1000 btrfs_set_superblock_info(vol
, &sblock
);
1002 if(vol
->sectorshift
== 0)
1003 return FSW_UNSUPPORTED
;
1005 if(vol
->num_devices
>= BTRFS_MAX_NUM_DEVICES
)
1006 return FSW_UNSUPPORTED
;
1008 vol
->is_master
= master_uuid_add(vol
, &master_out
);
1009 /* already mounted via other device */
1010 if(vol
->is_master
== 0) {
1011 #define FAKE_LABEL "btrfs.multi.device"
1012 s
.type
= FSW_STRING_TYPE_UTF8
;
1013 s
.size
= s
.len
= sizeof(FAKE_LABEL
)-1;
1014 s
.data
= FAKE_LABEL
;
1015 err
= fsw_strdup_coerce(&volg
->label
, volg
->host_string_type
, &s
);
1018 btrfs_add_multi_device(master_out
, volg
, &sblock
);
1019 /* create fake root */
1020 return fsw_dnode_create_root_with_tree(volg
, 0, 0, &volg
->root
);
1023 fsw_set_blocksize(volg
, vol
->sectorsize
, vol
->sectorsize
);
1024 vol
->g
.bcache_size
= BTRFS_INITIAL_BCACHE_SIZE
;
1025 vol
->n_devices_allocated
= vol
->num_devices
;
1026 vol
->devices_attached
= AllocatePool (sizeof (vol
->devices_attached
[0])
1027 * vol
->n_devices_allocated
);
1028 if (!vol
->devices_attached
)
1029 return FSW_OUT_OF_MEMORY
;
1031 vol
->n_devices_attached
= 1;
1032 vol
->devices_attached
[0].dev
= volg
;
1033 vol
->devices_attached
[0].id
= sblock
.this_device
.device_id
;
1035 for (i
= 0; i
< 0x100; i
++)
1036 if (sblock
.label
[i
] == 0)
1039 s
.type
= FSW_STRING_TYPE_UTF8
;
1041 s
.data
= sblock
.label
;
1042 err
= fsw_strdup_coerce(&volg
->label
, volg
->host_string_type
, &s
);
1044 FreePool (vol
->devices_attached
);
1045 vol
->devices_attached
= NULL
;
1049 err
= fsw_btrfs_get_default_root(vol
, sblock
.root_dir_objectid
);
1051 DPRINT(L
"root not found\n");
1052 FreePool (vol
->devices_attached
);
1053 vol
->devices_attached
= NULL
;
1060 static void fsw_btrfs_volume_free(struct fsw_volume
*volg
)
1063 struct fsw_btrfs_volume
*vol
= (struct fsw_btrfs_volume
*)volg
;
1069 master_uuid_remove(vol
);
1071 /* The device 0 is closed one layer upper. */
1072 for (i
= 1; i
< vol
->n_devices_attached
; i
++)
1073 fsw_unmount (vol
->devices_attached
[i
].dev
);
1074 if(vol
->devices_attached
)
1075 FreePool (vol
->devices_attached
);
1077 FreePool (vol
->extent
);
1080 static fsw_status_t
fsw_btrfs_volume_stat(struct fsw_volume
*volg
, struct fsw_volume_stat
*sb
)
1082 struct fsw_btrfs_volume
*vol
= (struct fsw_btrfs_volume
*)volg
;
1083 sb
->total_bytes
= vol
->total_bytes
;
1084 sb
->free_bytes
= vol
->bytes_used
;
1088 static fsw_status_t
fsw_btrfs_read_inode (struct fsw_btrfs_volume
*vol
,
1089 struct btrfs_inode
*inode
, uint64_t num
,
1092 struct btrfs_key key_in
, key_out
;
1094 fsw_size_t elemsize
;
1097 key_in
.object_id
= num
;
1098 key_in
.type
= GRUB_BTRFS_ITEM_TYPE_INODE_ITEM
;
1101 err
= lower_bound (vol
, &key_in
, &key_out
, tree
, &elemaddr
, &elemsize
, NULL
, 0);
1104 if (num
!= key_out
.object_id
1105 || key_out
.type
!= GRUB_BTRFS_ITEM_TYPE_INODE_ITEM
)
1106 return FSW_NOT_FOUND
;
1108 return fsw_btrfs_read_logical (vol
, elemaddr
, inode
, sizeof (*inode
), 0, 2);
1111 static fsw_status_t
fsw_btrfs_dnode_fill(struct fsw_volume
*volg
, struct fsw_dnode
*dnog
)
1113 struct fsw_btrfs_volume
*vol
= (struct fsw_btrfs_volume
*)volg
;
1114 struct fsw_btrfs_dnode
*dno
= (struct fsw_btrfs_dnode
*)dnog
;
1118 /* slave device got empty root */
1119 if (!vol
->is_master
) {
1121 dno
->g
.type
= FSW_DNODE_TYPE_DIR
;
1128 dno
->raw
= AllocatePool(sizeof(struct btrfs_inode
));
1129 if(dno
->raw
== NULL
)
1130 return FSW_OUT_OF_MEMORY
;
1132 err
= fsw_btrfs_read_inode(vol
, dno
->raw
, dno
->g
.dnode_id
, dno
->g
.tree_id
);
1139 // get info from the inode
1140 dno
->g
.size
= fsw_u64_le_swap(dno
->raw
->size
);
1141 // TODO: check docs for 64-bit sized files
1142 mode
= fsw_u32_le_swap(dno
->raw
->mode
);
1144 dno
->g
.type
= FSW_DNODE_TYPE_FILE
;
1145 else if (S_ISDIR(mode
))
1146 dno
->g
.type
= FSW_DNODE_TYPE_DIR
;
1147 else if (S_ISLNK(mode
))
1148 dno
->g
.type
= FSW_DNODE_TYPE_SYMLINK
;
1150 dno
->g
.type
= FSW_DNODE_TYPE_SPECIAL
;
1155 static void fsw_btrfs_dnode_free(struct fsw_volume
*volg
, struct fsw_dnode
*dnog
)
1157 struct fsw_btrfs_dnode
*dno
= (struct fsw_btrfs_dnode
*)dnog
;
1162 static fsw_status_t
fsw_btrfs_dnode_stat(struct fsw_volume
*volg
, struct fsw_dnode
*dnog
, struct fsw_dnode_stat
*sb
)
1164 struct fsw_btrfs_dnode
*dno
= (struct fsw_btrfs_dnode
*)dnog
;
1166 /* slave device got empty root */
1167 if(dno
->raw
== NULL
) {
1169 sb
->store_time_posix(sb
, FSW_DNODE_STAT_CTIME
, 0);
1170 sb
->store_time_posix(sb
, FSW_DNODE_STAT_ATIME
, 0);
1171 sb
->store_time_posix(sb
, FSW_DNODE_STAT_MTIME
, 0);
1174 sb
->used_bytes
= fsw_u64_le_swap(dno
->raw
->nbytes
);
1175 sb
->store_time_posix(sb
, FSW_DNODE_STAT_ATIME
,
1176 fsw_u64_le_swap(dno
->raw
->atime
.sec
));
1177 sb
->store_time_posix(sb
, FSW_DNODE_STAT_CTIME
,
1178 fsw_u64_le_swap(dno
->raw
->ctime
.sec
));
1179 sb
->store_time_posix(sb
, FSW_DNODE_STAT_MTIME
,
1180 fsw_u64_le_swap(dno
->raw
->mtime
.sec
));
1181 sb
->store_attr_posix(sb
, fsw_u32_le_swap(dno
->raw
->mode
));
1186 static fsw_ssize_t
grub_btrfs_lzo_decompress(char *ibuf
, fsw_size_t isize
, grub_off_t off
,
1187 char *obuf
, fsw_size_t osize
)
1189 uint32_t total_size
, cblock_size
;
1191 unsigned char buf
[GRUB_BTRFS_LZO_BLOCK_SIZE
];
1194 #define fsw_get_unaligned32(x) (*(uint32_t *)(x))
1195 total_size
= fsw_u32_le_swap (fsw_get_unaligned32(ibuf
));
1196 ibuf
+= sizeof (total_size
);
1198 if (isize
< total_size
)
1201 /* Jump forward to first block with requested data. */
1202 while (off
>= GRUB_BTRFS_LZO_BLOCK_SIZE
)
1204 /* Don't let following uint32_t cross the page boundary. */
1205 if (((ibuf
- ibuf0
) & 0xffc) == 0xffc)
1206 ibuf
= ((ibuf
- ibuf0
+ 3) & ~3) + ibuf0
;
1208 cblock_size
= fsw_u32_le_swap (fsw_get_unaligned32 (ibuf
));
1209 ibuf
+= sizeof (cblock_size
);
1211 if (cblock_size
> GRUB_BTRFS_LZO_BLOCK_MAX_CSIZE
)
1214 off
-= GRUB_BTRFS_LZO_BLOCK_SIZE
;
1215 ibuf
+= cblock_size
;
1220 lzo_uint usize
= GRUB_BTRFS_LZO_BLOCK_SIZE
;
1222 /* Don't let following uint32_t cross the page boundary. */
1223 if (((ibuf
- ibuf0
) & 0xffc) == 0xffc)
1224 ibuf
= ((ibuf
- ibuf0
+ 3) & ~3) + ibuf0
;
1226 cblock_size
= fsw_u32_le_swap (fsw_get_unaligned32 (ibuf
));
1227 ibuf
+= sizeof (cblock_size
);
1229 if (cblock_size
> GRUB_BTRFS_LZO_BLOCK_MAX_CSIZE
)
1232 /* Block partially filled with requested data. */
1233 if (off
> 0 || osize
< GRUB_BTRFS_LZO_BLOCK_SIZE
)
1235 fsw_size_t to_copy
= GRUB_BTRFS_LZO_BLOCK_SIZE
- off
;
1237 if (to_copy
> osize
)
1240 if (lzo1x_decompress_safe ((lzo_bytep
)ibuf
, cblock_size
, (lzo_bytep
)buf
, &usize
, NULL
) != 0)
1243 if (to_copy
> usize
)
1245 fsw_memcpy(obuf
, buf
+ off
, to_copy
);
1250 ibuf
+= cblock_size
;
1255 /* Decompress whole block directly to output buffer. */
1256 if (lzo1x_decompress_safe ((lzo_bytep
)ibuf
, cblock_size
, (lzo_bytep
)obuf
, &usize
, NULL
) != 0)
1262 ibuf
+= cblock_size
;
1268 static fsw_status_t
fsw_btrfs_get_extent(struct fsw_volume
*volg
, struct fsw_dnode
*dnog
,
1269 struct fsw_extent
*extent
)
1271 struct fsw_btrfs_volume
*vol
= (struct fsw_btrfs_volume
*)volg
;
1272 uint64_t ino
= dnog
->dnode_id
;
1273 uint64_t tree
= dnog
->tree_id
;
1274 uint64_t pos0
= extent
->log_start
<< vol
->sectorshift
;
1275 extent
->type
= FSW_EXTENT_TYPE_INVALID
;
1276 extent
->log_count
= 1;
1277 uint64_t pos
= pos0
;
1284 /* slave device got empty root */
1285 if (!vol
->is_master
)
1286 return FSW_NOT_FOUND
;
1288 if (!vol
->extent
|| vol
->extstart
> pos
|| vol
->extino
!= ino
1289 || vol
->exttree
!= tree
|| vol
->extend
<= pos
)
1291 struct btrfs_key key_in
, key_out
;
1293 fsw_size_t elemsize
;
1296 FreePool (vol
->extent
);
1299 key_in
.object_id
= ino
;
1300 key_in
.type
= GRUB_BTRFS_ITEM_TYPE_EXTENT_ITEM
;
1301 key_in
.offset
= fsw_u64_le_swap (pos
);
1302 err
= lower_bound (vol
, &key_in
, &key_out
, tree
, &elemaddr
, &elemsize
, NULL
, 0);
1304 return FSW_VOLUME_CORRUPTED
;
1305 if (key_out
.object_id
!= ino
1306 || key_out
.type
!= GRUB_BTRFS_ITEM_TYPE_EXTENT_ITEM
)
1308 return FSW_VOLUME_CORRUPTED
;
1310 if ((fsw_ssize_t
) elemsize
< ((char *) &vol
->extent
->inl
1311 - (char *) vol
->extent
))
1313 return FSW_VOLUME_CORRUPTED
;
1315 vol
->extstart
= fsw_u64_le_swap (key_out
.offset
);
1316 vol
->extsize
= elemsize
;
1317 vol
->extent
= AllocatePool (elemsize
);
1319 vol
->exttree
= tree
;
1321 return FSW_OUT_OF_MEMORY
;
1323 err
= fsw_btrfs_read_logical (vol
, elemaddr
, vol
->extent
, elemsize
, 0, 1);
1327 vol
->extend
= vol
->extstart
+ fsw_u64_le_swap (vol
->extent
->size
);
1328 if (vol
->extent
->type
== GRUB_BTRFS_EXTENT_REGULAR
1329 && (char *) &vol
->extent
+ elemsize
1330 >= (char *) &vol
->extent
->filled
+ sizeof (vol
->extent
->filled
))
1332 vol
->extstart
+ fsw_u64_le_swap (vol
->extent
->filled
);
1334 DPRINT (L
"btrfs: %lx +0x%lx\n", fsw_u64_le_swap (key_out
.offset
), fsw_u64_le_swap (vol
->extent
->size
));
1335 if (vol
->extend
<= pos
)
1337 return FSW_VOLUME_CORRUPTED
;
1341 csize
= vol
->extend
- pos
;
1342 extoff
= pos
- vol
->extstart
;
1344 if (vol
->extent
->encryption
||vol
->extent
->encoding
)
1346 return FSW_UNSUPPORTED
;
1349 switch(vol
->extent
->compression
) {
1350 case GRUB_BTRFS_COMPRESSION_LZO
:
1351 case GRUB_BTRFS_COMPRESSION_ZLIB
:
1352 case GRUB_BTRFS_COMPRESSION_NONE
:
1355 return FSW_UNSUPPORTED
;
1358 count
= ( csize
+ vol
->sectorsize
- 1) >> vol
->sectorshift
;
1359 switch (vol
->extent
->type
)
1361 case GRUB_BTRFS_EXTENT_INLINE
:
1362 buf
= AllocatePool( count
<< vol
->sectorshift
);
1364 return FSW_OUT_OF_MEMORY
;
1365 if (vol
->extent
->compression
== GRUB_BTRFS_COMPRESSION_ZLIB
)
1367 if (grub_zlib_decompress (vol
->extent
->inl
, vol
->extsize
-
1368 ((uint8_t *) vol
->extent
->inl
1369 - (uint8_t *) vol
->extent
),
1371 != (fsw_ssize_t
) csize
)
1374 return FSW_VOLUME_CORRUPTED
;
1377 else if (vol
->extent
->compression
== GRUB_BTRFS_COMPRESSION_LZO
)
1379 if (grub_btrfs_lzo_decompress(vol
->extent
->inl
, vol
->extsize
-
1380 ((uint8_t *) vol
->extent
->inl
1381 - (uint8_t *) vol
->extent
),
1383 != (fsw_ssize_t
) csize
)
1386 return -FSW_VOLUME_CORRUPTED
;
1390 fsw_memcpy (buf
, vol
->extent
->inl
+ extoff
, csize
);
1393 case GRUB_BTRFS_EXTENT_REGULAR
:
1394 if (!vol
->extent
->laddr
)
1397 if (vol
->extent
->compression
== GRUB_BTRFS_COMPRESSION_NONE
)
1401 csize
= count
<< vol
->sectorshift
;
1403 buf
= AllocatePool( count
<< vol
->sectorshift
);
1405 return FSW_OUT_OF_MEMORY
;
1406 err
= fsw_btrfs_read_logical (vol
,
1407 fsw_u64_le_swap (vol
->extent
->laddr
)
1408 + fsw_u64_le_swap (vol
->extent
->offset
)
1409 + extoff
, buf
, csize
, 0, 0);
1416 if (vol
->extent
->compression
!= GRUB_BTRFS_COMPRESSION_NONE
)
1422 zsize
= fsw_u64_le_swap (vol
->extent
->compressed_size
);
1423 tmp
= AllocatePool (zsize
);
1425 return -FSW_OUT_OF_MEMORY
;
1426 err
= fsw_btrfs_read_logical (vol
, fsw_u64_le_swap (vol
->extent
->laddr
), tmp
, zsize
, 0, 0);
1430 return -FSW_VOLUME_CORRUPTED
;
1433 buf
= AllocatePool( count
<< vol
->sectorshift
);
1436 return FSW_OUT_OF_MEMORY
;
1439 if (vol
->extent
->compression
== GRUB_BTRFS_COMPRESSION_ZLIB
)
1441 ret
= grub_zlib_decompress (tmp
, zsize
, extoff
1442 + fsw_u64_le_swap (vol
->extent
->offset
),
1445 else if (vol
->extent
->compression
== GRUB_BTRFS_COMPRESSION_LZO
)
1446 ret
= grub_btrfs_lzo_decompress (tmp
, zsize
, extoff
1447 + fsw_u64_le_swap (vol
->extent
->offset
),
1454 if (ret
!= (fsw_ssize_t
) csize
) {
1456 return -FSW_VOLUME_CORRUPTED
;
1463 return -FSW_VOLUME_CORRUPTED
;
1466 extent
->log_count
= count
;
1468 if(csize
< (count
<< vol
->sectorshift
))
1469 fsw_memzero( buf
+ csize
, (count
<< vol
->sectorshift
) - csize
);
1470 extent
->buffer
= buf
;
1471 extent
->type
= FSW_EXTENT_TYPE_BUFFER
;
1473 extent
->buffer
= NULL
;
1474 extent
->type
= FSW_EXTENT_TYPE_SPARSE
;
1479 static fsw_status_t
fsw_btrfs_readlink(struct fsw_volume
*volg
, struct fsw_dnode
*dnog
,
1480 struct fsw_string
*link_target
)
1482 struct fsw_btrfs_volume
*vol
= (struct fsw_btrfs_volume
*)volg
;
1483 struct fsw_btrfs_dnode
*dno
= (struct fsw_btrfs_dnode
*)dnog
;
1485 fsw_status_t status
;
1486 struct fsw_string s
;
1489 if (dno
->g
.size
> FSW_PATH_MAX
)
1490 return FSW_VOLUME_CORRUPTED
;
1492 tmp
= AllocatePool(dno
->g
.size
);
1494 return FSW_OUT_OF_MEMORY
;
1498 struct fsw_extent extent
;
1500 extent
.log_start
= i
;
1501 status
= fsw_btrfs_get_extent(volg
, dnog
, &extent
);
1502 if(status
|| extent
.type
!= FSW_EXTENT_TYPE_BUFFER
) {
1505 FreePool(extent
.buffer
);
1506 return FSW_VOLUME_CORRUPTED
;
1508 size
= extent
.log_count
<< vol
->sectorshift
;
1509 if(size
> (dno
->g
.size
- (i
<<vol
->sectorshift
)))
1510 size
= dno
->g
.size
- (i
<<vol
->sectorshift
);
1511 fsw_memcpy(tmp
+ (i
<<vol
->sectorshift
), extent
.buffer
, size
);
1512 FreePool(extent
.buffer
);
1513 i
+= extent
.log_count
;
1514 } while( (i
<< vol
->sectorshift
) < dno
->g
.size
);
1516 s
.type
= FSW_STRING_TYPE_UTF8
;
1517 s
.size
= s
.len
= (int)dno
->g
.size
;
1519 status
= fsw_strdup_coerce(link_target
, volg
->host_string_type
, &s
);
1525 static fsw_status_t
fsw_btrfs_lookup_dir_item(struct fsw_btrfs_volume
*vol
,
1526 uint64_t tree_id
, uint64_t object_id
,
1527 struct fsw_string
*lookup_name
,
1528 struct btrfs_dir_item
**direl_buf
,
1529 struct btrfs_dir_item
**direl_out
1533 fsw_size_t elemsize
;
1534 fsw_size_t allocated
= 0;
1535 struct btrfs_key key
;
1536 struct btrfs_key key_out
;
1537 struct btrfs_dir_item
*cdirel
;
1542 key
.object_id
= object_id
;
1543 key
.type
= GRUB_BTRFS_ITEM_TYPE_DIR_ITEM
;
1544 key
.offset
= fsw_u64_le_swap (~grub_getcrc32c (1, lookup_name
->data
, lookup_name
->size
));
1546 err
= lower_bound (vol
, &key
, &key_out
, tree_id
, &elemaddr
, &elemsize
, NULL
, 0);
1550 if (key_cmp (&key
, &key_out
) != 0)
1551 return FSW_NOT_FOUND
;
1553 if (elemsize
> allocated
)
1555 allocated
= 2 * elemsize
;
1557 FreePool (*direl_buf
);
1558 *direl_buf
= AllocatePool (allocated
+ 1);
1560 return FSW_OUT_OF_MEMORY
;
1563 err
= fsw_btrfs_read_logical (vol
, elemaddr
, *direl_buf
, elemsize
, 0, 1);
1567 for (cdirel
= *direl_buf
;
1568 (uint8_t *) cdirel
- (uint8_t *) *direl_buf
< (fsw_ssize_t
) elemsize
;
1569 cdirel
= (void *) ((uint8_t *) (*direl_buf
+ 1)
1570 + fsw_u16_le_swap (cdirel
->n
)
1571 + fsw_u16_le_swap (cdirel
->m
)))
1573 if (lookup_name
->size
== fsw_u16_le_swap (cdirel
->n
)
1574 && fsw_memeq (cdirel
->name
, lookup_name
->data
, lookup_name
->size
))
1577 if ((uint8_t *) cdirel
- (uint8_t *) *direl_buf
>= (fsw_ssize_t
) elemsize
)
1578 return FSW_NOT_FOUND
;
1580 *direl_out
= cdirel
;
1584 static fsw_status_t
fsw_btrfs_get_root_tree(
1585 struct fsw_btrfs_volume
*vol
,
1586 struct btrfs_key
*key_in
,
1590 struct btrfs_root_item ri
;
1591 struct btrfs_key key_out
;
1593 fsw_size_t elemsize
;
1595 err
= lower_bound (vol
, key_in
, &key_out
, vol
->root_tree
, &elemaddr
, &elemsize
, NULL
, 0);
1599 if (key_in
->object_id
!= key_out
.object_id
|| key_in
->type
!= key_out
.type
)
1600 return FSW_NOT_FOUND
;
1602 err
= fsw_btrfs_read_logical (vol
, elemaddr
, &ri
, sizeof (ri
), 0, 1);
1606 *tree_out
= ri
.tree
;
1610 static fsw_status_t
fsw_btrfs_get_sub_dnode(
1611 struct fsw_btrfs_volume
*vol
,
1612 struct fsw_btrfs_dnode
*dno
,
1613 struct btrfs_dir_item
*cdirel
,
1614 struct fsw_string
*name
,
1615 struct fsw_dnode
**child_dno_out
)
1619 uint64_t tree_id
= dno
->g
.tree_id
;
1622 switch (cdirel
->key
.type
)
1624 case GRUB_BTRFS_ITEM_TYPE_ROOT_ITEM
:
1625 err
= fsw_btrfs_get_root_tree (vol
, &cdirel
->key
, &tree_id
);
1629 child_type
= GRUB_BTRFS_DIR_ITEM_TYPE_DIRECTORY
;
1630 child_id
= fsw_u64_le_swap(GRUB_BTRFS_OBJECT_ID_CHUNK
);
1632 case GRUB_BTRFS_ITEM_TYPE_INODE_ITEM
:
1633 child_type
= cdirel
->type
;
1634 child_id
= cdirel
->key
.object_id
;
1638 DPRINT (L
"btrfs: unrecognised object type 0x%x", cdirel
->key
.type
);
1639 return FSW_VOLUME_CORRUPTED
;
1642 switch(child_type
) {
1643 case GRUB_BTRFS_DIR_ITEM_TYPE_REGULAR
:
1644 child_type
= FSW_DNODE_TYPE_FILE
;
1646 case GRUB_BTRFS_DIR_ITEM_TYPE_DIRECTORY
:
1647 child_type
= FSW_DNODE_TYPE_DIR
;
1649 case GRUB_BTRFS_DIR_ITEM_TYPE_SYMLINK
:
1650 child_type
= FSW_DNODE_TYPE_SYMLINK
;
1653 child_type
= FSW_DNODE_TYPE_SPECIAL
;
1656 return fsw_dnode_create_with_tree(&dno
->g
, tree_id
, child_id
, child_type
, name
, child_dno_out
);
1659 static fsw_status_t
fsw_btrfs_dir_lookup(struct fsw_volume
*volg
, struct fsw_dnode
*dnog
,
1660 struct fsw_string
*lookup_name
, struct fsw_dnode
**child_dno_out
)
1662 struct fsw_btrfs_volume
*vol
= (struct fsw_btrfs_volume
*)volg
;
1663 struct fsw_btrfs_dnode
*dno
= (struct fsw_btrfs_dnode
*)dnog
;
1665 struct fsw_string s
;
1667 *child_dno_out
= NULL
;
1669 /* slave device got empty root */
1670 if (!vol
->is_master
)
1671 return FSW_NOT_FOUND
;
1673 err
= fsw_strdup_coerce(&s
, FSW_STRING_TYPE_UTF8
, lookup_name
);
1677 /* treat '...' under root as top root */
1678 if(dnog
== volg
->root
&& s
.size
== 3 && ((char *)s
.data
)[0]=='.' && ((char *)s
.data
)[1]=='.' && ((char *)s
.data
)[2]=='.')
1681 if(dnog
->tree_id
== vol
->top_tree
) {
1682 fsw_dnode_retain(dnog
);
1683 *child_dno_out
= dnog
;
1686 return fsw_dnode_create_with_tree(dnog
,
1687 vol
->top_tree
, fsw_u64_le_swap(GRUB_BTRFS_OBJECT_ID_CHUNK
),
1688 FSW_DNODE_TYPE_DIR
, lookup_name
, child_dno_out
);
1690 struct btrfs_dir_item
*direl
=NULL
, *cdirel
;
1691 err
= fsw_btrfs_lookup_dir_item(vol
, dnog
->tree_id
, dnog
->dnode_id
, &s
, &direl
, &cdirel
);
1693 err
= fsw_btrfs_get_sub_dnode(vol
, dno
, cdirel
, lookup_name
, child_dno_out
);
1700 static fsw_status_t
fsw_btrfs_get_default_root(struct fsw_btrfs_volume
*vol
, uint64_t root_dir_objectid
)
1703 struct fsw_string s
;
1704 struct btrfs_dir_item
*direl
=NULL
, *cdirel
;
1705 uint64_t default_tree_id
= 0;
1706 struct btrfs_key top_root_key
;
1708 /* Get to top tree id */
1709 top_root_key
.object_id
= fsw_u64_le_swap(5UL);
1710 top_root_key
.type
= GRUB_BTRFS_ITEM_TYPE_ROOT_ITEM
;
1711 top_root_key
.offset
= -1LL;
1712 err
= fsw_btrfs_get_root_tree (vol
, &top_root_key
, &vol
->top_tree
);
1716 s
.type
= FSW_STRING_TYPE_UTF8
;
1719 err
= fsw_btrfs_lookup_dir_item(vol
, vol
->root_tree
, root_dir_objectid
, &s
, &direl
, &cdirel
);
1721 /* if "default" is failed or invalid, use top tree */
1722 if (err
|| /* failed */
1723 cdirel
->type
!= GRUB_BTRFS_DIR_ITEM_TYPE_DIRECTORY
|| /* not dir */
1724 cdirel
->key
.type
!= GRUB_BTRFS_ITEM_TYPE_ROOT_ITEM
|| /* not tree */
1725 cdirel
->key
.object_id
== fsw_u64_le_swap(5UL) || /* same as top */
1726 (err
= fsw_btrfs_get_root_tree (vol
, &cdirel
->key
, &default_tree_id
)))
1727 default_tree_id
= vol
->top_tree
;
1730 err
= fsw_dnode_create_root_with_tree(&vol
->g
, default_tree_id
,
1731 fsw_u64_le_swap (GRUB_BTRFS_OBJECT_ID_CHUNK
), &vol
->g
.root
);
1737 static fsw_status_t
fsw_btrfs_dir_read(struct fsw_volume
*volg
, struct fsw_dnode
*dnog
,
1738 struct fsw_shandle
*shand
, struct fsw_dnode
**child_dno_out
)
1740 struct fsw_btrfs_volume
*vol
= (struct fsw_btrfs_volume
*)volg
;
1741 struct fsw_btrfs_dnode
*dno
= (struct fsw_btrfs_dnode
*)dnog
;
1744 struct btrfs_key key_in
, key_out
;
1746 fsw_size_t elemsize
;
1747 fsw_size_t allocated
= 0;
1748 struct btrfs_dir_item
*direl
= NULL
;
1749 struct fsw_btrfs_leaf_descriptor desc
;
1751 uint64_t tree
= dnog
->tree_id
;
1753 /* slave device got empty root */
1754 if (!vol
->is_master
)
1755 return FSW_NOT_FOUND
;
1757 key_in
.object_id
= dnog
->dnode_id
;
1758 key_in
.type
= GRUB_BTRFS_ITEM_TYPE_DIR_ITEM
;
1759 key_in
.offset
= shand
->pos
;
1761 if((int64_t)key_in
.offset
== -1LL)
1763 return FSW_NOT_FOUND
;
1766 err
= lower_bound (vol
, &key_in
, &key_out
, tree
, &elemaddr
, &elemsize
, &desc
, 0);
1771 DPRINT(L
"key_in %lx:%x:%lx out %lx:%x:%lx elem %lx+%lx\n",
1772 key_in
.object_id
, key_in
.type
, key_in
.offset
,
1773 key_out
.object_id
, key_out
.type
, key_out
.offset
,
1774 elemaddr
, elemsize
);
1775 if (key_out
.type
!= GRUB_BTRFS_ITEM_TYPE_DIR_ITEM
||
1776 key_out
.object_id
!= key_in
.object_id
)
1778 r
= next (vol
, &desc
, &elemaddr
, &elemsize
, &key_out
);
1781 DPRINT(L
"next out %lx:%x:%lx\n",
1782 key_out
.object_id
, key_out
.type
, key_out
.offset
, elemaddr
, elemsize
);
1784 if (key_out
.type
== GRUB_BTRFS_ITEM_TYPE_DIR_ITEM
&&
1785 key_out
.object_id
== key_in
.object_id
&&
1786 fsw_u64_le_swap(key_out
.offset
) <= fsw_u64_le_swap(key_in
.offset
))
1788 r
= next (vol
, &desc
, &elemaddr
, &elemsize
, &key_out
);
1791 DPRINT(L
"next out %lx:%x:%lx\n",
1792 key_out
.object_id
, key_out
.type
, key_out
.offset
, elemaddr
, elemsize
);
1797 struct btrfs_dir_item
*cdirel
;
1798 if (key_out
.type
!= GRUB_BTRFS_ITEM_TYPE_DIR_ITEM
||
1799 key_out
.object_id
!= key_in
.object_id
)
1804 if (elemsize
> allocated
)
1806 allocated
= 2 * elemsize
;
1809 direl
= AllocatePool (allocated
+ 1);
1812 r
= -FSW_OUT_OF_MEMORY
;
1817 err
= fsw_btrfs_read_logical (vol
, elemaddr
, direl
, elemsize
, 0, 1);
1824 for (cdirel
= direl
;
1825 (uint8_t *) cdirel
- (uint8_t *) direl
1826 < (fsw_ssize_t
) elemsize
;
1827 cdirel
= (void *) ((uint8_t *) (direl
+ 1)
1828 + fsw_u16_le_swap (cdirel
->n
)
1829 + fsw_u16_le_swap (cdirel
->m
)))
1831 struct fsw_string s
;
1832 s
.type
= FSW_STRING_TYPE_UTF8
;
1833 s
.size
= s
.len
= fsw_u16_le_swap (cdirel
->n
);
1834 s
.data
= cdirel
->name
;
1835 DPRINT(L
"item key %lx:%x%lx, type %lx, namelen=%lx\n",
1836 cdirel
->key
.object_id
, cdirel
->key
.type
, cdirel
->key
.offset
, cdirel
->type
, s
.size
);
1838 err
= fsw_btrfs_get_sub_dnode(vol
, dno
, cdirel
, &s
, child_dno_out
);
1841 free_iterator (&desc
);
1842 shand
->pos
= key_out
.offset
;
1846 r
= next (vol
, &desc
, &elemaddr
, &elemsize
, &key_out
);
1847 DPRINT(L
"next2 out %lx:%x:%lx\n",
1848 key_out
.object_id
, key_out
.type
, key_out
.offset
, elemaddr
, elemsize
);
1855 free_iterator (&desc
);
1857 r
= r
< 0 ? -r
: FSW_NOT_FOUND
;
1865 struct fsw_fstype_table
FSW_FSTYPE_TABLE_NAME(btrfs
) = {
1866 { FSW_STRING_TYPE_UTF8
, 4, 4, "btrfs" },
1867 sizeof(struct fsw_btrfs_volume
),
1868 sizeof(struct fsw_btrfs_dnode
),
1870 fsw_btrfs_volume_mount
,
1871 fsw_btrfs_volume_free
,
1872 fsw_btrfs_volume_stat
,
1873 fsw_btrfs_dnode_fill
,
1874 fsw_btrfs_dnode_free
,
1875 fsw_btrfs_dnode_stat
,
1876 fsw_btrfs_get_extent
,
1877 fsw_btrfs_dir_lookup
,