btrbk: use "dd" for raw target output, with configurable block size (raw_target_block_size)

We use "dd" instead of shell redirections, as it is common to have
special filesystems (like NFS, SMB, FUSE) mounted on the raw target
path. By using "dd" we make sure to write in reasonably large blocks
(default=128K), which is not always the case when using redirections
(e.g. "gpg > outfile" writes in 8K blocks).

Another approach would be to always pipe through "cat", which uses
st_blksize from fstat(2) (with a minimum of 128K) to determine the
block size.
pull/106/merge
Axel Burri 2016-08-25 12:45:13 +02:00
parent 3dabb507e6
commit 961f96833a
3 changed files with 16 additions and 1 deletion
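
As an illustration of the write path described in the commit message, here is a rough sketch of a raw target pipeline before and after this change. Snapshot name, GPG recipient and mount point are invented for the example; only the dd options (status=none, bs=&lt;raw_target_block_size&gt;, of=&lt;file&gt;.part) correspond to what the new code generates:

    # shell redirection: gpg chooses its own output buffering, so the
    # NFS/SMB/FUSE mount ends up seeing small (8K) writes
    btrfs send /mnt/btr_pool/data.20160825 \
      | gpg --encrypt --recipient backup@example.com \
      > /mnt/nfs_target/data.20160825.btrfs.gpg.part

    # with dd at the end of the pipe, data is written in 128K blocks
    # (or whatever raw_target_block_size is configured to)
    btrfs send /mnt/btr_pool/data.20160825 \
      | gpg --encrypt --recipient backup@example.com \
      | dd status=none bs=128K of=/mnt/nfs_target/data.20160825.btrfs.gpg.part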

@@ -8,6 +8,7 @@ btrbk-current
* Perform extra metadata check on target subvolume after "btrfs
receive" (adds an additional call to "btrfs subvolume show").
* Bugfix: Replace "realpath" with "readlink" in ssh_filter_btrbk.sh
* Add "raw_target_block_size" configuration option (close #105).
btrbk-0.23.3

btrbk

@@ -118,6 +118,7 @@ my %config_options = (
raw_target_compress_level => { default => "default", accept => [ "default" ], accept_numeric => 1 },
raw_target_compress_threads => { default => "default", accept => [ "default" ], accept_numeric => 1 },
raw_target_encrypt => { default => undef, accept => [ "no", "gpg" ] },
raw_target_block_size => { default => "128K", accept_regexp => qr/^[0-9]+(kB|k|K|KiB|MB|M|MiB)?$/ },
gpg_keyring => { default => undef, accept_file => { absolute => 1 } },
gpg_recipient => { default => undef, accept_regexp => qr/^[0-9a-zA-Z_@\+\-\.]+$/ },
@@ -1279,7 +1280,18 @@ sub btrfs_send_to_file($$$$;@)
};
}
push @cmd_pipe, {
redirect_to_file => { unsafe => "${target_path}/${target_filename}.part" },
# NOTE: We use "dd" instead of shell redirections here, as it is
# common to have special filesystems (like NFS, SMB, FUSE) mounted
# on $target_path. By using "dd" we make sure to write in
# reasonably large blocks (default=128K), which is not always the
# case when using redirections (e.g. "gpg > outfile" writes in 8K
# blocks).
# Another approach would be to always pipe through "cat", which
# uses st_blksize from fstat(2) (with a minimum of 128K) to
# determine the block size.
cmd => [ 'dd', 'status=none', 'bs=' . config_key($target, "raw_target_block_size"), "of=${target_path}/${target_filename}.part" ],
check_unsafe => [ { unsafe => "${target_path}/${target_filename}.part" } ],
#redirect_to_file => { unsafe => "${target_path}/${target_filename}.part" }, # alternative (use shell redirection), less overhead on local filesystems (barely measurable):
rsh => vinfo_rsh($target, disable_compression => $opts{compress} || config_compress_hash($target, "stream_compress")),
rsh_compress_in => $opts{compress} || config_compress_hash($target, "stream_compress"),
compressed_ok => ($opts{compress} ? 1 : 0),
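
Note that the configured value is passed verbatim to dd's "bs=" option (see the cmd entry above), so the suffixes allowed by the accept_regexp follow GNU dd conventions: K and KiB mean 1024, kB means 1000, M and MiB mean 1024*1024, MB means 1000*1000. Two made-up examples of the resulting write command:

    # raw_target_block_size 64K
    dd status=none bs=64K of=/mnt/nfs_target/data.20160825.btrfs.gpg.part
    # raw_target_block_size 1M
    dd status=none bs=1M of=/mnt/nfs_target/data.20160825.btrfs.gpg.part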

@@ -380,6 +380,8 @@ raw_target_compress_threads default|<number>
.PP
raw_target_encrypt gpg|no
.PP
raw_target_block_size <number> (defaults to 128K)
.PP
gpg_keyring <file>
.PP
gpg_recipient <name>
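
For context, a hypothetical btrbk.conf snippet using the new option on a raw target; all paths and the recipient are invented for the example, only the option names and the 128K default come from this change and the existing raw target options:

    volume /mnt/btr_pool
      subvolume data
        target raw /mnt/nfs_target/btrbk
          raw_target_encrypt     gpg
          gpg_recipient          backup@example.com
          raw_target_block_size  1M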