mirror of https://github.com/digint/btrbk
7279 lines · 275 KiB · Perl · Executable File
#!/usr/bin/perl
|
|
#
|
|
# btrbk - Create snapshots and remote backups of btrfs subvolumes
|
|
#
|
|
# Copyright (C) 2014-2023 Axel Burri
|
|
#
|
|
# This program is free software: you can redistribute it and/or modify
|
|
# it under the terms of the GNU General Public License as published by
|
|
# the Free Software Foundation, either version 3 of the License, or
|
|
# (at your option) any later version.
|
|
#
|
|
# This program is distributed in the hope that it will be useful,
|
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
# GNU General Public License for more details.
|
|
#
|
|
# You should have received a copy of the GNU General Public License
|
|
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
|
#
|
|
# ---------------------------------------------------------------------
|
|
# The official btrbk website is located at:
|
|
# https://digint.ch/btrbk/
|
|
#
|
|
# Author:
|
|
# Axel Burri <axel@tty0.ch>
|
|
# ---------------------------------------------------------------------
|
|
|
|
use strict;
|
|
use warnings FATAL => qw( all ), NONFATAL => qw( deprecated );
|
|
|
|
use Carp qw(confess);
|
|
use Getopt::Long qw(GetOptions);
|
|
use Time::Local qw( timelocal timegm timegm_nocheck );
|
|
use IPC::Open3 qw(open3);
|
|
use Symbol qw(gensym);
|
|
use Cwd qw(abs_path);
|
|
|
|
our $VERSION = '0.33.0-dev';
|
|
our $AUTHOR = 'Axel Burri <axel@tty0.ch>';
|
|
our $PROJECT_HOME = '<https://digint.ch/btrbk/>';
|
|
|
|
our $BTRFS_PROGS_MIN = '4.12'; # required since btrbk-v0.27.0
|
|
|
|
my $VERSION_INFO = "btrbk command line client, version $VERSION";
|
|
|
|
|
|
my @config_src = ("/etc/btrbk.conf", "/etc/btrbk/btrbk.conf");
|
|
|
|
# Supported compression algorithms for send streams and raw targets.
# Per-entry fields:
#   name            - canonical algorithm name (same as the hash key)
#   format          - file name extension for compressed files (e.g. "gz")
#   compress_cmd    - argv prefix for compressing, filter mode (stdin -> stdout)
#   decompress_cmd  - argv prefix for decompressing, filter mode
#   level_min/max   - valid compression level range; absent = no level option
#                     (presumably validated against *_compress_level — confirm)
#   threads         - command line flag selecting thread count; absent = no threading
#   long / adapt    - zstd-only flags ("--long=<window>", "--adapt")
my %compression = (
  # NOTE: also adapt "compress_list" in ssh_filter_btrbk.sh if you change this
  gzip   => { name => 'gzip',   format => 'gz',  compress_cmd => [ 'gzip', '-c' ],   decompress_cmd => [ 'gzip', '-d', '-c' ],   level_min => 1, level_max => 9 },
  pigz   => { name => 'pigz',   format => 'gz',  compress_cmd => [ 'pigz', '-c' ],   decompress_cmd => [ 'pigz', '-d', '-c' ],   level_min => 1, level_max => 9,  threads => '-p' },
  bzip2  => { name => 'bzip2',  format => 'bz2', compress_cmd => [ 'bzip2', '-c' ],  decompress_cmd => [ 'bzip2', '-d', '-c' ],  level_min => 1, level_max => 9 },
  pbzip2 => { name => 'pbzip2', format => 'bz2', compress_cmd => [ 'pbzip2', '-c' ], decompress_cmd => [ 'pbzip2', '-d', '-c' ], level_min => 1, level_max => 9,  threads => '-p' },
  bzip3  => { name => 'bzip3',  format => 'bz3', compress_cmd => [ 'bzip3', '-c' ],  decompress_cmd => [ 'bzip3', '-d', '-c' ],  threads => '-j' },
  xz     => { name => 'xz',     format => 'xz',  compress_cmd => [ 'xz', '-c' ],     decompress_cmd => [ 'xz', '-d', '-c' ],     level_min => 0, level_max => 9,  threads => '-T' },
  lzo    => { name => 'lzo',    format => 'lzo', compress_cmd => [ 'lzop', '-c' ],   decompress_cmd => [ 'lzop', '-d', '-c' ],   level_min => 1, level_max => 9 },
  lz4    => { name => 'lz4',    format => 'lz4', compress_cmd => [ 'lz4', '-c' ],    decompress_cmd => [ 'lz4', '-d', '-c' ],    level_min => 1, level_max => 9 },
  zstd   => { name => 'zstd',   format => 'zst', compress_cmd => [ 'zstd', '-c' ],   decompress_cmd => [ 'zstd', '-d', '-c' ],   level_min => 1, level_max => 19, threads => '-T', long => '--long=', adapt => '--adapt' },
);
|
|
|
|
my $compress_format_alt = join '|', map { $_->{format} } values %compression; # note: this contains duplicate alternations
|
|
my $ipv4_addr_match = qr/(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])/;
|
|
my $ipv6_addr_match = qr/[a-fA-F0-9]*:[a-fA-F0-9]*:[a-fA-F0-9:]+/; # simplified (contains at least two colons), matches "::1", "2001:db8::7"
|
|
my $host_name_match = qr/(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\-]*[A-Za-z0-9])/;
|
|
my $uuid_match = qr/[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}/;
|
|
my $btrbk_file_match = qr/(?<name>.+)\.(?<YYYY>[0-9]{4})(?<MM>[0-9]{2})(?<DD>[0-9]{2})(T(?<hh>[0-9]{2})(?<mm>[0-9]{2})((?<ss>[0-9]{2})(?<zz>(Z|[+-][0-9]{4})))?)?(_(?<NN>[0-9]+))?/; # matches "NAME.YYYYMMDD[Thhmm[ss+0000]][_NN]"
|
|
my $timeshift_file_match = qr/(?<YYYY>[0-9]{4})-(?<MM>[0-9]{2})-(?<DD>[0-9]{2})_(?<hh>[0-9]{2})-(?<mm>[0-9]{2})-(?<ss>[0-9]{2})\/(?<name>[^\/]+)/; # matches "YYYY-MM-DD_hh-mm-ss/NAME"
|
|
my $raw_postfix_match = qr/\.btrfs(\.($compress_format_alt))?(\.(gpg|encrypted))?/; # matches ".btrfs[.gz|.bz2|.xz|...][.gpg|.encrypted]"
|
|
my $safe_file_match = qr/[0-9a-zA-Z_@\+\-\.\/]+/; # note: ubuntu uses '@' in the subvolume layout: <https://help.ubuntu.com/community/btrfs>
|
|
|
|
my $group_match = qr/[a-zA-Z0-9_:-]+/;
|
|
my $config_split_match = qr/\s*[,\s]\s*/;
|
|
|
|
my %day_of_week_map = ( sunday => 0, monday => 1, tuesday => 2, wednesday => 3, thursday => 4, friday => 5, saturday => 6 );
|
|
my @syslog_facilities = qw( user mail daemon auth lpr news cron authpriv local0 local1 local2 local3 local4 local5 local6 local7 );
|
|
|
|
my @incremental_prefs_avail = qw(sro srn sao san aro arn);
|
|
my @incremental_prefs_default = qw(sro:1 srn:1 sao:1 san:1 aro:1 arn:1);
|
|
my $incremental_prefs_match = "(defaults|(" . join("|", @incremental_prefs_avail) . ")(:[0-9]+)?)";
|
|
|
|
# Schema for all btrbk configuration options.
# Per-entry fields (as used below):
#   default                - value used when the option is not set in the config
#   accept                 - list of literal strings and/or qr// patterns of valid values
#   accept_file            - file-name value; sub-keys: relative, absolute, name_only, wildcards
#   context                - config sections the option is allowed in (default: all)
#   split                  - value may be a list (split on whitespace/comma)
#   allow_multiple         - option may be given more than once
#   require_bin            - external binary needed for this option to work
#   deprecated             - migration/abort rules keyed by value (MATCH/DEFAULT/yes/no)
my %config_options = (
  # NOTE: the parser always maps "no" to undef
  # NOTE: keys "volume", "subvolume" and "target" are hardcoded
  # NOTE: files "." and "no" map to <undef>
  timestamp_format        => { default => "long", accept => [qw( short long long-iso )], context => [qw( global volume subvolume )] },
  snapshot_dir            => { default => undef, accept_file => { relative => 1, absolute => 1 }, context => [qw( global volume subvolume )] },
  snapshot_name           => { c_default => 1, accept_file => { name_only => 1 }, context => [qw( subvolume )], deny_glob_context => 1 }, # NOTE: defaults to the subvolume name (hardcoded)
  snapshot_create         => { default => "always", accept => [qw( no always ondemand onchange )], context => [qw( global volume subvolume )] },
  incremental             => { default => "yes", accept => [qw( yes no strict )] },
  incremental_prefs       => { default => \@incremental_prefs_default, accept => [ qr/$incremental_prefs_match/ ], split => 1 },
  incremental_clones      => { default => "yes", accept => [qw( yes no )] },
  incremental_resolve     => { default => "mountpoint", accept => [qw( mountpoint directory _all_accessible )] },
  preserve_day_of_week    => { default => "sunday", accept => [ (keys %day_of_week_map) ] },
  preserve_hour_of_day    => { default => 0, accept => [ (0..23) ] },
  snapshot_preserve       => { default => undef, accept => [qw( no )], accept_preserve_matrix => 1, context => [qw( global volume subvolume )], },
  snapshot_preserve_min   => { default => "all", accept => [qw( all latest ), qr/[1-9][0-9]*[hdwmy]/ ], context => [qw( global volume subvolume )], },
  target_preserve         => { default => undef, accept => [qw( no )], accept_preserve_matrix => 1 },
  target_preserve_min     => { default => "all", accept => [qw( all latest no ), qr/[0-9]+[hdwmy]/ ] },
  # target_create_dir     => { default => undef, accept => [qw( yes no )] },
  archive_preserve        => { default => undef, accept => [qw( no )], accept_preserve_matrix => 1, context => [qw( global )] },
  archive_preserve_min    => { default => "all", accept => [qw( all latest no ), qr/[0-9]+[hdwmy]/ ], context => [qw( global )] },
  ssh_identity            => { default => undef, accept => [qw( no ) ], accept_file => { absolute => 1 } },
  ssh_user                => { default => "root", accept => [qw( no ), qr/[a-z_][a-z0-9_-]*/ ] },
  ssh_compression         => { default => undef, accept => [qw( yes no )] },
  ssh_cipher_spec         => { default => [ "default" ], accept => [qw( default ), qr/[a-z0-9][a-z0-9@.-]+/ ], split => 1 },
  transaction_log         => { default => undef, accept => [qw( no )], accept_file => { absolute => 1 }, context => [qw( global )] },
  transaction_syslog      => { default => undef, accept => [qw( no ), @syslog_facilities ], context => [qw( global )] },
  lockfile                => { default => undef, accept => [qw( no )], accept_file => { absolute => 1 }, context => [qw( global )] },

  rate_limit              => { default => undef, accept => [qw( no ), qr/[0-9]+[kmgtKMGT]?/ ], require_bin => 'mbuffer' },
  rate_limit_remote       => { default => undef, accept => [qw( no ), qr/[0-9]+[kmgtKMGT]?/ ] }, # NOTE: requires 'mbuffer' command on remote hosts
  stream_buffer           => { default => undef, accept => [qw( no ), qr/[0-9]+[kmgKMG%]?/ ], require_bin => 'mbuffer' },
  stream_buffer_remote    => { default => undef, accept => [qw( no ), qr/[0-9]+[kmgKMG%]?/ ] }, # NOTE: requires 'mbuffer' command on remote hosts
  stream_compress         => { default => undef, accept => [qw( no ), (keys %compression) ] },
  stream_compress_level   => { default => "default", accept => [qw( default ), qr/[0-9]+/ ] },
  stream_compress_long    => { default => "default", accept => [qw( default ), qr/[0-9]+/ ] },
  stream_compress_threads => { default => "default", accept => [qw( default ), qr/[0-9]+/ ] },
  stream_compress_adapt   => { default => undef, accept => [qw( yes no )] },

  raw_target_compress         => { default => undef, accept => [qw( no ), (keys %compression) ] },
  raw_target_compress_level   => { default => "default", accept => [qw( default ), qr/[0-9]+/ ] },
  raw_target_compress_long    => { default => "default", accept => [qw( default ), qr/[0-9]+/ ] },
  raw_target_compress_threads => { default => "default", accept => [qw( default ), qr/[0-9]+/ ] },
  raw_target_encrypt          => { default => undef, accept => [qw( no gpg openssl_enc )] },
  raw_target_block_size       => { default => "128K", accept => [ qr/[0-9]+[kmgKMG]?/ ] },
  raw_target_split            => { default => undef, accept => [qw( no ), qr/[0-9]+([kmgtpezyKMGTPEZY][bB]?)?/ ] },
  gpg_keyring             => { default => undef, accept_file => { absolute => 1 } },
  gpg_recipient           => { default => undef, accept => [ qr/[0-9a-zA-Z_@\+\-\.]+/ ], split => 1 },
  openssl_ciphername      => { default => "aes-256-cbc", accept => [ qr/[0-9a-zA-Z\-]+/ ] },
  openssl_iv_size         => { default => undef, accept => [qw( no ), qr/[0-9]+/ ] },
  openssl_keyfile         => { default => undef, accept_file => { absolute => 1 } },

  kdf_backend             => { default => undef, accept_file => { absolute => 1 } },
  kdf_keysize             => { default => "32", accept => [ qr/[0-9]+/ ] },
  kdf_keygen              => { default => "once", accept => [qw( once each )] },

  group                   => { default => undef, accept => [ qr/$group_match/ ], allow_multiple => 1, split => 1 },
  noauto                  => { default => undef, accept => [qw( yes no )] },

  backend                 => { default => "btrfs-progs", accept => [qw( btrfs-progs btrfs-progs-btrbk btrfs-progs-sudo btrfs-progs-doas )] },
  backend_local           => { default => undef, accept => [qw( no btrfs-progs btrfs-progs-btrbk btrfs-progs-sudo btrfs-progs-doas )] },
  backend_remote          => { default => undef, accept => [qw( no btrfs-progs btrfs-progs-btrbk btrfs-progs-sudo btrfs-progs-doas )] },
  backend_local_user      => { default => undef, accept => [qw( no btrfs-progs btrfs-progs-btrbk btrfs-progs-sudo btrfs-progs-doas )] },

  compat                  => { default => undef, accept => [qw( no busybox ignore_receive_errors )], split => 1 },
  compat_local            => { default => undef, accept => [qw( no busybox ignore_receive_errors )], split => 1 },
  compat_remote           => { default => undef, accept => [qw( no busybox ignore_receive_errors )], split => 1 },
  safe_commands           => { default => undef, accept => [qw( yes no )], context => [qw( global )] },
  btrfs_commit_delete     => { default => undef, accept => [qw( yes no after each )],
                               deprecated => { MATCH => { regex => qr/^(?:after|each)$/, warn => 'Please use "btrfs_commit_delete yes|no"', replace_key => "btrfs_commit_delete", replace_value => "yes" } } },
  send_protocol           => { default => undef, accept => [qw( no 1 2 )] }, # NOTE: requires btrfs-progs-5.19
  send_compressed_data    => { default => undef, accept => [qw( yes no )] }, # NOTE: implies send_protocol=2

  snapshot_qgroup_destroy => { default => undef, accept => [qw( yes no )], context => [qw( global volume subvolume )] },
  target_qgroup_destroy   => { default => undef, accept => [qw( yes no )] },
  archive_qgroup_destroy  => { default => undef, accept => [qw( yes no )], context => [qw( global )] },

  archive_exclude         => { default => undef, accept_file => { wildcards => 1 }, allow_multiple => 1, context => [qw( global )] },
  archive_exclude_older   => { default => undef, accept => [qw( yes no )] },

  cache_dir               => { default => undef, accept_file => { absolute => 1 }, allow_multiple => 1, context => [qw( global )] },
  ignore_extent_data_inline => { default => "yes", accept => [qw( yes no )] },

  warn_unknown_targets    => { default => undef, accept => [qw( yes no )] },

  # deprecated options
  ssh_port                => { default => "default", accept => [qw( default ), qr/[0-9]+/ ],
                               deprecated => { DEFAULT => { warn => 'Please use "ssh://hostname[:port]" notation in the "volume" and "target" configuration lines.' } } },
  btrfs_progs_compat      => { default => undef, accept => [qw( yes no )],
                               # FIX: double-quoted string, so that $BTRFS_PROGS_MIN interpolates; the
                               # previous single-quoted string printed the literal text '$BTRFS_PROGS_MIN'.
                               deprecated => { DEFAULT => { ABORT => 1, warn => "This feature has been dropped in btrbk-v0.23.0. Please update to newest btrfs-progs, AT LEAST >= $BTRFS_PROGS_MIN" } } },
  snapshot_preserve_daily => { default => 'all', accept => [qw( all ), qr/[0-9]+/ ], context => [qw( global volume subvolume )],
                               deprecated => { DEFAULT => { FAILSAFE_PRESERVE => 1, warn => 'Please use "snapshot_preserve" and/or "snapshot_preserve_min"' } } },
  snapshot_preserve_weekly => { default => 0, accept => [qw( all ), qr/[0-9]+/ ], context => [qw( global volume subvolume )],
                               deprecated => { DEFAULT => { FAILSAFE_PRESERVE => 1, warn => 'Please use "snapshot_preserve" and/or "snapshot_preserve_min"' } } },
  snapshot_preserve_monthly => { default => 'all', accept => [qw( all ), qr/[0-9]+/ ], context => [qw( global volume subvolume )],
                               deprecated => { DEFAULT => { FAILSAFE_PRESERVE => 1, warn => 'Please use "snapshot_preserve" and/or "snapshot_preserve_min"' } } },
  target_preserve_daily   => { default => 'all', accept => [qw( all ), qr/[0-9]+/ ],
                               deprecated => { DEFAULT => { FAILSAFE_PRESERVE => 1, warn => 'Please use "target_preserve" and/or "target_preserve_min"' } } },
  target_preserve_weekly  => { default => 0, accept => [qw( all ), qr/[0-9]+/ ],
                               deprecated => { DEFAULT => { FAILSAFE_PRESERVE => 1, warn => 'Please use "target_preserve" and/or "target_preserve_min"' } } },
  target_preserve_monthly => { default => 'all', accept => [qw( all ), qr/[0-9]+/ ],
                               deprecated => { DEFAULT => { FAILSAFE_PRESERVE => 1, warn => 'Please use "target_preserve" and/or "target_preserve_min"' } } },
  resume_missing          => { default => "yes", accept => [qw( yes no )],
                               deprecated => { yes => { warn => 'ignoring (missing backups are always resumed since btrbk v0.23.0)' },
                                               no  => { FAILSAFE_PRESERVE => 1, warn => 'Please use "target_preserve_min latest" and "target_preserve no" if you want to keep only the latest backup', },
                                               DEFAULT => {} } },
  snapshot_create_always  => { default => undef, accept => [qw( yes no )],
                               deprecated => { yes => { warn => "Please use \"snapshot_create always\"",
                                                        replace_key => "snapshot_create",
                                                        replace_value => "always",
                                                      },
                                               no  => { warn => "Please use \"snapshot_create no\" or \"snapshot_create ondemand\"",
                                                        replace_key => "snapshot_create",
                                                        replace_value => "ondemand",
                                                      },
                                               DEFAULT => {},
                                             },
                             },
  receive_log             => { default => undef, accept => [qw( sidecar no )], accept_file => { absolute => 1 },
                               deprecated => { DEFAULT => { warn => "ignoring" } },
                             }
);
|
|
|
|
my @config_target_types = qw(send-receive raw); # first in list is default
|
|
|
|
my %table_formats = (
|
|
config_volume => {
|
|
table => [ qw( -volume_host -volume_port volume_path ) ],
|
|
long => [ qw( volume_host -volume_port volume_path -volume_rsh ) ],
|
|
raw => [ qw( volume_url volume_host volume_port volume_path volume_rsh ) ],
|
|
single_column => [ qw( volume_url ) ],
|
|
},
|
|
|
|
config_source => {
|
|
table => [ qw( -source_host -source_port source_subvolume snapshot_path snapshot_name ) ],
|
|
long => [ qw( source_host -source_port source_subvolume snapshot_path snapshot_name -source_rsh ) ],
|
|
raw => [ qw( source_url source_host source_port source_subvolume snapshot_path snapshot_name source_rsh ) ],
|
|
single_column => [ qw( source_url ) ],
|
|
},
|
|
|
|
config_target => {
|
|
table => [ qw( -target_host -target_port target_path ) ],
|
|
long => [ qw( target_host -target_port target_path -target_rsh ) ],
|
|
raw => [ qw( target_url target_host target_port target_path target_rsh ) ],
|
|
single_column => [ qw( target_url ) ],
|
|
},
|
|
|
|
config => {
|
|
table => [ qw( -source_host -source_port source_subvolume snapshot_path snapshot_name -target_host -target_port target_path ) ],
|
|
long => [ qw( -source_host -source_port source_subvolume snapshot_path snapshot_name -target_host -target_port target_path target_type snapshot_preserve target_preserve ) ],
|
|
raw => [ qw( source_url source_host source_port source_subvolume snapshot_path snapshot_name target_url target_host target_port target_path target_type snapshot_preserve target_preserve source_rsh target_rsh ) ],
|
|
},
|
|
|
|
resolved => {
|
|
table => [ qw( -source_host -source_port source_subvolume snapshot_subvolume status -target_host -target_port target_subvolume ) ],
|
|
long => [ qw( -source_host -source_port source_subvolume snapshot_subvolume status -target_host -target_port target_subvolume target_type ) ],
|
|
raw => [ qw( type source_url source_host source_port source_subvolume snapshot_subvolume snapshot_name status target_url target_host target_port target_subvolume target_type source_rsh target_rsh ) ],
|
|
},
|
|
|
|
snapshots => {
|
|
table => [ qw( -source_host -source_port source_subvolume snapshot_subvolume status ) ],
|
|
long => [ qw( -source_host -source_port source_subvolume snapshot_subvolume status ) ],
|
|
raw => [ qw( source_url source_host source_port source_subvolume snapshot_subvolume snapshot_name status source_rsh ) ],
|
|
single_column => [ qw( snapshot_url ) ],
|
|
},
|
|
|
|
backups => { # same as resolved, except for single_column
|
|
table => [ qw( -source_host -source_port source_subvolume snapshot_subvolume status -target_host -target_port target_subvolume ) ],
|
|
long => [ qw( -source_host -source_port source_subvolume snapshot_subvolume status -target_host -target_port target_subvolume target_type ) ],
|
|
raw => [ qw( type source_url source_host source_port source_subvolume snapshot_subvolume snapshot_name status target_url target_host target_port target_subvolume target_type source_rsh target_rsh ) ],
|
|
single_column => [ qw( target_url ) ],
|
|
},
|
|
|
|
latest => { # same as resolved, except hiding target if not present
|
|
table => [ qw( -source_host -source_port source_subvolume snapshot_subvolume status -target_host -target_port -target_subvolume ) ],
|
|
long => [ qw( -source_host -source_port source_subvolume snapshot_subvolume status -target_host -target_port -target_subvolume -target_type ) ],
|
|
raw => [ qw( type source_url source_host source_port source_subvolume snapshot_subvolume snapshot_name status target_url target_host target_port target_subvolume target_type source_rsh target_rsh ) ],
|
|
},
|
|
|
|
stats => {
|
|
table => [ qw( -source_host -source_port source_subvolume snapshot_subvolume -target_host -target_port -target_subvolume snapshots -backups ) ],
|
|
long => [ qw( -source_host -source_port source_subvolume snapshot_subvolume -target_host -target_port -target_subvolume snapshot_status backup_status snapshots -backups -correlated -orphaned -incomplete ) ],
|
|
raw => [ qw( source_url source_host source_port source_subvolume snapshot_subvolume snapshot_name target_url target_host target_port target_subvolume snapshot_status backup_status snapshots backups correlated orphaned incomplete ) ],
|
|
RALIGN => { snapshots=>1, backups=>1, correlated=>1, orphaned=>1, incomplete=>1 },
|
|
},
|
|
|
|
schedule => {
|
|
table => [ qw( action -host -port subvolume scheme reason ) ],
|
|
long => [ qw( action -host -port subvolume scheme reason ) ],
|
|
raw => [ qw( topic action url host port path hod dow min h d w m y) ],
|
|
},
|
|
|
|
usage => {
|
|
table => [ qw( -host -port mount_source path size used free ) ],
|
|
long => [ qw( type -host -port mount_source path size used device_size device_allocated device_unallocated device_missing device_used free free_min data_ratio metadata_ratio global_reserve global_reserve_used ) ],
|
|
raw => [ qw( type host port mount_source path size used device_size device_allocated device_unallocated device_missing device_used free free_min data_ratio metadata_ratio global_reserve global_reserve_used ) ],
|
|
RALIGN => { size=>1, used=>1, device_size=>1, device_allocated=>1, device_unallocated=>1, device_missing=>1, device_used=>1, free=>1, free_min=>1, data_ratio=>1, metadata_ratio=>1, global_reserve=>1, global_reserve_used=>1 },
|
|
},
|
|
|
|
transaction => {
|
|
table => [ qw( type status -target_host -target_port target_subvolume -source_host -source_port source_subvolume parent_subvolume ) ],
|
|
long => [ qw( localtime type status duration target_host -target_port target_subvolume source_host -source_port source_subvolume parent_subvolume message ) ],
|
|
tlog => [ qw( localtime type status target_url source_url parent_url message ) ],
|
|
syslog => [ qw( type status target_url source_url parent_url message ) ],
|
|
raw => [ qw( time localtime type status duration target_url source_url parent_url message ) ],
|
|
},
|
|
|
|
origin_tree => {
|
|
table => [ qw( tree uuid parent_uuid received_uuid ) ],
|
|
long => [ qw( tree uuid parent_uuid received_uuid recursion ) ],
|
|
raw => [ qw( tree uuid parent_uuid received_uuid recursion ) ],
|
|
},
|
|
|
|
diff => {
|
|
table => [ qw( flags count size file ) ],
|
|
long => [ qw( flags count size file ) ],
|
|
raw => [ qw( flags count size file ) ],
|
|
RALIGN => { count=>1, size=>1 },
|
|
},
|
|
|
|
fs_list => {
|
|
table => [ qw( -host mount_source mount_subvol mount_point id flags subvolume_path path ) ],
|
|
short => [ qw( -host mount_source id flags path ) ],
|
|
long => [ qw( -host mount_source id top cgen gen uuid parent_uuid received_uuid flags path ) ],
|
|
raw => [ qw( host mount_source mount_subvol mount_point mount_subvolid id top_level cgen gen uuid parent_uuid received_uuid readonly path subvolume_path subvolume_rel_path url ) ],
|
|
single_column => [ qw( url ) ],
|
|
RALIGN => { id=>1, top=>1, cgen=>1, gen=>1 },
|
|
},
|
|
|
|
extent_diff => {
|
|
table => [ qw( total exclusive -diff -set subvol ) ],
|
|
long => [ qw( id cgen gen total exclusive -diff -set subvol ) ],
|
|
raw => [ qw( id cgen gen total exclusive -diff -set subvol ) ],
|
|
RALIGN => { id=>1, cgen=>1, gen=>1, total=>1, exclusive=>1, diff=>1, set=>1 },
|
|
},
|
|
);
|
|
|
|
# Commands that may be executed through an alternative backend.
my @btrfs_cmd = (
  "btrfs subvolume list",
  "btrfs subvolume show",
  "btrfs subvolume snapshot",
  "btrfs subvolume delete",
  "btrfs send",
  "btrfs receive",
  "btrfs filesystem usage",
  "btrfs qgroup destroy",
);
# Non-btrfs helper commands (only wrapped for the sudo/doas backends below).
my @system_cmd = (
  "readlink",
  "test",
);
# Maps each command string to the argv used by the respective backend:
#  - btrfs-progs-btrbk: one dedicated binary per command, spaces replaced
#    by dashes (e.g. "btrfs subvolume list" -> [ "btrfs-subvolume-list" ]);
#    note: btrfs commands only, @system_cmd is not included here
#  - btrfs-progs-sudo / btrfs-progs-doas: prefix the regular command with
#    "sudo -n" / "doas -n" (-n: non-interactive, never prompt for a password)
my %backend_cmd_map = (
  "btrfs-progs-btrbk" => { map +( $_ => [ s/ /-/gr ] ), @btrfs_cmd },
  "btrfs-progs-sudo"  => { map +( $_ => [ qw( sudo -n ), split(" ", $_) ] ), @btrfs_cmd, @system_cmd },
  "btrfs-progs-doas"  => { map +( $_ => [ qw( doas -n ), split(" ", $_) ] ), @btrfs_cmd, @system_cmd },
);
|
|
|
|
# keys used in raw target sidecar files (.info):
|
|
my %raw_info_sort = (
|
|
TYPE => 1,
|
|
FILE => 2, # informative only (as of btrbk-0.32.6)
|
|
RECEIVED_UUID => 3,
|
|
RECEIVED_PARENT_UUID => 4,
|
|
compress => 10,
|
|
split => 11,
|
|
encrypt => 12,
|
|
cipher => 13,
|
|
iv => 14,
|
|
# kdf_* (generated by kdf_backend)
|
|
INCOMPLETE => 100,
|
|
);
|
|
|
|
my $raw_info_value_match = qr/[0-9a-zA-Z_-]*/;
|
|
|
|
my %raw_url_cache; # map URL to (fake) btr_tree node
|
|
my %mountinfo_cache; # map MACHINE_ID to mount points (sorted descending by file length)
|
|
my %mount_source_cache; # map URL_PREFIX:mount_source (aka device) to btr_tree node
|
|
my %uuid_cache; # map UUID to btr_tree node
|
|
my %realpath_cache; # map URL to realpath (symlink target). undef denotes an error.
|
|
my %mkdir_cache; # map URL to mkdir status: 1=created, undef=error
|
|
|
|
my $tree_inject_id = 0; # fake subvolume id for injected nodes (negative)
|
|
my $fake_uuid_prefix = 'XXXXXXXX-XXXX-XXXX-XXXX-'; # plus 0-padded inject_id: XXXXXXXX-XXXX-XXXX-XXXX-000000000000
|
|
|
|
my $program_name; # "btrbk" or "lsbtr", default to "btrbk"
|
|
my $safe_commands;
|
|
my $dryrun;
|
|
my $loglevel = 1;
|
|
my $quiet;
|
|
my @exclude_vf;
|
|
my $do_dumper;
|
|
my $do_trace;
|
|
my $show_progress = 0;
|
|
my $output_format;
|
|
my $output_pretty = 0;
|
|
my @output_unit;
|
|
my $lockfile;
|
|
my $tlog_fh;
|
|
my $syslog_enabled = 0;
|
|
my $current_transaction;
|
|
my @transaction_log;
|
|
my %config_override;
|
|
my @tm_now; # current localtime ( sec, min, hour, mday, mon, year, wday, yday, isdst )
|
|
my @stderr; # stderr of last run_cmd
|
|
my %warn_once;
|
|
my %kdf_vars;
|
|
my $kdf_session_key;
|
|
|
|
|
|
# Global die handler: print a bug-report banner (version + author contact)
# and terminate with a full stack trace via Carp::confess.
# NOTE(review): a __DIE__ handler also fires for exceptions trapped by
# eval{} (standard Perl semantics) — presumably intended here; confirm.
$SIG{__DIE__} = sub {
  print STDERR "\nERROR: process died unexpectedly (btrbk v$VERSION)";
  print STDERR "\nPlease contact the author: $AUTHOR\n\n";
  print STDERR "Stack Trace:\n----------------------------------------\n";
  Carp::confess @_;  # re-raise the original message with a backtrace
};
|
|
|
|
# SIGINT (ctrl-c) handler: record the signal as an aborted action, dump
# the transaction log collected so far to STDERR, and exit non-zero.
$SIG{INT} = sub {
  print STDERR "\nERROR: Caught SIGINT, dumping transaction log:\n";
  action("signal", status => "SIGINT");  # append a "signal" entry to @transaction_log
  print_formatted("transaction", \@transaction_log, output_format => "tlog", outfile => *STDERR);
  exit 1;
};
|
|
|
|
# Print the one-line version banner to STDOUT.
# NOTE(review): name matches Getopt::Long's auto --version hook — confirm
# whether it is invoked via Getopt::Long or called directly.
sub VERSION_MESSAGE
{
  print "$VERSION_INFO\n";
}
|
|
|
|
# On usage errors, point the user at "--help"; silent in quiet mode.
sub ERROR_HELP_MESSAGE
{
  return if($quiet);
  my $hint = "See '$program_name --help'.\n";
  print STDERR $hint;
}
|
|
|
|
# Print the command-line help text to STDOUT; suppressed in quiet mode.
# The text depends on the personality the script runs as ($program_name,
# "lsbtr" or "btrbk").  $PROJECT_HOME interpolates into both heredocs.
# NOTE(review): the option/command descriptions below appear to have lost
# their column alignment in this copy — restore from upstream if editing.
sub HELP_MESSAGE
{
  return if($quiet);
  #80-----------------------------------------------------------------------------
  if($program_name eq "lsbtr") {
    print <<"END_HELP_LSBTR";
usage: lsbtr [<options>] [[--] <path>...]

options:
-h, --help display this help message
--version display version information
-l, --long use long listing format
-u, --uuid print uuid table (parent/received relations)
-1, --single-column Print path column only
--raw print raw table format
-v, --verbose increase output verbosity
-c, --config=FILE specify btrbk configuration file
--override=KEY=VALUE globally override a configuration option

For additional information, see $PROJECT_HOME
END_HELP_LSBTR
  }
  else {
    print <<"END_HELP_BTRBK";
usage: btrbk [<options>] <command> [[--] <filter>...]

options:
-h, --help display this help message
--version display version information
-c, --config=FILE specify configuration file
-n, --dry-run perform a trial run with no changes made
--exclude=FILTER exclude configured sections
-p, --preserve preserve all (do not delete anything)
--preserve-snapshots preserve snapshots (do not delete snapshots)
--preserve-backups preserve backups (do not delete backups)
--wipe delete all but latest snapshots
-v, --verbose be more verbose (increase logging level)
-q, --quiet be quiet (do not print backup summary)
-l, --loglevel=LEVEL set logging level (error, warn, info, debug, trace)
-t, --table change output to table format
-L, --long change output to long format
--format=FORMAT change output format, FORMAT=table|long|raw
-S, --print-schedule print scheduler details (for the "run" command)
--progress show progress bar on send-receive operation
--lockfile=FILE create and check lockfile
--override=KEY=VALUE globally override a configuration option

commands:
run run snapshot and backup operations
dryrun don't run btrfs commands; show what would be executed
snapshot run snapshot operations only
resume run backup operations, and delete snapshots
prune only delete snapshots and backups
archive <src> <dst> recursively copy btrbk snapshot/backup directories
cp <src>... <dst> copy read-only subvolume (incrementally if possible)
clean delete incomplete (garbled) backups
stats print snapshot/backup statistics
list <subcommand> available subcommands are (default "all"):
all snapshots and backups
snapshots snapshots
backups backups and correlated snapshots
latest most recent snapshots and backups
config configured source/snapshot/target relations
source configured source/snapshot relations
volume configured volume sections
target configured targets
usage print filesystem usage
ls <path> list all btrfs subvolumes below path
origin <subvol> print origin information for subvolume
diff <from> <to> list file changes between related subvolumes
extents [diff] <path> calculate accurate disk space usage

For additional information, see $PROJECT_HOME
END_HELP_BTRBK
  }
  #80-----------------------------------------------------------------------------
}
|
|
|
|
# Logging helpers. All output goes to STDERR; verbosity is gated by the
# file-global $loglevel (1=warn, 2=info, 3=debug, 4=trace).

# Print prefix $p, then all defined arguments; additional arguments are
# emitted as continuation lines marked with "...".
sub _log_cont { my $p = shift; print STDERR $p . join("\n${p}... ", grep defined, @_) . "\n"; }

# Trace-level output (loglevel >= 4), each argument on its own "___ " line.
sub TRACE { print STDERR map { "___ $_\n" } @_ if($loglevel >= 4) }

# Debug-level output (loglevel >= 3), no prefix.
sub DEBUG { _log_cont("", @_) if($loglevel >= 3) }

# Info-level output (loglevel >= 2), no prefix.
sub INFO { _log_cont("", @_) if($loglevel >= 2) }

# Warning output (loglevel >= 1), prefixed "WARNING: ".
sub WARN { _log_cont("WARNING: ", @_) if($loglevel >= 1) }

# Error output, unconditional, prefixed "ERROR: ".
sub ERROR { _log_cont("ERROR: ", @_) }
|
|
|
|
# Log an info message only once per distinct text (tracked in the
# file-global %warn_once); repeats are demoted to trace output.
# Returns 1 if the message was logged, 0 if suppressed.
sub INFO_ONCE {
  my $text = shift;
  unless($warn_once{INFO}{$text}) {
    $warn_once{INFO}{$text} = 1;
    INFO($text, @_);
    return 1;
  }
  TRACE("INFO(again): $text", @_) if($do_trace);
  return 0;
}
|
|
# Log a warning only once per distinct text (tracked in the file-global
# %warn_once); repeats are demoted to trace output.
# Returns 1 if the warning was logged, 0 if suppressed.
sub WARN_ONCE {
  my $text = shift;
  unless($warn_once{WARN}{$text}) {
    $warn_once{WARN}{$text} = 1;
    WARN($text, @_);
    return 1;
  }
  TRACE("WARNING(again): $text", @_) if($do_trace);
  return 0;
}
|
|
|
|
# Dump a data structure (typically a vinfo hash) to STDERR via
# Data::Dumper, with optional label and max depth (default 2).
# No-op unless Data::Dumper was loaded (see require_data_dumper()).
sub VINFO {
  return undef unless($do_dumper);
  my $vinfo = shift; my $t = shift || "vinfo"; my $maxdepth = shift // 2;
  print STDERR Data::Dumper->new([$vinfo], [$t])->Maxdepth($maxdepth)->Dump();
}
|
|
# Debugging aid: dump the subvolume list of $vol (one line per
# subvolume, path and node id) to STDERR.
# No-op unless Data::Dumper was loaded (see require_data_dumper()).
sub SUBVOL_LIST {
  return undef unless($do_dumper);
  my $vol = shift; my $t = shift // "SUBVOL_LIST"; my $svl = vinfo_subvol_list($vol);
  print STDERR "$t:\n " . join("\n ", map { "$vol->{PRINT}/./$_->{SUBVOL_PATH}\t$_->{node}{id}" } @$svl) . "\n";
}
|
|
|
|
|
|
# Mark a config section as aborted. Accepts a config hash or a vinfo
# (which carries CONFIG). Called as ABORTED($config, $text), the key
# defaults to "abort_<context>"; as ABORTED($config, $key, $text), the
# key is explicit. Unless the key starts with "skip_", an "ABORT"
# action is recorded in the transaction log.
sub ABORTED($$;$)
{
  my $config = shift;
  my $abrt_key = shift // die;
  my $abrt = shift;
  $config = $config->{CONFIG} if($config->{CONFIG}); # accept vinfo for $config
  unless(defined($abrt)) {
    # no key (only text) set: switch arguments, use default key
    $abrt = $abrt_key;
    $abrt_key = "abort_" . $config->{CONTEXT};
  }
  unless($abrt_key =~ /^skip_/) {
    # keys starting with "skip_" are not actions
    # flatten the message to a single line for the transaction log
    $abrt =~ s/\n/\\\\/g;
    $abrt =~ s/\r//g;
    action($abrt_key,
           status => "ABORT",
           vinfo_prefixed_keys("target", vinfo($config->{url}, $config)),
           message => $abrt,
          );
  }
  # remember the abort state for IS_ABORTED() / ABORTED_TEXT()
  $config->{ABORTED} = { key => $abrt_key, text => $abrt };
}
|
|
|
|
# Return the abort key of a config section, or undef if the section is
# not aborted. Accepts a config hash or a vinfo (which carries CONFIG).
# An optional $filter_prefix restricts the match to keys starting with
# that prefix.
sub IS_ABORTED($;$)
{
  my ($config, $filter_prefix) = @_;
  $config = $config->{CONFIG} if($config->{CONFIG}); # accept vinfo for $config
  my $aborted = $config->{ABORTED};
  return undef unless(defined($aborted));
  my $abrt_key = $aborted->{key};
  return undef unless(defined($abrt_key));
  return undef if($filter_prefix && ($abrt_key !~ /^$filter_prefix/));
  return $abrt_key;
}
|
|
|
|
# Return the abort message text of a config section, or "" if the
# section is not aborted (or has no text). Accepts a config hash or a
# vinfo (which carries CONFIG).
sub ABORTED_TEXT($)
{
  my ($config) = @_;
  $config = $config->{CONFIG} if($config->{CONFIG}); # accept vinfo for $config
  my $aborted = $config->{ABORTED};
  return "" unless(defined($aborted));
  return $aborted->{text} // "";
}
|
|
|
|
# Record a "fix manually" message on a config section: treated as an
# error, but does not abort the section. Accepts a config hash or a
# vinfo (which carries CONFIG). Messages accumulate in
# $config->{FIX_MANUALLY} (arrayref).
sub FIX_MANUALLY($$)
{
  my ($config, $msg) = @_;
  die unless(defined($msg));
  $config = $config->{CONFIG} if($config->{CONFIG}); # accept vinfo for $config
  # push autovivifies the arrayref on first use
  push(@{$config->{FIX_MANUALLY}}, $msg);
}
|
|
|
|
# Run a code block with any global $SIG{__DIE__} handler suppressed.
# Returns the block's result, or undef (scalar context) if it died;
# errors are silently discarded (inspect $@ if needed).
sub eval_quiet(&)
{
  my $code = shift;
  local $SIG{__DIE__};
  return eval { $code->() };
}
|
|
|
|
# Try to load Data::Dumper (used for trace/debug dumps only). On
# success, configures it (sorted keys, unquoted keys) and sets the
# file-global $do_dumper flag; on failure, dumps stay disabled and only
# a warning is printed (in trace mode).
sub require_data_dumper
{
  if(eval_quiet { require Data::Dumper; }) {
    Data::Dumper->import("Dumper");
    $Data::Dumper::Sortkeys = 1;
    $Data::Dumper::Quotekeys = 0;
    $do_dumper = 1;
    # silence perl warning: Name "Data::Dumper::Sortkeys" used only once: possible typo at...
    TRACE "Successfully loaded Dumper module: sortkeys=$Data::Dumper::Sortkeys, quotekeys=$Data::Dumper::Quotekeys" if($do_trace);
  } else {
    WARN "Perl module \"Data::Dumper\" not found: data trace dumps disabled!" if($do_trace);
  }
}
|
|
|
|
# Open the transaction log sinks: the tlog file (append mode, sets the
# file-global $tlog_fh) and/or syslog (sets $syslog_enabled). Both are
# skipped on dryrun. Actions already collected in @transaction_log are
# replayed as type "DEFERRED" so early actions also reach the sinks.
sub init_transaction_log($$)
{
  my $file = shift;
  my $config_syslog_facility = shift;
  if(defined($file) && (not $dryrun)) {
    if(open($tlog_fh, '>>', $file)) {
      # print headers (disabled)
      # print_formatted("transaction", [ ], output_format => "tlog", outfile => $tlog_fh);
      INFO "Using transaction log: $file";
    } else {
      # keep $tlog_fh undef so action() skips the file sink
      $tlog_fh = undef;
      ERROR "Failed to open transaction log '$file': $!";
    }
  }
  if(defined($config_syslog_facility) && (not $dryrun)) {
    DEBUG "Opening syslog";
    # Sys::Syslog is loaded lazily; missing module only disables syslog
    if(eval_quiet { require Sys::Syslog; }) {
      $syslog_enabled = 1;
      Sys::Syslog::openlog("btrbk", "", $config_syslog_facility);
      DEBUG "Syslog enabled";
    }
    else {
      WARN "Syslog disabled: $@";
    }
  }
  # replay actions recorded before the sinks were available
  action("DEFERRED", %$_) foreach (@transaction_log);
}
|
|
|
|
# Close the transaction log sinks opened by init_transaction_log():
# the tlog file handle and (if enabled) syslog.
sub close_transaction_log()
{
  if($tlog_fh) {
    DEBUG "Closing transaction log";
    # NOTE: close() must be parenthesized here: "close $fh || ERROR ..."
    # parses as close($fh || ERROR ...), so the error branch never runs.
    close($tlog_fh) or ERROR "Failed to close transaction log: $!";
  }
  if($syslog_enabled) {
    DEBUG "Closing syslog";
    eval_quiet { Sys::Syslog::closelog(); };
  }
}
|
|
|
|
# Record a transaction-log action of the given $type with key/value
# details. Unless replaying (type "DEFERRED"), the entry is
# timestamped and appended to the in-memory @transaction_log. The
# entry is always forwarded to the tlog file and syslog sinks if those
# are enabled. Returns the action hashref.
sub action($@)
{
  my $type = shift // die;
  my $h = { @_ };
  unless($type eq "DEFERRED") {
    my $time = $h->{time} // time;
    $h->{type} = $type;
    $h->{time} = $time;
    $h->{localtime} = timestamp($time, 'debug-iso');
    push @transaction_log, $h;
  }
  print_formatted("transaction", [ $h ], output_format => "tlog", no_header => 1, outfile => $tlog_fh) if($tlog_fh);
  print_formatted("transaction", [ $h ], output_format => "syslog", no_header => 1) if($syslog_enabled); # dirty hack, this calls syslog()
  return $h;
}
|
|
|
|
# Begin a transaction of the given $type: each action is logged with
# status "starting" (or "dryrun_starting") and remembered in the
# file-global $current_transaction for end_transaction(). Accepts a
# list of action hashrefs or a single flat hash. Nesting is a fatal
# programming error.
sub start_transaction($@)
{
  my $type = shift // die;
  my $time = time;
  die("start_transaction() while transaction is running") if($current_transaction);
  my @actions = (ref($_[0]) eq "HASH") ? @_ : { @_ }; # single action is not hashref
  $current_transaction = [];
  foreach (@actions) {
    push @$current_transaction, action($type, %$_, status => ($dryrun ? "dryrun_starting" : "starting"), time => $time);
  }
}
|
|
|
|
# Finish the currently running transaction: each remembered action is
# logged again with final status "success"/"ERROR" (prefixed "dryrun_"
# on dryrun) and the elapsed duration. $success may be a plain scalar
# (applied to all actions) or a coderef evaluated per action. The type
# must match start_transaction(); mismatch or missing transaction dies.
sub end_transaction($$)
{
  my $type = shift // die;
  my $success = shift; # scalar or coderef: if scalar, status is set for all current transitions
  my $time = time;
  die("end_transaction() while no transaction is running") unless($current_transaction);
  foreach (@$current_transaction) {
    die("end_transaction() has different type") unless($_->{type} eq $type);
    my $status = (ref($success) ? &{$success} ($_) : $success) ? "success" : "ERROR";
    $status = "dryrun_" . $status if($dryrun);
    action($type, %$_, status => $status, time => $time, duration => ($dryrun ? undef : ($time - $_->{time})));
  }
  $current_transaction = undef;
}
|
|
|
|
# Send a single info-level line to syslog; silently no-op unless syslog
# was enabled by init_transaction_log(). Errors from Sys::Syslog are
# discarded.
sub syslog($)
{
  return undef unless($syslog_enabled);
  my ($line) = @_;
  eval_quiet { Sys::Syslog::syslog("info", $line); };
}
|
|
|
|
# Return 1 if an executable named $cmd exists in some $ENV{PATH}
# directory, 0 otherwise. Dies if $cmd is undef.
sub check_exe($)
{
  my ($cmd) = @_;
  die unless(defined($cmd));
  for my $dir (split(":", $ENV{PATH})) {
    return 1 if( -x "$dir/$cmd" );
  }
  return 0;
}
|
|
|
|
# Build an mbuffer invocation for stream buffering, rate limiting and
# progress display, from the given options hash. Returns an empty list
# if none of the mbuffer features are requested, otherwise a cmd-pipe
# element: { cmd_text => "mbuffer ..." }.
sub stream_buffer_cmd_text($)
{
  my ($opts) = @_;
  my $rate_in   = $opts->{rate_limit_in} // $opts->{rate_limit}; # maximum read rate: b,k,M,G
  my $rate_out  = $opts->{rate_limit_out};                       # maximum write rate: b,k,M,G
  my $bufsize   = $opts->{stream_buffer};                        # b,k,M,G,% (default: 2%)
  my $blocksize = $opts->{blocksize};                            # defaults to 10k
  my $progress  = $opts->{show_progress};

  # mbuffer is only inserted if at least one feature is requested
  return () unless($rate_in || $rate_out || $bufsize || $progress);

  # NOTE: mbuffer takes defaults from /etc/mbuffer.rc
  # -v 1: disable warnings (they arrive asynchronously and cant be caught)
  my @cmd = ( "mbuffer", "-v", "1" );
  push @cmd, "-q" unless($progress);
  push @cmd, ( "-s", $blocksize )    if($blocksize);
  push @cmd, ( "-m", lc($bufsize) )  if($bufsize);
  push @cmd, ( "-r", lc($rate_in) )  if($rate_in);
  push @cmd, ( "-R", lc($rate_out) ) if($rate_out);
  return { cmd_text => join(' ', @cmd) };
}
|
|
|
|
# Build the compression (or, with $decompress set, decompression)
# command for a stream, from the compression definition $def (whose
# {key} indexes the file-global %compression table). Honors the
# level/threads/long/adapt settings; unsupported options are warned
# about once and dropped, and the level is clamped to the algorithm's
# supported range. Returns a cmd-pipe element: { cmd_text => ... }.
sub compress_cmd_text($;$)
{
  my $def = shift // die;
  my $decompress = shift;
  my $cc = $compression{$def->{key}};
  my @cmd = $decompress ? @{$cc->{decompress_cmd}} : @{$cc->{compress_cmd}};

  # compression level (compress direction only), clamped to [level_min, level_max]
  if((not $decompress) && defined($def->{level}) && ($def->{level} ne "default")) {
    my $level = $def->{level};
    if(!defined($cc->{level_min})) {
      WARN_ONCE "Compression level is not supported for '$cc->{name}', ignoring";
      $level = undef;
    } elsif($level < $cc->{level_min}) {
      WARN_ONCE "Compression level capped to minimum for '$cc->{name}': $cc->{level_min}";
      $level = $cc->{level_min};
    } elsif($level > $cc->{level_max}) {
      WARN_ONCE "Compression level capped to maximum for '$cc->{name}': $cc->{level_max}";
      $level = $cc->{level_max};
    }
    push @cmd, '-' . $level if(defined($level));
  }
  # thread count option (algorithm-specific flag from %compression)
  if(defined($def->{threads}) && ($def->{threads} ne "default")) {
    my $thread_opt = $cc->{threads};
    if($thread_opt) {
      push @cmd, $thread_opt . $def->{threads};
    }
    else {
      WARN_ONCE "Threading is not supported for '$cc->{name}', ignoring";
    }
  }
  # long distance matching window
  if(defined($def->{long}) && ($def->{long} ne "default")) {
    my $long_opt = $cc->{long};
    if($long_opt) {
      push @cmd, $long_opt . $def->{long};
    }
    else {
      WARN_ONCE "Long distance matching is not supported for '$cc->{name}', ignoring";
    }
  }
  # adaptive compression mode (flag only, no value)
  if(defined($def->{adapt})) {
    my $adapt_opt = $cc->{adapt};
    if($adapt_opt) {
      push @cmd, $adapt_opt;
    }
    else {
      WARN_ONCE "Adaptive compression is not supported for '$cc->{name}', ignoring";
    }
  }
  return { cmd_text => join(' ', @cmd) };
}
|
|
|
|
# Convenience wrapper: compress_cmd_text() in decompress direction.
sub decompress_cmd_text($)
{
  my ($def) = @_;
  return compress_cmd_text($def, 1);
}
|
|
|
|
# Join the {cmd_text} members of a cmd pipe with " | ". An element
# starting with ">" (output redirection) is appended with a plain
# space instead; it must not be the first element, and nothing may
# follow it (both are fatal programming errors).
sub _piped_cmd_txt($)
{
  my ($cmd_pipe) = @_;
  my $result = "";
  my $sep = "";
  my $closed;
  foreach my $text (map $_->{cmd_text}, @$cmd_pipe) {
    die if($closed);        # nothing may follow a redirection
    if($text =~ /^>/) {
      die unless($sep);     # redirection can't be the first element
      $closed = 1;
      $sep = ' ';
    }
    $result .= $sep . $text;
    $sep = ' | ';
  }
  return $result;
}
|
|
|
|
# Quote each argument for the POSIX shell: wrap in single quotes with
# every embedded ' replaced by '\''. Returns the quoted arguments
# joined by single spaces.
sub quoteshell(@) {
  my @quoted;
  foreach my $arg (@_) {
    (my $escaped = $arg) =~ s/'/'\\''/g;
    push @quoted, "'" . $escaped . "'";
  }
  return join(' ', @quoted);
}
|
|
|
|
# Render a command argument list to a shell-safe string. Plain string
# elements are used verbatim; hashref elements of form
# { unsafe => 'string' } are validated via check_file() (absolute,
# sanitized) and shell-quoted, with optional {prefix}/{postfix}
# appended outside the quotes. Strings failing validation either die
# (if no $offending arrayref is given) or are collected into
# @$offending for the caller to report.
sub _safe_cmd($;$)
{
  # hashes of form: "{ unsafe => 'string' }" get translated to "'string'"
  my $aref = shift;
  my $offending = shift;
  return join ' ', map {
    if(ref($_)) {
      my $prefix = $_->{prefix} // "";
      my $postfix = $_->{postfix} // "";
      $_ = $_->{unsafe};
      # a leading dash would be interpreted as a command option
      die "cannot quote leading dash for command: $_" if(/^-/);
      # NOTE: all files must be absolute
      my $file = check_file($_, { absolute => 1 }, sanitize => 1 );
      unless(defined($file)) {
        die "uncaught unsafe file: $_" unless($offending);
        push @$offending, $_;
      }
      $_ = $prefix . quoteshell($file // $_) . $postfix;
    }
    $_
  } @$aref;
}
|
|
|
|
# Assemble and execute a (possibly remote) shell pipeline.
#
# Accepts either a list of hashrefs (one per pipe stage) or a single
# flat hash. Recognized per-stage keys include: cmd (argument list for
# _safe_cmd), rsh (remote shell command), redirect_to_file /
# append_to_file, compress_stdin, compressed_ok, filter_stderr,
# fatal_stderr, non_destructive, large_output; stream_options is read
# from the first stage only. Stream buffers (mbuffer) and stream
# (de)compression stages are inserted around local/remote boundaries
# as configured.
#
# Returns an arrayref of chomped stdout lines on success, [] on dryrun
# for destructive commands, or undef on any error (details are left in
# the file-global @stderr).
#
# IPC::Open3 based implementation.
# NOTE: multiple filters are not supported!
sub run_cmd(@)
{
  my @cmd_pipe_in = (ref($_[0]) eq "HASH") ? @_ : { @_ };
  die unless(scalar(@cmd_pipe_in));
  @stderr = ();

  my $destructive = 0;
  my @cmd_pipe;
  my @unsafe_cmd;
  my $compressed = undef;    # compression def currently applied to the stream
  my $large_output;
  my $stream_options = $cmd_pipe_in[0]->{stream_options} // {};
  my @filter_stderr;
  my $fatal_stderr;
  my $has_rsh;

  # mark pipeline endpoints (used for buffer/compression placement)
  $cmd_pipe_in[0]->{stream_source} = 1;
  $cmd_pipe_in[-1]->{stream_sink} = 1;

  foreach my $href (@cmd_pipe_in)
  {
    die if(defined($href->{cmd_text}));

    push @filter_stderr, ((ref($href->{filter_stderr}) eq "ARRAY") ? @{$href->{filter_stderr}} : $href->{filter_stderr}) if($href->{filter_stderr});
    $fatal_stderr = $href->{fatal_stderr} if($href->{fatal_stderr});
    $destructive = 1 unless($href->{non_destructive});
    $has_rsh = $href->{rsh} if($href->{rsh});
    $large_output = 1 if($href->{large_output});

    # translate the stage into cmd_text
    if($href->{redirect_to_file}) {
      die unless($href->{stream_sink});
      $href->{cmd_text} = _safe_cmd([ '>', $href->{redirect_to_file} ], \@unsafe_cmd);
    }
    elsif($href->{append_to_file}) {
      die unless($href->{stream_sink});
      $href->{cmd_text} = _safe_cmd([ '>>', $href->{append_to_file} ], \@unsafe_cmd);
    }
    elsif($href->{compress_stdin}) {
      # does nothing if already compressed correctly by stream_compress
      if($compressed && ($compression{$compressed->{key}}->{format} ne $compression{$href->{compress_stdin}->{key}}->{format})) {
        # re-compress with different algorithm
        push @cmd_pipe, decompress_cmd_text($compressed);
        $compressed = undef;
      }
      unless($compressed) {
        push @cmd_pipe, compress_cmd_text($href->{compress_stdin});
        $compressed = $href->{compress_stdin};
      }

      next;
    }
    elsif($href->{cmd}) {
      $href->{cmd_text} = _safe_cmd($href->{cmd}, \@unsafe_cmd);
    }
    return undef unless(defined($href->{cmd_text}));

    my @rsh_compress_in;
    my @rsh_compress_out;
    my @decompress_in;

    # input stream compression: local, in front of rsh_cmd_pipe
    if($href->{rsh} && $stream_options->{stream_compress} && (not $href->{stream_source})) {
      if($compressed && ($compression{$compressed->{key}}->{format} ne $compression{$stream_options->{stream_compress}->{key}}->{format})) {
        # re-compress with different algorithm, should be avoided!
        push @rsh_compress_in, decompress_cmd_text($compressed);
        $compressed = undef;
      }
      if(not $compressed) {
        $compressed = $stream_options->{stream_compress};
        push @rsh_compress_in, compress_cmd_text($compressed);
      }
    }

    # decompress before a stage that cannot consume compressed input
    if($compressed && (not ($href->{compressed_ok}))) {
      push @decompress_in, decompress_cmd_text($compressed);
      $compressed = undef;
    }

    # output stream compression: remote, at end of rsh_cmd_pipe
    if($href->{rsh} && $stream_options->{stream_compress} && (not $href->{stream_sink}) && (not $compressed)) {
      $compressed = $stream_options->{stream_compress};
      push @rsh_compress_out, compress_cmd_text($compressed);
    }

    if($href->{rsh}) {
      # honor stream_buffer_remote, rate_limit_remote for stream source / sink
      my @rsh_stream_buffer_in = $href->{stream_sink} ? stream_buffer_cmd_text($stream_options->{rsh_sink}) : ();
      my @rsh_stream_buffer_out = $href->{stream_source} ? stream_buffer_cmd_text($stream_options->{rsh_source}) : ();

      # sub-pipeline executed on the remote side
      my @rsh_cmd_pipe = (
        @decompress_in,
        @rsh_stream_buffer_in,
        $href,
        @rsh_stream_buffer_out,
        @rsh_compress_out,
      );
      @decompress_in = ();

      # fixup redirect_to_file
      if((scalar(@rsh_cmd_pipe) == 1) && ($rsh_cmd_pipe[0]->{redirect_to_file} || $rsh_cmd_pipe[0]->{append_to_file})) {
        # NOTE: direct redirection in ssh command does not work: "ssh '> outfile'"
        # we need to assemble: "ssh 'cat > outfile'"
        unshift @rsh_cmd_pipe, { cmd_text => 'cat' };
      }

      my $rsh_text = _safe_cmd($href->{rsh}, \@unsafe_cmd);
      return undef unless(defined($rsh_text));

      # the whole remote sub-pipeline becomes one quoted ssh argument
      $href->{cmd_text} = $rsh_text . ' ' . quoteshell(_piped_cmd_txt(\@rsh_cmd_pipe));
    }

    # local stream_buffer, rate_limit and show_progress in front of stream sink
    my @stream_buffer_in = $href->{stream_sink} ? stream_buffer_cmd_text($stream_options->{local_sink}) : ();

    push @cmd_pipe, (
      @decompress_in,     # empty if rsh
      @stream_buffer_in,
      @rsh_compress_in,   # empty if not rsh
      $href,              # command or rsh_cmd_pipe
    );
  }

  my $cmd = _piped_cmd_txt(\@cmd_pipe);

  # refuse to run if any argument failed _safe_cmd() validation
  if(scalar(@unsafe_cmd)) {
    ERROR "Unsafe command `$cmd`", map "Offending string: \"$_\"", @unsafe_cmd;
    return undef;
  }

  # destructive commands are skipped on dryrun
  if($dryrun && $destructive) {
    DEBUG "### (dryrun) $cmd";
    return [];
  }
  DEBUG "### $cmd";

  # execute command
  my ($pid, $out_fh, $err_fh, @stdout);
  $err_fh = gensym;   # open3 needs a pre-created glob for stderr
  if(eval_quiet { $pid = open3(undef, $out_fh, $err_fh, $cmd); }) {
    chomp(@stdout = readline($out_fh));
    chomp(@stderr = readline($err_fh));
    waitpid($pid, 0);
    if($do_trace) {
      if($large_output) {
        TRACE "Command output lines=" . scalar(@stdout) . " (large_output, not dumped)";
      } else {
        TRACE map("[stdout] $_", @stdout);
      }
      TRACE map("[stderr] $_", @stderr);
    }
  }
  else {
    ERROR "Command execution failed ($!): `$cmd`";
    return undef;
  }

  # fatal errors
  if($? == -1) {
    ERROR "Command execution failed ($!): `$cmd`";
    return undef;
  }
  elsif ($? & 127) {
    my $signal = $? & 127;
    ERROR "Command execution failed (child died with signal $signal): `$cmd`";
    return undef;
  }
  my $exitcode = $? >> 8;

  # call hooks: fatal_stderr, filter_stderr
  if(($exitcode == 0) && $fatal_stderr) {
    $exitcode = -1 if(grep &{$fatal_stderr}(), @stderr);
  }
  foreach my $filter_fn (@filter_stderr) {
    @stderr = map { &{$filter_fn} ($exitcode); $_ // () } @stderr;
  }

  if($exitcode) {
    unshift @stderr, "sh: $cmd";
    if($has_rsh && ($exitcode == 255)) {
      # SSH returns exit status 255 if an error occurred (including
      # network errors, dns failures).
      unshift @stderr, "(note: option \"ssh_identity\" is not set, using ssh defaults)" unless(grep /^-i$/, @$has_rsh);
      unshift @stderr, "SSH command failed (exitcode=$exitcode)";
    } else {
      unshift @stderr, "Command execution failed (exitcode=$exitcode)";
    }
    DEBUG @stderr;
    return undef;
  }
  else {
    DEBUG "Command execution successful";
  }
  return \@stdout;
}
|
|
|
|
|
|
# stderr filter hook for btrfs commands; operates on $_ (one stderr
# line). Warns once if the output suggests an unsupported (too old)
# btrfs-progs, and strips the leading "ERROR: " prefix (we print our
# own). Note that this also affects ssh_filter_btrbk.sh error strings.
sub _btrfs_filter_stderr
{
  WARN_ONCE "Using unsupported btrfs-progs < v$BTRFS_PROGS_MIN"
    if(/^usage: / || /(unrecognized|invalid) option/);
  s/^ERROR: //;
}
|
|
|
|
|
|
# Run "btrfs filesystem show <path>" for $vol (locally or via rsh).
# Returns the raw output lines (arrayref), or undef on error.
sub btrfs_filesystem_show($)
{
  my $vol = shift || die;
  my $path = $vol->{PATH} // die;
  my @run_args = (
    cmd             => vinfo_cmd($vol, "btrfs filesystem show", { unsafe => $path } ),
    rsh             => vinfo_rsh($vol),
    non_destructive => 1,
    filter_stderr   => \&_btrfs_filter_stderr,
  );
  return run_cmd(@run_args);
}
|
|
|
|
|
|
# Run "btrfs filesystem df <path>" for $vol (locally or via rsh).
# Returns the raw output lines (arrayref), or undef on error.
sub btrfs_filesystem_df($)
{
  my $vol = shift || die;
  my $path = $vol->{PATH} // die;
  my @run_args = (
    cmd             => vinfo_cmd($vol, "btrfs filesystem df", { unsafe => $path }),
    rsh             => vinfo_rsh($vol),
    non_destructive => 1,
    filter_stderr   => \&_btrfs_filter_stderr,
  );
  return run_cmd(@run_args);
}
|
|
|
|
|
|
# Run "btrfs filesystem usage <path>" and parse the output into a
# hashref of detail items (device_size, device_allocated,
# device_unallocated, device_missing, device_used, free/free_min,
# data_ratio, metadata_ratio, global_reserve, ...). Aggregate "size"
# and "used" are derived by dividing the device values by data_ratio.
# Returns undef on execution or parse error.
sub btrfs_filesystem_usage($)
{
  my $vol = shift || die;
  my $path = $vol->{PATH} // die;
  my $ret = run_cmd( cmd => vinfo_cmd($vol, "btrfs filesystem usage", { unsafe => $path } ),
                     rsh => vinfo_rsh($vol),
                     non_destructive => 1,
                     filter_stderr => \&_btrfs_filter_stderr,
                   );
  unless(defined($ret)) {
    ERROR "Failed to fetch btrfs filesystem usage for: $vol->{PRINT}", @stderr;
    return undef;
  }
  # NOTE: a redundant duplicate "return undef unless(defined($ret))"
  # (unreachable after the error branch above) was removed here.

  my %detail;
  foreach(@$ret) {
    $detail{device_size} = $1, next if(/^\s+Device size:\s+(\S+)/);
    $detail{device_allocated} = $1, next if(/^\s+Device allocated:\s+(\S+)/);
    $detail{device_unallocated} = $1, next if(/^\s+Device unallocated:\s+(\S+)/);
    $detail{device_missing} = $1, next if(/^\s+Device missing:\s+(\S+)/);
    $detail{device_used} = $1, next if(/^\s+Used:\s+(\S+)/);
    @detail{qw(free free_min)} = ($1,$2), next if(/^\s+Free \(estimated\):\s+(\S+)\s+\(min: (\S+)\)/);
    $detail{data_ratio} = $1, next if(/^\s+Data ratio:\s+([0-9]+\.[0-9]+)/);
    $detail{metadata_ratio} = $1, next if(/^\s+Metadata ratio:\s+([0-9]+\.[0-9]+)/);
    $detail{used} = $1, next if(/^\s+Used:\s+(\S+)/);
    @detail{qw(global_reserve global_reserve_used)} = ($1,$2), next if(/^\s+Global reserve:\s+(\S+)\s+\(used: (\S+)\)/);
    TRACE "Failed to parse filesystem usage line \"$_\" for: $vol->{PRINT}" if($do_trace);
  }
  DEBUG "Parsed " . scalar(keys %detail) . " filesystem usage detail items: $vol->{PRINT}";

  # these keys are required for the aggregate calculation below
  foreach (qw(device_size device_used data_ratio)) {
    unless(defined($detail{$_})) {
      ERROR "Failed to parse filesystem usage detail (unsupported btrfs-progs) for: $vol->{PRINT}";
      return undef;
    }
  }

  # calculate aggregate size / usage
  if($detail{device_size} =~ /^([0-9]+\.[0-9]+)(.*)/) {
    $detail{size} = sprintf('%.2f%s', $1 / $detail{data_ratio}, $2);
  }
  if($detail{device_used} =~ /^([0-9]+\.[0-9]+)(.*)/) {
    $detail{used} = sprintf('%.2f%s', $1 / $detail{data_ratio}, $2);
  }

  TRACE(Data::Dumper->Dump([\%detail], ["btrfs_filesystem_usage($vol->{URL})"])) if($do_trace && $do_dumper);
  return \%detail;
}
|
|
|
|
|
|
# returns hashref with keys: (uuid parent_uuid id gen cgen top_level)
|
|
# for btrfs root, returns at least: (id is_root)
|
|
# for btrfs-progs >= 4.1, also returns key: "received_uuid"
|
|
# if present, also returns (unvalidated) keys: (name creation_time flags)
|
|
# Run "btrfs subvolume show" for $vol (optionally by rootid, requires
# btrfs-progs >= 4.12) and parse the version-dependent output into a
# detail hashref (uuid, parent_uuid, received_uuid, id, gen, cgen,
# top_level, readonly, ...). Sets {is_root} for btrfs root (id=5).
# Returns undef on execution or parse/validation error.
sub btrfs_subvolume_show($;@)
{
  my $vol = shift || die;
  my %opts = @_;
  my @cmd_options;
  push(@cmd_options, '--rootid=' . $opts{rootid}) if($opts{rootid}); # btrfs-progs >= 4.12
  my $path = $vol->{PATH} // die;
  my $ret = run_cmd(cmd => vinfo_cmd($vol, "btrfs subvolume show", @cmd_options, { unsafe => $path }),
                    rsh => vinfo_rsh($vol),
                    non_destructive => 1,
                    filter_stderr => \&_btrfs_filter_stderr,
                   );

  return undef unless(defined($ret));

  unless(scalar(@$ret)) {
    ERROR "Failed to parse subvolume detail (unsupported btrfs-progs) for: $vol->{PRINT}";
    return undef;
  }

  # NOTE: the first line starts with a path:
  # - btrfs-progs < 4.12 prints the full (absolute, resolved) path
  # - btrfs-progs >= 4.12 prints the relative path to btrfs root (or "/" if it is the root)

  my %detail;
  if($ret->[0] =~ / is (btrfs root|toplevel subvolume)$/) {
    # btrfs-progs < 4.4 prints: "<subvol> is btrfs root"
    # btrfs-progs >= 4.4 prints: "<subvol> is toplevel subvolume"
    # btrfs-progs >= 4.8.3 does not enter here, as output shares format with regular subvolumes
    $detail{id} = 5;
  }
  else {
    # map output labels (which vary across btrfs-progs versions) to detail keys
    my %trans = (
      "Name"              => "name",
      "uuid"              => "uuid",
      "UUID"              => "uuid",            # btrfs-progs >= 4.1
      "Parent uuid"       => "parent_uuid",
      "Parent UUID"       => "parent_uuid",     # btrfs-progs >= 4.1
      "Received UUID"     => "received_uuid",   # btrfs-progs >= 4.1
      "Creation time"     => "creation_time",
      "Object ID"         => "id",
      "Subvolume ID"      => "id",              # btrfs-progs >= 4.1
      "Generation (Gen)"  => "gen",
      "Generation"        => "gen",             # btrfs-progs >= 4.1
      "Gen at creation"   => "cgen",
      "Parent"            => "parent_id",
      "Parent ID"         => "parent_id",       # btrfs-progs >= 4.1
      "Top Level"         => "top_level",
      "Top level ID"      => "top_level",       # btrfs-progs >= 4.1
      "Flags"             => "flags",
      "Send transid"      => "send_transid",    # btrfs-progs >= 5.14.2
      "Send time"         => "send_time",       # btrfs-progs >= 5.14.2
      "Receive transid"   => "receive_transid", # btrfs-progs >= 5.14.2
      "Receive time"      => "receive_time",    # btrfs-progs >= 5.14.2
    );
    foreach(@$ret) {
      next unless /^\s+(.+):\s+(.*)$/;
      my ($key, $value) = ($1, $2);
      if($trans{$key}) {
        $detail{$trans{$key}} = $value;
      } else {
        DEBUG "Ignoring subvolume detail \"$key: $value\" for: $vol->{PRINT}";
      }
    }
    DEBUG "Parsed " . scalar(keys %detail) . " subvolume detail items: $vol->{PRINT}";

    # NOTE: as of btrfs-progs v4.6.1, flags are either "-" or "readonly"
    $detail{readonly} = ($detail{flags} =~ /readonly/) ? 1 : 0 if($detail{flags});

    # validate required keys
    unless((defined($detail{parent_uuid}) && (($detail{parent_uuid} eq '-') || ($detail{parent_uuid} =~ /^$uuid_match$/))) &&
           (defined($detail{id}) && ($detail{id} =~ /^\d+$/) && ($detail{id} >= 5)) &&
           (defined($detail{gen}) && ($detail{gen} =~ /^\d+$/)) &&
           (defined($detail{cgen}) && ($detail{cgen} =~ /^\d+$/)) &&
           (defined($detail{top_level}) && ($detail{top_level} =~ /^\d+$/)) &&
           (defined($detail{readonly})))
    {
      ERROR "Failed to parse subvolume detail (unsupported btrfs-progs) for: $vol->{PRINT}";
      return undef;
    }

    # NOTE: filesystems created with btrfs-progs < 4.16 have no UUID for subvolid=5,
    # assert {uuid} is either valid or undef
    if(defined($detail{uuid}) && ($detail{uuid} !~ /^$uuid_match$/)) {
      if($detail{id} == 5) {
        DEBUG "No UUID on btrfs root (id=5): $vol->{PRINT}";
      } else {
        ERROR "Failed to parse subvolume detail (unsupported btrfs-progs) for: $vol->{PRINT}";
        return undef;
      }
      delete $detail{uuid};
    }

    # NOTE: received_uuid is not required here, as btrfs-progs < 4.1 does not give us that information.
    # no worries, we get this from btrfs_subvolume_list() for all subvols.
    if(defined($detail{received_uuid}) && ($detail{received_uuid} ne '-') && ($detail{received_uuid} !~ /^$uuid_match$/)) {
      ERROR "Failed to parse subvolume detail (unsupported btrfs-progs) for: $vol->{PRINT}";
      return undef;
    }

    VINFO(\%detail, "detail") if($loglevel >=4);
  }

  # sanity check: the returned detail must match the requested rootid
  if($opts{rootid} && ($detail{id} != $opts{rootid})) {
    ERROR "Failed to parse subvolume detail (rootid mismatch) for: $vol->{PRINT}";
    return undef;
  }

  if($detail{id} == 5) {
    DEBUG "Found btrfs root: $vol->{PRINT}";
    $detail{is_root} = 1;
  }

  return \%detail;
}
|
|
|
|
|
|
# Fetch the set of readonly subvolume ids below $vol, via
# "btrfs subvolume list -a -r". Returns a hashref { <id> => 1, ... },
# or undef on execution or parse error.
sub btrfs_subvolume_list_readonly_flag($)
{
  my $vol = shift || die;
  my $path = $vol->{PATH} // die;

  my $ret = run_cmd(cmd => vinfo_cmd($vol, "btrfs subvolume list", '-a', '-r', { unsafe => $path } ),
                    rsh => vinfo_rsh($vol),
                    non_destructive => 1,
                    filter_stderr => \&_btrfs_filter_stderr,
                   );
  return undef unless(defined($ret));

  my %ro;
  foreach(@$ret) {
    # only the leading ID is needed; the rest of the line is validated
    unless(/^ID\s+([0-9]+)\s+gen\s+[0-9]+\s+top level\s+[0-9]+\s+path\s/) {
      ERROR "Failed to parse subvolume list (unsupported btrfs-progs) for: $vol->{PRINT}";
      DEBUG "Offending line: $_";
      return undef;
    }
    $ro{$1} = 1;
  }
  DEBUG "Parsed " . scalar(keys %ro) . " readonly subvolumes for filesystem at: $vol->{PRINT}";
  return \%ro;
}
|
|
|
|
|
|
# List all subvolumes of the filesystem containing $vol, via
# "btrfs subvolume list -a -c -u -q -R" (optionally restricted with
# subvol_only => 1 or deleted_only => 1). Returns an arrayref of node
# hashes with keys (id gen cgen top_level parent_uuid received_uuid
# uuid path), or undef on execution or parse error.
sub btrfs_subvolume_list($;@)
{
  my $vol = shift || die;
  my %opts = @_;
  my $path = $vol->{PATH} // die;
  my @filter_options = ('-a');
  push(@filter_options, '-o') if($opts{subvol_only});
  push(@filter_options, '-d') if($opts{deleted_only});

  # NOTE: btrfs-progs <= 3.17 do NOT support the '-R' flag.
  # NOTE: Support for btrfs-progs <= 3.17 has been dropped in
  # btrbk-0.23, the received_uuid flag very essential!
  my @display_options = ('-c', '-u', '-q', '-R');
  my $ret = run_cmd(cmd => vinfo_cmd($vol, "btrfs subvolume list", @filter_options, @display_options, { unsafe => $path } ),
                    rsh => vinfo_rsh($vol),
                    non_destructive => 1,
                    filter_stderr => \&_btrfs_filter_stderr,
                   );
  return undef unless(defined($ret));

  my @nodes;
  foreach(@$ret)
  {
    my %node;
    # NOTE: btrfs-progs >= 4.13.2 pads uuid's with 36 whitespaces
    unless(/^ID \s+ ([0-9]+) \s+
            gen \s+ ([0-9]+) \s+
            cgen \s+ ([0-9]+) \s+
            top\ level \s+ ([0-9]+) \s+
            parent_uuid \s+ ([0-9a-f-]+) \s+
            received_uuid \s+ ([0-9a-f-]+) \s+
            uuid \s+ ([0-9a-f-]+) \s+
            path \s+ (.+) $/x) {
      ERROR "Failed to parse subvolume list (unsupported btrfs-progs) for: $vol->{PRINT}";
      DEBUG "Offending line: $_";
      return undef;
    }
    %node = (
      id => $1,
      gen => $2,
      cgen => $3,
      top_level => $4,
      parent_uuid => $5, # note: parent_uuid="-" if no parent
      received_uuid => $6,
      uuid => $7,
      path => $8 # btrfs path, NOT filesystem path
    );

    # NOTE: "btrfs subvolume list <path>" prints <FS_TREE> prefix only if
    # the subvolume is reachable within <path>. (as of btrfs-progs-3.18.2)
    #
    # NOTE: Be prepared for this to change in btrfs-progs!
    $node{path} =~ s/^<FS_TREE>\///; # remove "<FS_TREE>/" portion from "path".

    push @nodes, \%node;
  }
  DEBUG "Parsed " . scalar(@nodes) . " total subvolumes for filesystem at: $vol->{PRINT}";

  return \@nodes;
}
|
|
|
|
|
|
# Build the complete subvolume node list for the filesystem containing
# $vol: subvolume list plus per-node readonly flags, with the btrfs
# root (id=5) node prepended. Returns an arrayref of node hashes, or
# undef on error.
sub btrfs_subvolume_list_complete($)
{
  my $vol = shift || die;

  # fetch subvolume list
  my $nodes = btrfs_subvolume_list($vol);
  return undef unless($nodes);

  # fetch readonly flag
  # NOTE: the only way to get "readonly" flag is via a second call to "btrfs subvol list" with the "-r" option (as of btrfs-progs v4.3.1)
  my $ro = btrfs_subvolume_list_readonly_flag($vol);
  return undef unless(defined($ro));
  foreach (@$nodes) {
    $_->{readonly} = $ro->{$_->{id}} // 0;
  }

  # btrfs root (id=5) is not provided by btrfs_subvolume_list above, read it separately (best-effort)
  my $tree_root = btrfs_subvolume_show($vol, rootid => 5);
  unless($tree_root) {
    # this is not an error:
    # - btrfs-progs < 4.12 does not support rootid lookup
    # - UUID can be missing if filesystem was created with btrfs-progs < 4.16
    DEBUG "Failed to fetch subvolume detail (old btrfs-progs?) for btrfs root (id=5) on: $vol->{PRINT}";
    $tree_root = { id => 5, is_root => 1 };
  }
  unshift(@$nodes, $tree_root);

  return $nodes;
}
|
|
|
|
|
|
# Run "btrfs subvolume find-new <path> <lastgen>" and aggregate the
# per-extent output into per-file statistics. Returns a hashref:
#   files          => { <name> => { len, new, gen => {...}, flags => {...} } }
#   transid_marker => transaction id marker from the output (if any)
#   parse_errors   => number of unparseable output lines
# Returns undef if the command itself failed.
sub btrfs_subvolume_find_new($$;$)
{
  my $vol = shift || die;
  my $path = $vol->{PATH} // die;
  my $lastgen = shift // die;
  my $ret = run_cmd(cmd => vinfo_cmd($vol, "btrfs subvolume find-new", { unsafe => $path }, $lastgen ),
                    rsh => vinfo_rsh($vol),
                    non_destructive => 1,
                    filter_stderr => \&_btrfs_filter_stderr,
                    large_output => 1,
                   );
  unless(defined($ret)) {
    ERROR "Failed to fetch modified files for: $vol->{PRINT}", @stderr;
    return undef;
  }

  my %files;
  my $parse_errors = 0;
  my $transid_marker;
  foreach(@$ret)
  {
    if(/^inode \S+ file offset (\S+) len (\S+) disk start \S+ offset \S+ gen (\S+) flags (\S+) (.+)$/) {
      my $file_offset = $1;
      my $len = $2;
      my $gen = $3;
      my $flags = $4;
      my $name = $5;
      # accumulate extent lengths per file
      $files{$name}->{len} += $len;
      # an extent at offset 0 indicates a new (or rewritten) file
      $files{$name}->{new} = 1 if($file_offset == 0);
      $files{$name}->{gen}->{$gen} = 1; # count the generations
      if($flags ne "NONE") {
        $files{$name}->{flags}{$_} = 1 foreach split(/\|/, $flags);
      }
    }
    elsif(/^transid marker was (\S+)$/) {
      $transid_marker = $1;
    }
    else {
      ERROR "Failed to parse output from `btrfs subvolume find-new`:", $_;
      $parse_errors++;
    }
  }
  ERROR "Failed to parse $parse_errors lines from `btrfs subvolume find-new`" if($parse_errors);

  return { files => \%files,
           transid_marker => $transid_marker,
           parse_errors => $parse_errors,
         };
}
|
|
|
|
|
|
# returns $target, or undef on error
|
|
# Create a readonly btrfs snapshot of $svol at $target_vol, wrapped in
# a "snapshot" transaction. Returns $target_vol on success, undef on
# error.
sub btrfs_subvolume_snapshot($$)
{
  my $svol = shift || die;
  my $target_vol = shift // die;
  my $target_path = $target_vol->{PATH} // die;
  my $src_path = $svol->{PATH} // die;
  INFO "[snapshot] source: $svol->{PRINT}";
  INFO "[snapshot] target: $target_vol->{PRINT}";
  start_transaction("snapshot",
                    vinfo_prefixed_keys("target", $target_vol),
                    vinfo_prefixed_keys("source", $svol),
                   );
  # '-r' creates a readonly snapshot (required for send/receive)
  my $ret = run_cmd(cmd => vinfo_cmd($svol, "btrfs subvolume snapshot", '-r', { unsafe => $src_path }, { unsafe => $target_path } ),
                    rsh => vinfo_rsh($svol),
                    filter_stderr => \&_btrfs_filter_stderr,
                   );
  end_transaction("snapshot", defined($ret));
  unless(defined($ret)) {
    ERROR "Failed to create snapshot: $svol->{PRINT} -> $target_path", @stderr;
    return undef;
  }
  return $target_vol;
}
|
|
|
|
|
|
# Delete a subvolume (or, for "raw" targets, the raw backup file and
# its ".info" companion), wrapped in a transaction (type from
# $opts{type}, default "delete"). $opts{commit} adds --commit-each to
# "btrfs subvolume delete". Returns $vol on success, undef on error.
sub btrfs_subvolume_delete($@)
{
  my $vol = shift // die;
  my %opts = @_;
  my $target_type = $vol->{node}{TARGET_TYPE} || "";
  my $ret;
  INFO "[delete] target: $vol->{PRINT}";
  start_transaction($opts{type} // "delete", vinfo_prefixed_keys("target", $vol));
  if($target_type eq "raw") {
    # raw targets are plain files (possibly split into .split_?? parts)
    $ret = run_cmd(cmd => [ 'rm', '-f',
                            { unsafe => $vol->{PATH}, postfix => ($vol->{node}{BTRBK_RAW}{split} && ".split_??") },
                            { unsafe => $vol->{PATH}, postfix => ".info" },
                          ],
                   rsh => vinfo_rsh($vol),
                  );
  }
  else {
    my @options;
    push @options, "--commit-each" if($opts{commit});
    $ret = run_cmd(cmd => vinfo_cmd($vol, "btrfs subvolume delete", @options, { unsafe => $vol->{PATH} } ),
                   rsh => vinfo_rsh($vol),
                   fatal_stderr => sub { m/^ERROR: /; }, # probably not needed, "btrfs sub delete" returns correct exit status
                   filter_stderr => \&_btrfs_filter_stderr,
                  );
  }
  end_transaction($opts{type} // "delete", defined($ret));

  unless(defined($ret)) {
    ERROR "Failed to delete subvolume: $vol->{PRINT}", @stderr;
    return undef;
  }

  return $vol;
}
|
|
|
|
|
|
# Destroy the level-0 qgroup ("0/<subvol_id>") associated with a
# subvolume, wrapped in a transaction (type from $opts{type}, default
# "qgroup_destroy"). Requires $vol->{node}{id} to be known. Returns
# $vol on success, undef on error.
sub btrfs_qgroup_destroy($@)
{
  my $vol = shift // die;
  my %opts = @_;
  my $vol_id = $vol->{node}{id};
  unless($vol_id) {
    ERROR "Unknown subvolume_id for: $vol->{PRINT}";
    return undef;
  }
  my $path = $vol->{PATH} // die;
  # level-0 qgroups mirror subvolume ids: "0/<id>"
  my $qgroup_id = "0/$vol_id";
  INFO "[qgroup-destroy] qgroup_id: $qgroup_id";
  INFO "[qgroup-destroy] subvolume: $vol->{PRINT}";
  start_transaction($opts{type} // "qgroup_destroy",
                    vinfo_prefixed_keys("target", $vol));
  my $ret = run_cmd(cmd => vinfo_cmd($vol, "btrfs qgroup destroy", $qgroup_id, { unsafe => $path }),
                    rsh => vinfo_rsh($vol),
                    filter_stderr => \&_btrfs_filter_stderr,
                   );
  end_transaction($opts{type} // "qgroup_destroy", defined($ret));
  unless(defined($ret)) {
    ERROR "Failed to destroy qgroup \"$qgroup_id\" for subvolume: $vol->{PRINT}", @stderr;
    return undef;
  }
  return $vol;
}
|
|
|
|
# Assemble the option list for "btrfs send": incremental parent (-p),
# clone sources (-c), protocol version (--proto) and compressed-data
# passthrough (--compressed-data), from the target's configuration.
# Returns an arrayref suitable for vinfo_cmd(). Note: the $snapshot
# argument is currently unused.
sub _btrfs_send_options($$;$$)
{
  my $snapshot = shift;
  my $target = shift;
  my $parent = shift;
  my $clone_src = shift // [];
  my $send_protocol = config_key($target, "send_protocol");
  my $send_compressed_data = config_key($target, "send_compressed_data");
  my @send_options;
  push(@send_options, '-p', { unsafe => $parent->{PATH} } ) if($parent);
  push(@send_options, '-c', { unsafe => $_ } ) foreach(map { $_->{PATH} } @$clone_src);
  push(@send_options, '--proto', $send_protocol ) if($send_protocol);
  push(@send_options, '--compressed-data' ) if($send_compressed_data);
  #push(@send_options, '-v') if($loglevel >= 3);
  return \@send_options;
}
|
|
|
|
# Transfer snapshot $snapshot to $target via "btrfs send | btrfs receive",
# optionally incremental against $parent ("-p") and using @$clone_src
# clone sources ("-c").
#
# If $ret_vol_received is a scalar ref, it is set to the vinfo of the
# (to-be-created) target subvolume before any command runs, so callers
# can refer to it even on failure.
#
# After the pipe completes, the target subvolume metadata is re-read and
# plausibility-checked (readonly flag, received_uuid, parent_uuid); an
# incomplete ("garbled") subvolume left behind by a failed receive is
# deleted. Returns 1 on success, undef on error.
sub btrfs_send_receive($$;$$$)
{
  my $snapshot = shift || die;
  my $target = shift || die;
  my $parent = shift;
  my $clone_src = shift // [];
  my $ret_vol_received = shift;

  my $vol_received = vinfo_child($target, $snapshot->{NAME});
  $$ret_vol_received = $vol_received if(ref $ret_vol_received);

  print STDOUT "Creating backup: $vol_received->{PRINT}\n" if($show_progress && (not $dryrun));

  INFO "[send/receive] target: $vol_received->{PRINT}";
  INFO "[send/receive] source: $snapshot->{PRINT}";
  INFO "[send/receive] parent: $parent->{PRINT}" if($parent);
  INFO "[send/receive] clone-src: $_->{PRINT}" foreach(@$clone_src);

  my $stream_options = config_stream_hash($snapshot, $target);
  my $compat_ignore_err = config_key_lru($target, "compat", "ignore_receive_errors");

  my $send_options = _btrfs_send_options($snapshot, $target, $parent, $clone_src);
  my @receive_options;
  push(@receive_options, '--max-errors=0') if($compat_ignore_err);

  # Build the command pipe: "btrfs send ..." piped into "btrfs receive ...".
  my @cmd_pipe;
  push @cmd_pipe, {
    cmd => vinfo_cmd($snapshot, "btrfs send", @$send_options, { unsafe => $snapshot->{PATH} }),
    rsh => vinfo_rsh($snapshot, disable_compression => $stream_options->{stream_compress}),
    stream_options => $stream_options,
    # "At subvol ..." is informational output of "btrfs send", not an error
    filter_stderr => [ \&_btrfs_filter_stderr, sub { $_ = undef if(/^At subvol/) } ],
  };

  push @cmd_pipe, {
    cmd => vinfo_cmd($target, "btrfs receive", @receive_options, { unsafe => $target->{PATH} . '/' } ),
    rsh => vinfo_rsh($target, disable_compression => $stream_options->{stream_compress}),
    fatal_stderr => sub {
      # NOTE: btrfs-progs < 4.11: if "btrfs send" fails, "btrfs receive" returns 0!
      if($compat_ignore_err && s/^ERROR: (.*)//) {
        WARN "Ignoring btrfs receive error (compat=ignore_receive_errors): $1";
      }
      m/^ERROR: /;
    },
  };

  my $send_receive_error = 0;
  start_transaction("send-receive",
                    vinfo_prefixed_keys("target", $vol_received),
                    vinfo_prefixed_keys("source", $snapshot),
                    vinfo_prefixed_keys("parent", $parent),
                   );
  my $ret = run_cmd(@cmd_pipe);
  my @cmd_err;
  unless(defined($ret)) {
    @cmd_err = @stderr; # save for later
    $send_receive_error = 1;
  }

  # Read in target subvolume metadata (btrfs subvolume show):
  # Double checking the output increases robustness against exotic
  # revisions of external commands (btrfs-progs, pv, xz, lz4, ...).
  #
  # NOTE: we cannot rely on the underlying shell to have
  # "pipefail" functionality.
  #
  # NOTE: btrfs-progs < 4.11:
  # "cat /dev/null | btrfs receive" returns with exitcode=0 and no
  # error message, having the effect that silently no subvolume is
  # created if any command in @cmd_pipe fail.
  my $is_garbled;
  if($dryrun) {
    INFO "[send/receive] (dryrun, skip) checking target metadata: $vol_received->{PRINT}";
  }
  else {
    INFO "[send/receive] checking target metadata: $vol_received->{PRINT}";
    my $detail = btrfs_subvolume_show($vol_received);
    if(defined($detail)) {
      unless($send_receive_error) {
        # plausibility checks on target detail
        unless($detail->{readonly}) {
          push @cmd_err, "target is not readonly: $vol_received->{PRINT}";
          $send_receive_error = 1;
        }
        if($detail->{received_uuid} && ($detail->{received_uuid} eq '-')) {
          # NOTE: received_uuid is not in @required_keys (needs btrfs-progs >= 4.1 (BTRFS_PROGS_MIN))
          # so we only check it if it's really present
          push @cmd_err, "received_uuid is not set on target: $vol_received->{PRINT}";
          $send_receive_error = 1;
        }
        if($parent && ($detail->{parent_uuid} eq '-')) {
          push @cmd_err, "parent_uuid is not set on target: $vol_received->{PRINT}";
          $send_receive_error = 1;
        }
        if((not $parent) && ($detail->{parent_uuid} ne '-')) {
          push @cmd_err, "parent_uuid is set on target: $vol_received->{PRINT}";
          $send_receive_error = 1;
        }
      }

      # incomplete received (garbled) subvolumes are not readonly and have no received_uuid
      $is_garbled = ((not $detail->{readonly}) && defined($detail->{received_uuid}) && ($detail->{received_uuid} eq '-'));
    }
    else {
      push @cmd_err, "failed to check target subvolume: $vol_received->{PRINT}", @stderr;
      $send_receive_error = 1;
    }
  }

  end_transaction("send-receive", not $send_receive_error);

  if($send_receive_error) {
    ERROR "Failed to send/receive subvolume: $snapshot->{PRINT}" . ($parent ? " [$parent->{PATH}]" : "") . " -> $vol_received->{PRINT}", @cmd_err;
  }

  if($is_garbled) {
    # NOTE: btrfs-progs does not delete incomplete received (garbled) subvolumes,
    # we need to do this by hand.
    # TODO: remove this as soon as btrfs-progs handle receive errors correctly.
    if(btrfs_subvolume_delete($vol_received, commit => "after", type => "delete_garbled")) {
      WARN "Deleted partially received (garbled) subvolume: $vol_received->{PRINT}";
    } else {
      WARN "Deletion of partially received (garbled) subvolume failed, assuming clean environment: $vol_received->{PRINT}";
    }
  }
  return $send_receive_error ? undef : 1;
}
|
|
|
|
|
|
# Send subvolume $source to a raw image file below target directory
# $target ("btrfs send > file"), optionally incremental against $parent,
# with optional compression ("raw_target_compress"), encryption
# ("raw_target_encrypt": gpg or openssl_enc) and splitting
# ("raw_target_split") as configured on the target.
#
# A sidecar "<file>.info" is written before the transfer (INCOMPLETE=1),
# and amended with INCOMPLETE=0 after the target file has been verified
# to exist with non-zero size.
#
# If $ret_vol_received is a scalar ref, it is always set to the vinfo of
# the target file before any command runs; $ret_raw_info (scalar ref)
# receives the raw_info hash. Returns 1 on success, undef on error.
sub btrfs_send_to_file($$$;$$)
{
  my $source = shift || die;
  my $target = shift || die;
  my $parent = shift;
  my $ret_vol_received = shift;
  my $ret_raw_info = shift;
  my $target_path = $target->{PATH} // die;
  my $parent_uuid = $parent ? $parent->{node}{uuid} : undef ;
  my $received_uuid = $source->{node}{uuid};
  die unless($received_uuid);
  die if($parent && !$parent_uuid);

  # prepare raw_info (for vinfo_inject_child)
  my %raw_info = (
    TYPE => 'raw',
    RECEIVED_UUID => $received_uuid,
    INCOMPLETE => 1,
  );

  my $target_filename = $source->{NAME} || die;
  $target_filename .= ".btrfs";

  my $compress = config_compress_hash($target, "raw_target_compress");
  my $encrypt = config_encrypt_hash($target, "raw_target_encrypt");
  my $split = config_key($target, "raw_target_split");
  my $stream_options = config_stream_hash($source, $target);

  # make sure we dont re-compress, override "stream_compress" with "raw_target_compress"
  $stream_options->{stream_compress} = $compress if($compress);

  # command pipe starts with "btrfs send"; compression / encryption /
  # splitting stages are appended below as configured
  my $send_options = _btrfs_send_options($source, $target, $parent);
  my @cmd_pipe;
  push @cmd_pipe, {
    cmd => vinfo_cmd($source, "btrfs send", @$send_options, { unsafe => $source->{PATH} } ),
    rsh => vinfo_rsh($source, disable_compression => $stream_options->{stream_compress}),
    stream_options => $stream_options,
    filter_stderr => [ \&_btrfs_filter_stderr, sub { $_ = undef if(/^At subvol/) } ],
    fatal_stderr => sub { m/^ERROR: /; },
  };

  if($compress) {
    $raw_info{compress} = $compression{$compress->{key}}->{format};
    $target_filename .= '.' . $compression{$compress->{key}}->{format};
    push @cmd_pipe, { compress_stdin => $compress }; # does nothing if already compressed by stream_compress
  }
  if($encrypt) {
    $target_filename .= ($encrypt->{type} eq "gpg") ? '.gpg' : '.encrypted';
  }

  # NOTE: $ret_vol_received must always be set when function returns!
  my $vol_received = vinfo_child($target, $target_filename);
  $$ret_vol_received = $vol_received if(ref $ret_vol_received);

  if($encrypt) {
    $raw_info{encrypt} = $encrypt->{type};

    if($encrypt->{type} eq "gpg") {
      # NOTE: We set "--no-random-seed-file" since one of the btrbk
      # design principles is to never create any files unasked. Enabling
      # "--no-random-seed-file" creates ~/.gnupg/random_seed, and as
      # such depends on $HOME to be set correctly (which e.g. is set to
      # "/" by some cron daemons). From gpg2(1) man page:
      #   --no-random-seed-file  GnuPG uses a file to store its
      #   internal random pool over invocations This makes random
      #   generation faster; however sometimes write operations are not
      #   desired. This option can be used to achieve that with the cost
      #   of slower random generation.
      my @gpg_options = ( '--batch', '--no-tty', '--no-random-seed-file', '--trust-model', 'always' );
      push @gpg_options, ( '--compress-algo', 'none' ) if($compress); # NOTE: if --compress-algo is not set, gpg might still compress according to OpenPGP standard.
      push(@gpg_options, ( '--no-default-keyring', '--keyring', { unsafe => $encrypt->{keyring} } )) if($encrypt->{keyring});
      if($encrypt->{recipient}) {
        push(@gpg_options, '--no-default-recipient');
        push(@gpg_options, map +( '--recipient', $_ ), @{$encrypt->{recipient}});
      }
      push @cmd_pipe, {
        cmd => [ 'gpg', @gpg_options, '--encrypt' ],
        compressed_ok => ($compress ? 1 : 0),
      };
    }
    elsif($encrypt->{type} eq "openssl_enc") {
      # encrypt using "openssl enc"
      $raw_info{cipher} = $encrypt->{ciphername};

      # NOTE: iv is always generated locally!
      my $iv_size = $encrypt->{iv_size};
      my $iv;
      if($iv_size) {
        INFO "Generating iv for openssl encryption (cipher=$encrypt->{ciphername})";
        $iv = system_urandom($iv_size, 'hex');
        unless($iv) {
          ERROR "Failed to generate IV for openssl_enc: $source->{PRINT}";
          return undef;
        }
        $raw_info{iv} = $iv;
      }

      # encryption key: either from a keyfile (read by the remote/local
      # shell at execution time), or from a kdf backend (session key)
      my $encrypt_key;
      if($encrypt->{keyfile}) {
        if($encrypt->{kdf_backend}) {
          WARN "Both openssl_keyfile and kdf_backend are configured, ignoring kdf_backend!";
        }
        $encrypt_key = '$(cat ' . quoteshell($encrypt->{keyfile}) . ')';
      }
      elsif($encrypt->{kdf_backend}) {
        if($encrypt->{kdf_keygen_each}) {
          # fresh key for every backup file: discard any cached session key
          $kdf_session_key = undef;
          %kdf_vars = ();
        }
        if($kdf_session_key) {
          INFO "Reusing session key for: $vol_received->{PRINT}";
        }
        else {
          # run kdf backend, set session key and vars
          DEBUG "Generating session key for: $vol_received->{PRINT}";
          my $key_target_text = $encrypt->{kdf_keygen_each} ? "\"$vol_received->{PRINT}\"" : "all raw backups";

          print STDOUT "\nGenerate session key for $key_target_text:\n";
          my $kdf_values = run_cmd(cmd => [ { unsafe => $encrypt->{kdf_backend} }, $encrypt->{kdf_keysize} ],
                                   non_destructive => 1,
                                  );
          unless(defined($kdf_values)) {
            ERROR "Failed to generate session key for $key_target_text", @stderr;
            return undef;
          }

          # parse backend output: "KEY=<hex>" plus optional "key=value"
          # lines which are propagated to the .info file as "kdf_<key>"
          foreach(@$kdf_values) {
            chomp;
            next if /^\s*$/; # ignore empty lines
            # bugfix: character class was "[0-9a-fA-f]" (range A-f also
            # matches "G-Z[\]^_`"), accepting non-hex session keys
            if(/^KEY=([0-9a-fA-F]+)/) {
              $kdf_session_key = $1;
            }
            elsif(/^([a-z_]+)=($raw_info_value_match)/) {
              my $info_key = 'kdf_' . $1;
              my $info_val = $2;
              DEBUG "Adding raw_info from kdf_backend: $info_key=$info_val";
              $kdf_vars{$info_key} = $info_val;
            }
            else {
              ERROR "Ambiguous line from kdf_backend: $encrypt->{kdf_backend}";
              return undef;
            }
          }
          unless($kdf_session_key && (length($kdf_session_key) == ($encrypt->{kdf_keysize} * 2))) {
            ERROR "Ambiguous key value from kdf_backend: $encrypt->{kdf_backend}";
            return undef;
          }
          INFO "Generated session key for: $vol_received->{PRINT}";
        }
        $encrypt_key = $kdf_session_key;
        %raw_info = ( %kdf_vars, %raw_info );
      }

      my @openssl_options = (
        '-' . $encrypt->{ciphername},
        '-K', $encrypt_key,
      );
      push @openssl_options, ('-iv', $iv) if($iv);

      push @cmd_pipe, {
        cmd => [ 'openssl', 'enc', '-e', @openssl_options ],
        compressed_ok => ($compress ? 1 : 0),
      };
    }
    else {
      die "Unsupported encryption type (raw_target_encrypt)";
    }
  }

  if($split) {
    # NOTE: we do not append a ".split" suffix on $target_filename here, as this propagates to ".info" file
    $raw_info{split} = $split;
    push @cmd_pipe, {
      cmd => [ 'split', '-b', uc($split), '-', { unsafe => "${target_path}/${target_filename}.split_" } ],
      rsh => vinfo_rsh($target, disable_compression => $stream_options->{stream_compress}),
      compressed_ok => ($compress ? 1 : 0),
    }
  }
  else {
    push @cmd_pipe, {
      # NOTE: We use "dd" instead of shell redirections here, as it is
      # common to have special filesystems (like NFS, SMB, FUSE) mounted
      # on $target_path. By using "dd" we make sure to write in
      # reasonably large blocks (default=128K), which is not always the
      # case when using redirections (e.g. "gpg > outfile" writes in 8K
      # blocks).
      # Another approach would be to always pipe through "cat", which
      # uses st_blksize from fstat(2) (with a minimum of 128K) to
      # determine the block size.
      cmd => [ 'dd', 'status=none', 'bs=' . config_key($target, "raw_target_block_size"), { prefix => "of=", unsafe => "${target_path}/${target_filename}" } ],
      #redirect_to_file => { unsafe => "${target_path}/${target_filename}" }, # alternative (use shell redirection), less overhead on local filesystems (barely measurable):
      rsh => vinfo_rsh($target, disable_compression => $stream_options->{stream_compress}),
      compressed_ok => ($compress ? 1 : 0),
    };
  }

  $raw_info{FILE} = $target_filename;
  $raw_info{RECEIVED_PARENT_UUID} = $parent_uuid if($parent_uuid);
  # disabled for now, as its not very useful and might leak information:
  # $raw_info{parent_url} = $parent->{URL} if($parent);
  # $raw_info{target_url} = $vol_received->{URL};
  $$ret_raw_info = \%raw_info if($ret_raw_info);

  print STDOUT "Creating raw backup: $vol_received->{PRINT}\n" if($show_progress && (not $dryrun));

  INFO "[send-to-raw] target: $vol_received->{PRINT}";
  INFO "[send-to-raw] source: $source->{PRINT}";
  INFO "[send-to-raw] parent: $parent->{PRINT}" if($parent);

  start_transaction("send-to-raw",
                    vinfo_prefixed_keys("target", $vol_received),
                    vinfo_prefixed_keys("source", $source),
                    vinfo_prefixed_keys("parent", $parent),
                   );
  # write the .info sidecar first (with INCOMPLETE=1), then run the pipe
  my $ret;
  $ret = system_write_raw_info($vol_received, \%raw_info);

  my @cmd_err;
  if(defined($ret)) {
    $ret = run_cmd(@cmd_pipe);
    @cmd_err = @stderr unless(defined($ret)); # save for later
  }
  else {
    push @cmd_err, "failed to write raw .info file: $vol_received->{PATH}.info", @stderr;
  }

  if(defined($ret)) {
    # Test target file for "exists and size > 0" after writing, as we
    # can not rely on the exit status of the command pipe, and a shell
    # redirection as well as "dd" always creates the target file.
    # Note that "split" does not create empty files.
    my $test_postfix = ($split ? ".split_aa" : "");
    my $check_file = "${target_path}/${target_filename}${test_postfix}";
    DEBUG "Testing target data file (non-zero size): $check_file";
    $ret = run_cmd(cmd => [ 'test', '-s', { unsafe => $check_file } ],
                   rsh => vinfo_rsh($target),
                  );
    if(defined($ret)) {
      delete $raw_info{INCOMPLETE};
      $ret = system_write_raw_info($vol_received, { INCOMPLETE => 0 }, append => 1);
    } else {
      push @cmd_err, "failed to check target file (not present or zero length): $check_file";
    }
  }
  end_transaction("send-to-raw", defined($ret));
  unless(defined($ret)) {
    ERROR "Failed to send btrfs subvolume to raw file: $source->{PRINT}" . ($parent ? " [$parent->{PATH}]" : "") . " -> $vol_received->{PRINT}", @cmd_err;
    return undef;
  }
  return 1;
}
|
|
|
|
|
|
# Fetch and parse /proc/self/mountinfo on the (possibly remote) host of
# $vol. Returns an arrayref of hashes, one per mount, with the named
# fields from the mountinfo line (mount_id, parent_id, st_dev, fs_root,
# mount_point, fs_type, mount_source) plus MNTOPS, a hashref merging
# mount options and super block options. Returns undef on any parse
# error (strict: one bad line aborts the whole listing).
sub system_list_mountinfo($)
{
  my $vol = shift // die;
  my $file = '/proc/self/mountinfo'; # NOTE: /proc/self/mounts is deprecated
  my $ret = run_cmd(cmd => [ 'cat', $file ],
                    rsh => vinfo_rsh($vol),
                    non_destructive => 1,
                   );
  return undef unless(defined($ret));
  unless(@$ret) {
    ERROR "Failed to parse \"$vol->{URL_PREFIX}$file\": no output";
    return undef;
  }

  my @mountinfo;
  foreach(@$ret)
  {
    # https://www.kernel.org/doc/Documentation/filesystems/proc.txt
    unless(/^(?<mount_id>[0-9]+)        # mount ID: unique identifier of the mount (may be reused after umount)
            \s(?<parent_id>[0-9]+)      # parent ID: ID of parent (or of self for the top of the mount tree)
            \s(?<st_dev>[0-9]+:[0-9]+)  # major:minor: value of st_dev for files on filesystem
            \s(?<fs_root>\S+)           # root: root of the mount within the filesystem
            \s(?<mount_point>\S+)       # mount point: mount point relative to the process's root
            \s(?<mount_options>\S+)     # mount options: per mount options
            (\s\S+)*                    # optional fields: zero or more fields of the form "tag[:value]"
            \s-                         # separator: marks the end of the optional fields
            \s(?<fs_type>\S+)           # filesystem type: name of filesystem of the form "type[.subtype]"
            \s(?<mount_source>\S+)      # mount source: filesystem specific information or "none"
            \s(?<super_options>\S+)$    # super options: per super block options
           /x)
    {
      ERROR "Failed to parse \"$vol->{URL_PREFIX}$file\"";
      DEBUG "Offending line: $_";
      return undef;
    }
    # copy the named captures into a plain hash before the next match
    my %line = %+;

    unless(defined(check_file($line{mount_point}, { absolute => 1 }))) {
      ERROR "Ambiguous mount point in \"$vol->{URL_PREFIX}$file\": $line{mount_point}";
      return undef;
    }

    # merge super_options and mount_options to MNTOPS.
    my %mntops;
    foreach (split(',', delete($line{super_options})),
             split(',', delete($line{mount_options})))
    {
      if(/^(.+?)=(.+)$/) {
        $mntops{$1} = $2;
      } else {
        $mntops{$_} = 1;
      }
    }
    $mntops{rw} = 0 if($mntops{ro}); # e.g. mount_options="ro", super_options="rw"

    # decode values (octal, e.g. "\040" = whitespace)
    s/\\([0-7]{3})/chr(oct($1))/eg foreach(values %line, values %mntops);

    $line{MNTOPS} = \%mntops;
    push @mountinfo, \%line;
  }
  # TRACE(Data::Dumper->Dump([\@mountinfo], ["mountinfo"])) if($do_trace && $do_dumper);
  return \@mountinfo;
}
|
|
|
|
|
|
# Check whether the path of $vol exists and is a directory on its
# (possibly remote) host, using "test -d".
# Returns 1 if the directory exists, undef otherwise.
sub system_testdir($)
{
  my $vol = shift // die;
  my $dir = $vol->{PATH} // die;

  # run_cmd() returns undef on non-zero exit status of "test -d"
  my $status = run_cmd(
    cmd => vinfo_cmd($vol, "test", '-d', { unsafe => $dir }),
    rsh => vinfo_rsh($vol),
    non_destructive => 1,
  );
  if(defined($status)) {
    DEBUG "Directory exists: $vol->{PRINT}";
    return 1;
  }
  return undef;
}
|
|
|
|
|
|
# Resolve the canonical (absolute, symlink-free) path of $vol on its
# (possibly remote) host using "readlink". With compat=busybox, uses
# "readlink -f" (busybox has no "-e") and additionally verifies the
# directory exists via system_testdir().
# Returns the resolved absolute path, or undef on error.
sub system_realpath($)
{
  my $vol = shift // die;
  my $path = $vol->{PATH} // die;
  my $compat = config_key_lru($vol, "compat", "busybox");
  my @options = ("-v"); # report error messages
  push @options, "-e" unless($compat); # all components must exist (not available in busybox!)
  push @options, "-f" if($compat); # all but the last component must exist.
  my $ret = run_cmd(cmd => vinfo_cmd($vol, "readlink", @options, { unsafe => $path } ),
                    rsh => vinfo_rsh($vol),
                    non_destructive => 1,
                   );
  return undef unless(defined($ret));

  my $realpath = scalar(@$ret) ? (check_file($ret->[0], { absolute => 1 }) // "") : "";
  unless($realpath) {
    # bugfix: error message referred to `realpath`, but the command run
    # is `readlink`; also guard against undefined $ret->[0] (empty
    # output), which would be fatal under "use warnings FATAL".
    ERROR "Failed to parse output of `readlink` for \"$vol->{PRINT}\": \"" . ($ret->[0] // "") . "\"";
    return undef;
  }
  DEBUG "Real path for \"$vol->{PRINT}\" is: $realpath";
  # busybox "readlink -f" does not require the last component to exist,
  # so explicitly verify the directory in compat mode
  return undef if($compat && !system_testdir($vol));
  return $realpath;
}
|
|
|
|
|
|
# Create the directory for $vol ("mkdir -p", possibly remote), wrapped
# in a "mkdir" transaction. Returns 1 on success, undef on error.
sub system_mkdir($)
{
  my $vol = shift // die;
  my $dir = $vol->{PATH} // die;

  INFO "Creating directory: $vol->{PRINT}/";
  start_transaction("mkdir", vinfo_prefixed_keys("target", $vol));
  my $status = run_cmd(
    cmd => [ 'mkdir', '-p', { unsafe => $dir } ],
    rsh => vinfo_rsh($vol),
  );
  end_transaction("mkdir", defined($status));

  return defined($status) ? 1 : undef;
}
|
|
|
|
|
|
# Read and validate all raw backup sidecar files ("*.btrfs.*info",
# written by system_write_raw_info()) in directory $droot.
# Uses a single "find ... -print0 -exec cat ... -exec printf '\0\0'"
# invocation so filename and content of each file arrive in one stream
# ("<file>\0<content>\0\0" records).
# Returns an arrayref of raw_info hashes (INFO_FILE, NAME plus the
# parsed key=value pairs); undef on any parse/validation error — we
# must abort on error here, or backups would be resumed from bad
# metadata.
sub system_read_raw_info_dir($)
{
  my $droot = shift // die;
  my $ret = run_cmd(
    # NOTE: we cannot simply "cat" all files here, as it will fail if no files found
    cmd => [ 'find', { unsafe => $droot->{PATH} },
             '-maxdepth', '1',
             '-type', 'f',
             '!', '-size', '0',
             '-name', '\*.btrfs.\*info', # match ".btrfs[.gz|.bz2|.xz|...][.gpg].info"
             '-print0',
             '-exec', 'cat \{\} \;',
             '-exec', 'printf "\0\0" \;',
           ],
    rsh => vinfo_rsh($droot),
    non_destructive => 1,
  );
  unless(defined($ret)) {
    ERROR("Failed to read *.btrfs.*.info files in: $droot->{PATH}");
    return undef;
  }

  my @raw_targets;
  # split into "<file>\0<content>" records on the "\0\0" terminator
  foreach my $info_text (split "\000\000", join "\n", @$ret) {
    unless($info_text =~ s/^(.*?)\000//s) {
      ERROR("Error while parsing command output for: $droot->{PATH}");
      return undef;
    }
    my $info_file = check_file($1, { absolute => 1 }, error_statement => 'for raw info file')
      // return undef;
    # NAME is the basename of the info file without the ".info" suffix
    my $name = ($info_file =~ s/^.*\///r);
    $name =~ s/\.info$//;

    my $raw_info = {
      INFO_FILE => $info_file,
      NAME => $name,
    };
    foreach my $line (split "\n", $info_text) {
      my ($key, $value) = ($line =~ /^([a-zA-Z_]+)=(.*)/);
      next unless $key;
      if($key eq "FILE") {
        # the filename on disk is authoritative; FILE= from the info
        # file is only cross-checked
        WARN("Ignoring ambiguous \"FILE=$value\" from raw info file, using \"$name\": $info_file") if($value ne $name);
        next;
      }
      unless($value =~ /^$raw_info_value_match$/) {
        ERROR("Failed to parse \"$key=$value\" in raw info file: $info_file");
        return undef;
      }
      $raw_info->{$key} = $value;
    }

    # input validation (we need to abort here, or the backups will be resumed)
    unless($raw_info->{TYPE} && ($raw_info->{TYPE} eq 'raw')) {
      ERROR("Unsupported \"type\" in raw info file: $info_file");
      return undef;
    }
    unless($raw_info->{RECEIVED_UUID} && ($raw_info->{RECEIVED_UUID} =~ /^$uuid_match$/)) {
      ERROR("Missing/Illegal \"received_uuid\" in raw info file: $info_file");
      return undef;
    }
    if(defined $raw_info->{RECEIVED_PARENT_UUID}) {
      unless(($raw_info->{RECEIVED_PARENT_UUID} eq '-') || ($raw_info->{RECEIVED_PARENT_UUID} =~ /^$uuid_match$/)) {
        ERROR("Illegal \"RECEIVED_PARENT_UUID\" in raw info file: $info_file");
        return undef;
      }
    }
    else {
      # normalize: "-" denotes "no parent" (non-incremental backup)
      $raw_info->{RECEIVED_PARENT_UUID} = '-';
    }

    push @raw_targets, $raw_info;
  }

  DEBUG("Parsed " . @raw_targets . " raw info files in path: $droot->{PATH}");
  TRACE(Data::Dumper->Dump([\@raw_targets], ["system_read_raw_info_dir($droot->{URL})"])) if($do_trace && $do_dumper);

  return \@raw_targets;
}
|
|
|
|
|
|
# Write (create) or amend (append) the ".info" sidecar file for raw
# target $vol, containing the key=value pairs of %$raw_info.
# The file is written via "printf > file" (or ">> file" with option
# "append => 1") on the target host, so this works for remote targets
# too. Create mode adds a "#btrbk-v<version>" header; both modes add a
# "#t=<epoch>" timestamp comment.
# Returns the info file path on success, undef on error.
sub system_write_raw_info($$;@)
{
  my $vol = shift // die;
  my $raw_info = shift // die;
  my %opts = @_;
  my $append = $opts{append};
  my $info_file = $vol->{PATH} . '.info';

  # sort by %raw_info_sort, then by key
  my @line = $append ? () : ("#btrbk-v$VERSION", "# Do not edit this file");
  my @subst;
  push @line, '#t=' . time;
  foreach(sort { (($raw_info_sort{$a} // 99) <=> ($raw_info_sort{$b} // 99)) || ($a cmp $b) } keys %$raw_info) {
    # build a printf format ("key=%s") with the values passed as
    # separate (shell-quoted) arguments
    push @line, ($_ . '=%s');
    push @subst, $raw_info->{$_};
  }

  DEBUG "Writing (" . ($append ? "append:" . join(",", keys %$raw_info) : "create") . ") raw info file: $info_file";
  my $ret = run_cmd(
    { cmd => [ 'printf', quoteshell(join('\n', @line, "")), map quoteshell($_), @subst ] },
    { ($append ? "append_to_file" : "redirect_to_file") => { unsafe => $info_file },
      rsh => vinfo_rsh($vol),
    });
  return undef unless(defined($ret));

  return $info_file;
}
|
|
|
|
|
|
# Read $size bytes (1..256) of randomness from /dev/urandom.
# Returns a lowercase hex string of length 2*$size for $format='hex'
# (the default), or the raw bytes for $format='bin'; undef on I/O error.
sub system_urandom($;$) {
  my $size = shift;
  my $format = shift || 'hex';
  die unless(($size > 0) && ($size <= 256)); # sanity check

  # bugfix: original used a bareword filehandle URANDOM but then called
  # close(FILE), leaking the handle; use a lexical filehandle instead.
  my $fh;
  unless(open($fh, '<', '/dev/urandom')) {
    ERROR "Failed to open /dev/urandom: $!";
    return undef;
  }
  binmode $fh;
  my $rand;
  my $rlen = read($fh, $rand, $size);
  close($fh);
  unless(defined($rand) && ($rlen == $size)) {
    ERROR "Failed to read from /dev/urandom: $!";
    return undef;
  }

  if($format eq 'hex') {
    my $hex = unpack('H*', $rand);
    die unless(length($hex) == ($size * 2)); # paranoia check
    return $hex;
  }
  elsif($format eq 'bin') {
    return $rand;
  }
  die "unsupported format";
}
|
|
|
|
# Read the cached extent map for $vol from the first matching
# "<uuid>.extentmap.bin" file in the configured cache_dir list.
# File format (little-endian): 24-byte magic "btrbk_extentmap_v1",
# 64-bit generation, 64-bit timestamp, then [start,end] pairs of
# 64-bit words. A cache whose generation differs from the current
# subvolume generation is ignored (stale).
# Returns an arrayref of [start,end] ranges, or undef if no usable
# cache was found.
sub read_extentmap_cache($)
{
  my $vol = shift;
  my $cache_dir = config_key($vol, 'cache_dir');
  return undef unless($cache_dir);
  my $uuid = $vol->{node}{uuid} // die;
  foreach (@$cache_dir) {
    my $file = "$_/${uuid}.extentmap.bin";
    next unless (-f $file);

    DEBUG "Reading extentmap cache: $file";
    if(open(my $fh, '<:raw', $file)) {
      my @range;
      my $buf;
      # bugfix: check the header read length before unpack; a truncated
      # file would otherwise feed a short buffer to unpack('Q<') (fatal
      # under "use warnings FATAL")
      my $hlen = read($fh, $buf, 24 + 8 * 2); # read header
      unless(defined($hlen) && ($hlen == 24 + 8 * 2)) {
        ERROR "Ambiguous cache file (truncated header): $file";
        next;
      }
      my ($v, $gen, $time) = unpack('a24Q<Q<', $buf);
      unless(($v =~ /^btrbk_extentmap_v1/) && $gen && $time) {
        # bugfix: message typo ("Ambigous")
        ERROR "Ambiguous cache file: $file";
        next;
      }
      if($gen != $vol->{node}{gen}) {
        WARN "Subvolume generation has changed (cache=$gen, subvol=$vol->{node}{gen}), ignoring cache: $file";
        next;
      }
      while(read $fh, $buf, 8 * 2) { # read / unpack two words
        push @range, [ unpack('Q<Q<', $buf) ];
        #TRACE "read_extentmap_cache: range " . join("..", @{$range[-1]});
      };
      DEBUG "Read " . scalar(@range) . " regions (gen=$gen, timestamp='" . localtime($time) . "') from: $file";
      return \@range;
    } else {
      ERROR "Failed to open '$file': $!";
    }
  }
  return undef;
}
|
|
|
|
# Write the extent map of $vol ($vol->{EXTENTMAP}) to a
# "<uuid>.extentmap.bin" cache file in every configured cache_dir.
# File layout matches read_extentmap_cache(): 24-byte magic, 64-bit LE
# generation and timestamp, followed by the [start,end] pairs as 64-bit
# LE words. No return value of interest; errors are logged only.
sub write_extentmap_cache($)
{
  my $vol = shift;
  my $extmap = $vol->{EXTENTMAP};
  my $cache_dir = config_key($vol, 'cache_dir');
  return undef unless($extmap && $cache_dir);
  my $uuid = $vol->{node}{uuid} // die;

  foreach my $dir (@$cache_dir) {
    unless(-d $dir) {
      WARN_ONCE "Ignoring cache_dir (not a directory): $dir";
      next;
    }
    my $file = "$dir/${uuid}.extentmap.bin";

    INFO "Writing extentmap cache: $file";
    my $fh;
    unless(open($fh, '>:raw', $file)) {
      ERROR "Failed to create '$file': $!";
      next;
    }
    # pack Q: unsigned quad (64bit, Documentation/filesystems/fiemap.txt)
    print $fh pack('a24Q<Q<', "btrbk_extentmap_v1", $vol->{node}{gen}, time);
    print $fh pack('Q<*', map(@{$_}, @$extmap));
    close($fh);
  }
}
|
|
|
|
# returns extents range (sorted array of [start,end], inclusive) from FIEMAP ioctl
|
|
# Collect the physical extent ranges of all regular files below
# $vol->{PATH} by running "find ... -exec filefrag -b1 -v", parsing the
# per-extent output lines. Fallback used when the IO::AIO module (FIEMAP
# ioctl) is not available; "inline" extents are always skipped here.
# Returns a merged, sorted arrayref of [start,end] byte ranges
# (inclusive) via extentmap_merge(); undef on error.
sub filefrag_extentmap($)
{
  my $vol = shift || die;
  my $starttime = time;

  INFO("Fetching extent map (filefrag): $vol->{PRINT}");

  # NOTE: this returns exitstatus=0 if file is not found, or no files found
  my $ret = run_cmd(cmd => [ 'find', { unsafe => $vol->{PATH} }, '-xdev', '-type', 'f',
                             '-exec', 'filefrag -b1 -v \{\} +' ],
                    large_output => 1);
  unless(defined($ret)) {
    ERROR "Failed to fetch extent map: $vol->{PRINT}", @stderr;
    return undef;
  }
  WARN_ONCE "Configuration option \"ignore_extent_data_inline=no\" not available for filefrag (please install \"IO::AIO\" perl module)" unless(config_key($vol, "ignore_extent_data_inline"));

  my @range; # array of [start,end]
  foreach (@$ret) {
    #my $file = $1 if(/^File size of (.*?) is/);
    # match extent table lines "<ext>: <log_start>..<log_end>: <phys_start>..<phys_end>:",
    # capturing the physical start/end offsets
    if(/^\s*[0-9]+:\s*[0-9]+\.\.\s*[0-9]+:\s*([0-9]+)\.\.\s*([0-9]+):/) {
      # NOTE: filefrag (v1.45.5) returns wrong (?) physical_offset for
      # "inline" regions unless run with `-b1` (blocksize=1) option.
      #
      # For btrfs file systems it does not make much sense to consider
      # the "inline" extents anyways: these are stored in metadata
      # section and are not really part of the used disk space.
      #
      #  # filefrag -v MYFILE
      #  File size of MYFILE is 2307 (1 block of 4096 bytes)
      #   ext:     logical_offset:        physical_offset: length:   expected: flags:
      #     0:        0..    4095:          0..      4095:   4096:             last,not_aligned,inline,eof
      #  # filefrag -v -b1 MYFILE
      #  File size of MYFILE is 2307 (4096 block of 1 bytes)
      #   ext:     logical_offset:        physical_offset: length:   expected: flags:
      #     0:        0..    4095:          0..      4095:   4096:             last,not_aligned,inline,eof
      next if(/inline/);
      push @range, [ $1, $2 ];
    }
  }
  DEBUG("Parsed " . scalar(@range) . " regions in " . (time - $starttime) . "s for: $vol->{PRINT}");
  return extentmap_merge(\@range);
}
|
|
|
|
|
|
# returns extents range (sorted array of [start,end], inclusive) from FIEMAP ioctl
|
|
# Build the extent map of all regular files below $vol->{PATH} using
# the FIEMAP ioctl via IO::AIO (asynchronous, bounded by
# max_outstanding file descriptors). Returns a merged, sorted arrayref
# of inclusive [start,end] byte ranges, or undef on error.
#
# Inline extents (data stored in metadata) are counted, and skipped if
# "ignore_extent_data_inline" is set.
sub aio_extentmap($)
{
  my $vol = shift || die;
  my $starttime = time;
  my $ignore_inline = config_key($vol, "ignore_extent_data_inline");

  INFO("Fetching extent map: $vol->{PRINT}");

  # NOTE: this returns exitstatus=0 if file is not found, or no files found
  my $ret = run_cmd(cmd => [ 'find', { unsafe => $vol->{PATH} }, '-xdev', '-type', 'f' ],
                    large_output => 1 );
  unless(defined($ret)) {
    ERROR "Failed to find files in: $vol->{PRINT}", @stderr;
    return undef;
  }

  DEBUG("Reading ioctl FIEMAP of " . scalar(@$ret) . " files");

  IO::AIO::max_outstanding(128); # < 1024 (max file descriptors)
  IO::AIO::max_poll_reqs(32);

  my @range;
  my $count = 0;        # files successfully opened and fiemap'ed
  my $inline_count = 0;
  foreach my $file (@$ret) {
    IO::AIO::aio_open($file, IO::AIO::O_RDONLY(), 0, sub {
      # graceful abort on file open errors (check $count below)
      return unless($_[0]); # [ $fh ]

      # note: aio_fiemap returns byte range (not blocks)
      # see: Documentation/filesystems/fiemap.rst
      IO::AIO::aio_fiemap($_[0], 0, undef, 0, undef, sub {
        $count++;
        foreach(@{$_[0]}) { # [ $logical, $physical, $length, $flags ]
          if($_->[3] & IO::AIO::FIEMAP_EXTENT_DATA_INLINE()) {
            $inline_count++;
            next if($ignore_inline);
            # inline extents report physical offset 0 and length 4096;
            # anything else would corrupt the map
            WARN_ONCE "Ambigous inline region [$_->[1] .. $_->[1] + $_->[2] - 1] for $file" if((($_->[1] != 0) || ($_->[2] != 4096)));
          }
          push @range, [ $_->[1], $_->[1] + $_->[2] - 1 ];
        }
      });
    });
    # poll, or the above eats up all our filedescriptors
    IO::AIO::poll_cb(); # takes "max_outstanding" and "max_poll_reqs" settings
  }

  IO::AIO::flush();

  # FIX: report the number of files that FAILED to open; $count holds
  # the number of files successfully processed (incremented in the
  # fiemap completion callback), not the number of failures.
  WARN "Failed to open " . (scalar(@$ret) - $count) . " / " . scalar(@$ret) . " files" if($count != scalar(@$ret));

  DEBUG("Parsed " . scalar(@range) . " regions (" . ($ignore_inline ? "ignored " : "") . "$inline_count \"inline\") for $count files in " . (time - $starttime) . "s for: $vol->{PRINT}");
  return extentmap_merge(\@range);
}
|
|
|
|
|
|
sub extentmap_total_blocks($)
{
  # Sum the sizes of all inclusive [start,end] regions in $extmap->{rmap}.
  my $extmap = shift;
  my $total = 0;
  $total += ($_->[1] - $_->[0] + 1) foreach (@{$extmap->{rmap}});
  return $total;
}
|
|
|
|
|
|
sub extentmap_size($)
{
  # Total number of bytes covered by a merged extent map (arrayref of
  # inclusive [start,end] ranges); undef if no map is given.
  my $ranges = shift; # merged ranges
  return undef unless($ranges);
  my $total = 0;
  $total += ($_->[1] - $_->[0] + 1) for (@$ranges);
  return $total;
}
|
|
|
|
|
|
sub extentmap_merge(@) {
  # Merge one or more range lists (arrayrefs of inclusive [start,end]
  # pairs) into a single sorted list of non-overlapping, non-adjacent
  # regions. Returns undef if called without arguments.
  return undef unless(scalar(@_));
  my @sorted = sort { $a->[0] <=> $b->[0] } map @$_, @_;
  my @merged;
  foreach my $r (@sorted) {
    if(scalar(@merged) && ($r->[0] <= $merged[-1][1] + 1)) {
      # overlaps the preceding region, or is adjacent to it: extend it
      $merged[-1][1] = $r->[1] if($r->[1] > $merged[-1][1]);
    }
    else {
      # gap: start a new output region (fresh arrayref, inputs untouched)
      push @merged, [ $r->[0], $r->[1] ];
    }
  }
  DEBUG "extentmap: merged " . scalar(@sorted) . " regions into " . scalar(@merged) . " regions";
  return \@merged;
}
|
|
|
|
|
|
# ( A \ B ) : data in A that is not in B (relative complement of B in A)
|
|
sub extentmap_diff($$)
{
  # Relative complement A \ B on sorted, merged range lists: returns
  # the regions of A ($l) not covered by B ($r). Both inputs are
  # arrayrefs of inclusive [start,end]; B may be undef (A \ 0 = A).
  my $l = shift // die; # A, sorted
  my $r = shift; # B, sorted
  return $l unless($r); # A \ 0 = A

  my $i = 0;
  my $rn = scalar(@$r);
  my @diff;

  foreach(@$l) {
    my $l_start = $_->[0];
    my $l_end = $_->[1];
    while(($i < $rn) && ($r->[$i][1] < $l_start)) { # r_end < l_start
      # advance r to next overlapping
      $i++;
    }
    while(($i < $rn) && ($r->[$i][0] <= $l_end)) { # r_start <= l_end
      # while overlapping, advance l_start
      my $r_start = $r->[$i][0];
      my $r_end = $r->[$i][1];

      # emit the uncovered head of the current A range (if any)
      push @diff, [ $l_start, $r_start - 1 ] if($l_start < $r_start);
      $l_start = $r_end + 1;
      # note: no $i++ on last: this B range may overlap the next A range
      last if($l_start > $l_end);
      $i++;
    }
    push @diff, [ $l_start, $l_end ] if($l_start <= $l_end);
  }
  # FIX: log message printed the operands in reversed order ("B \ A");
  # this function computes A \ B (A=$l, B=$r, see header comment).
  DEBUG "extentmap: relative complement ( A=" . scalar(@$l) . ' \ B=' . scalar(@$r) . " ) = " . scalar(@diff) . " regions";
  return \@diff;
}
|
|
|
|
|
|
# Parse (or fetch from cache) the btrfs subvolume tree of the
# filesystem on $mount_source, and return its node with
# id=$vol_root_id. Populates %mount_source_cache and %uuid_cache, and
# wires up ID_HASH / UUID_HASH / SUBTREE / TOP_LEVEL / REL_PATH /
# MOUNTINFO on the tree nodes. Returns undef on error; exits if an
# inconsistent environment is detected (see below).
sub btr_tree($$$$)
{
  my $vol = shift;
  my $vol_root_id = shift || die;
  my $mount_source = shift || die; # aka device
  my $mountpoints = shift || die; # all known mountpoints for this filesystem: arrayref of mountinfo
  die unless($vol_root_id >= 5);

  # return parsed tree from %mount_source_cache if present
  my $host_mount_source = $vol->{URL_PREFIX} . $mount_source;
  my $cached_tree = $mount_source_cache{$host_mount_source};
  TRACE "mount_source_cache " . ($cached_tree ? "HIT" : "MISS") . ": $host_mount_source" if($do_trace);
  if($cached_tree) {
    TRACE "btr_tree: returning cached tree at id=$vol_root_id" if($do_trace);
    my $node = $cached_tree->{ID_HASH}{$vol_root_id};
    ERROR "Unknown subvolid=$vol_root_id in btrfs tree of $host_mount_source" unless($node);
    return $node;
  }

  my $node_list = btrfs_subvolume_list_complete($vol);
  return undef unless(ref($node_list) eq "ARRAY");
  my $vol_root;

  TRACE "btr_tree: processing subvolume list of: $vol->{PRINT}" if($do_trace);

  # return a reference to the cached root if we already know the tree,
  # making sure every tree is only stored once, which is essential
  # e.g. when injecting nodes. die if duplicate UUID exist on
  # different file systems (no matter if local or remote).
  #
  # note: this relies on subvolume UUID's to be "universally unique"
  # (which is why cloning btrfs filesystems using "dd" is a bad idea)
  #
  # note: a better way would be to always compare the UUID of
  # subvolid=5. unfortunately this is not possible for filesystems
  # created with btrfs-progs < 4.16 (no UUID for subvolid=5).
  foreach(@$node_list) {
    my $node_uuid = $_->{uuid};
    next unless($node_uuid);
    if($uuid_cache{$node_uuid}) {
      # at least one uuid of $node_list is already known
      TRACE "uuid_cache HIT: $node_uuid" if($do_trace);
      $vol_root = $uuid_cache{$node_uuid}->{TREE_ROOT}->{ID_HASH}->{$vol_root_id};
      unless($vol_root) {
        # check for deleted subvolumes: e.g. still mounted, but deleted elsewhere
        my $deleted_nodes = btrfs_subvolume_list($vol, deleted_only => 1);
        return undef unless(ref($deleted_nodes) eq "ARRAY");
        # FIX: was "grep ($_->{id} eq $vol_root_id), @$deleted_nodes",
        # which does not compile ("Not enough arguments for grep"): the
        # parenthesized expression is taken as grep's complete argument
        # list. Use the grep BLOCK LIST form instead.
        if(grep { $_->{id} eq $vol_root_id } @$deleted_nodes) {
          ERROR "Subvolume is deleted: id=$vol_root_id mounted on: $vol->{PRINT}";
          return undef;
        }
        ERROR "Subvolume id=$vol_root_id is not present on known btrfs tree: $vol->{PRINT}",
              "Possible causes:",
              " - Mismatch in mountinfo",
              " - Subvolume was deleted while btrbk is running",
              " - Duplicate UUID present on multiple filesystems: $node_uuid";
        ERROR "Refusing to run on unstable environment; exiting";
        exit 1;
      }
      INFO "Assuming same filesystem: \"$vol_root->{TREE_ROOT}->{host_mount_source}\", \"$host_mount_source\"";
      TRACE "btr_tree: returning already parsed tree at id=$vol_root->{id}" if($do_trace);
      $mount_source_cache{$host_mount_source} = $vol_root->{TREE_ROOT};
      return $vol_root;
    }
    last; # check only first UUID (for performance)
  }

  # fill our hashes and uuid_cache
  my %id;
  my %uuid_hash;
  my %received_uuid_hash;
  my %parent_uuid_hash;
  my $gen_max = 0;
  foreach my $node (@$node_list) {
    my $node_id = $node->{id};
    my $node_uuid = $node->{uuid};
    die unless($node_id >= 5);
    die "duplicate node id" if(exists($id{$node_id}));
    $id{$node_id} = $node;
    if($node_uuid) {
      # NOTE: uuid on btrfs root (id=5) is not always present
      $uuid_hash{$node_uuid} = $node;
      $uuid_cache{$node_uuid} = $node;
      # hacky: if root node has no "uuid", it also has no "received_uuid" and no "gen"
      push(@{$received_uuid_hash{$node->{received_uuid}}}, $node) if($node->{received_uuid} ne '-');
      push(@{$parent_uuid_hash{$node->{parent_uuid}}}, $node) if($node->{parent_uuid} ne '-');
      $gen_max = $node->{gen} if($node->{gen} > $gen_max);
    }
    elsif(not $node->{is_root}) {
      die "missing uuid on subvolume";
    }
    $node->{SUBTREE} = [];
  }
  my $tree_root = $id{5} // die "missing btrfs root";
  $tree_root->{ID_HASH} = \%id;
  $tree_root->{UUID_HASH} = \%uuid_hash;
  $tree_root->{RECEIVED_UUID_HASH} = \%received_uuid_hash;
  $tree_root->{PARENT_UUID_HASH} = \%parent_uuid_hash;
  $tree_root->{GEN_MAX} = $gen_max;
  $tree_root->{URL_PREFIX} = $vol->{URL_PREFIX}; # hacky, first url prefix for logging

  # NOTE: host_mount_source is NOT dependent on MACHINE_ID:
  # if we return already present tree (see above), the value of
  # host_mount_source will still point to the mount_source of the
  # first machine.
  $tree_root->{mount_source} = $mount_source;
  $tree_root->{host_mount_source} = $host_mount_source; # unique identifier, e.g. "/dev/sda1" or "ssh://hostname[:port]/dev/sda1"

  $vol_root = $id{$vol_root_id};
  unless($vol_root) {
    ERROR "Failed to resolve tree root for subvolid=$vol_root_id: " . ($vol->{PRINT} // $vol->{id});
    return undef;
  }

  # set REL_PATH and tree references (TREE_ROOT, SUBTREE, TOP_LEVEL)
  foreach my $node (@$node_list) {
    unless($node->{is_root}) {
      # note: it is possible that id < top_level, e.g. after restoring
      my $top_level = $id{$node->{top_level}};
      die "missing top_level reference" unless(defined($top_level));

      push(@{$top_level->{SUBTREE}}, $node);
      $node->{TOP_LEVEL} = $top_level;

      # "path" always starts with set REL_PATH
      my $rel_path = $node->{path};
      unless($top_level->{is_root}) {
        die unless($rel_path =~ s/^\Q$top_level->{path}\E\///);
      }
      $node->{REL_PATH} = $rel_path; # relative to {TOP_LEVEL}->{path}
    }
    $node->{TREE_ROOT} = $tree_root;
    add_btrbk_filename_info($node);
  }

  # add known mountpoints to nodes
  my %mountpoints_hash;
  foreach(@$mountpoints) {
    my $node_id = $_->{MNTOPS}{subvolid};
    my $node = $id{$node_id};
    unless($node) {
      WARN "Unknown subvolid=$node_id (in btrfs tree of $host_mount_source) for mountpoint: $vol->{URL_PREFIX}$_->{mount_point}";
      next;
    }
    $mountpoints_hash{$node_id} = $node;
    push @{$node->{MOUNTINFO}}, $_; # if present, node is mounted at MOUNTINFO
  }
  $tree_root->{MOUNTED_NODES} = [ (values %mountpoints_hash) ]; # list of mounted nodes

  TRACE "btr_tree: returning tree at id=$vol_root->{id}" if($do_trace);
  VINFO($vol_root, "node") if($loglevel >=4);

  $mount_source_cache{$host_mount_source} = $tree_root;
  return $vol_root;
}
|
|
|
|
|
|
# Insert a synthetic node (e.g. for a snapshot/backup about to be
# created) into an existing parsed btrfs tree, below $top_node. The
# node gets a fake negative id and a fake uuid, and is registered in
# the tree root's lookup hashes and the global %uuid_cache, exactly
# like a real node. Returns the new node.
sub btr_tree_inject_node($$$)
{
  my $top_node = shift;
  my $detail = shift;    # node attributes (parent_uuid, received_uuid, readonly, ...)
  my $rel_path = shift;  # path relative to $top_node
  my $subtree = $top_node->{SUBTREE} // die;
  my $tree_root = $top_node->{TREE_ROOT};

  # injected nodes must carry the attributes the tree logic relies on
  die unless($detail->{parent_uuid} && $detail->{received_uuid} && exists($detail->{readonly}));

  # fake ids count downwards (negative), fake generation counts upwards
  $tree_inject_id -= 1;
  $tree_root->{GEN_MAX} += 1;

  my $uuid = sprintf("${fake_uuid_prefix}%012u", -($tree_inject_id));
  my $node = {
    %$detail, # make a copy
    TREE_ROOT => $tree_root,
    SUBTREE => [],
    TOP_LEVEL => $top_node,
    REL_PATH => $rel_path,
    INJECTED => 1,
    id => $tree_inject_id,
    uuid => $uuid,
    gen => $tree_root->{GEN_MAX},
    cgen => $tree_root->{GEN_MAX},
  };
  # register the node everywhere a real node would be registered
  push(@$subtree, $node);
  $uuid_cache{$uuid} = $node;
  $tree_root->{ID_HASH}->{$tree_inject_id} = $node;
  $tree_root->{UUID_HASH}->{$uuid} = $node;
  push( @{$tree_root->{RECEIVED_UUID_HASH}->{$node->{received_uuid}}}, $node ) if($node->{received_uuid} ne '-');
  push( @{$tree_root->{PARENT_UUID_HASH}->{$node->{parent_uuid}}}, $node ) if($node->{parent_uuid} ne '-');
  return $node;
}
|
|
|
|
|
|
# returns array of { path, mountinfo }
|
|
# NOTE: includes subvolumes hidden by other mountpoint
|
|
sub __fs_info
{
  # Recursive helper for _fs_info: collect all { path, mountinfo }
  # entries under which $node is reachable, both via its own mount
  # points and via mount points of its ancestors (REL_PATH appended).
  my ($node, $url_prefix) = @_;

  # direct mount points of this node (if it is mounted anywhere)
  my @direct;
  if(my $mi_list = $node->{MOUNTINFO}) {
    @direct = map +{ path => $url_prefix . $_->{mount_point}, mountinfo => $_ }, @$mi_list;
  }
  return @direct if($node->{is_root});

  # paths reachable through the parent subvolume, with REL_PATH appended
  my @via_parent = map +{ path => $_->{path} . '/' . $node->{REL_PATH},
                          mountinfo => $_->{mountinfo} },
                   __fs_info($node->{TOP_LEVEL}, $url_prefix);
  return (@via_parent, @direct);
}
|
|
|
|
sub _fs_info
{
  # Resolve all filesystem locations of $node as a list of
  # { path, mountinfo } hashes. If the subvolume is not reachable via
  # any mount point, a single synthetic "<mount_source>/path" entry
  # (with mountinfo => undef) is returned instead.
  my $node = shift // die;
  my $url_prefix = shift // $node->{TREE_ROOT}{URL_PREFIX};
  my @info = __fs_info($node, $url_prefix);
  return @info if(scalar(@info));
  return ({ path => "$url_prefix<$node->{TREE_ROOT}{mount_source}>/$node->{path}",
            mountinfo => undef });
}
|
|
|
|
sub _fs_path
{
  # Like _fs_info, but returns only the path strings; in scalar
  # context, the first (preferred) path.
  my @paths = map $_->{path}, _fs_info(@_);
  return @paths if(wantarray);
  return $paths[0];
}
|
|
|
|
|
|
sub _is_correlated($$)
{
  # True if two (readonly, non-root) nodes hold identical data: one is
  # the received copy of the other, or both were received from the
  # same source subvolume.
  my ($na, $nb) = @_;
  return 0 if($na->{is_root} || $nb->{is_root});
  return 0 unless($na->{readonly} && $nb->{readonly});
  return 1 if($na->{uuid} eq $nb->{received_uuid});
  return 1 if($nb->{uuid} eq $na->{received_uuid});
  return 1 if(($na->{received_uuid} ne '-') && ($na->{received_uuid} eq $nb->{received_uuid}));
  return 0;
}
|
|
|
|
|
|
sub _is_same_fs_tree($$)
{
  # Two nodes belong to the same filesystem iff their tree roots share
  # the same (host-qualified) mount source.
  my ($na, $nb) = @_;
  return ($na->{TREE_ROOT}{host_mount_source} eq $nb->{TREE_ROOT}{host_mount_source});
}
|
|
|
|
|
|
sub _is_child_of
{
  # True if a node with $uuid exists anywhere below $node
  # (depth-first search of SUBTREE).
  my ($node, $uuid) = @_;
  my @todo = @{$node->{SUBTREE}};
  while(my $cur = shift @todo) {
    return 1 if($cur->{uuid} eq $uuid);
    unshift @todo, @{$cur->{SUBTREE}};
  }
  return 0;
}
|
|
|
|
|
|
sub _get_longest_match
{
  # Descend into $node's subtree, following the deepest subvolume whose
  # path is a prefix of $check_path. Returns { node, path } of the
  # longest match ("path" with trailing slash), or undef if $path is
  # not a prefix of $check_path at all.
  my ($node, $path, $check_path) = @_; # $check_path MUST have a trailing slash
  $path .= '/' unless($path =~ /\/$/); # correctly handle root path="/"

  return undef unless($check_path =~ /^\Q$path\E/);
  # first matching child wins; recurse for an even longer match
  foreach my $child (@{$node->{SUBTREE}}) {
    my $deeper = _get_longest_match($child, $path . $child->{REL_PATH}, $check_path);
    return $deeper if($deeper);
  }
  return { node => $node,
           path => $path };
}
|
|
|
|
|
|
# Build a vinfo hash (btrbk's path/host descriptor) from an url of the
# form "[ssh://host[:port]]/path". Dies on a malformed url. The
# returned hash carries the parsed HOST/PORT/PATH plus derived
# printable and unique identifiers (see inline notes below).
sub vinfo($$)
{
  my $url = shift // die;
  my $config = shift;

  my ($url_prefix, $path) = check_url($url);
  die "invalid url: $url" unless(defined($path));
  my $print = $path;
  my $name = $path;
  $name =~ s/^.*\///; # NAME = last path component
  $name = '/' if($name eq "");

  my $host = undef;
  my $port = undef;
  if($url_prefix) {
    # split "ssh://host[:port]" into host and (optional) port;
    # order matters: the port is stripped before the ipv6 brackets
    $host = $url_prefix;
    die unless($host =~ s/^ssh:\/\///);
    $port = $1 if($host =~ s/:([1-9][0-9]*)$//);
    $print = $host . (defined($port) ? "[$port]:" : ":") . $path;
    $host =~ s/^\[//; # remove brackets from ipv6_addr
    $host =~ s/\]$//; # remove brackets from ipv6_addr
  }

  # Note that PATH and URL have no trailing slash, except if "/".
  # Note that URL and URL_PREFIX can contain ipv6 address in brackets (e.g. "[::1]").
  return {
    HOST       => $host, # hostname|ipv4_address|ipv6_address|<undef>
    PORT       => $port, # port|<undef>
    NAME       => $name,
    PATH       => $path,
    PRINT      => $print, # "hostname:/path" or "hostname[port]:/path"
    URL        => $url_prefix . $path, # ssh://hostname[:port]/path
    URL_PREFIX => $url_prefix, # ssh://hostname[:port] (or "" if local)
    MACHINE_ID => $url_prefix || "LOCAL:", # unique: "LOCAL:" or hostname and port
    CONFIG     => $config,

    # These are added in vinfo_init_root
    #NODE_SUBDIR => undef,
    #VINFO_MOUNTPOINT => undef,
  }
}
|
|
|
|
|
|
sub vinfo_child($$;$)
{
  # Create a vinfo describing "$parent/$rel_path": host, port, url
  # prefix, machine id and mountpoint are inherited from $parent; the
  # config may optionally be overridden.
  my $parent = shift || die;
  my $rel_path = shift // die;
  my $config = shift; # override parent config

  # split rel_path at the LAST slash: "$subvol_dir/$name"
  my $name = $rel_path;
  my $subvol_dir = ($name =~ s/^(.*)\///) ? $1 : "";

  # PRINT gets a separating slash only if the parent does not already
  # end in one.
  my $print_sep = ($parent->{PRINT} =~ /\/$/) ? "" : "/";

  # Note that PATH and URL intentionally contain "//" if $parent->{PATH} = "/".
  my $child = {
    HOST       => $parent->{HOST},
    PORT       => $parent->{PORT},
    NAME       => $name,
    PATH       => $parent->{PATH} . '/' . $rel_path,
    PRINT      => $parent->{PRINT} . $print_sep . $rel_path,
    URL        => $parent->{URL} . '/' . $rel_path,
    URL_PREFIX => $parent->{URL_PREFIX},
    MACHINE_ID => $parent->{MACHINE_ID},
    CONFIG     => $config // $parent->{CONFIG},
    VINFO_MOUNTPOINT => $parent->{VINFO_MOUNTPOINT},

    # NOTE: these are NOT present in non-child vinfo, and should be used
    # only for printing and comparing results of vinfo_subvol_list.
    SUBVOL_PATH => $rel_path,
    SUBVOL_DIR  => $subvol_dir, # SUBVOL_PATH=SUBVOL_DIR/NAME
  };

  return $child;
}
|
|
|
|
|
|
# Build the ssh command prefix (arrayref) used to reach $vinfo's host,
# honoring the configured port, user, identity, cipher spec and
# compression. Returns undef for local vinfo (no HOST). Options:
#   disable_compression => 1  force "-o compression=no" (for data streams)
sub vinfo_rsh($;@)
{
  my $vinfo = shift || die;
  my %opts = @_;
  my $host = $vinfo->{HOST};
  return undef unless(defined($host));

  my $config = $vinfo->{CONFIG};
  die unless($config);

  # as of btrbk-0.28.0, ssh port is a property of a "vinfo", set with
  # "ssh://hostname[:port]" in 'volume' and 'target' sections. Note
  # that the port number is also used for the MACHINE_ID to
  # distinguish virtual machines on same host with different ports.
  my $ssh_port = $vinfo->{PORT};
  unless($ssh_port) {
    # PORT defaults to ssh_port (DEPRECATED)
    $ssh_port = config_key($config, "ssh_port") // "default";
    $ssh_port = undef if($ssh_port eq "default");
  }
  my $ssh_user = config_key($config, "ssh_user");
  my $ssh_identity = config_key($config, "ssh_identity");
  my $ssh_compression = config_key($config, "ssh_compression");
  my $ssh_cipher_spec = join(",", @{config_key($config, "ssh_cipher_spec")});
  my @ssh_options; # as of btrbk-0.29.0, we run ssh without -q (catching @stderr)
  push(@ssh_options, '-p', $ssh_port) if($ssh_port);
  push(@ssh_options, '-c', $ssh_cipher_spec) if($ssh_cipher_spec ne "default");
  push(@ssh_options, '-i', { unsafe => $ssh_identity }) if($ssh_identity); # NOTE: hackily used in run_cmd on errors
  if($opts{disable_compression}) {
    push(@ssh_options, '-o', 'compression=no'); # force ssh compression=no (in case it is defined in ssh_config)
  } elsif($ssh_compression) {
    push(@ssh_options, '-C');
  }
  my $ssh_dest = $ssh_user ? $ssh_user . '@' . $host : $host;
  return ['ssh', @ssh_options, $ssh_dest ];
}
|
|
|
|
|
|
sub vinfo_cmd($$@)
{
  # Map a generic command name to its backend-specific invocation (see
  # %backend_cmd_map for the configured backend), appending any extra
  # arguments. Unmapped commands are split on whitespace as-is.
  my $vinfo = shift || die;
  my $cmd = shift || die;
  my $backend = config_key_lru($vinfo, "backend") // die;
  my $mapped = $backend_cmd_map{$backend}{$cmd};
  $mapped //= [ split(" ", $cmd) ];
  return [ @$mapped, @_ ];
}
|
|
|
|
sub _get_btrbk_date(@)
{
  # Convert named capture buffers (%+) of $btrbk_file_match into
  # [ unix_time, NN, has_exact_time ]. With a zone designator (zz) the
  # timestamp is interpreted as UTC plus the ISO 8601 offset; without
  # one it is interpreted in the local timezone. Returns undef on an
  # invalid date or malformed offset.
  my %a = @_; # named capture buffers (%+) from $btrbk_file_match

  my @tm = ( ($a{ss} // 0), ($a{mm} // 0), ($a{hh} // 0), $a{DD}, ($a{MM} - 1), ($a{YYYY} - 1900) );
  my $NN = $a{NN} // 0;
  my $zz = $a{zz};
  my $has_exact_time = defined($a{hh}); # false if timestamp_format=short

  # Time::Local croaks on invalid dates; eval_quiet leaves $time undef
  my $time;
  if(defined($zz)) {
    eval_quiet { $time = timegm(@tm); };
  }
  else {
    eval_quiet { $time = timelocal(@tm); };
  }
  return undef unless(defined($time));

  # handle ISO 8601 time offset ("Z", or "+hhmm"/"-hhmm")
  if(defined($zz)) {
    if($zz eq 'Z') {
      # Zulu time == UTC: nothing to adjust
    }
    elsif($zz =~ /^([+-])([0-9][0-9])([0-9][0-9])$/) {
      my $offset = ($2 * 3600) + ($3 * 60);
      # "+hhmm" is ahead of UTC (subtract), "-hhmm" is behind (add)
      $time += ($1 eq '-') ? $offset : -$offset;
    }
    else {
      return undef;
    }
  }

  return [ $time, $NN, $has_exact_time ];
}
|
|
|
|
# If the node's REL_PATH matches a btrbk (or timeshift) snapshot naming
# scheme, decorate the node with BTRBK_BASENAME / BTRBK_DATE /
# BTRBK_FAMILY / BTRBK_TAG (and BTRBK_RAW for raw targets). Returns the
# node on success, undef if the name does not match or has an invalid
# timestamp.
sub add_btrbk_filename_info($;$)
{
  my $node = shift;
  my $raw_info = shift; # if set, match raw-target postfix and attach it as BTRBK_RAW
  return undef if($node->{is_root});
  my $rel_path = $node->{REL_PATH};

  # NOTE: unless long-iso file format is encountered, the timestamp is interpreted in local timezone.

  # %match holds the named capture buffers (%+) of the successful match,
  # plus the naming "family" it belongs to
  my %match;
  if($raw_info) {
    if($rel_path =~ /^(?:(?<subdir>.*)\/)?$btrbk_file_match$raw_postfix_match$/) {
      %match = ( %+, family => "btrbk" );
    }
  } else {
    if($rel_path =~ /^(?:(?<subdir>.*)\/)?$btrbk_file_match$/) {
      %match = ( %+, family => "btrbk" );
    }
    elsif($rel_path =~ /^(?:(?<subdir>.*)\/)?$timeshift_file_match$/) {
      %match = ( %+, family => "timeshift" );
    }
  };
  return undef unless $match{name};
  my $btrbk_date = _get_btrbk_date(%match); # use named capture buffers of previous match
  unless($btrbk_date) {
    WARN "Illegal timestamp on subvolume \"$node->{REL_PATH}\", ignoring";
    return undef;
  }

  $node->{BTRBK_BASENAME} = $match{name};
  $node->{BTRBK_DATE} = $btrbk_date;
  $node->{BTRBK_RAW} = $raw_info if($raw_info);
  $node->{BTRBK_FAMILY} = $match{family};
  # NOTE: BTRBK_TAG is uniqe within TOP_LEVEL->SUBTREE: we cannot use $node->{TOP_LEVEL}{uuid} here (unavailable on old filesystems)
  $node->{BTRBK_TAG} = join(":", $match{name}, $match{family}, ($match{subdir} // ""));
  return $node;
}
|
|
|
|
|
|
sub _find_mountpoint($$)
{
  # Walk the mountinfo tree downwards from $cur, descending into the
  # (first) child mount whose mount_point is a path prefix of $path,
  # until no deeper mount matches. Returns the deepest mountinfo node.
  my ($cur, $path) = @_;
  $path .= '/' unless($path =~ /\/$/); # trailing slash: exact component prefix match
  DESCEND: while(my $children = $cur->{SUBTREE}) {
    foreach my $child (@$children) {
      if($path =~ /^\Q$child->{mount_point}\E\//) {
        $cur = $child;
        next DESCEND;
      }
    }
    last DESCEND; # no child mount covers $path
  }
  TRACE "resolved mount point for \"$path\": $cur->{mount_point} (mount_source=$cur->{mount_source}, subvolid=" . ($cur->{MNTOPS}->{subvolid} // '<undef>') . ")" if($do_trace);
  return $cur;
}
|
|
|
|
# Fetch (and cache, per MACHINE_ID) the mountinfo list of $vol's host,
# and link it into a tree via PARENT/SUBTREE references following
# mount_id/parent_id. Returns the root mountinfo node (with
# MOUNTINFO_LIST attached), or undef on error.
sub mountinfo_tree($)
{
  my $vol = shift;
  my $mountinfo = $mountinfo_cache{$vol->{MACHINE_ID}};
  TRACE "mountinfo_cache " . ($mountinfo ? "HIT" : "MISS") . ": $vol->{MACHINE_ID}" if($do_trace);
  unless($mountinfo) {
    $mountinfo = system_list_mountinfo($vol);
    return undef unless($mountinfo);
    $mountinfo_cache{$vol->{MACHINE_ID}} = $mountinfo;
  }
  # already linked into a tree on a previous call
  return $mountinfo->[0]->{TREE_ROOT} if($mountinfo->[0]->{TREE_ROOT});

  my %id = map +( $_->{mount_id} => $_ ), @$mountinfo;
  my $tree_root;
  foreach my $node (@$mountinfo) {
    my $parent = $id{$node->{parent_id}};
    # a node is the root if it has no (known) parent, or is its own parent
    if($parent && ($node->{mount_id} != $node->{parent_id})) {
      $node->{PARENT} = $parent;
      push @{$parent->{SUBTREE}}, $node;
    } else {
      die "multiple root mount points" if($tree_root);
      $tree_root = $node;
    }
    # populate cache (mount points are always real paths)
    $realpath_cache{$vol->{URL_PREFIX} . $node->{mount_point}} = $node->{mount_point};
  }
  die "no root mount point" unless($tree_root);
  $_->{TREE_ROOT} = $tree_root foreach (@$mountinfo);
  $tree_root->{MOUNTINFO_LIST} = $mountinfo;

  return $tree_root;
}
|
|
|
|
|
|
sub vinfo_realpath($@)
{
  # Resolve the physical path of $vol->{URL}, memoized in
  # %realpath_cache (negative results are cached too).
  my $vol = shift // die;
  my $url = $vol->{URL} // die;
  unless(exists($realpath_cache{$url})) {
    $realpath_cache{$url} = system_realpath($vol);
  }
  return $realpath_cache{$url};
}
|
|
|
|
|
|
sub vinfo_mkdir($)
{
  # Create the directory at $vol (memoized in %mkdir_cache).
  # Returns 1 on success, -1 if the path already exists, undef on
  # failure (cached, so a failed mkdir is not retried).
  my $vol = shift // die;
  my $url = $vol->{URL} // die;
  return $mkdir_cache{$url} if(exists($mkdir_cache{$url}));

  # path already exists: nothing to create
  return -1 if(vinfo_realpath($vol));

  $mkdir_cache{$url} = system_mkdir($vol);
  return undef unless($mkdir_cache{$url});
  $vol->{SUBDIR_CREATED} = 1;
  delete $realpath_cache{$url}; # clear realpath cache (allow retry)
  return 1;
}
|
|
|
|
|
|
# Resolve the mount point of $vol. Returns ($realpath, $mountinfo_node)
# on success, undef on error. Options:
#   fs_type => 'btrfs'   fail unless the mount point has this fs type
#   autofs_retry => 1    (internal) set on the single retry after
#                        triggering an automount
sub vinfo_mountpoint
{
  my $vol = shift // die;
  my %args = @_;

  DEBUG "Resolving mount point for: $vol->{PRINT}";
  my $mountinfo_root = mountinfo_tree($vol)
    or return undef;

  my $realpath = vinfo_realpath($vol)
    or return undef;

  my $mountpoint = _find_mountpoint($mountinfo_root, $realpath);

  # handle autofs
  if($mountpoint->{fs_type} eq 'autofs') {
    if($args{autofs_retry}) {
      # still autofs after triggering the automount: give up
      DEBUG "Non-btrfs autofs mount point for: $vol->{PRINT}";
      return undef;
    }
    DEBUG "Found autofs mount point, triggering automount on $mountpoint->{mount_point} for: $vol->{PRINT}";
    # any filesystem access triggers the automount; the result of
    # "btrfs subvolume show" itself is irrelevant here
    btrfs_subvolume_show(vinfo($vol->{URL_PREFIX} . $mountpoint->{mount_point}, $vol->{CONFIG}));
    # invalidate the mountinfo cache and re-resolve (once)
    $mountinfo_cache{$vol->{MACHINE_ID}} = undef;
    return vinfo_mountpoint($vol, %args, autofs_retry => 1);
  }

  if($args{fs_type} && ($mountpoint->{fs_type} ne $args{fs_type})) {
    ERROR "Not a btrfs filesystem (mountpoint=\"$mountpoint->{mount_point}\", fs_type=\"$mountpoint->{fs_type}\"): $vol->{PRINT}";
    return undef;
  }
  DEBUG "Mount point for \"$vol->{PRINT}\": $mountpoint->{mount_point} (mount_source=$mountpoint->{mount_source}, fs_type=$mountpoint->{fs_type})";
  return ($realpath, $mountpoint);
}
|
|
|
|
|
|
# Initialize $vol as a btrfs location: resolve its mount point, parse
# the btrfs subvolume tree, and attach the matching tree node to
# $vol->{node} (plus NODE_SUBDIR if $vol points below a subvolume, and
# VINFO_MOUNTPOINT for the enclosing mount point). Returns the resolved
# tree node, or undef on error.
sub vinfo_init_root($)
{
  my $vol = shift || die;

  @stderr = (); # clear @stderr (propagated for logging)
  my ($real_path, $mountpoint) = vinfo_mountpoint($vol, fs_type => 'btrfs');
  return undef unless($mountpoint);

  # all mounts of the same device: needed both for the subvolid fixup
  # below and for attaching mount points to tree nodes in btr_tree()
  my @same_source_mounts = grep { $_->{mount_source} eq $mountpoint->{mount_source} } @{$mountpoint->{TREE_ROOT}{MOUNTINFO_LIST}};
  foreach my $mnt (grep { !defined($_->{MNTOPS}{subvolid}) } @same_source_mounts) {
    # kernel <= 4.2 does not have subvolid=NN in /proc/self/mounts, read it with btrfs-progs
    DEBUG "No subvolid provided in mounts for: $mnt->{mount_point}";
    my $detail = btrfs_subvolume_show(vinfo($vol->{URL_PREFIX} . $mnt->{mount_point}, $vol->{CONFIG}));
    return undef unless($detail);
    $mnt->{MNTOPS}{subvolid} = $detail->{id} || die; # also affects %mountinfo_cache
  }

  # read btrfs tree for the mount point
  @stderr = (); # clear @stderr (propagated for logging)
  my $mnt_path = $mountpoint->{mount_point};
  my $mnt_vol = vinfo($vol->{URL_PREFIX} . $mnt_path, $vol->{CONFIG});
  my $mnt_tree_root = btr_tree($mnt_vol, $mountpoint->{MNTOPS}{subvolid}, $mountpoint->{mount_source}, \@same_source_mounts);
  return undef unless($mnt_tree_root);

  # find longest match in btrfs tree
  $real_path .= '/' unless($real_path =~ /\/$/); # correctly handle root path="/"
  my $ret = _get_longest_match($mnt_tree_root, $mnt_path, $real_path) // die;
  my $tree_root = $ret->{node};
  return undef unless($tree_root);

  # set NODE_SUBDIR if $vol->{PATH} points to a regular (non-subvolume) directory.
  # in other words, "PATH=<path_to_subvolume>/NODE_SUBDIR"
  my $node_subdir = $real_path;
  die unless($node_subdir =~ s/^\Q$ret->{path}\E//); # NOTE: $ret->{path} has trailing slash!
  $node_subdir =~ s/\/+$//;
  $vol->{NODE_SUBDIR} = $node_subdir if($node_subdir ne '');
  $vol->{node} = $tree_root;

  $vol->{VINFO_MOUNTPOINT} = vinfo($vol->{URL_PREFIX} . $mnt_path, $vol->{CONFIG});
  $vol->{VINFO_MOUNTPOINT}{node} = $mnt_tree_root;

  return $tree_root;
}
|
|
|
|
|
|
# Initialize a "raw" target root: build a fake btrfs tree from the
# "*.info" sidecar files found in $droot (raw targets are plain image
# files, not real subvolumes). Results are memoized in %raw_url_cache,
# keyed by URL and by resolved realpath URL.
#
# Side effects on $droot: sets node and VINFO_MOUNTPOINT (fake).
# Returns the (fake) tree root node, or undef on error.
#
# Fix: error message read "Failed create raw node" (missing "to").
sub vinfo_init_raw_root($;@)
{
  my $droot = shift || die;
  my $tree_root = $raw_url_cache{$droot->{URL}};
  TRACE "raw_url_cache " . ($tree_root ? "HIT" : "MISS") . ": URL=$droot->{URL}" if($do_trace);
  unless($tree_root) {
    # URL miss: retry the cache with the resolved real path
    if(my $real_path = vinfo_realpath($droot)) {
      my $real_url = $droot->{URL_PREFIX} . $real_path;
      $tree_root = $raw_url_cache{$real_url};
      TRACE "raw_url_cache " . ($tree_root ? "HIT" : "MISS") . ": REAL_URL=$real_url" if($do_trace);
    }
  }

  unless($tree_root) {
    DEBUG "Creating raw subvolume list: $droot->{PRINT}";

    # create fake btr_tree
    $tree_root = { id => 5,
                   is_root => 1,
                   mount_source => '@raw_tree', # for _fs_path (logging)
                   host_mount_source => $droot->{URL} . '@raw_tree', # for completeness (this is never used)
                   GEN_MAX => 1,
                   SUBTREE => [],
                   UUID_HASH => {},
                   RECEIVED_UUID_HASH => {},
                   PARENT_UUID_HASH => {},
                   URL_PREFIX => $droot->{URL_PREFIX}, # for _fs_path (logging)
                   MOUNTINFO => [ { mount_point => $droot->{PATH} } ], # for _fs_path (logging)
                 };
    $tree_root->{TREE_ROOT} = $tree_root;

    # list and parse *.info
    my $raw_info_ary = system_read_raw_info_dir($droot);
    return undef unless($raw_info_ary);

    # inject nodes to fake btr_tree
    $droot->{node} = $tree_root;
    foreach my $raw_info (sort { $a->{NAME} cmp $b->{NAME} } @$raw_info_ary)
    {
      # Set btrfs subvolume information from filename info.
      #
      # Important notes:
      # - Raw targets have a fake uuid and parent_uuid.
      # - RECEIVED_PARENT_UUID in BTRBK_RAW is the "parent of the
      #   source subvolume", NOT the "parent of the received subvolume".
      #
      # If restoring a backup from raw btrfs images (using "incremental yes|strict"):
      # "btrfs send -p parent source > svol.btrfs", the backups
      # on the target will get corrupted (unusable!) as soon as
      # any file in the chain gets deleted.
      #
      # We need to make sure btrbk will NEVER delete those (see _raw_depends):
      # - svol.<timestamp>--<received_uuid_0>.btrfs : root (full) image
      # - svol.<timestamp>--<received_uuid-n>[@<received_uuid_n-1>].btrfs : incremental image

      my $subvol = vinfo_child($droot, $raw_info->{NAME});
      unless(vinfo_inject_child($droot, $subvol, {
        TARGET_TYPE => $raw_info->{TYPE},
        parent_uuid => '-', # NOTE: correct value gets inserted below
        # Incomplete raw fakes get same semantics as real subvolumes (readonly=0, received_uuid='-')
        received_uuid => ($raw_info->{INCOMPLETE} ? '-' : $raw_info->{RECEIVED_UUID}),
        readonly => ($raw_info->{INCOMPLETE} ? 0 : 1),
      }, $raw_info))
      {
        ERROR("Failed to create raw node \"$raw_info->{NAME}\" from raw info file: \"$raw_info->{INFO_FILE}\"");
        return undef;
      }
    }

    my @subvol_list = @{vinfo_subvol_list($droot, sort => 'path')};
    DEBUG "Found " . scalar(@subvol_list) . " raw subvolume backups in: $droot->{PRINT}";

    # set parent_uuid based on RECEIVED_PARENT_UUID
    foreach my $node (@{$tree_root->{SUBTREE}}) {
      my $parents = $tree_root->{RECEIVED_UUID_HASH}{$node->{BTRBK_RAW}{RECEIVED_PARENT_UUID}} // [];
      my $parent = (grep { $_->{BTRBK_RAW}{RECEIVED_PARENT_UUID} eq '-' } @$parents)[0] // $parents->[0]; # if multiple candidates, prefer non-incremental
      TRACE "vinfo_init_raw_root: $node->{BTRBK_RAW}{NAME} parent=" . ($parent ? $parent->{BTRBK_RAW}{NAME} : "") if($do_trace);
      next unless $parent;
      $node->{parent_uuid} = $parent->{uuid};
      push @{$tree_root->{PARENT_UUID_HASH}{$node->{parent_uuid}}}, $node;
    }
  }

  $droot->{node} = $tree_root;
  $droot->{VINFO_MOUNTPOINT} = $droot; # fake mountpoint
  $raw_url_cache{$droot->{URL}} = $tree_root;

  return $tree_root;
}
|
|
|
|
|
|
# Recursively collect vinfo entries for every subvolume node below
# $root_node, relative to $parent_vinfo.
#
# Args (positional): tree node, parent vinfo, readonly-only flag,
# accumulator arrayref, path prefix, recursion depth (the last three
# default to [], "" and 0).
# Returns the accumulator arrayref.
sub _vinfo_subtree_list
{
  my ($root_node, $parent_vinfo, $readonly_only, $acc, $prefix, $level) = @_;
  $acc    //= [];
  $prefix //= "";
  $level  //= 0;

  # If $parent_vinfo->{NODE_SUBDIR} is set, its PATH does not point to a
  # subvolume directly but to "<path_to_subvolume>/NODE_SUBDIR". On the
  # first level only: skip nodes outside NODE_SUBDIR, and strip the
  # subdir from the relative path of the remaining ones.
  my $subdir = ($level == 0) ? $parent_vinfo->{NODE_SUBDIR} : undef;

  for my $child (@{$root_node->{SUBTREE}}) {
    my $relative = $child->{REL_PATH};
    next if(defined($subdir) && !($relative =~ s/^\Q$subdir\E\///));
    my $full_path = $prefix . $relative; # always points to a subvolume

    # honor readonly filter, then build and push the vinfo
    unless($readonly_only && !$child->{readonly}) {
      my $entry = vinfo_child($parent_vinfo, $full_path);
      $entry->{node} = $child;
      $entry->{subtree_depth} = $level; # additional information for consumers
      push @$acc, $entry;
    }

    _vinfo_subtree_list($child, $parent_vinfo, $readonly_only, $acc, $full_path . '/', $level + 1);
  }
  return $acc;
}
|
|
|
|
|
|
# Return an arrayref of vinfo for all subvolumes below $vol.
# opts:
#   readonly - only include read-only subvolumes
#   sort     - 'path': return a list sorted by SUBVOL_PATH (any other
#              value is a fatal programming error)
sub vinfo_subvol_list($;@)
{
  my ($vol, %opts) = @_;
  die unless($vol);

  TRACE "Creating subvolume list for: $vol->{PRINT}" if($do_trace);

  # recurse into tree from $vol->{node}, returns arrayref of vinfo
  my $subvols = _vinfo_subtree_list($vol->{node}, $vol, $opts{readonly});

  return $subvols unless($opts{sort});
  die unless($opts{sort} eq 'path');
  return [ sort { $a->{SUBVOL_PATH} cmp $b->{SUBVOL_PATH} } @$subvols ];
}
|
|
|
|
|
|
# returns vinfo_child if $node is in tree below $vol (or equal if allow_equal), or undef
#
# Walks up the node's TOP_LEVEL chain until $vol->{node} (or a
# filesystem root) is reached, collecting relative path components.
# opts:
#   allow_equal       - return $vol itself when $node equals $vol->{node}
#                       (only if $vol has no NODE_SUBDIR)
#   btrbk_direct_leaf - additionally require $node to be a direct leaf
#                       with BTRBK_BASENAME equal to the given value
sub vinfo_resolved($$;@)
{
  my $node = shift || die;
  my $vol = shift || die; # root vinfo node
  my %opts = @_;
  my $top_id = $vol->{node}{id};
  my @path;
  my $nn = $node;
  # climb towards $vol->{node}, collecting relative path components
  while(($nn->{id} != $top_id) && (!$nn->{is_root})) {
    unshift(@path, $nn->{REL_PATH});
    $nn = $nn->{TOP_LEVEL};
  }
  if(scalar(@path) == 0) {
    # $node is $vol->{node} itself
    return $vol if($opts{allow_equal} && not defined($vol->{NODE_SUBDIR}));
    return undef;
  }
  # hit a filesystem root without passing $vol->{node}: not below $vol
  return undef if($nn->{is_root} && (!$vol->{node}{is_root}));
  my $jpath = join('/', @path);
  if(defined($vol->{NODE_SUBDIR})) {
    # $vol points below the subvolume root: node must be inside NODE_SUBDIR
    return undef unless($jpath =~ s/^\Q$vol->{NODE_SUBDIR}\E\///);
  }
  if(defined($opts{btrbk_direct_leaf})) {
    return undef if($jpath =~ /\//); # not a direct leaf
    return undef unless(exists($node->{BTRBK_BASENAME}) && ($node->{BTRBK_BASENAME} eq $opts{btrbk_direct_leaf}))
  }
  my $vinfo = vinfo_child($vol, $jpath);
  $vinfo->{node} = $node;
  return $vinfo;
}
|
|
|
|
|
|
# returns vinfo if $node is below any mountpoint of $vol
#
# Tries vinfo_resolved() against every mounted node of the filesystem
# that $vol lives on, returning the first hit (or undef).
sub vinfo_resolved_all_mountpoints($$)
{
  my ($node, $vol) = @_;
  die unless($node && $vol);

  foreach my $mounted (@{$vol->{node}{TREE_ROOT}{MOUNTED_NODES}}) {
    foreach my $mi (@{$mounted->{MOUNTINFO}}) {
      # build a vinfo rooted at this mount point
      my $mnt_vol = vinfo($vol->{URL_PREFIX} . $mi->{mount_point}, $vol->{CONFIG});
      $mnt_vol->{node} = $mounted;
      TRACE "vinfo_resolved_all_mountpoints: trying mountpoint: $mnt_vol->{PRINT}" if($do_trace);
      my $resolved = vinfo_resolved($node, $mnt_vol, allow_equal => 1);
      return $resolved if($resolved);
    }
  }
  return undef;
}
|
|
|
|
# Look up a single subvolume of $vol by its SUBVOL_PATH.
# Returns the matching vinfo, or undef if not found.
sub vinfo_subvol($$)
{
  my $vol = shift || die;
  my $subvol_path = shift // die;
  my ($found) = grep { $_->{SUBVOL_PATH} eq $subvol_path } @{vinfo_subvol_list($vol)};
  return $found;
}
|
|
|
|
|
|
# Inject a (fake or freshly created) subvolume node below $vinfo's tree
# node, and attach it to $vinfo_child.
#
# $detail holds the subvolume detail fields for the new node, $raw_info
# optionally carries raw-target filename information.
# Returns $vinfo_child on success, undef if the btrbk filename info
# could not be added.
sub vinfo_inject_child($$$;$)
{
  my ($vinfo, $vinfo_child, $detail, $raw_info) = @_;

  # REL_PATH of the injected node is relative to the subvolume root, so
  # prepend NODE_SUBDIR if $vinfo points below a subvolume.
  my $rel_path = $vinfo_child->{SUBVOL_PATH};
  $rel_path = $vinfo->{NODE_SUBDIR} . '/' . $rel_path if(defined($vinfo->{NODE_SUBDIR}));

  my $node = btr_tree_inject_node($vinfo->{node}, $detail, $rel_path);
  return undef unless(add_btrbk_filename_info($node, $raw_info));

  $vinfo_child->{node} = $node;
  TRACE "vinfo_inject_child: injected child id=$node->{id} to $vinfo->{PRINT}" if($do_trace);
  return $vinfo_child;
}
|
|
|
|
|
|
# returns hash: ( $prefix_{url,path,host,name,subvol_path,rsh} => value, ... )
#
# Flatten selected vinfo fields into a key/value list with keys prefixed
# by "$prefix_" (plus a bare "$prefix" key holding PRINT, if $prefix is
# non-empty). Used for building format/log output.
# Returns the empty list if $vinfo is undef.
#
# Fix: the "rsh" assignment was terminated with "," (comma operator)
# instead of ";" — harmless due to precedence, but clearly unintended.
sub vinfo_prefixed_keys($$)
{
  my $prefix = shift // die;
  my $vinfo = shift;
  return () unless($vinfo);
  my %ret;
  if($prefix) {
    $ret{$prefix} = $vinfo->{PRINT};
    $prefix .= '_';
  }
  foreach (qw( URL PATH HOST PORT NAME )) {
    $ret{$prefix . lc($_)} = $vinfo->{$_};
  }
  $ret{$prefix . "subvolume"} = $vinfo->{PATH};
  my $rsh = vinfo_rsh($vinfo);
  $ret{$prefix . "rsh"} = $rsh ? _safe_cmd($rsh) : undef;
  return %ret;
}
|
|
|
|
|
|
# Attach $vinfo (and optionally its snapshot-root vinfo) to its own
# config section, establishing the CONFIG<->VINFO back reference.
# Dies if the config section already has a VINFO assigned.
sub vinfo_assign_config($;$)
{
  my ($vinfo, $snaproot) = @_;
  die unless($vinfo);
  my $config = $vinfo->{CONFIG} or die;
  die if($config->{VINFO}); # refuse double assignment
  $config->{VINFO} = $vinfo;
  $config->{VINFO_SNAPROOT} = $snaproot;
}
|
|
|
|
|
|
# Accessor: the snapshot-root vinfo recorded on this vinfo's config
# section (set by vinfo_assign_config), or undef if none was assigned.
sub vinfo_snapshot_root($)
{
  my ($vinfo) = @_;
  return $vinfo->{CONFIG}{VINFO_SNAPROOT};
}
|
|
|
|
|
|
# Return the list of subsection config hashes of $config, asserting that
# every one of them belongs to the expected $context.
sub config_subsection($$;$)
{
  my ($config, $context) = @_;
  die unless($config && $context);
  my @subsections = @{$config->{SUBSECTION}};
  # sanity check: all subsections must share the requested context
  die if grep { $_->{CONTEXT} ne $context } @subsections;
  return @subsections;
}
|
|
|
|
# Return the VINFO entries of all subsections (of context $context) of a
# config section, or of a vinfo's config section. Aborted subsections
# are skipped unless $include_aborted is set. Dies on broken
# CONFIG<->VINFO back references (internal consistency check).
#
# Fix: removed unused local "my $vinfo_check;" (dead code).
sub vinfo_subsection($$;$)
{
  # if config: must have SUBSECTION key
  # if vinfo: must have CONFIG key
  my $config_or_vinfo = shift || die;
  my $context = shift || die;
  my $include_aborted = shift;
  my @config_list;
  if(exists($config_or_vinfo->{SUBSECTION})) {
    @config_list = config_subsection($config_or_vinfo, $context);
  }
  else {
    @config_list = config_subsection($config_or_vinfo->{CONFIG}, $context);
    die unless($config_or_vinfo->{CONFIG}->{VINFO} == $config_or_vinfo); # check back reference
  }

  return map {
    die unless($_->{VINFO} == $_->{VINFO}->{CONFIG}->{VINFO}); # check all back references
    ($include_aborted || !$_->{ABORTED}) ? $_->{VINFO} : ()
  } @config_list;
}
|
|
|
|
|
|
# Parse a single filter statement into a match structure for vinfo_match().
# Accepted forms:
#  - (absolute) path / url with wildcards
#  - group (exact match)
#  - host[:port] (exact match)
# Returns a hashref containing any of { url_regex, group_eq,
# host_port_eq } plus { unparsed, origin, reason }, or undef if the
# statement matches none of the accepted forms.
sub vinfo_filter_statement($$) {
  my $filter = shift // die;
  my $origin = shift // die; # where the filter came from (used in "reason", for messages)
  my %ret = ( unparsed => $filter, origin => $origin, reason => "$origin: $filter" );

  my ($url_prefix, $path) = check_url($filter, accept_wildcards => 1);
  unless($path) {
    # allow relative path with wildcards
    $url_prefix = "";
    $path = check_file($filter, { relative => 1, wildcards => 1 }, sanitize => 1);
  }
  if($path) {
    # support "*some*file*", "*/*"
    # "*" runs become "[^\/]*" (match within a single path component);
    # everything else is quoted literally, lowercased prefix+path
    my $regex = join('[^\/]*', map(quotemeta($_), split(/\*+/, lc($url_prefix) . $path, -1)));
    if($path =~ /^\//) {
      $ret{url_regex} = qr/^$regex$/; # absolute path, match full string
    } else {
      $ret{url_regex} = qr/\/$regex$/; # match end of string
    }
  }

  $ret{group_eq} = $filter if($filter =~ /^$group_match$/);

  if($filter =~ /^(?<host>$host_name_match|$ipv4_addr_match|\[$ipv6_addr_match\])(:(?<port>[1-9][0-9]*))?$/) {
    my ($host, $port) = ( $+{host}, $+{port} );
    $host =~ s/^\[//; # remove brackets from ipv6_addr
    $host =~ s/\]$//; # remove brackets from ipv6_addr
    $ret{host_port_eq} = { host => $host, port => $port };
  }
  elsif($filter =~ /^$ipv6_addr_match$/) {
    # bare (unbracketed) ipv6 address: no port possible
    $ret{host_port_eq} = { host => $filter } ;
  }

  TRACE 'vinfo_filter_statement: filter="' . $filter . '" url_regex="' . ($ret{url_regex} // "<undef>") . '" group_eq="' . ($ret{group_eq} // "<undef>") . '" host_port_eq="' . ($ret{host_port_eq} ? $ret{host_port_eq}{host} . ":" . ($ret{host_port_eq}{port} // "<undef>") : "<undef>") . '"' if($do_trace);
  return undef unless(exists($ret{url_regex}) || exists($ret{group_eq}) || exists($ret{host_port_eq}));
  return \%ret;
}
|
|
|
|
|
|
# Match $vinfo against an arrayref of parsed filter statements (see
# vinfo_filter_statement): by group, by url regex, and by host[:port].
#
# Without opts{flag_matched}: returns the first matching filter
# statement (hashref), or 0 if none matched (or the section is a dummy).
# With opts{flag_matched}: tests ALL filter statements, sets
# $ff->{$flag_matched} = 1 on each match (mutates the filter entries),
# and returns the number of matches.
sub vinfo_match($$;@)
{
  my $filter = shift;
  my $vinfo = shift;
  my %opts = @_;
  my $flag_matched = $opts{flag_matched};

  # never match dummy volume section
  return 0 if($vinfo->{CONFIG}{DUMMY});

  # match URL against sane path (can contain "//", see vinfo_child),
  # no wildcards
  my ($url_prefix, $path) = check_url($vinfo->{URL});
  my $url = defined($path) ? lc($url_prefix) . $path : undef;
  my $count = 0;
  foreach my $ff (@$filter) {
    # group match: exact match against any configured group of the section
    if(defined($ff->{group_eq}) && (grep { $ff->{group_eq} eq $_ } @{$vinfo->{CONFIG}{group}})) {
      TRACE "filter \"$ff->{unparsed}\" equals $vinfo->{CONFIG}{CONTEXT} group: $vinfo->{PRINT}" if($do_trace);
      return $ff unless($flag_matched);
      #push @{$ff->{$flag_matched}}, 'group=' . $ff->{group_eq};
      $ff->{$flag_matched} = 1;
      $count++;
    }
    # url match: regex built from (wildcard) path filter
    if(defined($ff->{url_regex}) && defined($url) && ($url =~ /$ff->{url_regex}/)) {
      TRACE "filter \"$ff->{unparsed}\" matches $vinfo->{CONFIG}{CONTEXT} url: $vinfo->{PRINT}" if($do_trace);
      return $ff unless($flag_matched);
      #push @{$ff->{$flag_matched}}, $vinfo->{CONFIG}{CONTEXT} . '=' . $vinfo->{PRINT};
      $ff->{$flag_matched} = 1;
      $count++;
    }
    # host match: case-insensitive host; port only compared if the
    # filter specifies one
    if(defined($ff->{host_port_eq}) && defined($vinfo->{HOST})) {
      my $host = $ff->{host_port_eq}{host};
      my $port = $ff->{host_port_eq}{port};
      if((lc($host) eq lc($vinfo->{HOST})) &&
         (!defined($port) || (defined($vinfo->{PORT}) && ($port == $vinfo->{PORT}))))
      {
        TRACE "filter \"$ff->{unparsed}\" matches $vinfo->{CONFIG}{CONTEXT} host: $vinfo->{PRINT}" if($do_trace);
        return $ff unless($flag_matched);
        #push @{$ff->{$flag_matched}}, $vinfo->{CONFIG}{CONTEXT} . '=' . $vinfo->{PRINT};
        $ff->{$flag_matched} = 1;
        $count++;
      }
    }
  }
  return $count;
}
|
|
|
|
|
|
# Return all snapshots related to $svol (by parent_uuid relationship,
# read-only, excluding $svol itself) that resolve within $snaproot.
# If $btrbk_basename is set, only direct leafs with that btrbk basename
# are returned.
sub get_related_snapshots($$;$)
{
  my ($snaproot, $svol, $btrbk_basename) = @_;
  die unless($snaproot);
  die unless(defined($svol));

  my $related = _related_nodes($svol->{node}, readonly => 1, omit_self => 1);
  my @ret;
  foreach my $node (@$related) {
    # vinfo_resolved() returns undef for nodes outside $snaproot
    my $resolved = vinfo_resolved($node, $snaproot, btrbk_direct_leaf => $btrbk_basename);
    push @ret, $resolved if(defined($resolved));
  }

  if($do_trace) { TRACE "get_related_snapshots: found: $_->{PRINT}" foreach(@ret); }
  DEBUG "Found " . scalar(@ret) . " related snapshots of \"$svol->{PRINT}\" in: $snaproot->{PRINT}" . (defined($btrbk_basename) ? "/$btrbk_basename.*" : "");
  return @ret;
}
|
|
|
|
|
|
# Find all read-only subvolumes on the target filesystem (tree of
# $dnode) correlated to source node $snode, by matching uuid and
# received_uuid in both directions. Returns the (possibly empty) list
# of matching nodes; empty if the source is a btrfs root or not
# read-only.
#
# Fix: push of $uuid_hash->{$received_uuid} could push undef into @match
# when the uuid is unknown on the target filesystem (the sibling lookups
# guard with "// []"); now guarded with "// ()".
sub _correlated_nodes($$)
{
  my $dnode = shift || die; # any node on target filesystem
  my $snode = shift || die;
  my @ret;

  if($snode->{is_root}) {
    TRACE "Skip search for correlated targets: source subvolume is btrfs root: " . _fs_path($snode) if($do_trace);
    return @ret;
  }
  unless($snode->{readonly}) {
    TRACE "Skip search for correlated targets: source subvolume is not read-only: " . _fs_path($snode) if($do_trace);
    return @ret;
  }

  # find matches by comparing uuid / received_uuid
  my $uuid = $snode->{uuid};
  my $received_uuid = $snode->{received_uuid};
  $received_uuid = undef if($received_uuid eq '-');

  my $received_uuid_hash = $dnode->{TREE_ROOT}{RECEIVED_UUID_HASH};
  my $uuid_hash = $dnode->{TREE_ROOT}{UUID_HASH};

  # match uuid/received_uuid combinations
  my @match;
  push(@match, @{ $received_uuid_hash->{$uuid} // [] }); # match src.uuid == target.received_uuid
  if($received_uuid) {
    push(@match, $uuid_hash->{$received_uuid} // () ); # match src.received_uuid == target.uuid
    push(@match, @{ $received_uuid_hash->{$received_uuid} // [] }); # match src.received_uuid == target.received_uuid
  }

  # only read-only targets are usable as correlated subvolumes
  @ret = grep($_->{readonly}, @match);
  TRACE "correlated_nodes: dst=\"" . _fs_path($dnode) . "\", src=\"" . _fs_path($snode) . "\": [" . join(", ", map _fs_path($_),@ret) . "]" if($do_trace);
  return @ret;
}
|
|
|
|
|
|
# returns array of vinfo of receive targets matching btrbk name
#
# Looks up subvolumes below $droot correlated to $src_vol (by
# uuid/received_uuid, see _correlated_nodes).
# opts:
#   exact               - only accept raw targets and direct leafs with
#                         a name matching $src_vol->{NAME}
#   warn                - WARN about correlated subvolumes found at
#                         foreign locations (if warn_unknown_targets)
#   ret_unexpected_only - arrayref; filled with the unexpected matches,
#                         but only if no regular target was found
sub get_receive_targets($$;@)
{
  my $droot = shift || die;
  my $src_vol = shift || die;
  my %opts = @_;
  my @ret;

  my @correlated = _correlated_nodes($droot->{node}, $src_vol->{node});
  my @unexpected;
  foreach (@correlated) {
    my $vinfo = vinfo_resolved($_, $droot); # returns undef if not below $droot
    if(exists($_->{BTRBK_RAW})) {
      TRACE "get_receive_targets: found raw receive target: " . _fs_path($_) if($do_trace);
    }
    elsif($vinfo && ($vinfo->{SUBVOL_PATH} eq $src_vol->{NAME})) { # direct leaf, (SUBVOL_DIR = "", matching NAME)
      TRACE "get_receive_targets: found receive target (exact-match): $vinfo->{PRINT}" if($do_trace);
    }
    elsif($vinfo && (not $opts{exact})) {
      TRACE "get_receive_targets: found receive target (non-exact-match): $vinfo->{PRINT}" if($do_trace);
    }
    else {
      # correlated, but at an unexpected location (or exact-match required)
      TRACE "get_receive_targets: skip unexpected match: " . _fs_path($_) if($do_trace);
      push @unexpected, { src_vol => $src_vol, target_node => $_ };
      if($opts{warn} && config_key($droot, "warn_unknown_targets")) {
        WARN "Receive target of \"$src_vol->{PRINT}\" already exists at a foreign location: " . ($vinfo ? $vinfo->{PRINT} : _fs_path($_));
      }
      next;
    }
    push(@ret, $vinfo);
  }
  # propagate unexpected matches only if no regular target was found
  push(@{$opts{ret_unexpected_only}}, @unexpected) if($opts{ret_unexpected_only} && scalar(@unexpected) && !scalar(@ret));
  return @ret;
}
|
|
|
|
|
|
# returns best correlated receive target within droot (independent of btrbk name)
#
# Returns [ $src_vol, $vinfo ] for the first correlated node that
# resolves below $droot (or, with opts{fallback_all_mountpoints}, below
# any mountpoint of the target filesystem), or undef.
# opts{push_inaccessible_nodes}: arrayref, filled with all correlated
# nodes if none of them was accessible.
sub get_best_correlated($$;@)
{
  my $droot = shift || die;
  my $src_vol = shift || die;
  my %opts = @_;

  # all nodes on the target filesystem correlated to src_vol
  my @candidates = _correlated_nodes($droot->{node}, $src_vol->{node});

  # first pass: candidates reachable below $droot itself
  foreach my $node (@candidates) {
    my $resolved = vinfo_resolved($node, $droot);
    return [ $src_vol, $resolved ] if($resolved);
  }

  # second pass (optional): candidates reachable via any mountpoint
  if($opts{fallback_all_mountpoints}) {
    foreach my $node (@candidates) {
      my $resolved = vinfo_resolved_all_mountpoints($node, $droot);
      return [ $src_vol, $resolved ] if($resolved);
    }
  }

  push @{$opts{push_inaccessible_nodes}}, @candidates if($opts{push_inaccessible_nodes});
  return undef;
}
|
|
|
|
|
|
# returns all related readonly nodes (by parent_uuid relationship), unsorted.
#
# Climbs the parent_uuid chain of $snode to the topmost reachable
# parent, then walks the parent/child relation tree from there
# (breadth-first, using marker entries in the queue to track depth).
# Depth is bounded in both directions ($abort_distance) as a safeguard
# against excessive/cyclic uuid relations.
# opts:
#   readonly  - only collect read-only nodes
#   omit_self - exclude $snode itself from the result
#   fatal     - return undef on depth overflow (instead of a partial result)
#   nowarn    - suppress WARN_ONCE on depth overflow
# Returns an arrayref of nodes (or undef, see opts{fatal}).
#
# Fix: removed no-op self-assignment "$abort_distance = $abort_distance;".
sub _related_nodes($;@)
{
  my $snode = shift // die;
  my %opts = @_;
  TRACE "related_nodes: resolving related subvolumes of: " . _fs_path($snode) if($do_trace);

  # iterate parent chain
  my @related_nodes;
  my $uuid_hash = $snode->{TREE_ROOT}{UUID_HASH};
  my $parent_uuid_hash = $snode->{TREE_ROOT}{PARENT_UUID_HASH};
  my $node = $snode;
  my $uuid = $node->{uuid};
  my $abort_distance = 4096;

  # climb up parent chain
  my $distance = 0; # parent distance
  while(($distance < $abort_distance) && defined($node) && ($node->{parent_uuid} ne "-")) {
    $uuid = $node->{parent_uuid};
    $node = $uuid_hash->{$uuid};
    TRACE "related_nodes: d=$distance uuid=$uuid : parent: " . ($node ? _fs_path($node) : "<deleted>") if($do_trace);
    $distance++;
  }
  if($distance >= $abort_distance) {
    my $logmsg = "Parent UUID chain exceeds depth=$abort_distance" .
      ($opts{fatal} ? " for: " : ", ignoring related parents of uuid=$uuid for: ") . _fs_path($snode);
    DEBUG $logmsg;
    WARN_ONCE $logmsg unless($opts{nowarn});
    return undef if($opts{fatal});
  }
  TRACE "related_nodes: d=$distance uuid=$uuid : top of parent chain" if($do_trace);

  # push related children (even if parent node is missing -> siblings)
  my @nn; # BFS queue: nodes interleaved with { MARK_UUID, MARK_DISTANCE } depth markers
  $distance = $distance * (-1); # child distance (from top parent)
  while($uuid) {
    push @related_nodes, $node if($node && (!$opts{readonly} || $node->{readonly}));
    my $children = $parent_uuid_hash->{$uuid};
    if($children) {
      if($distance >= $abort_distance) {
        my $logmsg = "Parent/child relations exceed depth=$abort_distance" .
          ($opts{fatal} ? " for: " : ", ignoring related children of uuid=$uuid for: ") . _fs_path($snode);
        DEBUG $logmsg;
        WARN_ONCE $logmsg unless($opts{nowarn});
        return undef if($opts{fatal});
      } else {
        # enqueue a depth marker, then the children
        push @nn, { MARK_UUID => $uuid, MARK_DISTANCE => ($distance + 1) }, @$children;
      }
    }

    if($do_trace) {
      if($node) {
        if($node->{readonly}) {
          TRACE "related_nodes: d=$distance uuid=$uuid : push related readonly: " . _fs_path($node);
        } else {
          TRACE "related_nodes: d=$distance uuid=$uuid : " . ($opts{readonly} ? "" : "push ") . "related not readonly: " . _fs_path($node);
        }
      } else {
        TRACE "related_nodes: d=$distance uuid=$uuid : related missing: <deleted>";
      }
      if($children && ($distance < $abort_distance)) {
        TRACE "related_nodes: d=$distance uuid=$uuid : postpone " . scalar(@$children) . " children";
      }
    }

    $node = shift @nn;
    if(exists($node->{MARK_DISTANCE})) {
      # marker reached, restore distance
      $distance = $node->{MARK_DISTANCE};
      TRACE "related_nodes: d=$distance uuid=$node->{MARK_UUID} : processing children" if($do_trace);
      $node = shift @nn;
    }
    $uuid = $node->{uuid};
  }

  if($opts{omit_self}) {
    my $snode_id = $snode->{id};
    my @filtered = grep { $_->{id} != $snode_id } @related_nodes;
    TRACE "related_nodes: found total=" . scalar(@filtered) . " related readonly subvolumes" if($do_trace);
    return \@filtered;
  }
  TRACE "related_nodes: found total=" . scalar(@related_nodes) . " related readonly subvolumes (including self)" if($do_trace);
  return \@related_nodes;
}
|
|
|
|
|
|
# Return (arrayref of vinfo) all btrbk snapshots in $sroot that share
# the same BTRBK_TAG ("siblings"), resolved either from a reference
# subvolume or from a configured snapshot name.
# opts:
#   refvol   - reference vinfo: use its node's BTRBK_TAG and search its
#              parent's SUBTREE
#   name     - snapshot basename: build the tag from name, family and
#              $sroot NODE_SUBDIR, search $sroot's SUBTREE
#              (exactly one of refvol / name must be given, else die)
#   family   - tag family, defaults to "btrbk" (only with opts{name})
#   readonly - only return read-only snapshots
#   sort     - "desc": sort by BTRBK_DATE descending; any other true
#              value sorts ascending
sub get_btrbk_snapshot_siblings($;@)
{
  my $sroot = shift || die;
  my %opts = @_;
  my $readonly = $opts{readonly};
  my $tag;
  my $subtree;
  if(my $rvol = $opts{refvol}) {
    $tag = $rvol->{node}{BTRBK_TAG};
    $subtree = $rvol->{node}{TOP_LEVEL}{SUBTREE};
    TRACE "Creating snapshot siblings list for: $rvol->{PRINT}" if($do_trace);
  } elsif($opts{name}) {
    my $family = $opts{family} // "btrbk";
    $tag = join(":", $opts{name}, $family, ($sroot->{NODE_SUBDIR} // ""));
    $subtree = $sroot->{node}{SUBTREE};
    TRACE "Creating snapshot siblings list for: $sroot->{PRINT}/$opts{name}.*" if($do_trace);
  } else {
    die;
  }
  # refvol without BTRBK_TAG (not a btrbk snapshot): no siblings
  return [] unless defined($tag);
  my @ret = map { vinfo_resolved($_, $sroot) // die _fs_path($_) . " is not in $sroot->{PRINT}"
          } grep {
            defined($_->{BTRBK_TAG}) && ($_->{BTRBK_TAG} eq $tag) &&
            (!$readonly || $_->{readonly})
          } @$subtree;
  TRACE "Found " . scalar(@ret) . " btrbk snapshot siblings for tag: $tag" if($do_trace);
  if($opts{sort} && ($opts{sort} eq "desc")) {
    @ret = sort { cmp_date($b->{node}{BTRBK_DATE}, $a->{node}{BTRBK_DATE}) } @ret;
  } elsif($opts{sort}) {
    @ret = sort { cmp_date($a->{node}{BTRBK_DATE}, $b->{node}{BTRBK_DATE}) } @ret;
  }
  return \@ret;
}
|
|
|
|
|
|
# returns parent, along with clone sources
|
|
# Resolve the best common parent subvolume (and additional clone sources)
# for an incremental send/receive of $svol to target root $droot.
#
# $svol:  source subvolume (vinfo)
# $droot: target root (vinfo)
# %opts:
#   snaproot           => snapshot root (vinfo) used to resolve candidates
#                         (defaults to the mountpoint of $svol)
#   clone_src          => scalar-ref, set to arrayref of clone source vinfo
#   target_parent_node => scalar-ref, set to target-side node of the parent
#   strict_related     => fail unless a parent_uuid-related candidate exists
#
# Returns the parent subvolume (vinfo), or undef if none is found.
sub get_best_parent($$;@)
{
  my $svol = shift // die;
  my $droot = shift || die;
  my %opts = @_;
  my $snaproot = $opts{snaproot};
  my $ret_clone_src = $opts{clone_src};
  my $ret_target_parent_node = $opts{target_parent_node};
  my $strict_related = $opts{strict_related};

  TRACE "get_best_parent: resolving best common parent for subvolume: $svol->{PRINT} (droot=$droot->{PRINT})" if($do_trace);

  # honor incremental_resolve option
  my $source_incremental_resolve = config_key($svol, "incremental_resolve");
  my $target_incremental_resolve = config_key($droot, "incremental_resolve");
  my $resolve_sroot = $snaproot ? (($source_incremental_resolve eq "mountpoint") ? $snaproot->{VINFO_MOUNTPOINT} : $snaproot) : $svol->{VINFO_MOUNTPOINT};
  # NOTE: resolve the target root by the TARGET incremental_resolve option
  # (was erroneously using $source_incremental_resolve; the WARN below
  # reports inaccessibility relative to $target_incremental_resolve)
  my $resolve_droot = ($target_incremental_resolve eq "mountpoint") ? $droot->{VINFO_MOUNTPOINT} : $droot;

  # NOTE: Using parents from different mount points does NOT work, see
  # <https://github.com/kdave/btrfs-progs/issues/96>.
  # btrfs-progs-4.20.2 fails if the parent subvolume is not on same
  # mountpoint as the source subvolume:
  # - btrfs send -p: "ERROR: not on mount point: /path/to/mountpoint"
  # - btrfs receive: "ERROR: parent subvol is not reachable from inside the root subvol"
  #
  # Note that specifying clones from outside the mount point would work for btrfs send,
  # but btrfs receive fails with same error as above (tested with v5.13).
  my $source_fallback_all_mountpoints = ($source_incremental_resolve eq "_all_accessible");
  my $target_fallback_all_mountpoints = ($target_incremental_resolve eq "_all_accessible");

  my @inaccessible_nodes;
  my %gbc_opts = ( push_inaccessible_nodes => \@inaccessible_nodes,
                   fallback_all_mountpoints => $target_fallback_all_mountpoints,
                 );

  # resolve correlated subvolumes by parent_uuid relationship.
  # no warnings on aborted search (due to deep relations).
  my %c_rel_id; # map id to c_related
  my @c_related; # candidates for parent (correlated + related), unsorted
  foreach (@{_related_nodes($svol->{node}, readonly => 1, omit_self => 1, nowarn => 1)}) {
    my $vinfo = vinfo_resolved($_, $resolve_sroot);
    if((not $vinfo) && $source_fallback_all_mountpoints) { # related node is not under $resolve_sroot
      $vinfo = vinfo_resolved_all_mountpoints($_, $svol);
    }
    if($vinfo) {
      my $correlated = get_best_correlated($resolve_droot, $vinfo, %gbc_opts);
      push @c_related, $correlated if($correlated);
      $c_rel_id{$_->{id}} = $correlated;
    } else {
      DEBUG "Related subvolume is not accessible within $source_incremental_resolve \"$resolve_sroot->{PRINT}\": " . _fs_path($_);
    }
  }
  # sort by cgen; for a readwrite source (snapshot source subvolume), its
  # current generation is the reference point
  my $cgen_ref = $svol->{node}{readonly} ? $svol->{node}{cgen} : $svol->{node}{gen};

  my %c_map; # map correlated candidates to incremental_prefs strategy

  # all_related: by parent_uuid relationship, ordered by cgen
  $c_map{aro} = [ sort { ($cgen_ref - $a->[0]{node}{cgen}) <=> ($cgen_ref - $b->[0]{node}{cgen}) }
                  grep { $_->[0]{node}{cgen} <= $cgen_ref } @c_related ];
  $c_map{arn} = [ sort { ($a->[0]{node}{cgen} - $cgen_ref) <=> ($b->[0]{node}{cgen} - $cgen_ref) }
                  grep { $_->[0]{node}{cgen} > $cgen_ref } @c_related ];

  # NOTE: While _related_nodes() returns deep parent_uuid
  # relations, there is always a chance that these relations get
  # broken.
  #
  # Consider parent_uuid chain ($svol readonly)
  # B->A, C->B, delete B: C has no relation to A.
  # This is especially true for backups and archives (btrfs receive)
  #
  # For snapshots (here: S=$svol readwrite) the scenario is different:
  # A->S, B->S, C->S, delete B: A still has a relation to C.
  #
  # resolve correlated subvolumes in same directory matching btrbk file name scheme
  if(defined($svol->{node}{BTRBK_TAG})) {
    my $snapshot_siblings = get_btrbk_snapshot_siblings($resolve_sroot, refvol => $svol, readonly => 1);
    my @sbdl_older = sort { cmp_date($b->{node}{BTRBK_DATE}, $a->{node}{BTRBK_DATE}) }
                     grep { cmp_date($_->{node}{BTRBK_DATE}, $svol->{node}{BTRBK_DATE}) < 0 } @$snapshot_siblings;
    my @sbdl_newer = sort { cmp_date($a->{node}{BTRBK_DATE}, $b->{node}{BTRBK_DATE}) }
                     grep { cmp_date($_->{node}{BTRBK_DATE}, $svol->{node}{BTRBK_DATE}) > 0 } @$snapshot_siblings;

    # snapdir_all: btrbk snapshot siblings, ordered by btrbk timestamp
    $c_map{sao} = [ map { $c_rel_id{$_->{node}{id}} // get_best_correlated($resolve_droot, $_, %gbc_opts) // () } @sbdl_older ];
    $c_map{san} = [ map { $c_rel_id{$_->{node}{id}} // get_best_correlated($resolve_droot, $_, %gbc_opts) // () } @sbdl_newer ];

    # snapdir_related: btrbk snapshot siblings, with parent_uuid relationship, ordered by btrbk timestamp
    $c_map{sro} = [ map { $c_rel_id{$_->{node}{id}} // () } @sbdl_older ];
    $c_map{srn} = [ map { $c_rel_id{$_->{node}{id}} // () } @sbdl_newer ];
  }

  if(scalar @inaccessible_nodes) { # populated by get_best_correlated()
    WARN "Best common parent for \"$svol->{PRINT}\" is not accessible within target $target_incremental_resolve \"$resolve_droot->{PRINT}\", ignoring: " . join(", ", map('"' . _fs_path($_) . '"',@inaccessible_nodes));
  }

  # resolve parent (and required clone sources) according to incremental_prefs

  if($do_trace) {
    TRACE "get_best_parent: related reference cgen=$svol->{node}{cgen}";
    foreach my $search (@incremental_prefs_avail) {
      TRACE map("get_best_parent: ${search}: $_->[0]{PRINT} (cgen=$_->[0]{node}{cgen}) $_->[1]{PRINT}", @{$c_map{$search}});
    }
  }

  # expand "defaults" in incremental_prefs, then pick candidates in order;
  # strategy key format is "<strategy>[:<max_count>]"
  my @parent;
  my @isk = map { $_ eq "defaults" ? @incremental_prefs_default : $_ } @{config_key($svol, "incremental_prefs")};
  foreach(@isk) {
    TRACE "processing incremental_prefs: $_" if($do_trace);
    my ($k, $n) = split /:/;
    my $c_list = $c_map{$k} // next;
    for(1 .. ($n // @$c_list)) {
      my $cc = shift @$c_list // last;
      # skip candidates already picked by an earlier strategy
      next if(grep { $_->[0]{node}{id} == $cc->[0]{node}{id} } @parent);
      DEBUG "Resolved " . (@parent ? "clone source" : "parent") . " (" .
        "next closest " . ($k =~ /n/ ? "newer" : "older") .
        " by " . ($k =~ /s/ ? "timestamp in snapdir" : "cgen") .
        ", " . ($k =~ /r/ ? "with" : "regardless of") . " parent_uuid relationship" .
        "): $cc->[0]{PRINT}" if($loglevel >= 3);
      push @parent, $cc;
    }
  }

  # assemble results
  unless(scalar @parent) {
    DEBUG("No suitable common parents of \"$svol->{PRINT}\" found in src=\"$resolve_sroot->{PRINT}/\", target=\"$resolve_droot->{PRINT}/\"");
    return undef;
  }

  if($strict_related && (!grep(exists($c_rel_id{$_->[0]{node}{id}}), @parent))) {
    # no relations by parent_uuid found
    WARN "No related common parent found (by parent_uuid relationship) for: $svol->{PRINT}",
         "Hint: setting option \"incremental\" to \"yes\" (instead of \"strict\") will use parent: " . join(", ", map $_->[0]{PRINT}, @parent);
    return undef;
  }

  # first candidate becomes the parent, all others are clone sources
  my $ret_parent = shift @parent;
  $$ret_clone_src = [ map $_->[0], @parent ] if($ret_clone_src);
  $$ret_target_parent_node = $ret_parent->[1]{node} if($ret_target_parent_node);
  return $ret_parent->[0];
}
|
|
|
|
|
|
# Return the related snapshot of $svol with the highest creation
# generation (cgen), i.e. the most recently created one, or undef if
# no related snapshots exist.
sub get_latest_related_snapshot($$;$)
{
  my $sroot = shift || die;
  my $svol = shift // die;
  my $btrbk_basename = shift;

  my $best;
  my $best_cgen = -1;
  foreach my $snap (get_related_snapshots($sroot, $svol, $btrbk_basename)) {
    my $cgen = $snap->{node}{cgen};
    next unless($cgen > $best_cgen);
    ($best, $best_cgen) = ($snap, $cgen);
  }
  if($best) {
    DEBUG "Latest snapshot child for \"$svol->{PRINT}#$svol->{node}{gen}\" is: $best->{PRINT}#$best->{node}{cgen}";
  }
  else {
    DEBUG "No latest snapshots found for: $svol->{PRINT}";
  }
  return $best;
}
|
|
|
|
|
|
# Validate (and optionally sanitize) a file/path string.
#
# $file:   path string to validate
# $accept: hashref describing allowed path forms:
#            absolute  => allow absolute paths ("/...")
#            relative  => allow relative paths (no leading "/")
#            name_only => allow plain file names (no slashes at all)
#            wildcards => skip the absolute/relative form check
#          (absolute + relative together accepts both forms)
# %opts:
#   sanitize        => trim whitespace, collapse "//" and "/./", strip
#                      trailing "/." and trailing slash
#   error_statement => appended to error messages; if undef, fail silently
#
# Returns the (possibly sanitized) path, or undef if validation fails.
# Security relevant: rejects embedded newlines, ".." directory traversal,
# and (if the global "safe_commands" option is set) any path not matching
# $safe_file_match.
sub check_file($$;@)
{
  my $file = shift // die;
  my $accept = shift || die;
  my %opts = @_;
  my $sanitize = $opts{sanitize};
  my $error_statement = $opts{error_statement}; # if not defined, no error messages are printed

  # enforce the requested path form (absolute / relative / name_only)
  if($accept->{absolute} && $accept->{relative}) {
    # accepted, matches either absolute or relative
  }
  elsif($accept->{absolute}) {
    unless($file =~ /^\//) {
      ERROR "Only absolute files allowed $error_statement" if(defined($error_statement));
      return undef;
    }
  }
  elsif($accept->{relative}) {
    if($file =~ /^\//) {
      ERROR "Only relative files allowed $error_statement" if(defined($error_statement));
      return undef;
    }
  }
  elsif($accept->{name_only}) {
    if($file =~ /\//) {
      ERROR "Invalid file name ${error_statement}: $file" if(defined($error_statement));
      return undef;
    }
  }
  elsif(not $accept->{wildcards}) {
    die("accept_type must contain either 'relative' or 'absolute'");
  }

  # reject newlines: paths are passed around in line-based protocols
  if($file =~ /\n/) {
    ERROR "Unsupported newline in file ${error_statement}: " . ($file =~ s/\n/\\n/gr) if(defined($error_statement));
    return undef;
  }
  # reject "..", "../...", ".../../..." and ".../.." (directory traversal)
  if(($file =~ /^\.\.$/) || ($file =~ /^\.\.\//) || ($file =~ /\/\.\.\//) || ($file =~ /\/\.\.$/)) {
    ERROR "Illegal directory traversal ${error_statement}: $file" if(defined($error_statement));
    return undef;
  }
  if($sanitize) {
    $file =~ s/^\s+//;
    $file =~ s/\s+$//;
    $file =~ s/\/(\.?\/)+/\//g; # sanitize "//", "/./" -> "/"
    $file =~ s/\/\.$/\//;  # sanitize trailing "/." -> "/"
    $file =~ s/\/$// unless($file eq '/'); # remove trailing slash
  }
  elsif(($file =~ /^\s/) || ($file =~ /\s$/)) {
    # without sanitize, leading/trailing whitespace is an error
    ERROR "Illegal leading/trailing whitespace ${error_statement}: \"$file\"" if(defined($error_statement));
    return undef;
  }
  if($safe_commands && $file !~ /^$safe_file_match$/) {
    ERROR "Invalid file name (restricted by \"safe_commands\" option) ${error_statement}: \"$file\"" if(defined($error_statement));
    return undef;
  }

  return $file;
}
|
|
|
|
|
|
# Split an URL into (url_prefix, path), validating the path.
#
# Accepts "ssh://host[:port]/path", "host:/path" (rewritten to an ssh
# url), or a plain absolute path (empty prefix). The host part must
# match $host_name_match, $ipv4_addr_match or a bracketed IPv6 address.
#
# %opts:
#   accept_wildcards => pass wildcards through to check_file()
#   (remaining opts, e.g. error_statement, are forwarded to check_file())
#
# Returns the two-element list ($url_prefix, $path), where $path is the
# sanitized result of check_file() (undef on validation error).
sub check_url($;@)
{
  my $url = shift // die;
  my %opts = @_;
  my $url_prefix = "";

  if($url =~ /^ssh:\/\//) {
    # strip "ssh://host[:port]" into $url_prefix, keep leading "/" on path
    if($url =~ s/^(ssh:\/\/($host_name_match|$ipv4_addr_match|\[$ipv6_addr_match\])(:[1-9][0-9]*)?)\//\//) {
      $url_prefix = $1;
    }
  }
  elsif($url =~ s/^($host_name_match|$ipv4_addr_match|\[$ipv6_addr_match\]):\//\//) {
    # convert "my.host.com:/my/path", "[2001:db8::7]:/my/path" to ssh url
    $url_prefix = "ssh://" . $1;
  }
  # if no url prefix match, treat it as file and let check_file() print errors

  return ( $url_prefix, check_file($url, { absolute => 1, wildcards => $opts{accept_wildcards} }, sanitize => 1, %opts) );
}
|
|
|
|
|
|
# Look up a configuration key, honoring command-line overrides and
# section inheritance (walks up the PARENT chain until the key is found).
#
# $config: config section hash, or a vinfo (its CONFIG is used)
# $key:    option name
# $match:  optional; if given, return $match if the value (or, for list
#          values, any element) matches /^$match$/, undef otherwise
#
# Returns the value (possibly an arrayref), or undef if unset/no match.
sub config_key($$;$)
{
  my $config = shift || die;
  my $key = shift || die;
  my $match = shift;

  # accept vinfo in place of a config hash
  $config = $config->{CONFIG} if($config->{CONFIG});

  my $value;
  if(exists($config_override{$key})) {
    TRACE "config_key: OVERRIDE key=$key to value=" . ($config_override{$key} // "<undef>") if($do_trace);
    $value = $config_override{$key};
  }
  else {
    # walk up the section tree until the key is found
    until(exists($config->{$key})) {
      # note: while all config keys exist in "meta" context (at least with default values),
      # we also allow fake configs (CONTEXT="cmdline") which have no PARENT.
      $config = $config->{PARENT} or return undef;
    }
    $value = $config->{$key};
  }
  return undef unless defined($value);
  return $value unless defined($match);

  # match mode
  if(ref($value) eq "ARRAY") {
    return (grep(/^$match$/, @$value) ? $match : undef);
  }
  return (($value eq $match) ? $match : undef);
}
|
|
|
|
|
|
# Config key lookup with local/remote/user suffix resolution.
#
# For remote subvolumes (vinfo has HOST), "<key>_remote" takes precedence;
# for local ones, "<key>_local_user" (only when running as non-root) then
# "<key>_local". The plain "<key>" is the final fallback.
sub config_key_lru($$;$) {
  my $vinfo = shift || die;
  my $key = shift || die;
  my $match = shift;

  # build lookup order: most specific suffix first, plain key last
  my @candidates;
  if(defined($vinfo->{HOST})) {
    push @candidates, $key . "_remote";
  }
  else {
    push @candidates, $key . "_local_user" if($>); # $EUID, $EFFECTIVE_USER_ID
    push @candidates, $key . "_local";
  }
  push @candidates, $key;

  foreach my $candidate (@candidates) {
    my $val = config_key($vinfo, $candidate, $match);
    return $val if defined($val);
  }
  return undef;
}
|
|
|
|
|
|
# Assemble the retention policy hash for a preserve prefix
# ("snapshot", "target", ...).
#
# Combines "<prefix>_preserve" (matrix), "preserve_hour_of_day" (hod),
# "preserve_day_of_week" (dow), and "<prefix>_preserve_min"
# (min / min_n / min_q). With wipe => 1, returns a policy preserving
# only the latest.
sub config_preserve_hash($$;@)
{
  my $config = shift || die;
  my $prefix = shift || die;
  my %opts = @_;

  # "wipe" mode: keep only the latest
  return { hod => 0, dow => 'sunday', min => 'latest', min_q => 'latest' } if($opts{wipe});

  my $matrix = config_key($config, $prefix . "_preserve") // {};
  my %preserve = %$matrix; # make a copy (don't pollute config)
  $preserve{hod} = config_key($config, "preserve_hour_of_day");
  $preserve{dow} = config_key($config, "preserve_day_of_week");

  my $preserve_min = config_key($config, $prefix . "_preserve_min");
  if(defined($preserve_min)) {
    $preserve{min} = $preserve_min; # used for raw schedule output
    if(($preserve_min eq 'all') || ($preserve_min eq 'latest')) {
      $preserve{min_q} = $preserve_min;
    }
    elsif($preserve_min =~ /^([0-9]+)([hdwmy])$/) {
      @preserve{qw( min_n min_q )} = ($1, $2);
    }
    else {
      die; # values are validated at config parse time
    }
  }
  return \%preserve;
}
|
|
|
|
|
|
# Assemble the compression options for a compress config key
# ("stream_compress", "raw_target_compress", ...).
#
# Returns a hashref with the algorithm ("key") and its "<key>_level",
# "<key>_long", "<key>_threads", "<key>_adapt" tuning options, or undef
# if compression is disabled.
sub config_compress_hash($$)
{
  my $config = shift || die;
  my $config_key = shift || die;

  my $algorithm = config_key($config, $config_key);
  return undef unless($algorithm);

  my %compress = ( key => $algorithm );
  $compress{$_} = config_key($config, "${config_key}_${_}") foreach (qw( level long threads adapt ));
  return \%compress;
}
|
|
|
|
|
|
# Assemble the stream pipeline options (compression, buffering, rate
# limiting) for a send/receive transfer from $source to $target.
sub config_stream_hash($$)
{
  my $source = shift || die;
  my $target = shift || die;

  # local end of the pipe:
  # - remote source: limits read rate of ssh stream output after decompress
  # - remote target: limits read rate of "btrfs send"
  # - both local:    limits read rate of "btrfs send"
  # - raw targets:   limits read rate of "btrfs send | xz" (raw_target_compress)
  my %local_sink = (
    stream_buffer => config_key($target, "stream_buffer"),
    rate_limit    => config_key($target, "rate_limit"),
    show_progress => $show_progress,
  );

  # remote source side: limit read rate after "btrfs send", before compression
  my %rsh_source = (
    stream_buffer => config_key($source, "stream_buffer_remote"),
    rate_limit    => config_key($source, "rate_limit_remote"),
    #rate_limit_out => config_key($source, "rate_limit_remote"), # limit write rate
  );

  # remote target side: limits read rate of ssh stream output
  my %rsh_sink = (
    stream_buffer => config_key($target, "stream_buffer_remote"),
    rate_limit    => config_key($target, "rate_limit_remote"),
    #rate_limit_in => config_key($target, "rate_limit_remote"),
  );

  return {
    stream_compress => config_compress_hash($target, "stream_compress"),
    local_sink      => \%local_sink,
    rsh_source      => \%rsh_source,
    rsh_sink        => \%rsh_sink,
  };
}
|
|
|
|
|
|
# Assemble the encryption options for an encrypt config key.
#
# Returns a hashref with the encryption type and the gpg / openssl / kdf
# related options, or undef if encryption is disabled.
sub config_encrypt_hash($$)
{
  my $config = shift || die;
  my $config_key = shift || die;

  my $encrypt_type = config_key($config, $config_key);
  return undef unless($encrypt_type);

  my %encrypt = (
    type            => $encrypt_type,
    keyring         => config_key($config, "gpg_keyring"),
    recipient       => config_key($config, "gpg_recipient"),
    iv_size         => config_key($config, "openssl_iv_size"),
    ciphername      => config_key($config, "openssl_ciphername"),
    keyfile         => config_key($config, "openssl_keyfile"),
    kdf_keygen_each => (config_key($config, "kdf_keygen") eq "each"),
    kdf_backend     => config_key($config, "kdf_backend"),
    kdf_keysize     => config_key($config, "kdf_keysize"),
  );
  return \%encrypt;
}
|
|
|
|
|
|
# Render the effective configuration of a section as "key value" lines.
#
# $config: config section hash, or a vinfo (its CONFIG is used)
# %opts:
#   all    => also list keys which are not explicitly set
#             ("no" if accepted, otherwise commented "<unset>")
#   prefix => string prepended to each output line
#
# Returns a list of formatted lines with values aligned in columns.
sub config_dump_keys($;@)
{
  my $config = shift || die;
  my %opts = @_;
  my @ret;
  my $maxlen = 0;
  $config = $config->{CONFIG} if($config->{CONFIG}); # accept vinfo for $config

  foreach my $key (sort keys %config_options)
  {
    my $val;
    my $opt = $config_options{$key};
    # NOTE: guard the nested access: "$opt->{deprecated}{DEFAULT}" alone
    # would autovivify an empty "deprecated" hash into the global
    # %config_options table, which other code tests for truthiness
    # (e.g. init_config skips options with "deprecated" set).
    next if($opt->{deprecated} && $opt->{deprecated}{DEFAULT});
    next unless($opts{all} || exists($config->{$key}) || exists($config_override{$key}));
    # skip options not applicable to this section context
    next if($opt->{context} && !grep(/^$config->{CONTEXT}$/, @{$opt->{context}}));
    $val = config_key($config, $key);

    # "split" options are joined on one line; other list values get one line each
    my @va = (ref($val) eq "ARRAY") ? ($opt->{split} ? join(" ", @$val) : @$val) : $val;
    foreach(@va) {
      if(defined($_)) {
        if($opt->{accept_preserve_matrix}) {
          $_ = format_preserve_matrix($_, format => "config");
        }
      }
      # unset keys print as "no" if that is an accepted value, "<unset>" (commented out) otherwise
      $_ //= grep(/^no$/, @{$opt->{accept} // []}) ? "no" : "<unset>";
      my $comment = $_ eq "<unset>" ? "# " : "";
      my $len = length($key);
      $maxlen = $len if($len > $maxlen);
      push @ret, { comment => $comment, key => $key, val => $_, len => $len };
    }
  }
  return map { ($opts{prefix} // "") . $_->{comment} . $_->{key} . (' ' x (1 + $maxlen - $_->{len})) . ' ' . $_->{val} } @ret;
}
|
|
|
|
|
|
# Validate a "key value" pair and store it in the $config section.
#
# $config:  config section hash to modify
# $key:     option name (must be declared in %config_options)
# $value:   raw option value ("" is treated as "yes")
# $context: section context name ("global", "volume", ...; "OVERRIDE"
#           bypasses the context restriction)
# %opts:
#   error_statement => appended to error messages (file/line info)
#
# Validation: the value must pass one of "accept" (regex list),
# "accept_file" (check_file), or "accept_preserve_matrix" ("<N><hdwmy>"
# tokens). Deprecated options are translated, warned about, or rejected.
#
# Returns $config on success ($config->{$key} set; "no" is stored as
# undef), or undef on validation errors.
sub append_config_option($$$$;@)
{
  my $config = shift;
  my $key = shift;
  my $value = shift;
  my $context = shift;
  my %opts = @_;
  my $error_statement = $opts{error_statement} // "";

  my $opt = $config_options{$key};

  # accept only keys listed in %config_options
  unless($opt) {
    ERROR "Unknown option \"$key\" $error_statement";
    return undef;
  }

  # enforce section context ("OVERRIDE" comes from the command line)
  if($opt->{context} && !grep(/^$context$/, @{$opt->{context}}) && ($context ne "OVERRIDE")) {
    ERROR "Option \"$key\" is only allowed in " . join(" or ", @{$opt->{context}}) . " context $error_statement";
    return undef;
  }

  if($opt->{deny_glob_context} && $config->{GLOB_CONTEXT}) {
    ERROR "Option \"$key\" is not allowed on section with wildcards $error_statement";
    return undef;
  }

  # keep the original value for error messages
  my $ovalue = $value;

  if($value eq "") {
    $value = "yes";
    TRACE "option \"$key\" has no value, setting to \"yes\"" if($do_trace);
  }

  # "split" options become an arrayref of whitespace/comma separated tokens
  if($opt->{split}) {
    $value = [ split($config_split_match, $value) ];
  }

  # try the "accept" regex list first: every value (or list element) must match
  my $accepted;
  if($opt->{accept}) {
    $accepted = 1;
    foreach my $val (ref($value) ? @$value : $value) {
      $accepted = 0, last unless(grep { $val =~ /^$_$/ } @{$opt->{accept}});
      TRACE "option \"$key=$val\" found in accept list" if($do_trace);
    }
  }
  if(!$accepted && $opt->{accept_file}) {
    # be very strict about file options, for security sake
    $value = check_file($value, $opt->{accept_file}, sanitize => 1, error_statement => ($error_statement ? "for option \"$key\" $error_statement" : undef));
    return undef unless(defined($value));

    TRACE "option \"$key=$value\" is a valid file, accepted" if($do_trace);
    $value = "no" if($value eq "."); # maps to undef later
    $accepted = 1;
  }
  if(!$accepted && $opt->{accept_preserve_matrix}) {
    # parse "<N|*><hdwmy>" tokens into a { h => N, d => N, ... } hash;
    # whole-string consumption is checked below (leftover chars = error)
    my %preserve;
    my $s = ' ' . $value;
    while($s =~ s/\s+(\*|[0-9]+)([hdwmyHDWMY])//) {
      my $n = $1;
      my $q = lc($2); # qw( h d w m y )
      $n = 'all' if($n eq '*');
      if(exists($preserve{$q})) {
        ERROR "Value \"$value\" failed input validation for option \"$key\": multiple definitions of '$q' $error_statement";
        return undef;
      }
      $preserve{$q} = $n;
    }
    unless($s eq "") {
      ERROR "Value \"$value\" failed input validation for option \"$key\" $error_statement";
      return undef;
    }
    TRACE "adding preserve matrix $context context:" . Data::Dumper->new([\%preserve], [ $key ])->Indent(0)->Pad(' ')->Quotekeys(0)->Pair('=>')->Dump() if($do_trace && $do_dumper);
    $config->{$key} = \%preserve;
    return $config;
  }
  if(!$accepted) {
    if($ovalue eq "") {
      ERROR "Unsupported empty value for option \"$key\" $error_statement";
    } else {
      ERROR "Unsupported value \"$ovalue\" for option \"$key\" $error_statement";
    }
    return undef;
  }

  # options depending on an external executable degrade to "no" if missing
  if($opt->{require_bin} && (not check_exe($opt->{require_bin}))) {
    WARN "Found option \"$key\", but required executable \"$opt->{require_bin}\" does not exist on your system. Please install \"$opt->{require_bin}\".";
    WARN "Ignoring option \"$key\" $error_statement";
    $value = "no";
  }

  # deprecation handling: value-specific entry, regex MATCH entry, or DEFAULT
  if($opt->{deprecated}) {
    my $dh = $opt->{deprecated}{$value} // $opt->{deprecated}{DEFAULT} // {};
    $dh = $opt->{deprecated}{MATCH} if($opt->{deprecated}{MATCH} && ($value =~ $opt->{deprecated}{MATCH}{regex}));
    if($dh->{ABORT}) {
      ERROR "Deprecated (incompatible) option \"$key\" found $error_statement, refusing to continue", $dh->{warn};
      return undef;
    }
    my @wmsg;
    push @wmsg, "Found deprecated option \"$key $value\" $error_statement", $dh->{warn} if($dh->{warn});
    if(defined($dh->{replace_key})) {
      # transparently translate to the replacement option
      $key = $dh->{replace_key};
      $value = $dh->{replace_value};
      push @wmsg, "Using \"$key $value\"";
    }
    WARN @wmsg if(@wmsg);
    if($dh->{FAILSAFE_PRESERVE}) {
      # incompatible retention config: force-preserve everything
      unless($config_override{FAILSAFE_PRESERVE}) { # warn only once
        WARN "Entering failsafe mode:";
        WARN " - preserving ALL snapshots for ALL subvolumes";
        WARN " - ignoring ALL targets (skipping backup creation)";
        WARN " - please read \"doc/upgrade_to_v0.23.0.md\"";
        $config_override{FAILSAFE_PRESERVE} = "Failsafe mode active (deprecated configuration)";
      }
      $config_override{snapshot_preserve_min} = 'all';
      return $config;
    }
  }

  # "allow_multiple" options accumulate into an arrayref; others warn on redefinition
  if($opt->{allow_multiple}) {
    my $aref = $config->{$key} // [];
    my @val = ref($value) ? @$value : $value;
    push(@$aref, @val);
    TRACE "pushing option \"$key=[" . join(",", @val) . "]\" to $aref=[" . join(',', @$aref) . "]" if($do_trace);
    $value = $aref;
  }
  elsif(exists($config->{$key})) {
    unless($opt->{c_default}) { # note: computed defaults are already present
      WARN "Option \"$key\" redefined $error_statement";
    }
  }

  TRACE "adding option \"$key=$value\" to $context context" if($do_trace);
  $value = undef if($value eq "no"); # we don't want to check for "no" all the time
  $config->{$key} = $value;
  return $config;
}
|
|
|
|
|
|
# Process a single configuration line, updating the config tree.
#
# $cur:   current section hash (global/volume/subvolume/target context)
# $key:   option name (already lowercased by the caller)
# $value: option value (surrounding quotes are stripped here)
# %opts:
#   error_statement => appended to error messages (file/line info)
#
# The section keywords "volume", "subvolume" and "target" open a new
# section (adjusting the current context as needed); any other key is
# stored in the current section via append_config_option().
#
# Returns the new current section, or undef on error.
sub parse_config_line($$$;@)
{
  my ($cur, $key, $value, %opts) = @_;
  # locate the root (global) section, needed for "volume" below
  my $root = $cur;
  $root = $root->{PARENT} while($root->{CONTEXT} ne "global");
  my $error_statement = $opts{error_statement} // "";

  if($key eq "volume")
  {
    # strip surrounding quotes
    $value =~ s/^"(.*)"$/$1/;
    $value =~ s/^'(.*)'$/$1/;

    # "volume" always opens at global level
    $cur = $root;
    TRACE "config: context forced to: $cur->{CONTEXT}" if($do_trace);

    # be very strict about file options, for security sake
    my ($url_prefix, $path) = check_url($value, error_statement => "for option \"$key\" $error_statement");
    return undef unless(defined($path));
    TRACE "config: adding volume \"$url_prefix$path\" to global context" if($do_trace);
    die unless($cur->{CONTEXT} eq "global");
    my $volume = { CONTEXT => "volume",
                   PARENT => $cur,
                   SUBSECTION => [],
                   url => $url_prefix . $path,
                 };
    push(@{$cur->{SUBSECTION}}, $volume);
    $cur = $volume;
  }
  elsif($key eq "subvolume")
  {
    $value =~ s/^"(.*)"$/$1/;
    $value =~ s/^'(.*)'$/$1/;

    # climb up to the enclosing volume section; insert a dummy volume if
    # "subvolume" appears directly in global context
    while($cur->{CONTEXT} ne "volume") {
      if($cur->{CONTEXT} eq "global") {
        TRACE "config: adding dummy volume context" if($do_trace);
        my $volume = { CONTEXT => "volume",
                       PARENT => $cur,
                       SUBSECTION => [],
                       DUMMY => 1,
                       url => "/dev/null",
                     };
        push(@{$cur->{SUBSECTION}}, $volume);
        $cur = $volume;
        last;
      }
      $cur = $cur->{PARENT} || die;
      TRACE "config: context changed to: $cur->{CONTEXT}" if($do_trace);
    }
    # be very strict about file options, for security sake
    my $url;
    if(!$cur->{DUMMY} && (my $rel_path = check_file($value, { relative => 1, wildcards => 1 }, sanitize => 1))) {
      # relative path: resolve against the enclosing volume url
      $url = ($rel_path eq '.') ? $cur->{url} : $cur->{url} . '/' . $rel_path;
    }
    else {
      # absolute path or url (required below a dummy volume)
      my ($url_prefix, $path) = check_url($value, accept_wildcards => 1, error_statement => "for option \"$key\"" . ($cur->{DUMMY} ? " (if no \"volume\" section is declared)" : "") . " $error_statement");
      return undef unless(defined($path));
      $url = $url_prefix . $path;
    }

    # snapshot_name defaults to subvolume name (or volume name if subvolume=".")
    my $default_snapshot_name = $url;
    $default_snapshot_name =~ s/^.*\///;
    $default_snapshot_name = 'ROOT' if($default_snapshot_name eq ""); # if volume="/"

    TRACE "config: adding subvolume \"$url\" to volume context: $cur->{url}" if($do_trace);
    my $subvolume = { CONTEXT => "subvolume",
                      PARENT => $cur,
                      # SUBSECTION => [], # handled by target propagation
                      url => $url,
                      snapshot_name => $default_snapshot_name, # computed default (c_default)
                    };
    $subvolume->{GLOB_CONTEXT} = 1 if($value =~ /\*/);
    push(@{$cur->{SUBSECTION}}, $subvolume);
    $cur = $subvolume;
  }
  elsif($key eq "target")
  {
    # target sections don't nest: close a previous target section first
    if($cur->{CONTEXT} eq "target") {
      $cur = $cur->{PARENT} || die;
      TRACE "config: context changed to: $cur->{CONTEXT}" if($do_trace);
    }

    # As of btrbk-0.28.0, target_type is optional and defaults to "send-receive"
    my $target_type = $config_target_types[0];
    $target_type = lc($1) if($value =~ s/^([a-zA-Z_-]+)\s+//);
    unless(grep(/^\Q$target_type\E$/, @config_target_types)) {
      ERROR "Unknown target type \"$target_type\" $error_statement";
      return undef;
    }

    $value =~ s/^"(.*)"$/$1/;
    $value =~ s/^'(.*)'$/$1/;

    my ($url_prefix, $path) = check_url($value, error_statement => "for option \"$key\" $error_statement");
    return undef unless(defined($path));

    TRACE "config: adding target \"$url_prefix$path\" (type=$target_type) to $cur->{CONTEXT} context" . ($cur->{url} ? ": $cur->{url}" : "") if($do_trace);
    my $target = { CONTEXT => "target",
                   PARENT => $cur,
                   target_type => $target_type,
                   url => $url_prefix . $path,
                 };
    # NOTE: target sections are propagated to the appropriate SUBSECTION in _config_propagate_target()
    $cur->{TARGET} //= [];
    push(@{$cur->{TARGET}}, $target);
    $cur = $target;
  }
  else
  {
    # plain option: strip quotes, validate and store in the current section
    $value =~ s/^"(.*)"$/$1/;
    $value =~ s/^'(.*)'$/$1/;
    return append_config_option($cur, $key, $value, $cur->{CONTEXT}, error_statement => $error_statement);
  }

  return $cur;
}
|
|
|
|
|
|
# Recursively push "target" sections down the config tree.
#
# Every target declared in a global/volume section is copied (reparented)
# into each of its subsections, unless a target with the same url and
# target_type is already declared there. In subvolume context, the
# resulting TARGET list becomes the subvolume's final SUBSECTION list.
# The processed section's own TARGET list is removed afterwards.
sub _config_propagate_target
{
  my $cur = shift;
  foreach my $subsection (@{$cur->{SUBSECTION}}) {
    my @propagate_target;
    foreach my $target (@{$cur->{TARGET}}) {
      TRACE "propagating target \"$target->{url}\" from $cur->{CONTEXT} context to: $subsection->{CONTEXT} $subsection->{url}" if($do_trace);
      # invariant: target sections never have subsections of their own
      die if($target->{SUBSECTION});

      # don't propagate if a target of same target_type and url already exists in subsection
      if($subsection->{TARGET} &&
         grep({ ($_->{url} eq $target->{url}) && ($_->{target_type} eq $target->{target_type}) } @{$subsection->{TARGET}}))
      {
        DEBUG "Skip propagation of \"target $target->{target_type} $target->{url}\" from $cur->{CONTEXT} context to \"$subsection->{CONTEXT} $subsection->{url}\": same target already exists";
        next;
      }

      # shallow copy, reparented to the subsection (config_key() resolves
      # inherited options by walking PARENT)
      my %copy = ( %$target, PARENT => $subsection );
      push @propagate_target, \%copy;
    }
    $subsection->{TARGET} //= [];
    unshift @{$subsection->{TARGET}}, @propagate_target; # maintain config order: propagated targets go in front of already defined targets
    if($subsection->{CONTEXT} eq "subvolume") {
      # finally create missing SUBSECTION in subvolume context
      die if($subsection->{SUBSECTION});
      $subsection->{SUBSECTION} = $subsection->{TARGET};
    }
    else {
      # recurse into SUBSECTION
      _config_propagate_target($subsection);
    }
  }
  delete $cur->{TARGET};
  return $cur;
}
|
|
|
|
|
|
# Recursively collect all values of an array-valued (allow_multiple)
# option from a config section and all its subsections, depth-first,
# own values first.
sub _config_collect_values
{
  my ($config, $key) = @_;

  my @collected;
  my $own = $config->{$key};
  push @collected, @$own if(ref($own) eq "ARRAY");
  push @collected, _config_collect_values($_, $key) foreach (@{$config->{SUBSECTION}});
  return @collected;
}
|
|
|
|
|
|
# Create a fresh configuration tree: a "global" section whose PARENT is
# the "meta" defaults section (built-in default value for every
# non-deprecated option, plus any key/value pairs passed in @_).
sub init_config(@)
{
  my %defaults = ( CONTEXT => "meta", SRC_FILE => "DEFAULTS", @_ );
  # set defaults (don't pollute hash with deprecated options)
  my @active = grep { !$config_options{$_}->{deprecated} } keys %config_options;
  @defaults{@active} = map { $config_options{$_}->{default} } @active;
  return { CONTEXT => "global", SUBSECTION => [], PARENT => \%defaults };
}
|
|
|
|
|
|
# Return the first readable file from the candidate list, or undef.
sub _config_file(@) {
  foreach my $candidate (@_) {
    TRACE "config: checking for file: $candidate" if($do_trace);
    return $candidate if(-r $candidate);
  }
  return undef;
}
|
|
|
|
# Parse a btrbk configuration file.
#
# $file: path to the configuration file (must be readable; dies on open
#        failure — callers pre-check readability via _config_file())
#
# Returns the configuration tree (root "global" section hash) with
# targets propagated, or undef on parse errors (logged via ERROR).
sub parse_config($)
{
  my $file = shift;
  return undef unless($file);

  my $root = init_config(SRC_FILE => $file);
  my $cur = $root;

  TRACE "config: open configuration file: $file" if($do_trace);
  # NOTE: use a lexical filehandle (a bareword "FILE" handle is a package
  # global: not reentrant, and clobbers any other handle of that name)
  open(my $fh, '<', $file) or die "Failed to open configuration file \"$file\": $!";
  while (<$fh>) {
    chomp;
    s/((?:[^"'#]*(?:"[^"]*"|'[^']*'))*[^"'#]*)#.*/$1/; # remove comments (not within quotes)
    next if /^\s*$/; # ignore empty lines
    s/^\s*//; # remove leading whitespace
    s/\s*$//; # remove trailing whitespace
    TRACE "config: parsing line $. with context=$cur->{CONTEXT}: \"$_\"" if($do_trace);
    unless(/^([a-zA-Z_]+)(?:\s+(.*))?$/) {
      ERROR "Parse error in \"$file\" line $.";
      $root = undef;
      last;
    }
    unless($cur = parse_config_line($cur, lc($1), $2 // "", error_statement => "in \"$file\" line $.")) {
      $root = undef;
      last;
    }
    TRACE "line processed: new context=$cur->{CONTEXT}" if($do_trace);
  }
  close($fh) or ERROR "Failed to close configuration file: $!";

  # on parse errors $root is undef: skip target propagation (calling it
  # with undef would only autovivify a throwaway hash), return undef
  _config_propagate_target($root) if($root);

  return $root;
}
|
|
|
|
|
|
# Send a single source snapshot to a backup target, either via btrfs
# send/receive or as a raw (file-based) backup stream.
#
# Expects a hash:
#   source             => vinfo of the snapshot to send
#   target             => vinfo of the target
#   parent             => vinfo of the incremental parent (optional)
#   clone_src          => arrayref of vinfo clone sources (optional)
#   target_parent_node => node hashref of the parent on the target side
#
# sets $target->{CONFIG}->{ABORTED} on failure
# sets $target->{SUBVOL_RECEIVED}
# Also sets $source->{SUBVOL_SENT}{<target_url>}, and on success injects
# a fake vinfo child into $target (so later scheduling sees the new
# subvolume without re-scanning).
#
# Returns 1 on success, undef on failure.
sub macro_send_receive(@)
{
  my %info = @_;
  my $source = $info{source} || die;
  my $target = $info{target} || die;
  my $parent = $info{parent};
  my @clone_src = @{ $info{clone_src} // [] }; # copy array
  my $config_target = $target->{CONFIG};
  my $target_type = $config_target->{target_type} || die;  # "send-receive" or "raw"
  my $incremental = config_key($config_target, "incremental");

  # check for existing target subvolume
  # A same-named subvolume at the target means a previous run left a
  # stray (e.g. a partial receive): refuse, and tell the user how to fix.
  if(my $err_vol = vinfo_subvol($target, $source->{NAME})) {
    my $err_msg = "Please delete stray subvolume: \"btrfs subvolume delete $err_vol->{PRINT}\"";
    ABORTED($config_target, "Target subvolume \"$err_vol->{PRINT}\" already exists");
    FIX_MANUALLY($config_target, $err_msg);
    ERROR ABORTED_TEXT($config_target) . ", aborting send/receive of: $source->{PRINT}";
    ERROR $err_msg;
    return undef;
  }

  if($incremental)
  {
    # create backup from latest common
    if($parent) {
      INFO "Creating incremental backup...";
    }
    elsif($incremental ne "strict") {
      # no common parent: fall back to a full (non-incremental) send
      INFO "No common parent subvolume present, creating non-incremental backup...";
    }
    else {
      # incremental=strict forbids the full-send fallback
      WARN "Backup to $target->{PRINT} failed: no common parent subvolume found for \"$source->{PRINT}\", and option \"incremental\" is set to \"strict\"";
      ABORTED($config_target, "No common parent subvolume found, and option \"incremental\" is set to \"strict\"");
      return undef;
    }
    # NOTE(review): config_key() is called on $target here, while the
    # other lookups in this sub use $config_target -- presumably
    # equivalent (config_key resolving the vinfo's CONFIG); confirm.
    unless(config_key($target, "incremental_clones")) {
      INFO "Ignoring " . scalar(@clone_src) . " clone sources (incremental_clones=no)" if(@clone_src);
      @clone_src = ();
      delete $info{clone_src};
    }
  }
  else {
    # non-incremental mode: drop any parent / clone source hints
    INFO "Creating non-incremental backup...";
    $parent = undef;
    @clone_src = ();
    delete $info{parent};
    delete $info{clone_src};
  }

  my $ret;           # success flag of the send operation
  my $vol_received;  # vinfo of the subvolume / file created at the target
  my $raw_info;      # extra metadata for raw targets (filled by btrfs_send_to_file)
  if($target_type eq "send-receive")
  {
    $ret = btrfs_send_receive($source, $target, $parent, \@clone_src, \$vol_received);
    ABORTED($config_target, "Failed to send/receive subvolume") unless($ret);
  }
  elsif($target_type eq "raw")
  {
    unless($dryrun) {
      # make sure we know the source uuid
      # (freshly created snapshots carry a fake uuid until queried)
      if($source->{node}{uuid} =~ /^$fake_uuid_prefix/) {
        DEBUG "Fetching uuid of new subvolume: $source->{PRINT}";
        my $detail = btrfs_subvolume_show($source);
        return undef unless($detail);
        die unless($detail->{uuid});
        $source->{node}{uuid} = $detail->{uuid};
        $uuid_cache{$detail->{uuid}} = $source->{node};
      }
    }
    $ret = btrfs_send_to_file($source, $target, $parent, \$vol_received, \$raw_info);
    ABORTED($config_target, "Failed to send subvolume to raw file") unless($ret);
  }
  else
  {
    die "Illegal target type \"$target_type\"";
  }

  # inject fake vinfo
  # NOTE: it's not possible to add (and compare) correct target $detail
  # from btrfs_send_receive(), as source detail also has fake uuid.
  if($ret) {
    vinfo_inject_child($target, $vol_received, {
      # NOTE: this is not necessarily the correct parent_uuid (on
      # receive, btrfs-progs picks the uuid of the first (lowest id)
      # matching possible parent), whereas the target_parent is the
      # first from _correlated_nodes().
      #
      # NOTE: the parent_uuid of an injected receive target is not used
      # anywhere in btrbk at the time of writing
      parent_uuid => $parent ? $info{target_parent_node}->{uuid} : '-',
      received_uuid => $source->{node}{received_uuid} eq '-' ? $source->{node}{uuid} : $source->{node}{received_uuid},
      readonly => 1,
      TARGET_TYPE => $target_type,
      FORCE_PRESERVE => 'preserve forced: created just now',
    }, $raw_info);
  }
  $source->{SUBVOL_SENT}{$target->{URL}} = $vol_received;

  # add info to $config->{SUBVOL_RECEIVED}
  $info{received_type} = $target_type || die;
  $info{received_subvolume} = $vol_received || die;
  $target->{SUBVOL_RECEIVED} //= [];
  push(@{$target->{SUBVOL_RECEIVED}}, \%info);
  unless($ret) {
    # %info is already referenced from SUBVOL_RECEIVED, so the error
    # flag is visible to consumers of that list
    $info{ERROR} = 1;
    return undef;
  }
  return 1;
}
|
|
|
|
|
|
# Delete btrbk subvolumes matching $subvol_basename below $root_subvol,
# preserving those selected by the retention scheduler (and those
# matching --exclude or needed as parents of preserved raw targets).
#
# sets $result_vinfo->{CONFIG}->{ABORTED} on failure
# sets $result_vinfo->{SUBVOL_DELETED}
#
# $root_subvol:      vinfo containing the candidate subvolumes
# $subvol_basename:  basename filter for get_btrbk_snapshot_siblings()
# $result_vinfo:     vinfo collecting results (ABORTED / SUBVOL_DELETED)
# $schedule_options: hashref merged into the schedule() call
# %delete_options:   passed to btrfs_subvolume_delete(); the "qgroup"
#                    key additionally triggers btrfs_qgroup_destroy()
#
# Returns 1 if all scheduled deletions succeeded, undef otherwise.
sub macro_delete($$$$;@)
{
  my $root_subvol = shift || die;
  my $subvol_basename = shift // die;
  my $result_vinfo = shift || die;
  my $schedule_options = shift || die;
  my %delete_options = @_;

  # For raw targets only: dependency callback for schedule().  Called
  # with (preserved_entry, non_preserved_entries...); returns a reason
  # text plus the entries which must also be preserved because a
  # preserved incremental raw target depends on them as parent.
  # Evaluates to "" (false) for non-raw targets.
  my $depends_fn = (($root_subvol->{CONFIG}{target_type} // "") eq "raw") && sub {
    my $vol = shift->{value} // die;  # the preserved entry; @_ now holds the candidates
    my @ret;
    if(my $related = _related_nodes($vol->{node}, omit_self => 1, fatal => 1)) {
      foreach my $dep (@$related) {
        TRACE "Checking parent of dependent raw target: $vol->{PRINT} <- $dep->{BTRBK_RAW}{NAME}" if($do_trace);
        push @ret, grep($dep->{uuid} eq $_->{value}{node}{uuid}, @_);
      }
      return ("parent of preserved raw target", \@ret);
    } else {
      # resolving dependencies failed: abort instead of risking deletion
      # of a parent some raw target still needs
      ABORTED($result_vinfo, "Failed to resolve related raw targets");
      WARN "Skipping delete of \"$root_subvol->{PRINT}/$subvol_basename.*\": " . ABORTED_TEXT($result_vinfo);
      return ("", []);
    }
  };

  # build scheduler input from all matching sibling subvolumes
  my @schedule;
  foreach my $vol (@{get_btrbk_snapshot_siblings($root_subvol, name => $subvol_basename)}) {
    if(my $ff = vinfo_match(\@exclude_vf, $vol)) {
      # command line --exclude match: never delete
      INFO "Skipping deletion of \"$vol->{PRINT}\": Match on $ff->{reason}";
      $vol->{node}{FORCE_PRESERVE} ||= "preserve forced: Match on $ff->{reason}";
    }
    push(@schedule, { value => $vol,
                      # name => $vol->{PRINT}, # only for logging
                      btrbk_date => $vol->{node}{BTRBK_DATE},
                      preserve => $vol->{node}{FORCE_PRESERVE},
                    });
  }
  my (undef, $delete) = schedule(
    %$schedule_options,
    schedule => \@schedule,
    preserve_date_in_future => 1,
    depends => $depends_fn,
  );
  return undef if(IS_ABORTED($result_vinfo)); # if depends_fn fails

  my @delete_success;
  foreach my $vol (@$delete) {
    # NOTE: we do not abort on qgroup destroy errors
    btrfs_qgroup_destroy($vol, %{$delete_options{qgroup}}) if($delete_options{qgroup}->{destroy});
    if(btrfs_subvolume_delete($vol, %delete_options)) {
      push @delete_success, $vol;
    }
  }
  INFO "Deleted " . scalar(@delete_success) . " subvolumes in: $root_subvol->{PRINT}/$subvol_basename.*";
  $result_vinfo->{SUBVOL_DELETED} //= [];
  push @{$result_vinfo->{SUBVOL_DELETED}}, @delete_success;

  if(scalar(@delete_success) != scalar(@$delete)) {
    # at least one btrfs_subvolume_delete() failed
    ABORTED($result_vinfo, "Failed to delete subvolume");
    return undef;
  }
  return 1;
}
|
|
|
|
|
|
# Compare two btrbk date arrayrefs of the form [ unix_time, NN ]:
# primary key is the unix timestamp, tie-break on the NN postfix.
# Returns -1 / 0 / 1 (suitable for sorting ascending).
sub cmp_date($$)
{
  my ($date_a, $date_b) = @_;
  return ($date_a->[0] <=> $date_b->[0])
      || ($date_a->[1] <=> $date_b->[1]);
}
|
|
|
|
|
|
# Apply a retention policy ("preserve matrix") to a list of dated items,
# and partition them into (preserve, delete).
#
# Arguments (hash):
#   schedule   => arrayref of entries:
#                 { value, btrbk_date => [unixtime, NN, has_exact_time],
#                   preserve (preset force-preserve reason, optional),
#                   informative_only (listed in results, never deleted) }
#   preserve   => preserve matrix { min_n, min_q, h, d, w, m, y, dow, hod }
#   preserve_date_in_future => force-preserve items dated in the future
#   preserve_threshold_date => btrbk_date; preserved items older than this
#                              are excluded from the preserve list
#   results    => optional arrayref, receives one result row per entry
#   result_hints, result_preserve_action_text, result_delete_action_text
#              => cosmetic values merged into the result rows
#   depends    => optional callback; invoked per preserved entry with the
#                 non-preserved entries, returns (reason, arrayref) of
#                 entries to force-preserve (dependency tracking)
#
# Returns (\@preserve, \@delete) holding the "value" members.
sub schedule(@)
{
  my %args = @_;
  my $schedule = $args{schedule} || die;
  my $preserve = $args{preserve} || die;
  my $preserve_date_in_future = $args{preserve_date_in_future};
  my $preserve_threshold_date = $args{preserve_threshold_date};
  my $results_list = $args{results};
  my $result_hints = $args{result_hints} // {};
  my $result_preserve_action_text = $args{result_preserve_action_text};
  my $result_delete_action_text = $args{result_delete_action_text} // 'delete';
  my $depends_fn = $args{depends};

  # unpack the preserve matrix
  my $preserve_day_of_week = $preserve->{dow} || die;  # day a week starts on, e.g. "monday"
  my $preserve_hour_of_day = $preserve->{hod} // die;  # hour a day starts at
  my $preserve_min_n = $preserve->{min_n};  # "preserve minimum" quantity
  my $preserve_min_q = $preserve->{min_q};  # "preserve minimum" qualifier (all|latest|h|d|w|m|y)
  my $preserve_hourly = $preserve->{h};
  my $preserve_daily = $preserve->{d};
  my $preserve_weekly = $preserve->{w};
  my $preserve_monthly = $preserve->{m};
  my $preserve_yearly = $preserve->{y};

  DEBUG "Schedule: " . format_preserve_matrix($preserve, format => "debug_text");

  #  0    1    2     3     4    5     6     7     8
  # sec, min, hour, mday, mon, year, wday, yday, isdst

  # sort the schedule, ascending by date
  # regular entries come in front of informative_only
  my @sorted_schedule = sort { cmp_date($a->{btrbk_date}, $b->{btrbk_date} ) ||
                               (($a->{informative_only} ? ($b->{informative_only} ? 0 : 1) : ($b->{informative_only} ? -1 : 0)))
                             } @$schedule;

  DEBUG "Scheduler reference time: " . timestamp(\@tm_now, 'debug-iso');

  # first, do our calendar calculations
  # - days start on $preserve_hour_of_day (or 00:00 if timestamp_format=short)
  # - weeks start on $preserve_day_of_week
  # - months start on first $preserve_day_of_week of month
  # - years start on first $preserve_day_of_week of year
  # NOTE: leap hours are NOT taken into account for $delta_hours
  my $now_h = timegm_nocheck( 0, 0, $tm_now[2], $tm_now[3], $tm_now[4], $tm_now[5] ); # use timelocal() here (and below) if you want to honor leap hours

  foreach my $href (@sorted_schedule)
  {
    my @tm = localtime($href->{btrbk_date}->[0]);
    my $has_exact_time = $href->{btrbk_date}->[2];  # false for date-only timestamps
    # offset of this item from the configured start-of-day / start-of-week
    my $delta_hours_from_hod = $tm[2] - ($has_exact_time ? $preserve_hour_of_day : 0);
    my $delta_days_from_eow = $tm[6] - $day_of_week_map{$preserve_day_of_week};
    if($delta_hours_from_hod < 0) {
      # before start-of-day: counts towards the previous day
      $delta_hours_from_hod += 24;
      $delta_days_from_eow -= 1;
    }
    if($delta_days_from_eow < 0) {
      # before start-of-week: counts towards the previous week
      $delta_days_from_eow += 7;
    }
    my $month_corr = $tm[4]; # [0..11]
    my $year_corr = $tm[5];
    if($tm[3] <= $delta_days_from_eow) {
      # our month/year start on first $preserve_day_of_week, corrected value
      $month_corr -= 1;
      if($month_corr < 0) {
        $month_corr = 11;
        $year_corr -= 1;
      }
    }

    # check timegm: ignores leap hours
    my $delta_hours = int(($now_h - timegm_nocheck( 0, 0, $tm[2], $tm[3], $tm[4], $tm[5] ) ) / (60 * 60));
    my $delta_days = int(($delta_hours + $delta_hours_from_hod) / 24);  # days from beginning of day
    my $delta_weeks = int(($delta_days + $delta_days_from_eow) / 7);    # weeks from beginning of week
    my $delta_years = ($tm_now[5] - $year_corr);
    my $delta_months = $delta_years * 12 + ($tm_now[4] - $month_corr);

    $href->{delta_hours} = $delta_hours;
    $href->{delta_days} = $delta_days;
    $href->{delta_weeks} = $delta_weeks;
    $href->{delta_months} = $delta_months;
    $href->{delta_years} = $delta_years;

    # these are only needed for text output (format_preserve_delta)
    $href->{year} = $year_corr + 1900;
    $href->{month} = $month_corr + 1;
    $href->{delta_hours_from_hod} = $delta_hours_from_hod;
    $href->{delta_days_from_eow} = $delta_days_from_eow;
    $href->{real_hod} = $preserve_hour_of_day if($has_exact_time);

    if($preserve_date_in_future && ($delta_hours < 0)) {
      $href->{preserve} = "preserve forced: " . -($delta_hours) . " hours in the future";
    }
  }

  # bucket chains: oldest item per hour -> per day -> per week -> per
  # month -> per year (each level is fed from the previous one below)
  my %first_in_delta_hours;
  my %first_in_delta_days;
  my %first_in_delta_weeks;
  my %first_weekly_in_delta_months;
  my %first_monthly_in_delta_years;

  # filter "preserve all within N days/weeks/..."
  foreach my $href (@sorted_schedule) {
    if($preserve_min_q) {
      if($preserve_min_q eq 'all') {
        $href->{preserve} = "preserve min: all";
      } elsif($preserve_min_q eq 'h') {
        $href->{preserve} = "preserve min: $href->{delta_hours} hours ago" if($href->{delta_hours} <= $preserve_min_n);
      } elsif($preserve_min_q eq 'd') {
        $href->{preserve} = "preserve min: $href->{delta_days} days ago" if($href->{delta_days} <= $preserve_min_n);
      } elsif($preserve_min_q eq 'w') {
        $href->{preserve} = "preserve min: $href->{delta_weeks} weeks ago" if($href->{delta_weeks} <= $preserve_min_n);
      } elsif($preserve_min_q eq 'm') {
        $href->{preserve} = "preserve min: $href->{delta_months} months ago" if($href->{delta_months} <= $preserve_min_n);
      } elsif($preserve_min_q eq 'y') {
        $href->{preserve} = "preserve min: $href->{delta_years} years ago" if($href->{delta_years} <= $preserve_min_n);
      }
    }
    # ascending iteration: the first (oldest) entry of each hour wins
    $first_in_delta_hours{$href->{delta_hours}} //= $href;
  }
  if($preserve_min_q && ($preserve_min_q eq 'latest') && (scalar @sorted_schedule)) {
    my $href = $sorted_schedule[-1];  # newest entry
    $href->{preserve} = 'preserve min: latest';
  }

  # filter hourly, daily, weekly, monthly, yearly
  # (iterate largest delta first, i.e. oldest first, so each "first of
  # <period>" bucket receives the oldest candidate of that period)
  foreach (sort {$b <=> $a} keys %first_in_delta_hours) {
    my $href = $first_in_delta_hours{$_} || die;
    if($preserve_hourly && (($preserve_hourly eq 'all') || ($href->{delta_hours} <= $preserve_hourly))) {
      $href->{preserve} = "preserve hourly: first of hour, $href->{delta_hours} hours ago";
    }
    $first_in_delta_days{$href->{delta_days}} //= $href;
  }
  foreach (sort {$b <=> $a} keys %first_in_delta_days) {
    my $href = $first_in_delta_days{$_} || die;
    if($preserve_daily && (($preserve_daily eq 'all') || ($href->{delta_days} <= $preserve_daily))) {
      $href->{preserve} = "preserve daily: first of day" . ($href->{real_hod} ? sprintf(" (starting at %02u:00)", $href->{real_hod}) : "") . ", $href->{delta_days} days ago"
        . (defined($href->{real_hod}) ? ($href->{delta_hours_from_hod} ? ", $href->{delta_hours_from_hod}h after " : ", at ") . sprintf("%02u:00", $href->{real_hod}) : "");
    }
    $first_in_delta_weeks{$href->{delta_weeks}} //= $href;
  }
  foreach (sort {$b <=> $a} keys %first_in_delta_weeks) {
    my $href = $first_in_delta_weeks{$_} || die;
    if($preserve_weekly && (($preserve_weekly eq 'all') || ($href->{delta_weeks} <= $preserve_weekly))) {
      $href->{preserve} = "preserve weekly: $href->{delta_weeks} weeks ago," . _format_preserve_delta($href, $preserve_day_of_week);
    }
    $first_weekly_in_delta_months{$href->{delta_months}} //= $href;
  }
  foreach (sort {$b <=> $a} keys %first_weekly_in_delta_months) {
    my $href = $first_weekly_in_delta_months{$_} || die;
    if($preserve_monthly && (($preserve_monthly eq 'all') || ($href->{delta_months} <= $preserve_monthly))) {
      $href->{preserve} = "preserve monthly: first weekly of month $href->{year}-" . sprintf("%02u", $href->{month}) . " ($href->{delta_months} months ago," . _format_preserve_delta($href, $preserve_day_of_week) . ")";
    }
    $first_monthly_in_delta_years{$href->{delta_years}} //= $href;
  }
  foreach (sort {$b <=> $a} keys %first_monthly_in_delta_years) {
    my $href = $first_monthly_in_delta_years{$_} || die;
    if($preserve_yearly && (($preserve_yearly eq 'all') || ($href->{delta_years} <= $preserve_yearly))) {
      $href->{preserve} = "preserve yearly: first weekly of year $href->{year} ($href->{delta_years} years ago," . _format_preserve_delta($href, $preserve_day_of_week) . ")";
    }
  }

  if($depends_fn) {
    # for all preserved, check depends against all non-preserved
    foreach my $href (grep $_->{preserve}, @sorted_schedule) {
      my ($dtxt, $deps) = $depends_fn->($href, grep(!$_->{preserve}, @sorted_schedule));
      foreach my $dep (@$deps) {
        DEBUG "Preserving dependent: $dep->{value}{PRINT} <- $href->{value}{PRINT}";
        $dep->{preserve} = "preserve forced: $dtxt";
      }
    }
  }

  # assemble results
  my @delete;
  my @preserve;
  my %result_base = ( %$preserve,
                      scheme => format_preserve_matrix($preserve),
                      %$result_hints,
                    );
  my $count_defined = 0;  # entries considered (excludes informative_only)
  foreach my $href (@sorted_schedule)
  {
    my $result_reason_text = $href->{preserve};
    my $result_action_text;

    unless($href->{informative_only}) {
      if($href->{preserve}) {
        if($preserve_threshold_date && (cmp_date($href->{btrbk_date}, $preserve_threshold_date) <= 0)) {
          # older than threshold, do not add to preserve list
          $result_reason_text = "$result_reason_text, ignored (archive_exclude_older) older than existing archive";
        }
        else {
          push(@preserve, $href->{value});
          $result_action_text = $result_preserve_action_text;
        }
      }
      else {
        push(@delete, $href->{value});
        $result_action_text = $result_delete_action_text;
      }
      $count_defined++;
    }

    TRACE join(" ", "schedule: $href->{value}{PRINT}", ($href->{informative_only} ? "(informative_only)" : uc($result_action_text || "-")), ($result_reason_text // "-")) if($do_trace && $href->{value} && $href->{value}{PRINT});
    push @$results_list, { %result_base,
                           action => $result_action_text,
                           reason => $result_reason_text,
                           value => $href->{value},
                         } if($results_list);
  }
  DEBUG "Preserving " . @preserve . "/" . $count_defined . " items";
  return (\@preserve, \@delete);
}
|
|
|
|
|
|
# Format the offset of a scheduled item relative to the configured
# week/day boundary, for human-readable preserve reasons,
# e.g. " 1d 2h after monday 00:00" or " at monday".
#
# $href:                 schedule entry providing delta_days_from_eow,
#                        delta_hours_from_hod and (optionally) real_hod
# $preserve_day_of_week: configured start of week, e.g. "monday"
#
# fix: prototype was ($$$), but the sub consumes (and all callers pass)
# exactly two arguments -- the mismatch was only unnoticed because the
# call sites precede this definition, so the prototype was never checked.
sub _format_preserve_delta($$)
{
  my $href = shift;
  my $preserve_day_of_week = shift;
  my $s = "";
  $s .= " $href->{delta_days_from_eow}d" if($href->{delta_days_from_eow});
  $s .= " $href->{delta_hours_from_hod}h" if($href->{delta_hours_from_hod});
  return ($s ? "$s after " : " at ") . $preserve_day_of_week . (defined($href->{real_hod}) ? sprintf(" %02u:00", $href->{real_hod}) : "");
}
|
|
|
|
|
|
# Render a preserve matrix as text.
#
# $preserve: hashref { min_n, min_q, h, d, w, m, y, dow, hod }
# %opts:     format => "short" (default, e.g. "2d+ 7d 4w (monday, 00:00)"),
#            "config" (short without the dow/hod suffix), or
#            "debug_text" (verbose prose for debug logging)
sub format_preserve_matrix($@)
{
  my $preserve = shift || die;
  my %opts = @_;
  my $format = $opts{format} // "short";
  my $min_q = $preserve->{min_q};
  my $keep_everything = $min_q && ($min_q eq 'all');

  if($format eq "debug_text") {
    # verbose variant for DEBUG output
    my %unit_name = ( h => 'hours', d => 'days', w => 'weeks', m => 'months', y => 'years' );
    my @text;
    if($keep_everything) {
      push @text, "all forever";
    }
    else {
      push @text, "latest" if($min_q && ($min_q eq 'latest'));
      push @text, "all within $preserve->{min_n} $unit_name{$min_q}" if($preserve->{min_n} && $min_q);
      push @text, "first of day (starting at " . sprintf("%02u:00", $preserve->{hod}) . ") for $preserve->{d} days" if($preserve->{d});
      # a period set to 'all' makes all coarser periods redundant
      my $d_all = $preserve->{d} && ($preserve->{d} eq 'all');
      my $w_all = $preserve->{w} && ($preserve->{w} eq 'all');
      my $m_all = $preserve->{m} && ($preserve->{m} eq 'all');
      push @text, "first daily in week (starting on $preserve->{dow}) for $preserve->{w} weeks" if($preserve->{w} && !$d_all);
      push @text, "first weekly of month for $preserve->{m} months" if($preserve->{m} && !$d_all && !$w_all);
      push @text, "first weekly of year for $preserve->{y} years" if($preserve->{y} && !$d_all && !$w_all && !$m_all);
    }
    return 'preserving ' . join('; ', @text);
  }

  # compact variant ("short" / "config")
  return '*d+' if($keep_everything);

  my @parts;
  # $s .= '.+' if($preserve->{min_q} && ($preserve->{min_q} eq 'latest'));
  push @parts, $preserve->{min_n} . $min_q . '+' if($preserve->{min_n} && $min_q);
  foreach my $unit (qw(h d w m y)) {
    my $count = $preserve->{$unit} // 0;
    next unless($count);
    $count = '*' if($count eq 'all');
    push @parts, $count . $unit;
  }
  my $s = join(' ', @parts);
  if(($format ne "config") && ($preserve->{d} || $preserve->{w} || $preserve->{m} || $preserve->{y})) {
    # day-based schemes depend on week start / day start: show them
    $s .= " ($preserve->{dow}, " . sprintf("%02u:00", $preserve->{hod}) . ")";
  }
  return $s;
}
|
|
|
|
|
|
# Format a timestamp.
#
# $time:      unix time, or arrayref as returned by localtime()
# $format:    "short"     -> YYYYMMDD (no timezone suffix)
#             "long"      -> YYYYMMDDThhmm (no timezone suffix)
#             "long-iso"  -> YYYYMMDDThhmmss+zzzz
#             "debug-iso" -> YYYY-MM-DDThh:mm:ss+zzzz
# $tm_is_utc: if true, append "+0000" instead of computing the local
#             timezone offset (only affects long-iso / debug-iso)
#
# Dies on unknown $format.
#
# fix: removed unreachable "return undef;" after the unconditional
# "return $ts;" (dead code).
sub timestamp($$;$)
{
  my $time = shift // die; # unixtime, or arrayref from localtime()
  my $format = shift;
  my $tm_is_utc = shift;
  my @tm = ref($time) ? @$time : localtime($time);
  my $ts;
  # NOTE: can't use POSIX::strftime(), as "%z" always prints offset of local timezone!

  if($format eq "short") {
    return sprintf('%04u%02u%02u', $tm[5] + 1900, $tm[4] + 1, $tm[3]);
  }
  elsif($format eq "long") {
    return sprintf('%04u%02u%02uT%02u%02u', $tm[5] + 1900, $tm[4] + 1, $tm[3], $tm[2], $tm[1]);
  }
  elsif($format eq "long-iso") {
    $ts = sprintf('%04u%02u%02uT%02u%02u%02u', $tm[5] + 1900, $tm[4] + 1, $tm[3], $tm[2], $tm[1], $tm[0]);
  }
  elsif($format eq "debug-iso") {
    $ts = sprintf('%04u-%02u-%02uT%02u:%02u:%02u', $tm[5] + 1900, $tm[4] + 1, $tm[3], $tm[2], $tm[1], $tm[0]);
  }
  else { die; }

  # append timezone offset suffix ("+hhmm" / "-hhmm")
  if($tm_is_utc) {
    $ts .= '+0000'; # or 'Z'
  } else {
    # derive the local UTC offset by interpreting @tm in both zones
    my $offset = timegm(@tm) - timelocal(@tm);
    if($offset < 0) { $ts .= '-'; $offset = -$offset; } else { $ts .= '+'; }
    my $h = int($offset / (60 * 60));
    die if($h > 24); # sanity check, something went really wrong
    $ts .= sprintf('%02u%02u', $h, int($offset / 60) % 60);
  }
  return $ts;
}
|
|
|
|
|
|
# Print a boxed section header for run/transaction output: title with
# version info, plus optional date, config file, dryrun flag, command
# line filter list, free-form info lines, options and legend sections.
#
# Recognized args (hash): title, config, time (unixtime), info (arrayref),
# options (arrayref), legend (arrayref), paragraph (trailing blank line).
sub print_header(@)
{
  my %args = @_;
  my $config = $args{config};

  print "--------------------------------------------------------------------------------\n";
  print "$args{title} ($VERSION_INFO)\n\n";
  if($args{time}) {
    print " Date: " . localtime($args{time}) . "\n";
  }
  if($config) {
    print " Config: " . config_key($config, "SRC_FILE") . "\n";
  }
  if($dryrun) {
    print " Dryrun: YES\n";
  }
  if($config && $config->{CMDLINE_FILTER_LIST}) {
    print " Filter: ";
    print join("\n ", @{$config->{CMDLINE_FILTER_LIST}});
    print "\n";
  }
  if($args{info}) {
    # free-form info lines (undef entries are skipped)
    print "\n" . join("\n", grep(defined, @{$args{info}})) . "\n";
  }
  if(my @options = grep(defined, @{$args{options} // []})) {
    print "\nOptions:\n ";
    print join("\n ", @options);
    print "\n";
  }
  if($args{legend}) {
    print "\nLegend:\n ";
    print join("\n ", grep(defined, @{$args{legend}}));
    print "\n";
  }
  print "--------------------------------------------------------------------------------\n";
  print "\n" if($args{paragraph});
}
|
|
|
|
|
|
# Print closing notes after a run: on non-zero exit status, an error
# summary including all collected FIX_MANUALLY hints (deduplicated,
# in first-seen order); plus a reminder if dryrun was active.
sub print_footer($$)
{
  my ($config, $exit_status) = @_;
  if($exit_status) {
    print "\nNOTE: Some errors occurred, which may result in missing backups!\n";
    print "Please check warning and error messages above.\n";
    # collect all FIX_MANUALLY messages from the config tree, dropping duplicates
    my %seen;
    my @hints = grep { !$seen{$_}++ } _config_collect_values($config, "FIX_MANUALLY");
    print join("\n", @hints) . "\n" if(@hints);
  }
  if($dryrun) {
    print "\nNOTE: Dryrun was active, none of the operations above were actually executed!\n";
  }
}
|
|
|
|
|
|
# Print a simple two-column table: the first column is padded with
# spaces to the width of its widest entry, then $spacing, then the
# second column.
#
# $data:    arrayref of [ left, right ] pairs
# $spacing: separator between the columns (optional)
sub print_table($;$)
{
  my ($data, $spacing) = @_;
  $spacing //= " ";
  # first pass: widest left-hand entry
  my $col_width = 0;
  foreach my $row (@$data) {
    my $len = length($row->[0]);
    $col_width = $len if($len > $col_width);
  }
  # second pass: emit padded rows
  foreach my $row (@$data) {
    print $row->[0], ' ' x ($col_width - length($row->[0])), $spacing, $row->[1], "\n";
  }
}
|
|
|
|
|
|
# Print tabular data in one of several output formats.
#
# $format_key: name of a predefined layout in %table_formats, or a
#              layout hashref itself
# $data:       arrayref of row hashrefs (keys = column names)
# Remaining args (hash): title, output_format, pretty, no_header,
#              outfile (filehandle), empty_cell_char, paragraph.
#
# Formats: aligned table (default), "single_column", "raw"
# (key="value" pairs), "tlog"/"syslog", or explicit column selection
# via "col:[h:]key1,key2:R,..." ("h:" hides the header, ":R"/":RALIGN"
# right-aligns a column).
sub print_formatted(@)
{
  my $format_key = shift || die;
  my $data = shift || die;
  my $default_format = "table";
  my %args = @_;
  my $title = $args{title};
  my $table_format = ref($format_key) ? $format_key : $table_formats{$format_key};
  my $format = $args{output_format} || $output_format || $default_format;
  my $pretty = $args{pretty} // $output_pretty;
  my $no_header = $args{no_header};
  my $fh = $args{outfile} // *STDOUT;
  my $table_spacing = 2;
  my $empty_cell_char = $args{empty_cell_char} // "-";

  my @keys;         # column keys in display order
  my %ralign;       # columns to right-align
  my %hide_column;  # columns hidden unless some row has a defined value
  if($format =~ s/^col:\s*(h:)?\s*//) {
    # explicit column selection: "col:[h:]key[:R[ALIGN]],..."
    $no_header = 1 if($1);
    foreach (split(/\s*,\s*/, $format)) {
      # NOTE(review): %ralign is keyed on the name before lc(), while
      # @keys stores lc($_) -- a mixed-case ":R" column would not be
      # right-aligned; confirm whether that is intended.
      $ralign{$_} = 1 if s/:R(ALIGN)?$//i;
      push @keys, lc($_);
    }
  }
  else {
    unless(exists($table_format->{$format})) {
      WARN "Unsupported output format \"$format\", defaulting to \"$default_format\" format.";
      $format = $default_format;
    }
    @keys = @{$table_format->{$format}};
    %ralign = %{$table_format->{RALIGN} // {}};
  }
  # strips leading "-" from @keys (grep aliases $_, so the s/// edits
  # @keys in place); such columns start out hidden
  %hide_column = map { $_ => 1 } grep { s/^-// } @keys;

  if($format eq "single_column")
  {
    # single-column: newline separated values, no headers
    my $key = $keys[0];
    foreach (grep defined, map $_->{$key}, @$data) {
      print $fh $_ . "\n" if($_ ne "");
    }
  }
  elsif($format eq "raw")
  {
    # output: key0="value0" key1="value1" ...
    foreach my $row (@$data) {
      print $fh "format=\"$format_key\" ";
      print $fh join(' ', map { "$_=" . quoteshell(($row->{$_} // "")) } @keys) . "\n";
    }
  }
  elsif(($format eq "tlog") || ($format eq "syslog"))
  {
    # output: value0 value1, ...
    unless($no_header) {
      print $fh join(' ', map uc($_), @keys) . "\n"; # unaligned upper case headings
    }
    foreach my $row (@$data) {
      # "message" columns get a leading "# " so log parsers can skip them
      my $line = join(' ', map { ((defined($row->{$_}) && ($_ eq "message")) ? '# ' : '') . ($row->{$_} // "-") } @keys);
      if($format eq "syslog") { # dirty hack, ignore outfile on syslog format
        syslog($line);
      } else {
        print $fh ($line . "\n");
      }
    }
  }
  else
  {
    # aligned table output ("table" / "long" / "col:...")
    # Text::CharWidth does it correctly with wide chars (e.g. asian) taking up two columns
    my $termwidth = eval_quiet { require Text::CharWidth; } ? \&Text::CharWidth::mbswidth :
                    eval_quiet { require Encode; } ? sub { length(Encode::decode_utf8(shift)) } :
                    sub { length(shift) };

    # sanitize and calculate maxlen for each column
    my %maxlen = map { $_ => $no_header ? 0 : length($_) } @keys;
    my @formatted_data;
    foreach my $row (@$data) {
      my %formatted_row;
      foreach my $key (@keys) {
        my $val = $row->{$key};
        $val = join(',', @$val) if(ref $val eq "ARRAY");

        # a defined value anywhere un-hides a "-"-prefixed column
        $hide_column{$key} = 0 if(defined($val));
        $val = $empty_cell_char if(!defined($val) || ($val eq ""));
        $formatted_row{$key} = $val;
        my $vl = $termwidth->($val);
        $maxlen{$key} = $vl if($maxlen{$key} < $vl);
      }
      push @formatted_data, \%formatted_row;
    }
    my @visible_keys = grep !$hide_column{$_}, @keys;

    # print title
    if($title) {
      print $fh "$title\n";
      print $fh '-' x length($title) . "\n"; # separator line
    }

    # print keys (headings)
    unless($no_header) {
      # $fill carries pending padding; only emitted before the next
      # column so trailing whitespace is never printed
      my $fill = 0;
      foreach (@visible_keys) {
        print $fh ' ' x $fill;
        $fill = $maxlen{$_} - length($_);
        if($pretty) {
          # use aligned lower case headings (with separator line below)
          if($ralign{$_}) {
            print $fh ' ' x $fill;
            $fill = 0;
          }
          print $fh $_;
        } else {
          print $fh uc($_); # default unaligned upper case headings
        }
        $fill += $table_spacing;
      }
      print $fh "\n";

      $fill = 0;
      if($pretty) { # separator line after header
        foreach (@visible_keys) {
          print $fh ' ' x $fill;
          print $fh '-' x $maxlen{$_};
          $fill = $table_spacing;
        }
        print $fh "\n";
        # alternative (all above in one line ;)
        #print $fh join(' ' x $table_spacing, map { '-' x ($maxlen{$_}) } @keys) . "\n";
      }
    }

    # print values
    foreach my $row (@formatted_data) {
      my $fill = 0;
      foreach (@visible_keys) {
        my $val = $row->{$_};
        print $fh ' ' x $fill;
        $fill = $maxlen{$_} - $termwidth->($val);
        if($ralign{$_}) {
          print $fh ' ' x $fill;
          $fill = 0;
        }
        print $fh $val;
        $fill += $table_spacing;
      }
      print $fh "\n";
    }

    # print additional newline for paragraphs
    if($args{paragraph}) {
      print $fh "\n";
    }
  }
}
|
|
|
|
|
|
# Format a byte count for display.  Honors --format=raw (returns the
# plain number), a fixed unit from --bytes/--kbytes/... (@output_unit),
# or auto-scales to the largest fitting binary unit (KiB..TiB).
# Returns "-" for a zero size.
sub print_size($)
{
  my $size = shift;
  # raw output mode: emit the unscaled number
  if($output_format && ($output_format eq "raw")) {
    return $size;
  }
  return "-" if($size == 0);
  my ($unit, $mul);
  if(@output_unit) {
    # fixed unit requested on the command line
    ($unit, $mul) = @output_unit;
  }
  else {
    # auto-scale: promote the unit while the value exceeds the next step
    ($unit, $mul) = ("KiB", 1024);
    foreach my $next_unit ("MiB", "GiB", "TiB") {
      last unless($size > $mul * 1024);
      ($unit, $mul) = ($next_unit, $mul * 1024);
    }
  }
  return $size if($mul == 1);
  return sprintf('%.2f', ($size / $mul)) . " $unit";
}
|
|
|
|
|
|
# Recursively collect the "origin tree" of a subvolume node into
# @$lines: its chain of snapshot parents (parent_uuid) and, for
# received subvolumes, the subvolume(s) it was received from
# (received_uuid), including "twins" received from the same origin.
#
# $prefix:    tree-drawing prefix for this node's line ("" or "^-- ")
# $node:      node hashref from %uuid_cache (must have uuid)
# $lines:     arrayref accumulating output rows
#             ({ tree, uuid, parent_uuid, received_uuid, [recursion] })
# $nodelist:  cached sorted list of all known nodes (built on first call)
# $depth:     indentation depth (4 spaces per level)
# $seen:      arrayref of uuids already visited (recursion guard)
# $norecurse: if set, print the parent as "..." and stop descending
#
# Always returns 0; the result is the side effect on @$lines.
sub _origin_tree
{
  my $prefix = shift;
  my $node = shift // die;
  my $lines = shift;
  my $nodelist = shift;
  my $depth = shift // 0;
  my $seen = shift // [];
  my $norecurse = shift;
  my $uuid = $node->{uuid} || die;

  # cache a bit, this might be large
  # note: root subvolumes dont have REL_PATH
  $nodelist //= [ (sort { ($a->{REL_PATH} // "") cmp ($b->{REL_PATH} // "") } values %uuid_cache) ];

  my $prefix_spaces = ' ' x (($depth * 4) - ($prefix ? 4 : 0));
  push(@$lines, { tree => "${prefix_spaces}${prefix}" . _fs_path($node),
                  uuid => $node->{uuid},
                  parent_uuid => $node->{parent_uuid},
                  received_uuid => $node->{received_uuid},
                });

  # handle deep recursion
  return 0 if(grep /^$uuid$/, @$seen);

  # follow the snapshot parent chain (parent_uuid)
  if($node->{parent_uuid} ne '-') {
    my $parent_node = $uuid_cache{$node->{parent_uuid}};
    if($parent_node) {
      if($norecurse) {
        # caller requested a single level: print ellipsis and stop
        push(@$lines,{ tree => "${prefix_spaces}    ^-- ...",
                       uuid => $parent_node->{uuid},
                       parent_uuid => $parent_node->{parent_uuid},
                       received_uuid => $parent_node->{received_uuid},
                       recursion => 'stop_recursion',
                     });
        return 0;
      }
      if($parent_node->{readonly}) {
        _origin_tree("^-- ", $parent_node, $lines, $nodelist, $depth + 1, undef, 1); # end recursion
      }
      else {
        _origin_tree("^-- ", $parent_node, $lines, $nodelist, $depth + 1);
      }
    }
    else {
      # parent uuid not present in %uuid_cache (e.g. deleted subvolume)
      push(@$lines,{ tree => "${prefix_spaces}    ^-- <unknown uuid=$node->{parent_uuid}>" });
    }
  }

  return 0 if($norecurse);
  push(@$seen, $uuid);

  # follow the receive chain (received_uuid)
  if($node->{received_uuid} ne '-') {
    my $received_uuid = $node->{received_uuid};
    my @receive_parents; # there should be only one!
    my @receive_twins;   # other subvolumes received from the same origin

    foreach (@$nodelist) {
      next if($_->{uuid} eq $uuid);
      if($received_uuid eq $_->{uuid} && $_->{readonly}) {
        _origin_tree("", $_, \@receive_parents, $nodelist, $depth, $seen);
      }
      elsif(($_->{received_uuid} ne '-') && ($received_uuid eq $_->{received_uuid}) && $_->{readonly}) {
        _origin_tree("", $_, \@receive_twins, $nodelist, $depth, $seen, 1); # end recursion
      }
    }
    push @$lines, @receive_twins;
    push @$lines, @receive_parents;
  }

  return 0;
}
|
|
|
|
|
|
# Derive the process exit status from the config tree: recursively
# returns 10 if any subsection was aborted with an "abort_*" category
# or carries a FIX_MANUALLY message (treated as errors), 0 otherwise.
sub exit_status
{
  my $config = shift;
  foreach my $subsection (@{$config->{SUBSECTION}}) {
    return 10 if(IS_ABORTED($subsection, "abort_")
                 || defined($subsection->{FIX_MANUALLY})  # treated as errors
                 || exit_status($subsection));
  }
  return 0;
}
|
|
|
|
|
|
|
|
MAIN:
|
|
{
|
|
# NOTE: Since v0.26.0, btrbk does not enable taint mode (perl -T) by
|
|
# default, and does not hardcode $PATH anymore.
|
|
#
|
|
# btrbk still does all taint checks, and can be run in taint mode.
|
|
# In order to enable taint mode, run `perl -T btrbk`.
|
|
#
|
|
# see: perlrun(1), perlsec(1)
|
|
#
|
|
my $taint_mode_enabled = eval '${^TAINT}';
|
|
if($taint_mode_enabled) {
|
|
# we are running in tainted mode (perl -T), sanitize %ENV
|
|
delete @ENV{qw(IFS CDPATH ENV BASH_ENV)};
|
|
|
|
# in taint mode, perl needs an untainted $PATH.
|
|
$ENV{PATH} = '/sbin:/bin:/usr/sbin:/usr/bin';
|
|
}
|
|
|
|
Getopt::Long::Configure qw(gnu_getopt);
|
|
my $start_time = time;
|
|
@tm_now = localtime($start_time);
|
|
|
|
my @config_override_cmdline;
|
|
my @exclude_cmdline;
|
|
my ($config_cmdline, $lockfile_cmdline, $print_schedule,
|
|
$preserve_snapshots, $preserve_backups, $wipe_snapshots, $skip_snapshots, $skip_backups,
|
|
$raw_cmdline, $extents_related, $recursive
|
|
);
|
|
|
|
# Calling btrbk via "lsbtr" symlink acts as an alias for "btrbk ls",
|
|
# while also changing the semantics of the command line options.
|
|
$program_name = $0;
|
|
$program_name =~ s/^.*\///; # remove path
|
|
my @getopt_options = (
|
|
# common options
|
|
'help|h' => sub { HELP_MESSAGE; exit 0; },
|
|
'version' => sub { VERSION_MESSAGE; exit 0; },
|
|
'quiet|q' => \$quiet,
|
|
'verbose|v' => sub { $loglevel = ($loglevel =~ /^[0-9]+$/) ? $loglevel+1 : 2; },
|
|
'loglevel|l=s' => \$loglevel,
|
|
'format=s' => \$output_format,
|
|
'single-column|1' => sub { $output_format = "single_column" },
|
|
'pretty' => \$output_pretty,
|
|
'config|c=s' => \$config_cmdline,
|
|
'override=s' => \@config_override_cmdline, # e.g. --override=incremental=no
|
|
'lockfile=s' => \$lockfile_cmdline,
|
|
);
|
|
push @getopt_options, ($program_name eq "lsbtr") ? (
|
|
# "lsbtr" options
|
|
'long|l' => sub { $output_format = "table" },
|
|
'uuid|u' => sub { $output_format = "long" },
|
|
'raw' => sub { $output_format = "raw" },
|
|
) : (
|
|
# "btrbk" options
|
|
'dry-run|n' => \$dryrun,
|
|
'exclude=s' => \@exclude_cmdline,
|
|
'preserve|p' => sub { $preserve_snapshots = "preserve", $preserve_backups = "preserve" },
|
|
'preserve-snapshots' => sub { $preserve_snapshots = "preserve-snapshots" },
|
|
'preserve-backups' => sub { $preserve_backups = "preserve-backups" },
|
|
'recursive|r' => sub { $recursive = 1 },
|
|
'wipe' => \$wipe_snapshots,
|
|
'progress' => \$show_progress,
|
|
'related' => \$extents_related,
|
|
'table|t' => sub { $output_format = "table" },
|
|
'long|L' => sub { $output_format = "long" },
|
|
'print-schedule|S' => \$print_schedule,
|
|
'raw' => \$raw_cmdline,
|
|
'bytes' => sub { @output_unit = ("", 1 ) },
|
|
'kbytes' => sub { @output_unit = ("KiB", 1024 ) },
|
|
'mbytes' => sub { @output_unit = ("MiB", 1024 * 1024 ) },
|
|
'gbytes' => sub { @output_unit = ("GiB", 1024 * 1024 * 1024 ) },
|
|
'tbytes' => sub { @output_unit = ("TiB", 1024 * 1024 * 1024 * 1024 ) },
|
|
);
|
|
unless(GetOptions(@getopt_options)) {
|
|
ERROR_HELP_MESSAGE;
|
|
exit 2;
|
|
}
|
|
if($program_name eq "lsbtr") {
|
|
unshift @ARGV, './' unless(@ARGV); # default to current path
|
|
unshift @ARGV, "ls"; # implicit "btrbk ls"
|
|
}
|
|
my $command = shift @ARGV;
|
|
unless($command) {
|
|
HELP_MESSAGE;
|
|
exit 2;
|
|
}
|
|
|
|
# assign command line options
if($config_cmdline) {
  @config_src = ( $config_cmdline );   # --config replaces the default search list
}

# Map symbolic loglevel names to numeric levels; numeric input passes through.
$loglevel = do {
  my %level_by_name = ( error => 0, warn => 1, warning => 1, info => 2, debug => 3, trace => 4 );
  $level_by_name{$loglevel} // $loglevel;
};
if($loglevel !~ /^[0-9]+$/) {
  ERROR "Unknown loglevel: $loglevel";
  ERROR_HELP_MESSAGE;
  exit 2;
}
$do_trace = 1 if($loglevel >= 4);
# Data::Dumper is only needed for trace output (and dev builds).
require_data_dumper() if($do_trace || ($VERSION =~ /-dev$/));

# check command line options
if($show_progress && !check_exe('mbuffer')) {
  WARN 'Found option "--progress", but required executable "mbuffer" does not exist on your system. Please install "mbuffer".';
  $show_progress = 0;
}
|
|
# Per-command state, assigned by the command dispatch below.
my ($action_run, $action_usage, $action_resolve, $action_diff, $action_extents, $action_origin, $action_config_print, $action_list, $action_clean, $action_archive, $action_cp, $action_ls);
my @filter_args;                 # arguments interpreted as config filter statements
my @subvol_args;                 # arguments interpreted as subvolume paths/URLs
my $args_expected_min = 0;       # minimum positional arguments for the command
my $args_expected_max = 9999;    # maximum positional arguments for the command
my $fallback_default_config;     # if set, use built-in defaults when no config file found
my $subvol_args_allow_relative;  # if set, relative local paths in @subvol_args are resolved
my $subvol_args_init;            # init flags string: "restrict_same_fs", "deny_root_subvol"
|
|
# Command dispatch: set the action flag for $command, and classify the
# remaining @ARGV as filter arguments or subvolume arguments.
if(($command eq "run") || ($command eq "dryrun")) {
  $action_run = 1;
  $dryrun = 1 if($command eq "dryrun");
  @filter_args = @ARGV;
}
elsif($command eq "snapshot") {
  # snapshot-only run: skip backup creation / backup pruning
  $action_run = 1;
  $skip_backups = "snapshot";
  $preserve_backups = "snapshot";
  @filter_args = @ARGV;
}
elsif($command eq "resume") {
  # resume backups only: skip snapshot creation
  $action_run = 1;
  $skip_snapshots = "resume";
  @filter_args = @ARGV;
}
elsif($command eq "prune") {
  # prune only: no snapshot/backup creation
  $action_run = 1;
  $skip_snapshots = "prune";
  $skip_backups = "prune";
  @filter_args = @ARGV;
}
elsif ($command eq "clean") {
  $action_clean = 1;
  @filter_args = @ARGV;
}
elsif ($command eq "archive") {
  # archive <src> <dst>: exactly two subvolume arguments
  $action_archive = 1;
  $fallback_default_config = 1;
  $args_expected_min = $args_expected_max = 2;
  $subvol_args_allow_relative = 1;
  @subvol_args = @ARGV;
}
elsif ($command eq "cp") {
  # cp <src>... <dst>: at least two subvolume arguments
  $action_cp = 1;
  $fallback_default_config = 1;
  $args_expected_min = 2;
  $subvol_args_allow_relative = 1;
  @subvol_args = @ARGV;
}
elsif ($command eq "usage") {
  $action_usage = 1;
  @filter_args = @ARGV;
}
elsif ($command eq "ls") {
  $action_ls = 1;
  $fallback_default_config = 1;
  $args_expected_min = 1;
  $subvol_args_allow_relative = 1;
  @subvol_args = @ARGV;
}
elsif ($command eq "diff") {
  # diff <from> <to>: both arguments must be non-root subvolumes on the same filesystem
  $action_diff = 1;
  $fallback_default_config = 1;
  $args_expected_min = $args_expected_max = 2;
  $subvol_args_init = "restrict_same_fs deny_root_subvol";
  $subvol_args_allow_relative = 1;
  @subvol_args = @ARGV;
}
elsif ($command eq "extents") {
  # extents [list|diff] <subvol>... ["exclusive" <subvol>...]
  my $subcommand = shift @ARGV // "";
  if(($subcommand eq "list") ||
     ($subcommand eq "diff")) {
    $action_extents = $subcommand;
  }
  else { # defaults to "list"
    unshift @ARGV, $subcommand;
    $action_extents = "list";
  }
  $fallback_default_config = 1;
  $args_expected_min = 1;
  $subvol_args_init = "restrict_same_fs";
  $subvol_args_allow_relative = 1;
  my $excl;
  foreach(@ARGV) {
    # subvol_arg... "exclusive" filter_arg...
    # arguments after the "exclusive" keyword also become filter arguments
    if($_ eq "exclusive") {
      $excl = 1;
    } else {
      push @subvol_args, $_;
      push @filter_args, $_ if($excl);
    }
  }
}
elsif ($command eq "origin") {
  $action_origin = 1;
  $args_expected_min = $args_expected_max = 1;
  $subvol_args_init = "deny_root_subvol";
  $subvol_args_allow_relative = 1;
  @subvol_args = @ARGV;
}
elsif($command eq "list") {
  my $subcommand = shift @ARGV // "";
  # config-table subcommands
  if(($subcommand eq "config") ||
     ($subcommand eq "volume") ||
     ($subcommand eq "source") ||
     ($subcommand eq "target"))
  {
    $action_list = $subcommand;
  }
  # subvolume-resolving subcommands
  elsif(($subcommand eq "all") ||
        ($subcommand eq "snapshots") ||
        ($subcommand eq "backups") ||
        ($subcommand eq "latest"))
  {
    $action_resolve = $subcommand;
  }
  else {
    # unknown/missing subcommand: default to "all", treat token as filter argument
    $action_resolve = "all";
    unshift @ARGV, $subcommand if($subcommand ne "");
  }
  @filter_args = @ARGV;
}
elsif($command eq "stats") {
  $action_resolve = "stats";
  @filter_args = @ARGV;
}
elsif ($command eq "config") {
  my $subcommand = shift @ARGV // "";
  @filter_args = @ARGV;
  if(($subcommand eq "print") || ($subcommand eq "print-all")) {
    $action_config_print = $subcommand;
  }
  elsif($subcommand eq "list") {
    $action_list = "config";
  }
  else {
    ERROR "Unknown subcommand for \"config\" command: $subcommand";
    ERROR_HELP_MESSAGE;
    exit 2;
  }
}
else {
  ERROR "Unrecognized command: $command";
  ERROR_HELP_MESSAGE;
  exit 2;
}
|
|
# Enforce the per-command positional argument count set by the dispatch above.
if((scalar(@ARGV) < $args_expected_min) || (scalar(@ARGV) > $args_expected_max)) {
  my $expected = ($args_expected_min == $args_expected_max) ? $args_expected_min
               : (scalar(@ARGV) < $args_expected_min)       ? "at least $args_expected_min"
               :                                              "at most $args_expected_max";
  ERROR "Incorrect number of arguments for \"$command\" command: expected " . $expected . ", got " . scalar(@ARGV);
  ERROR_HELP_MESSAGE;
  exit 2;
}
|
|
|
|
# input validation
# Validate and canonicalize the subvolume arguments in-place: each must
# be a valid subvolume URL; for commands allowing it, an existing local
# directory given as a relative path is resolved to an absolute path.
foreach my $arg (@subvol_args) {
  my ($url_prefix, $path) = check_url($arg);
  if(!defined($path) && $subvol_args_allow_relative && ($url_prefix eq "") && (-d $arg)) {
    $path = check_file(abs_path($arg), { absolute => 1, sanitize => 1 });
  }
  if(!defined($path)) {
    ERROR "Bad argument: not a subvolume declaration: $arg";
    ERROR_HELP_MESSAGE;
    exit 2;
  }
  $arg = $url_prefix . $path;  # foreach aliases: writes back to @subvol_args
}
|
|
# Parse each filter argument into a vinfo filter; abort on the first invalid one.
my @filter_vf;
foreach my $arg (@filter_args) {
  my $parsed = vinfo_filter_statement($arg, "filter_argument");
  if(!$parsed) {
    ERROR "Bad argument: invalid filter statement: $arg";
    ERROR_HELP_MESSAGE;
    exit 2;
  }
  push @filter_vf, $parsed;
}
|
|
# Parse --exclude options into vinfo filters, appended to @exclude_vf.
foreach my $arg (@exclude_cmdline) {
  my $parsed = vinfo_filter_statement($arg, "exclude");
  if(!$parsed) {
    ERROR "Bad argument: invalid filter statement: --exclude='$arg'";
    ERROR_HELP_MESSAGE;
    exit 2;
  }
  push @exclude_vf, $parsed;
}
|
|
# Apply --override=<config_option>=<value> options to the override table.
foreach my $override (@config_override_cmdline) {
  my ($key, $value) = $override =~ /(.*?)=(.*)/;
  if(!defined($key)) {
    # no "=" found in the argument
    ERROR "Option \"override\" requires \"<config_option>=<value>\" format";
    ERROR_HELP_MESSAGE;
    exit 2;
  }
  unless(append_config_option(\%config_override, $key, $value, "OVERRIDE", error_statement => "in option \"--override\"")) {
    ERROR_HELP_MESSAGE;
    exit 2;
  }
}
|
|
# Validate the --lockfile path (absolute or relative); exit on bad path.
if(defined($lockfile_cmdline)) {
  $lockfile = check_file($lockfile_cmdline, { absolute => 1, relative => 1 },
                         error_statement => 'for option --lockfile');
  exit 2 unless($lockfile);
}
|
|
|
|
|
|
# Log startup (version and start time), and record it in the transaction log.
INFO "$VERSION_INFO (" . localtime($start_time) . ")";
action("startup", status => "v$VERSION", message => $VERSION_INFO, time => $start_time);
|
|
|
|
|
|
#
# parse config file
#
# Use the first existing file from @config_src (replaced by --config if
# given). Commands with $fallback_default_config set run with built-in
# defaults when no config file exists, unless --config was explicit.
my $config;
if(my $config_file = _config_file(@config_src)) {
  INFO "Using configuration: $config_file";
  $config = parse_config($config_file);
  exit 2 unless($config);
}
elsif($fallback_default_config && !$config_cmdline) {
  INFO "Configuration file not found: " . join(', ', @config_src);
  INFO "Using default configuration";
  $config = init_config();
}
else {
  ERROR "Configuration file not found: " . join(', ', @config_src);
  exit 2;
}
$safe_commands = config_key($config, 'safe_commands');

# A configuration with no "volume" subsections is unusable; bail out early.
unless(ref($config->{SUBSECTION}) eq "ARRAY") {
  ERROR "No volumes defined in configuration file";
  exit 2;
}
|
|
|
|
# input validation (part 2, after config is initialized)
# Convert subvolume argument strings to vinfo hashes. If the command set
# $subvol_args_init, also fetch subvolume details and perform the
# requested sanity checks.
@subvol_args = map { vinfo($_, $config) } @subvol_args;
if($subvol_args_init) {
  foreach(@subvol_args) {
    unless(vinfo_init_root($_)) {
      ERROR "Failed to fetch subvolume detail for '$_->{PRINT}'" , @stderr;
      exit 1;
    }
    # path points into a plain directory below a subvolume, not to a subvolume
    if(defined($_->{NODE_SUBDIR})) {
      ERROR "Argument is not a subvolume: $_->{PATH}";
      exit 1;
    }
    if(($subvol_args_init =~ /deny_root_subvol/) && $_->{node}{is_root}) {
      ERROR "Subvolume is btrfs root: $_->{PATH}";
      exit 1;
    }
    # all arguments must live on the filesystem of the first argument
    if(($subvol_args_init =~ /restrict_same_fs/) && (not _is_same_fs_tree($subvol_args[0]->{node}, $_->{node}))) {
      ERROR "Subvolumes are not on the same btrfs filesystem!";
      exit 1;
    }
  }
}
|
|
|
|
|
|
if($action_diff)
{
  #
  # print snapshot diff (btrfs find-new)
  #
  # Lists files in the second subvolume modified after the generation
  # of the first subvolume, with per-file flags, count and size.
  my $src_vol = $subvol_args[0];
  my $target_vol = $subvol_args[1];

  # NOTE: in some cases "cgen" differs from "gen", even for read-only snapshots (observed: gen=cgen+1)
  my $lastgen = $src_vol->{node}{gen} + 1;

  # dump files, sorted and unique
  my $ret = btrfs_subvolume_find_new($target_vol, $lastgen);
  exit 1 unless(ref($ret));

  INFO "Listing changed files for subvolume: $target_vol->{PRINT} (gen=$target_vol->{node}{gen})";
  INFO "Starting at generation after subvolume: $src_vol->{PRINT} (gen=$src_vol->{node}{gen})";
  INFO "Listing files modified within generation range: [$lastgen..$target_vol->{node}{gen}]";
  DEBUG "Newest file generation (transid marker) was: $ret->{transid_marker}";

  my $files = $ret->{files};
  my $total_len = 0;
  my @data;
  foreach my $name (sort keys %$files) {
    my $finfo = $files->{$name};
    $total_len += $finfo->{len};
    push @data, {
      # flags column: "+" new file, "c" compressed extents, "i" inline extents
      flags => ($finfo->{new} ? '+' : '.') .
               ($finfo->{flags}->{COMPRESS} ? 'c' : '.') .
               ($finfo->{flags}->{INLINE} ? 'i' : '.'),
      count => scalar(keys(%{$finfo->{gen}})),   # number of distinct generations touching the file
      size => print_size($finfo->{len}),
      file => $name,
    };
  }

  my $raw = ($output_format && $output_format eq "raw");
  print_formatted("diff", \@data, paragraph => 1);
  # total size footer is suppressed in raw output mode
  print "Total size: " . print_size($total_len) . "\n" unless($raw);
  exit 0;
}
|
|
|
|
|
|
if($action_extents)
{
  #
  # print extents diff (filefrag)
  #
  # Computes physical extent maps for the given subvolumes and prints
  # total / exclusive / union sizes via set operations on the maps.

  # check system requirements: pick an extent map implementation
  my $extentmap_fn;
  if($dryrun) {
    $extentmap_fn = sub {
      INFO("Fetching extent information (dryrun) for: $_[0]->{PRINT}");
      return undef;
    };
  }
  elsif(eval_quiet { require IO::AIO; }) {
    # this is slightly faster (multithreaded) than filefrag
    $extentmap_fn=\&aio_extentmap;
  }
  elsif(check_exe("filefrag")) {
    INFO "IO::AIO module not present, falling back to 'filefrag' (slower)";
    $extentmap_fn=\&filefrag_extentmap;
  }
  else {
    ERROR 'Please install either "IO::AIO" perl module or "filefrag" (from e2fsprogs package)';
    exit 1;
  }
  INFO "Extent map caching disabled (consider setting \"cache_dir\" configuration option)" unless(config_key($config, 'cache_dir'));

  # resolve related subvolumes
  my @resolved_vol;
  if($extents_related) {
    # add all related subvolumes
    foreach my $svol (@subvol_args) {
      my $svol_gen = $svol->{node}{readonly} ? $svol->{node}{cgen} : $svol->{node}{gen};
      my @related = map({ vinfo_resolved_all_mountpoints($_, $svol->{VINFO_MOUNTPOINT}) // () }
                        @{_related_nodes($svol->{node})}); # includes $svol
      push @resolved_vol, @related;
    }
  }
  else {
    @resolved_vol = @subvol_args;
  }

  my @data;
  # print results on ctrl-c
  $SIG{INT} = sub {
    print STDERR "\nERROR: Caught SIGINT, dumping incomplete list:\n";
    print_formatted("extent_diff", \@data);
    exit 1;
  };

  my $do_diff = ($action_extents eq "diff");
  my $prev_data;
  # sort by gen for r/w subvolumes, cgen on readonly subvolumes, as
  # "gen" is increased on readonly subvolume when snapshotted.
  # crawl descending, but display ascending (unshift):
  foreach my $vol (sort { ($b->{node}{readonly} ? $b->{node}{cgen} : $b->{node}{gen}) <=>
                          ($a->{node}{readonly} ? $a->{node}{cgen} : $a->{node}{gen}) }
                   @resolved_vol) {
    # skip subvolumes already processed under another mount point
    if($prev_data && ($prev_data->{_vinfo}{node}{id} == $vol->{node}{id})) {
      INFO "Skipping duplicate of \"$prev_data->{_vinfo}{PRINT}\": $vol->{PRINT}";
      next;
    }

    # read extent map (from cache if available, else via $extentmap_fn)
    if($vol->{EXTENTMAP} = read_extentmap_cache($vol)) {
      INFO "Using cached extent map: $vol->{PRINT}";
    } else {
      $vol->{EXTENTMAP} = $extentmap_fn->($vol);
      write_extentmap_cache($vol);
    }
    next unless($vol->{EXTENTMAP});

    # in diff mode, diff against the previously processed (newer) subvolume
    if($do_diff && $prev_data) {
      my $diff_map = extentmap_diff($prev_data->{_vinfo}{EXTENTMAP}, $vol->{EXTENTMAP});
      $prev_data->{diff} = print_size(extentmap_size($diff_map));
    }
    $prev_data = {
      %{$vol->{node}}, # copy node
      total => print_size(extentmap_size($vol->{EXTENTMAP})),
      subvol => $vol->{PRINT},
      _vinfo => $vol,
    };
    unshift @data, $prev_data;
  }

  # universe = all extent maps successfully read
  my @universe_set = map $_->{_vinfo}{EXTENTMAP}, @data;
  unless(scalar(@universe_set)) {
    ERROR "No extent map data, exiting";
    exit -1;
  }

  my @summary;
  INFO "Calculating union of " . scalar(@data) . " subvolumes";
  push @summary, {
    a => "Union (" . scalar(@data) . " subvolumes):",
    b => print_size(extentmap_size(extentmap_merge(@universe_set)))
  };

  # exclusive size of a subvolume = its extents minus the union of all others
  INFO "Calculating set-exclusive size for " . scalar(@data) . " subvolumes";
  foreach my $d (@data) {
    my $vol = $d->{_vinfo};
    DEBUG "Calculating exclusive for: $vol->{PRINT}";
    my @others = grep { $_ != $vol->{EXTENTMAP} } @universe_set;
    $d->{exclusive} = print_size(extentmap_size(extentmap_diff($vol->{EXTENTMAP}, extentmap_merge(@others)))),
  }

  if(scalar(@filter_vf)) {
    # "exclusive" keyword given: split subvolumes into set X (matching the
    # filter args) and set A (the rest), and print the size of (X \ A)
    INFO "Calculating set difference (X \\ A)";
    my @excl;
    my @others;
    foreach(@data) {
      if(vinfo_match(\@filter_vf, $_->{_vinfo})) {
        $_->{set} = "X";
        push @excl, $_->{_vinfo}{EXTENTMAP};
      } else {
        $_->{set} = "A";
        push @others, $_->{_vinfo}{EXTENTMAP};
      }
    }
    push @summary, {
      a => "Exclusive data ( X \\ A ):",
      b => print_size(extentmap_size(extentmap_diff(extentmap_merge(@excl), extentmap_merge(@others)))),
    };
  }
  # list mode sorts alphabetically; diff mode keeps generation order
  unless($do_diff) {
    @data = sort { $a->{subvol} cmp $b->{subvol} } @data;
  }

  INFO "Printing extents map set difference: (extents \\ extents-on-prev-line)" if $do_diff;
  print_formatted("extent_diff", \@data, paragraph => 1);
  print_formatted({ table => [ qw( a b ) ], RALIGN => { b=>1 } },
                  \@summary, output_format => "table", no_header => 1);
  exit 0;
}
|
|
|
|
|
|
if($action_ls)
{
  #
  # print accessible subvolumes for local path
  #
  my $exit_status = 0;
  my %data_uniq;  # keyed by PRINT path: deduplicates subvolumes reachable via multiple arguments
  foreach my $root_vol (@subvol_args) {
    my ($root_path, $mountpoint) = vinfo_mountpoint($root_vol);
    unless($mountpoint) {
      ERROR "Failed to read filesystem details for: $root_vol->{PRINT}", @stderr;
      $exit_status = 1;
      next;
    }
    $root_vol = vinfo($root_vol->{URL_PREFIX} . $root_path, $config);
    INFO "Listing subvolumes for directory: $root_vol->{PRINT}";

    # walk the mount tree below $mountpoint (depth-first), btrfs mounts only
    my @search = ( $mountpoint );
    while(my $mnt = shift @search) {
      unshift @search, @{$mnt->{SUBTREE}} if($mnt->{SUBTREE});
      next if($mnt->{fs_type} ne "btrfs");

      my $vol = vinfo($root_vol->{URL_PREFIX} . $mnt->{mount_point}, $config);
      unless(vinfo_init_root($vol)) {
        ERROR "Failed to fetch subvolume detail for: $vol->{PRINT}", @stderr;
        $exit_status = 1;
        next;
      }

      my $subvol_list = vinfo_subvol_list($vol);
      my $count_added = 0;
      # consider the mount point itself plus all subvolumes below it
      foreach my $svol ($vol, @$subvol_list) {
        my $svol_path = $svol->{PATH};
        $svol_path =~ s/^\/\//\//; # sanitize "//" (see vinfo_child)

        # only list subvolumes at or below the requested directory
        next unless($root_path eq "/" || $svol_path =~ /^\Q$root_path\E(\/|\z)/);
        if(_find_mountpoint($mnt, $svol_path) ne $mnt) {
          DEBUG "Subvolume is hidden by another mount point: $svol->{PRINT}";
          next;
        }

        $data_uniq{$svol->{PRINT}} = {
          %{$svol->{node}}, # copy node
          top => $svol->{node}{top_level}, # alias (narrow column)
          mount_point => $svol->{VINFO_MOUNTPOINT}{PATH},
          mount_source => $svol->{node}{TREE_ROOT}{mount_source},
          mount_subvolid => $mnt->{MNTOPS}{subvolid},
          mount_subvol => $mnt->{MNTOPS}{subvol},
          subvolume_path => $svol->{node}{path},
          subvolume_rel_path => $svol->{node}{REL_PATH},
          url => $svol->{URL},
          host => $svol->{HOST},
          path => $svol_path,
          flags => ($svol->{node}{readonly} ? "readonly" : undef),
        };
        $count_added++;
      }
      DEBUG "Listing $count_added/" . (scalar(@$subvol_list) + 1) . " subvolumes for btrfs mount: $vol->{PRINT}";
    }
  }

  # sort by host, then mount point, then path
  my @sorted = sort { (($a->{host} // "") cmp ($b->{host} // "")) ||
                      ($a->{mount_point} cmp $b->{mount_point}) ||
                      ($a->{path} cmp $b->{path})
                    } values %data_uniq;
  $output_format ||= "short";
  print_formatted("fs_list", \@sorted, no_header => !scalar(@sorted));

  exit $exit_status;
}
|
|
|
|
|
|
#
# try exclusive lock if set in config or command-line option
#
# Opens (append mode, creating if needed) and flock()s the lockfile.
# The global bareword filehandle is deliberate: the lock must be held
# until process exit. Skipped entirely in dry-run mode.
$lockfile //= config_key($config, "lockfile");
if(defined($lockfile) && (not $dryrun)) {
  unless(open(LOCKFILE, '>>', $lockfile)) {
    # NOTE: the lockfile is never deleted by design
    ERROR "Failed to open lock file '$lockfile': $!";
    exit 3;
  }
  unless(flock(LOCKFILE, 6)) { # exclusive, non-blocking (LOCK_EX | LOCK_NB)
    ERROR "Failed to take lock (another btrbk instance is running): $lockfile";
    exit 3;
  }
}
|
|
|
|
|
|
if($action_archive)
{
  #
  # archive (clone) tree
  #
  # NOTE: This is intended to work without a config file! The only
  # thing used from the configuration is the SSH and transaction log
  # stuff.
  #
  # FIXME: add command line options for preserve logic

  my $sroot = $subvol_args[0] || die;  # archive source root
  my $droot = $subvol_args[1] || die;  # archive destination root

  unless(vinfo_init_root($sroot)) {
    ERROR "Failed to fetch subvolume detail for '$sroot->{PRINT}'", @stderr;
    exit 1;
  }
  unless($raw_cmdline ? vinfo_init_raw_root($droot) : vinfo_init_root($droot)) {
    ERROR "Failed to fetch " . ($raw_cmdline ? "raw target metadata" : "subvolume detail") . " for '$droot->{PRINT}'", @stderr;
    exit 1;
  }

  $config->{SUBSECTION} = []; # clear configured subsections, we build them dynamically

  # Synthesize one config subsection per distinct (subdir, btrbk snapshot
  # name) pair found below $sroot, walking breadth-first by subtree depth.
  my $cur = $config;
  my %name_uniq;
  foreach my $vol (sort { ($a->{subtree_depth} <=> $b->{subtree_depth}) ||
                          ($a->{SUBVOL_DIR} cmp $b->{SUBVOL_DIR})
                        } @{vinfo_subvol_list($sroot)})
  {
    # only read-only subvolumes created by btrbk are archived
    next unless($vol->{node}{readonly});
    my $snapshot_name = $vol->{node}{BTRBK_BASENAME};
    unless(defined($snapshot_name) && ($vol->{node}{BTRBK_FAMILY} eq "btrbk")) {
      WARN "Skipping subvolume (not a btrbk subvolume): $vol->{PRINT}";
      next;
    }
    my $subdir = $vol->{SUBVOL_DIR} ? "/" . $vol->{SUBVOL_DIR} : "";
    next if($name_uniq{"$subdir/$snapshot_name"});
    $name_uniq{"$subdir/$snapshot_name"} = 1;
    # feed synthetic config lines through the regular config parser
    $cur = parse_config_line($cur, $_->[0], $_->[1]) // die for(
      [ subvolume => $sroot->{URL_PREFIX} . "/dev/null" ],
      [ snapshot_dir => $sroot->{PATH} . $subdir ],
      [ snapshot_name => $snapshot_name ],
      [ target => ($raw_cmdline ? "raw" : "send-receive") . " '" . $droot->{URL} . $subdir . "'" ],
      # [ target_create_dir => "yes" ], # not user-settable yet, see below
    );
    $cur->{target_create_dir} = "yes";

    if($dryrun && !vinfo_realpath(my $dr = vinfo_child($droot, $subdir))) {
      # vinfo_mkdir below does not know about $droot, and thus cannot fake realpath_cache correctly.
      # hackily set cache here for now, while keeping target_create_dir disabled for the user.
      # TODO: implement pinned target root subvolume, and enable target_create_dir as regular option.
      system_mkdir($dr); # required for correct transaction log
      $realpath_cache{$droot->{URL} . $subdir} = $droot->{PATH} . $subdir;
    }
  }
  _config_propagate_target($config);

  # translate archive_exclude globs, add to exclude args
  my $archive_exclude = config_key($config, 'archive_exclude') // [];
  push @exclude_vf, map(vinfo_filter_statement($_, "archive_exclude"), (@$archive_exclude));
}
|
|
|
|
|
|
if($action_cp)
{
  #
  # copy any subvolume, recursively
  #
  # Similar to action "archive", but does not apply scheduling and
  # thus can operate on non-btrbk snapshots.

  # last argument is the destination; attach a synthetic "target" config
  my $droot = pop @subvol_args;
  $droot->{CONFIG} = {
    CONTEXT => "target",
    PARENT => $config,
    target_type => $raw_cmdline ? "raw" : "send-receive",
    url => $droot->{URL},
  };

  # collect the source subvolumes (recursing into subtrees with --recursive)
  my $exit_status = 0;
  my @subvol_src;
  foreach my $svol (@subvol_args) {
    unless(vinfo_init_root($svol)) {
      ERROR "Failed to fetch subvolume detail for '$svol->{PRINT}'", @stderr;
      $exit_status = 1;
      next;
    }
    if($recursive) {
      push @subvol_src, sort {
        ($a->{subtree_depth} <=> $b->{subtree_depth}) ||
        ($a->{SUBVOL_DIR} cmp $b->{SUBVOL_DIR})
      } @{vinfo_subvol_list($svol)};
    } else {
      # non-recursive: source must itself be a read-only subvolume
      unless($svol->{node}{readonly}) {
        ERROR "Subvolume is not read-only: $svol->{PRINT}";
        $exit_status = 1;
      }
      push @subvol_src, $svol;
    }
  }
  exit $exit_status if($exit_status);

  my %svol_uniq;
  foreach my $svol (@subvol_src) {
    # deduplicate sources by URL
    next if($svol_uniq{$svol->{URL}});
    $svol_uniq{$svol->{URL}} = 1;

    unless($svol->{node}{readonly}) {
      # NOTE(review): message says "skipping" but there is no "next" here,
      # so processing continues -- verify intended behavior
      WARN "Subvolume is not read-only, skipping: $svol->{PRINT}";
      $exit_status = 10;
    }

    # create the destination directory, preserving the source subtree layout
    my $dvol = $svol->{SUBVOL_DIR} ? vinfo($droot->{URL} . "/" . $svol->{SUBVOL_DIR}, $droot->{CONFIG}) : $droot;
    unless(my $ret = vinfo_mkdir($dvol)) {
      ERROR "Failed to create directory: $dvol->{PRINT}", @stderr unless(defined($ret));
      $exit_status = 10;
      next;
    }

    unless($raw_cmdline ? vinfo_init_raw_root($dvol) : vinfo_init_root($dvol)) {
      ERROR "Failed to fetch " . ($raw_cmdline ? "raw target metadata" : "subvolume detail") . " for '$dvol->{PRINT}'", @stderr;
      exit 1;
    }

    # skip if a correlated receive target already exists at the destination
    if(my @rt = get_receive_targets($dvol, $svol, exact => 1, warn => 1)) {
      WARN "Correlated target subvolume already exists, skipping: $svol->{PRINT}", map($_->{PRINT}, @rt);
      next;
    }

    # name collision with an unrelated subvolume: refuse to overwrite
    if(my $err_vol = vinfo_subvol($dvol, $svol->{NAME})) {
      WARN "Target subvolume \"$err_vol->{PRINT}\" exists, but is not a receive target of \"$svol->{PRINT}\"";
      WARN "Skipping subvolume copy: $svol->{PRINT}";
      $exit_status = 10;
      next;
    }
    # determine best parent / clone sources for incremental send
    my ($clone_src, $target_parent_node);
    my $parent = get_best_parent(
      $svol, $dvol,
      clone_src => \$clone_src,
      target_parent_node => \$target_parent_node,
    );
    unless(macro_send_receive(
      source => $svol,
      target => $dvol,
      parent => $parent,
      clone_src => $clone_src,
      target_parent_node => $target_parent_node,
    )) {
      $exit_status = 10;
    }
  }

  exit $exit_status;
}
|
|
|
|
|
|
#
# expand subvolume globs (wildcards)
#
# Replace every "subvolume" config subsection containing wildcards with
# one concrete subsection per matching (writable) subvolume on disk.
foreach my $config_vol (config_subsection($config, "volume")) {
  # read-in subvolume list (and expand globs) only if needed
  next unless(grep defined($_->{GLOB_CONTEXT}), @{$config_vol->{SUBSECTION}});
  my @vol_subsection_expanded;
  foreach my $config_subvol (config_subsection($config_vol, "subvolume")) {
    if($config_subvol->{GLOB_CONTEXT}) {
      my ($url_prefix, $globs) = check_url($config_subvol->{url}, accept_wildcards => 1);
      # split off the non-wildcard leading directory ($1) as the glob root
      $globs =~ s/([^\*]*)\///;
      my $sroot_glob = vinfo($url_prefix . $1, $config_subvol);

      INFO "Expanding wildcards: $sroot_glob->{PRINT}/$globs";
      unless(vinfo_init_root($sroot_glob)) {
        WARN "Failed to fetch subvolume detail for: $sroot_glob->{PRINT}", @stderr;
        WARN "No subvolumes found matching: $sroot_glob->{PRINT}/$globs";
        next;
      }

      # support "*some*file*", "*/*"
      # "*" matches any run of non-slash characters
      my $match = join('[^\/]*', map(quotemeta($_), split(/\*+/, $globs, -1)));
      TRACE "translated globs \"$globs\" to regex \"$match\"" if($do_trace);
      my $expand_count = 0;
      foreach my $vol (@{vinfo_subvol_list($sroot_glob, sort => 'path')})
      {
        # globs only expand to writable (source) subvolumes
        if($vol->{node}{readonly}) {
          TRACE "skipping readonly subvolume: $vol->{PRINT}" if($do_trace);
          next;
        }
        unless($vol->{SUBVOL_PATH} =~ /^$match$/) {
          TRACE "skipping non-matching subvolume: $vol->{PRINT}" if($do_trace);
          next;
        }
        unless(defined(check_file($vol->{SUBVOL_PATH}, { relative => 1 }))) {
          WARN "Ambiguous subvolume path \"$vol->{SUBVOL_PATH}\" while expanding \"$globs\", ignoring";
          next;
        }
        INFO "Found source subvolume: $vol->{PRINT}";
        # clone the glob subsection for this concrete subvolume
        my %conf = ( %$config_subvol,
                     url_glob => $config_subvol->{url},
                     url => $vol->{URL},
                     snapshot_name => $vol->{NAME}, # snapshot_name defaults to subvolume name
                   );
        # deep copy of target subsection
        my @subsection_copy = map { { %$_, PARENT => \%conf }; } @{$config_subvol->{SUBSECTION}};
        $conf{SUBSECTION} = \@subsection_copy;
        push @vol_subsection_expanded, \%conf;
        $expand_count += 1;
      }
      unless($expand_count) {
        WARN "No subvolumes found matching: $sroot_glob->{PRINT}/$globs";
      }
    }
    else {
      # non-glob subsection: keep as-is
      push @vol_subsection_expanded, $config_subvol;
    }
  }
  $config_vol->{SUBSECTION} = \@vol_subsection_expanded;
}
TRACE(Data::Dumper->Dump([$config], ["config"])) if($do_trace && $do_dumper);
|
|
|
|
|
|
#
# create vinfo nodes (no readin yet)
#
# Build vinfo objects for every volume / subvolume / snapshot root /
# target declared in the configuration. No filesystem access happens here.
foreach my $config_vol (config_subsection($config, "volume")) {
  my $sroot = $config_vol->{DUMMY} ? { CONFIG => $config_vol, PRINT => "*default*" } : vinfo($config_vol->{url}, $config_vol);
  vinfo_assign_config($sroot);
  foreach my $config_subvol (config_subsection($config_vol, "subvolume")) {
    my $svol = vinfo($config_subvol->{url}, $config_subvol);
    my $snapshot_dir = config_key($svol, "snapshot_dir");
    my $url;
    # Resolve the snapshot root URL: an absolute snapshot_dir is used
    # directly, a relative one is resolved against the volume; by default
    # the volume itself is the snapshot root. Dummy (volume-less) contexts
    # require an absolute snapshot_dir.
    if(!defined($snapshot_dir)) {
      if($config_vol->{DUMMY}) {
        ABORTED($svol, "No snapshot_dir defined for subvolume");
        WARN "Skipping subvolume \"$svol->{PRINT}\": " . ABORTED_TEXT($svol);
      } else {
        $url = $sroot->{URL};
      }
    } elsif($snapshot_dir =~ /^\//) {
      $url = $svol->{URL_PREFIX} . $snapshot_dir;
    } else {
      if($config_vol->{DUMMY}) {
        ABORTED($svol, "Relative snapshot_dir path defined, but no volume context present");
        WARN "Skipping subvolume \"$svol->{PRINT}\": " . ABORTED_TEXT($svol);
      } else {
        $url = $sroot->{URL} . '/' . $snapshot_dir;
      }
    }
    $url //= "/dev/null"; # snaproot cannot be undef, even if ABORTED
    my $snaproot = vinfo($url, $config_subvol);
    vinfo_assign_config($svol, $snaproot);
    foreach my $config_target (@{$config_subvol->{SUBSECTION}}) {
      die unless($config_target->{CONTEXT} eq "target");
      my $droot = vinfo($config_target->{url}, $config_target);
      vinfo_assign_config($droot);
    }
  }
}
|
|
|
|
|
|
#
# filter subvolumes matching command line arguments, handle noauto option
#
if(scalar @filter_vf)
{
  # With filter arguments: mark every volume / subvolume / target that does
  # NOT match a filter as ABORTED (skip_cmdline_filter). A match on an outer
  # section (volume/subvolume) keeps all its inner sections.
  foreach my $sroot (vinfo_subsection($config, 'volume', 1)) {
    my $found_vol = 0;
    if(vinfo_match(\@filter_vf, $sroot, flag_matched => '_matched')) {
      next;
    }
    foreach my $svol (vinfo_subsection($sroot, 'subvolume', 1)) {
      my $found_subvol = 0;
      my $snaproot = vinfo_snapshot_root($svol);
      my $snapshot_name = config_key($svol, "snapshot_name") // die;
      # subvolume matches either directly or via its snapshot location
      if(vinfo_match(\@filter_vf, $svol, flag_matched => '_matched') ||
         vinfo_match(\@filter_vf, vinfo_child($snaproot, $snapshot_name), flag_matched => '_matched'))
      {
        $found_vol = 1;
        next;
      }
      foreach my $droot (vinfo_subsection($svol, 'target', 1)) {
        # target matches either directly or via its backup location
        if(vinfo_match(\@filter_vf, $droot, flag_matched => '_matched') ||
           vinfo_match(\@filter_vf, vinfo_child($droot, $snapshot_name), flag_matched => '_matched'))
        {
          $found_subvol = 1;
          $found_vol = 1;
        }
        else {
          ABORTED($droot, "skip_cmdline_filter", "No match on filter command line argument");
          DEBUG "Skipping target \"$droot->{PRINT}\": " . ABORTED_TEXT($droot);
        }
      }
      unless($found_subvol) {
        ABORTED($svol, "skip_cmdline_filter", "No match on filter command line argument");
        DEBUG "Skipping subvolume \"$svol->{PRINT}\": " . ABORTED_TEXT($svol);
      }
    }
    unless($found_vol) {
      ABORTED($sroot, "skip_cmdline_filter", "No match on filter command line argument");
      DEBUG "Skipping volume \"$sroot->{PRINT}\": " . ABORTED_TEXT($sroot);
    }
  }
  # make sure all args have a match
  my @nomatch = map { $_->{_matched} ? () : $_->{unparsed} } @filter_vf;
  if(@nomatch) {
    foreach(@nomatch) {
      ERROR "Filter argument \"$_\" does not match any volume, subvolume, target or group declaration";
    }
    exit 2;
  }
  $config->{CMDLINE_FILTER_LIST} = [ map { $_->{unparsed} } @filter_vf ];
}
elsif(not $action_config_print)
{
  # no filter_args present, abort "noauto" contexts
  if(config_key($config, "noauto")) {
    WARN "Option \"noauto\" is set in global context, and no filter argument present, exiting";
    exit 0;
  }
  foreach my $sroot (vinfo_subsection($config, 'volume')) {
    if(config_key($sroot, "noauto")) {
      ABORTED($sroot, "skip_noauto", 'option "noauto" is set');
      DEBUG "Skipping volume \"$sroot->{PRINT}\": " . ABORTED_TEXT($sroot);
      next;
    }
    foreach my $svol (vinfo_subsection($sroot, 'subvolume')) {
      if(config_key($svol, "noauto")) {
        ABORTED($svol, "skip_noauto", 'option "noauto" is set');
        DEBUG "Skipping subvolume \"$svol->{PRINT}\": " . ABORTED_TEXT($svol);
        next;
      }
      foreach my $droot (vinfo_subsection($svol, 'target')) {
        if(config_key($droot, "noauto")) {
          ABORTED($droot, "skip_noauto", 'option "noauto" is set');
          DEBUG "Skipping target \"$droot->{PRINT}\": " . ABORTED_TEXT($droot);
        }
      }
    }
  }
}
|
|
|
|
if(scalar @exclude_vf)
{
  # handle --exclude command line option
  # Mark matching volumes / subvolumes / targets as ABORTED; a volume
  # whose subvolumes were all excluded is aborted as well.
  foreach my $sroot (vinfo_subsection($config, 'volume')) {
    if(my $ff = vinfo_match(\@exclude_vf, $sroot)) {

      ABORTED($sroot, "skip_" . $ff->{origin}, "Match on $ff->{reason}");
      DEBUG "Skipping volume \"$sroot->{PRINT}\": " . ABORTED_TEXT($sroot);
      next;
    }
    my $all_svol_aborted = 1;
    foreach my $svol (vinfo_subsection($sroot, 'subvolume')) {
      my $snaproot = vinfo_snapshot_root($svol);
      my $snapshot_name = config_key($svol, "snapshot_name") // die;
      # subvolume matches either directly or via its snapshot location
      if(my $ff = (vinfo_match(\@exclude_vf, $svol) ||
                   vinfo_match(\@exclude_vf, vinfo_child($snaproot, $snapshot_name))))
      {
        ABORTED($svol, "skip_" . $ff->{origin}, "Match on $ff->{reason}");
        DEBUG "Skipping subvolume \"$svol->{PRINT}\": " . ABORTED_TEXT($svol);
        next;
      }
      $all_svol_aborted = 0;
      foreach my $droot (vinfo_subsection($svol, 'target')) {
        # target matches either directly or via its backup location
        if(my $ff = (vinfo_match(\@exclude_vf, $droot) ||
                     vinfo_match(\@exclude_vf, vinfo_child($droot, $snapshot_name))))
        {
          ABORTED($droot, "skip_" . $ff->{origin}, "Match on $ff->{reason}");
          DEBUG "Skipping target \"$droot->{PRINT}\": " . ABORTED_TEXT($droot);
          next;
        }
      }
    }
    if($all_svol_aborted) {
      ABORTED($sroot, "skip_inherit_exclude", "All subvolumes excluded");
      DEBUG "Skipping volume \"$sroot->{PRINT}\": " . ABORTED_TEXT($sroot);
    }
  }
}
|
|
|
|
if($action_usage)
{
  #
  # print filesystem information
  #
  # Prints btrfs filesystem usage data for every source and target
  # filesystem of the configuration, deduplicated per URL and fetched
  # only once per machine/device.
  my @data;
  my %usage_cache;   # per machine+device: usage fetched only once
  my %processed;     # per URL: each volume pushed only once
  my $exit_status = 0;
  my $push_data = sub {
    my ($vol, $type) = @_;
    return if $processed{$vol->{URL}};
    my $mountpoint = vinfo_mountpoint($vol, fs_type => 'btrfs');
    unless($mountpoint) {
      # filesystem not accessible: record failure and skip this entry
      # (fix: the former "return unless($mountpoint)" was redundant here,
      # as $mountpoint is always false inside this branch)
      $exit_status = 1;
      return;
    }
    my $mount_source = $mountpoint->{mount_source};
    my $mid = $vol->{MACHINE_ID} . $mount_source;
    my $usage = btrfs_filesystem_usage(vinfo($vol->{URL_PREFIX} . $mountpoint->{mount_point}, $vol->{CONFIG}));
    $exit_status = 1 unless defined($usage);
    $usage_cache{$mid} //= $usage // {};
    push @data, { %{$usage_cache{$mid}},
                  type => $type,
                  mount_source => $mount_source,
                  vinfo_prefixed_keys("", $vol),
                };
    $processed{$vol->{URL}} = 1;
  };
  foreach my $sroot (vinfo_subsection($config, 'volume')) {
    foreach my $svol (vinfo_subsection($sroot, 'subvolume')) {
      $push_data->($svol, "source");
      foreach my $droot (vinfo_subsection($svol, 'target')) {
        $push_data->($droot, "target");
      }
    }
  }
  @data = sort { $a->{url} cmp $b->{url} } @data;
  print_formatted("usage", \@data);
  exit $exit_status;
}
|
|
|
|
|
|
if($action_config_print)
{
  #
  # print configuration lines, machine readable ("btrbk config print[-all]")
  #
  # Dumps the effective configuration: global keys first, then one
  # "volume" / "subvolume" / "target" section per configured entry,
  # indented with tabs. "print-all" also includes keys at default values.
  #
  my %opts = (all => ($action_config_print eq "print-all"));
  my @out;
  push @out, config_dump_keys($config, %opts);
  my $indent = "";
  foreach my $sroot (vinfo_subsection($config, 'volume', 1)) {
    # DUMMY volume sections (implicit roots) print no "volume" header
    unless($sroot->{CONFIG}{DUMMY}) {
      push @out, "";
      push @out, "volume $sroot->{URL}";
      $indent .= "\t";
      push @out, config_dump_keys($sroot, prefix => $indent, %opts);
    }
    foreach my $svol (vinfo_subsection($sroot, 'subvolume', 1)) {
      push @out, "";
      # show the original glob as a comment if the subvolume was expanded from one
      push @out, "${indent}# subvolume $svol->{CONFIG}->{url_glob}" if(defined($svol->{CONFIG}->{url_glob}));
      push @out, "${indent}subvolume $svol->{URL}";
      $indent .= "\t";
      push @out, config_dump_keys($svol, prefix => $indent, %opts);
      foreach my $droot (vinfo_subsection($svol, 'target', 1)) {
        push @out, "";
        push @out, "${indent}target $droot->{CONFIG}->{target_type} $droot->{URL}";
        push @out, config_dump_keys($droot, prefix => "\t$indent", %opts);
      }
      # pop one level of indent added for this subvolume
      $indent =~ s/\t//;
    }
    # reset indent for the next volume section
    $indent = "";
  }

  print_header(title => "Configuration Dump",
               config => $config,
               time => $start_time,
              );

  print join("\n", @out) . "\n";
  exit exit_status($config);
}
|
|
|
|
|
|
if($action_list)
{
  # data rows for the four "btrbk list" flavors
  my @vol_data;      # one row per volume section
  my @subvol_data;   # one row per subvolume (with snapshot config)
  my @target_data;   # one row per unique target
  my @mixed_data;    # subvolume+target rows combined ("list config")
  my %target_uniq;   # dedup targets by URL for "list target"

  #
  # print configuration lines, machine readable ("btrbk list ...")
  #
  foreach my $sroot (vinfo_subsection($config, 'volume')) {
    my $volh = { vinfo_prefixed_keys("volume", $sroot) };
    push @vol_data, $volh;

    foreach my $svol (vinfo_subsection($sroot, 'subvolume')) {
      my $snaproot = vinfo_snapshot_root($svol);
      # subvolume row inherits the volume keys
      my $subvolh = { %$volh,
                      vinfo_prefixed_keys("source", $svol),
                      snapshot_path => $snaproot->{PATH},
                      snapshot_name => config_key($svol, "snapshot_name"),
                      snapshot_preserve => format_preserve_matrix(config_preserve_hash($svol, "snapshot")),
                    };
      push @subvol_data, $subvolh;

      my $found = 0;
      foreach my $droot (vinfo_subsection($svol, 'target')) {
        # target row inherits volume and subvolume keys
        my $targeth = { %$subvolh,
                        vinfo_prefixed_keys("target", $droot),
                        target_preserve => format_preserve_matrix(config_preserve_hash($droot, "target")),
                        target_type => $droot->{CONFIG}{target_type}, # "send-receive" or "raw"
                      };
        if($action_list eq "target") {
          # "list target": print each target URL only once
          next if($target_uniq{$droot->{URL}});
          $target_uniq{$droot->{URL}} = 1;
        }
        push @target_data, $targeth;
        push @mixed_data, $targeth;
        $found = 1;
      }
      # make sure the subvol is always printed (even if no targets around)
      push @mixed_data, $subvolh unless($found);
    }
  }
  if($action_list eq "volume") {
    print_formatted("config_volume", \@vol_data);
  }
  elsif($action_list eq "source") {
    print_formatted("config_source", \@subvol_data);
  }
  elsif($action_list eq "target") {
    print_formatted("config_target", \@target_data);
  }
  elsif($action_list eq "config") {
    print_formatted("config", \@mixed_data);
  }
  else {
    die "unknown action_list=$action_list";
  }
  exit exit_status($config);
}
|
|
|
|
|
|
#
|
|
# fill vinfo hash, basic checks on configuration
|
|
#
|
|
|
|
# read volume btrfs tree, and make sure subvolume exist
|
|
# read volume btrfs tree, and make sure subvolume exist
#
# For every configured source subvolume: read the btrfs subvolume tree
# of its snapshot root and of the subvolume itself, and abort (skip)
# sections that cannot be snapshotted: missing UUID, readonly source,
# received subvolumes, or snapshot_dir on a different filesystem.
foreach my $sroot (vinfo_subsection($config, 'volume')) {
  DEBUG "Initializing volume section: $sroot->{PRINT}";
  unless(scalar(vinfo_subsection($sroot, 'subvolume', 1))) {
    WARN "No subvolume configured for \"volume $sroot->{URL}\"";
  }
  foreach my $svol (vinfo_subsection($sroot, 'subvolume')) {
    my $snaproot = vinfo_snapshot_root($svol);
    DEBUG "Initializing snapshot root: $snaproot->{PRINT}";
    unless(vinfo_init_root($snaproot)) {
      ABORTED($svol, "Failed to fetch subvolume detail for snapshot_dir");
      WARN "Skipping subvolume \"$svol->{PRINT}\": " . ABORTED_TEXT($svol), @stderr;
      next;
    }
    # source subvolume checks below are only relevant when snapshots
    # will be created (not for resume-only or archive runs)
    next if($skip_snapshots || $action_archive);

    DEBUG "Initializing subvolume section: $svol->{PRINT}";
    unless(vinfo_init_root($svol)) {
      ABORTED($svol, "Failed to fetch subvolume detail");
      WARN "Skipping subvolume \"$svol->{PRINT}\": " . ABORTED_TEXT($svol), @stderr;
      next;
    }
    if((not $svol->{node}{uuid}) || ($svol->{node}{uuid} eq '-')) {
      ABORTED($svol, "subvolume has no UUID");
      ERROR "Skipping subvolume \"$svol->{PRINT}\": " . ABORTED_TEXT($svol);
      next;
    }
    if($svol->{node}{readonly}) {
      # readonly subvolumes are backup targets / snapshots, not sources
      ABORTED($svol, "subvolume is readonly");
      WARN "Skipping subvolume \"$svol->{PRINT}\": " . ABORTED_TEXT($svol);
      next;
    }
    if($svol->{node}{received_uuid} ne '-') {
      # snapshotting a received subvolume would break send/receive relations
      ABORTED($svol, "\"Received UUID\" is set");
      WARN "Skipping subvolume \"$svol->{PRINT}\": " . ABORTED_TEXT($svol);
      next;
    }
    unless(_is_same_fs_tree($snaproot->{node}, $svol->{node})) {
      # btrfs snapshots only work within a single filesystem
      ABORTED($svol, "Snapshot path is not on same filesystem");
      WARN "Skipping subvolume \"$svol->{PRINT}\": " . ABORTED_TEXT($svol);
      next;
    }
  }
}
|
|
|
|
# read target btrfs tree, create target directories
|
|
# read target btrfs tree, create target directories
if($action_run && $skip_backups && $preserve_snapshots && $preserve_backups) {
  # if running "btrbk snapshot --preserve", there is no need to
  # initialize targets, and we don't want to fail on missing targets.
  DEBUG "Skipping target tree readin (preserving all snapshots and backups)";
}
else {
  foreach my $sroot (vinfo_subsection($config, 'volume')) {
    foreach my $svol (vinfo_subsection($sroot, 'subvolume')) {
      foreach my $droot (vinfo_subsection($svol, 'target')) {
        DEBUG "Initializing target section: $droot->{PRINT}";
        # target_type is validated at config parse time; die on internal error
        my $target_type = $droot->{CONFIG}->{target_type} || die;

        if($config_override{FAILSAFE_PRESERVE}) {
          # global failsafe: skip all targets (set on dangerous overrides)
          ABORTED($droot, $config_override{FAILSAFE_PRESERVE});
          WARN "Skipping target \"$droot->{PRINT}\": " . ABORTED_TEXT($droot);
          next;
        }

        if(config_key($droot, "target_create_dir")) {
          unless(my $ret = vinfo_mkdir($droot)) {
            ABORTED($droot, "Failed to create directory: $droot->{PRINT}/");
            WARN "Skipping target \"$droot->{PRINT}\": " . ABORTED_TEXT($droot), @stderr;
            next;
          }
        }

        if($target_type eq "send-receive")
        {
          # btrfs target: read the subvolume tree below the target path
          unless(vinfo_init_root($droot)) {
            ABORTED($droot, "Failed to fetch subvolume detail");
            WARN "Skipping target \"$droot->{PRINT}\": " . ABORTED_TEXT($droot), @stderr;
            next;
          }
        }
        elsif($target_type eq "raw")
        {
          # raw target: read btrbk sidecar metadata instead of a btrfs tree
          unless(vinfo_init_raw_root($droot)) {
            ABORTED($droot, "Failed to fetch raw target metadata");
            WARN "Skipping target \"$droot->{PRINT}\": " . ABORTED_TEXT($droot), @stderr;
            next;
          }
        }
      }
    }
  }
}
|
|
|
|
# check for duplicate snapshot locations
|
|
# check for duplicate snapshot locations
#
# Two distinct source subvolumes must never produce snapshots (or backups)
# under the same "<dir>/<snapshot_name>.*" location: later scheduling and
# pruning could not tell them apart. Register each location in a check
# hash on the corresponding filesystem node, and bail out hard on clash.
foreach my $sroot (vinfo_subsection($config, 'volume')) {
  foreach my $svol (vinfo_subsection($sroot, 'subvolume')) {
    my $snapshot_basename = config_key($svol, "snapshot_name") // die;

    # register snapshot location, fail on collision
    if(config_key($svol, "snapshot_create")) {
      my $snaproot = vinfo_snapshot_root($svol);
      my $subdir = $snaproot->{NODE_SUBDIR};
      my $snap_key = (defined($subdir) ? "$subdir/" : "") . $snapshot_basename;
      my $snap_registry = $snaproot->{node}->{_SNAPSHOT_CHECK} //= {};
      if(my $prev = $snap_registry->{$snap_key}) {
        ERROR "Subvolume \"$prev\" and \"$svol->{PRINT}\" will create same snapshot: $snaproot->{PRINT}/${snapshot_basename}.*";
        ERROR "Please fix \"snapshot_name\" configuration options!";
        exit 1;
      }
      $snap_registry->{$snap_key} = $svol->{PRINT};
    }

    # register each target location, fail on collision
    foreach my $droot (vinfo_subsection($svol, 'target')) {
      my $tgt_subdir = $droot->{NODE_SUBDIR};
      my $tgt_key = (defined($tgt_subdir) ? "$tgt_subdir/" : "") . $snapshot_basename;
      my $tgt_registry = $droot->{node}->{_BACKUP_CHECK} //= {};
      if(my $prev = $tgt_registry->{$tgt_key}) {
        ERROR "Subvolume \"$prev\" and \"$svol->{PRINT}\" will create same backup target: $droot->{PRINT}/${snapshot_basename}.*";
        ERROR "Please fix \"snapshot_name\" or \"target\" configuration options!";
        exit 1;
      }
      $tgt_registry->{$tgt_key} = $svol->{PRINT};
    }
  }
}
|
|
|
|
|
|
if($action_origin)
{
  #
  # print origin information ("btrbk origin <subvolume>")
  #
  # Walks the parent/received-from relations of the given subvolume and
  # prints them as a tree (custom format) or via the formatted output
  # backends. Exits 0 unconditionally.
  #
  my $vol = $subvol_args[0] || die;
  my $tree_rows = [];
  _origin_tree("", $vol->{node}, $tree_rows);

  $output_format ||= "custom";
  if($output_format ne "custom") {
    # machine-readable formats go through the generic formatter
    print_formatted('origin_tree', $tree_rows);
    exit 0;
  }

  # human-readable tree with a legend
  print_header(title => "Origin Tree",
               config => $config,
               time => $start_time,
               legend => [
                 "^-- : parent subvolume",
                 "newline : received-from relationship with subvolume (identical content)",
               ]
              );
  my @rendered = map { $_->{tree} } @$tree_rows;
  print join("\n", @rendered) . "\n";
  exit 0;
}
|
|
|
|
|
|
if($action_resolve)
{
  #
  # resolve and print snapshot/backup relations
  # ("btrbk list snapshots|backups|all|latest" and "btrbk stats")
  #
  # For every source subvolume, collect its btrbk snapshots and the
  # backups on each target, correlate them by uuid/received_uuid, and
  # print rows according to $action_resolve. "stats" prints per-target
  # counters plus totals.
  #
  my @data;
  # global counters (summed over all targets) for "stats"
  my %stats = ( snapshots => 0, backups => 0, correlated => 0, incomplete => 0, orphaned => 0 );
  foreach my $sroot (vinfo_subsection($config, 'volume')) {
    foreach my $svol (vinfo_subsection($sroot, 'subvolume')) {
      my $snaproot = vinfo_snapshot_root($svol);
      my $snapshot_name = config_key($svol, "snapshot_name") // die;
      my $snapshots = get_btrbk_snapshot_siblings($snaproot, name => $snapshot_name, readonly => 1, sort => 1);
      # keys common to every row of this subvolume
      my %svol_data = (
        vinfo_prefixed_keys("source", $svol),
        snapshot_name => $snapshot_name,
      );
      # one row per snapshot; "up-to-date" if snapshot creation generation
      # matches the current source generation
      my @sdata = map +{
        %svol_data,
        type => "snapshot",
        status => ($_->{node}{cgen} == $svol->{node}{gen}) ? "up-to-date" : "",
        vinfo_prefixed_keys("snapshot", $_),
        _vinfo => $_,
        _btrbk_date => $_->{node}{BTRBK_DATE},
      }, @$snapshots;

      my %svol_stats_data = (
        %svol_data,
        snapshot_subvolume => "$snaproot->{PATH}/$snapshot_name.*",
        snapshot_status => (grep { $_->{status} eq "up-to-date" } @sdata) ? "up-to-date" : "",
        snapshots => scalar(@sdata),
      );
      $stats{snapshots} += scalar(@sdata);

      # @bdata: backup rows; @ldata: latest backup per target; @stdata: stats rows
      my (@bdata, @ldata, @stdata);
      foreach my $droot (vinfo_subsection($svol, 'target')) {
        my %dstats = ( backups => 0, correlated => 0, orphaned => 0, incomplete => 0, uptodate => 0 );
        my $latest_backup;
        foreach my $target_vol (@{get_btrbk_snapshot_siblings($droot, name => $snapshot_name, sort => 1)}) {
          my $target_data = {
            %svol_data,
            type => "backup",
            target_type => $target_vol->{CONFIG}{target_type}, # "send-receive" or "raw"
            vinfo_prefixed_keys("target", $target_vol),
            _btrbk_date => $target_vol->{node}{BTRBK_DATE},
          };

          # incomplete received (garbled) subvolumes have no received_uuid (as of btrfs-progs v4.3.1).
          # a subvolume in droot matching our naming is considered incomplete if received_uuid is not set!
          if($target_vol->{node}{received_uuid} eq '-') {
            $dstats{incomplete}++;
            $target_data->{status} = "incomplete";
            push @bdata, $target_data;
            next;
          }

          # try to correlate this backup with one of the source snapshots;
          # on match, merge the snapshot row into the backup row
          foreach (@sdata) {
            if(_is_correlated($_->{_vinfo}{node}, $target_vol->{node})) {
              $target_data = {
                %$_,
                %$target_data,
                type => "snapshot,backup",
                _correlated => 1,
              };
              $_->{_correlated} = 1;
              last;
            }
          }
          push @bdata, $target_data;
          # track most recent backup (by btrbk timestamp) for "latest"
          $latest_backup = $target_data if(!defined($latest_backup) || (cmp_date($latest_backup->{_btrbk_date}, $target_data->{_btrbk_date}) < 0));

          $dstats{uptodate} ||= ($target_data->{status} // "") eq "up-to-date";
          $dstats{backups}++;
          if($target_data->{_correlated}) { $dstats{correlated}++; } else { $dstats{orphaned}++; }
        }
        push @ldata, $latest_backup;

        push @stdata, {
          %svol_stats_data,
          %dstats,
          vinfo_prefixed_keys("target", $droot),
          target_subvolume => "$droot->{PATH}/$snapshot_name.*",
          backup_status => $dstats{uptodate} ? "up-to-date" : "",
        };
        # accumulate per-target counters into global totals
        $stats{$_} += $dstats{$_} foreach(qw(backups correlated incomplete orphaned));
      }

      # select which rows to output, depending on the subcommand
      if($action_resolve eq "snapshots") {
        push @data, @sdata;
      } elsif($action_resolve eq "backups") {
        push @data, @bdata;
      } elsif($action_resolve eq "all") {
        # all backups plus snapshots without a correlated backup, by date
        push @data, sort { cmp_date($a->{_btrbk_date}, $b->{_btrbk_date}) } (@bdata, grep { !$_->{_correlated} } @sdata);
      } elsif($action_resolve eq "latest") {
        my $latest_snapshot = (sort { cmp_date($b->{_btrbk_date}, $a->{_btrbk_date}) } (@sdata, @bdata))[0];
        push @data, @ldata;
        push @data, $latest_snapshot if($latest_snapshot && !$latest_snapshot->{_correlated});
      } elsif($action_resolve eq "stats") {
        # always emit at least one row per subvolume, even without targets
        @stdata = ( \%svol_stats_data ) unless(@stdata);
        push @data, @stdata;
      }
    }
  }

  if($action_resolve eq "stats") {
    # per-target tables followed by a grand total (with cmdline filters noted)
    my $filter = $config->{CMDLINE_FILTER_LIST} ? " (" . join(", ", @{$config->{CMDLINE_FILTER_LIST}}) . ")" : "";
    my @backup_total = map { $stats{$_} ? "$stats{$_} $_" : () } qw( correlated incomplete );
    my $bflags = @backup_total ? "(" . join(', ', @backup_total) . ")" : undef;

    print_formatted("stats", \@data, paragraph => 1);
    print "Total${filter}:\n";
    print_formatted({ table => [ qw( a b -c ) ], RALIGN => { a=>1 } },
                    [ { a => $stats{snapshots}, b => "snapshots" },
                      { a => $stats{backups}, b => "backups", c => $bflags } ],
                    output_format => "table", no_header => 1, empty_cell_char => "");
  }
  elsif($action_resolve eq "snapshots") {
    print_formatted("snapshots", \@data);
  }
  elsif($action_resolve eq "backups") {
    print_formatted("backups", \@data);
  }
  elsif($action_resolve eq "latest") {
    print_formatted("latest", \@data);
  }
  else {
    print_formatted("resolved", \@data);
  }

  exit exit_status($config);
}
|
|
|
|
|
|
if($action_clean)
{
  #
  # identify and delete incomplete backups ("btrbk clean")
  #
  # An incomplete (garbled) backup is a subvolume at a target location
  # matching the btrbk naming scheme but without received_uuid set.
  # Delete all of them, log to the transaction log, then print a summary.
  #
  init_transaction_log(config_key($config, "transaction_log"),
                       config_key($config, "transaction_syslog"));

  my @out;   # summary lines for the "custom" output format
  foreach my $sroot (vinfo_subsection($config, 'volume')) {
    foreach my $svol (vinfo_subsection($sroot, 'subvolume')) {
      my $snapshot_name = config_key($svol, "snapshot_name") // die;
      foreach my $droot (vinfo_subsection($svol, 'target')) {
        INFO "Cleaning incomplete backups in: $droot->{PRINT}/$snapshot_name.*";
        push @out, "$droot->{PRINT}/$snapshot_name.*";

        # incomplete received (garbled) subvolumes are not readonly and have no received_uuid (as of btrfs-progs v4.3.1).
        # a subvolume in droot matching our naming is considered incomplete if received_uuid is not set!
        my @delete = grep $_->{node}{received_uuid} eq '-', @{get_btrbk_snapshot_siblings($droot, name => $snapshot_name, sort => 1)};
        my @delete_success;
        foreach my $target_vol (@delete) {
          DEBUG "Found incomplete target subvolume: $target_vol->{PRINT}";
          if(btrfs_subvolume_delete($target_vol, commit => config_key($droot, "btrfs_commit_delete"), type => "delete_garbled")) {
            push(@delete_success, $target_vol);
          }
        }

        INFO "Deleted " . scalar(@delete_success) . " incomplete backups in: $droot->{PRINT}/$snapshot_name.*";
        $droot->{SUBVOL_DELETED} //= [];
        push @{$droot->{SUBVOL_DELETED}}, @delete_success;
        push @out, map("--- $_->{PRINT}", @delete_success);

        # at least one delete failed: mark target aborted (affects exit status)
        if(scalar(@delete_success) != scalar(@delete)) {
          ABORTED($droot, "Failed to delete incomplete target subvolume");
          push @out, "!!! Target \"$droot->{PRINT}\" aborted: " . ABORTED_TEXT($droot);
        }
        push(@out, "<no_action>") unless(scalar(@delete));
        push(@out, "");
      }
    }
  }

  my $exit_status = exit_status($config);
  my $time_elapsed = time - $start_time;
  INFO "Completed within: ${time_elapsed}s (" . localtime(time) . ")";
  action("finished",
         status => $exit_status ? "partial" : "success",
         duration => $time_elapsed,
         message => $exit_status ? "At least one delete operation failed" : undef,
        );
  close_transaction_log();

  #
  # print summary
  #
  unless($quiet)
  {
    $output_format ||= "custom";
    if($output_format eq "custom")
    {
      print_header(title => "Cleanup Summary",
                   config => $config,
                   time => $start_time,
                   legend => [
                     "--- deleted subvolume (incomplete backup)",
                   ],
                  );
      print join("\n", @out);
      print_footer($config, $exit_status);
    }
    else
    {
      # print action log (without transaction start messages)
      my @data = grep { $_->{status} !~ /starting$/ } @transaction_log;
      print_formatted("transaction", \@data, title => "TRANSACTION LOG");
    }
  }

  exit $exit_status;
}
|
|
|
|
|
|
if($action_run || $action_archive)
{
  #
  # main backup driver ("btrbk run/snapshot/resume/prune/archive")
  #
  # Phases: (1) create snapshots, (2) create missing backups on all
  # targets, (3) prune backups and snapshots by the preserve policy,
  # (4) log results and print the schedule/summary.
  #
  init_transaction_log(config_key($config, "transaction_log"),
                       config_key($config, "transaction_syslog"));

  if($skip_snapshots || $action_archive) {
    INFO "Skipping snapshot creation (btrbk resume)" unless($action_archive);
  }
  else
  {
    #
    # create snapshots
    #
    foreach my $sroot (vinfo_subsection($config, 'volume')) {
      foreach my $svol (vinfo_subsection($sroot, 'subvolume')) {
        my $snaproot = vinfo_snapshot_root($svol);
        my $snapshot_basename = config_key($svol, "snapshot_name") // die;
        DEBUG "Evaluating snapshot creation for: $svol->{PRINT}";

        # check if we need to create a snapshot
        # (snapshot_create: no / always / onchange / ondemand)
        my $snapshot_create = config_key($svol, "snapshot_create");
        if(not $snapshot_create) {
          DEBUG "Snapshot creation disabled (snapshot_create=no)";
          next;
        }
        elsif($snapshot_create eq "always") {
          DEBUG "Snapshot creation enabled (snapshot_create=always)";
        }
        elsif($snapshot_create eq "onchange") {
          # check if latest (btrbk only!) snapshot is up-to-date with source subvolume (by generation)
          my $latest = get_latest_related_snapshot($snaproot, $svol, $snapshot_basename);
          if($latest) {
            if($latest->{node}{cgen} == $svol->{node}{gen}) {
              INFO "Snapshot creation skipped: snapshot_create=onchange, snapshot is up-to-date: $latest->{PRINT}";
              $svol->{SNAPSHOT_UP_TO_DATE} = $latest;
              next;
            }
            DEBUG "Snapshot creation enabled: snapshot_create=onchange, gen=$svol->{node}{gen} > snapshot_cgen=$latest->{node}{cgen}";
          }
          else {
            DEBUG "Snapshot creation enabled: snapshot_create=onchange, no snapshots found";
          }
        }
        elsif($snapshot_create eq "ondemand") {
          # check if at least one target is present
          if(scalar vinfo_subsection($svol, 'target')) {
            DEBUG "Snapshot creation enabled (snapshot_create=ondemand): at least one target is present";
          }
          else {
            INFO "Snapshot creation skipped: snapshot_create=ondemand, and no target is present for: $svol->{PRINT}";
            next;
          }
        }
        else {
          die "illegal value for snapshot_create configuration option: $snapshot_create";
        }

        # find unique snapshot name: scan snapshot root and all reachable
        # targets for "<name>.<timestamp>[_N]", then pick the next free N
        my $timestamp = timestamp(\@tm_now, config_key($svol, "timestamp_format"));
        my @unconfirmed_target_name;
        my @lookup = map { $_->{SUBVOL_PATH} } @{vinfo_subvol_list($snaproot)};
        foreach my $droot (vinfo_subsection($svol, 'target', 1)) {
          if(IS_ABORTED($droot)) {
            # aborted targets cannot be scanned; remember them for the note below
            push(@unconfirmed_target_name, $droot);
            next;
          }
          push(@lookup, map { $_->{SUBVOL_PATH} } @{vinfo_subvol_list($droot)});
        }
        @lookup = grep /^\Q$snapshot_basename.$timestamp\E(_[0-9]+)?$/ ,@lookup;
        TRACE "Present snapshot names for \"$svol->{PRINT}\": " . join(', ', @lookup) if($do_trace);
        @lookup = map { /_([0-9]+)$/ ? $1 : 0 } @lookup;
        @lookup = sort { $b <=> $a } @lookup;
        my $postfix_counter = $lookup[0] // -1;
        $postfix_counter++;
        my $snapshot_name = $snapshot_basename . '.' . $timestamp . ($postfix_counter ? "_$postfix_counter" : "");

        if(@unconfirmed_target_name) {
          INFO "Assuming non-present subvolume \"$snapshot_name\" in skipped targets: " . join(", ", map { "\"$_->{PRINT}\"" } @unconfirmed_target_name);
        }

        # finally create the snapshot
        INFO "Creating subvolume snapshot for: $svol->{PRINT}";
        my $snapshot = vinfo_child($snaproot, "$snapshot_name");
        if(btrfs_subvolume_snapshot($svol, $snapshot))
        {
          # register the new snapshot in the in-memory tree (no re-read),
          # force-preserved so pruning below never deletes it
          vinfo_inject_child($snaproot, $snapshot, {
            parent_uuid => $svol->{node}{uuid},
            received_uuid => '-',
            readonly => 1,
            FORCE_PRESERVE => 'preserve forced: created just now',
          });
          $svol->{SNAPSHOT_CREATED} = $snapshot;
        }
        else {
          ABORTED($svol, "Failed to create snapshot: $svol->{PRINT} -> $snapshot->{PRINT}");
          WARN "Skipping subvolume section: " . ABORTED_TEXT($svol);
        }
      }
    }
  }

  #
  # create backups
  #
  my $schedule_results = [];   # collected schedule() results for the summary
  if($skip_backups) {
    INFO "Skipping backup creation (btrbk snapshot)";
  }
  else {
    foreach my $sroot (vinfo_subsection($config, 'volume')) {
      foreach my $svol (vinfo_subsection($sroot, 'subvolume')) {
        my $snaproot = vinfo_snapshot_root($svol);
        my $snapshot_basename = config_key($svol, "snapshot_name") // die;
        my $snapshots = get_btrbk_snapshot_siblings($snaproot, name => $snapshot_basename, readonly => 1, sort => 1);
        foreach my $droot (vinfo_subsection($svol, 'target')) {
          INFO "Checking for missing backups of \"$snaproot->{PRINT}/${snapshot_basename}.*\" in: $droot->{PRINT}/";
          my @schedule;          # backup candidates (snapshots without target)
          my $resume_total = 0;
          my $resume_success = 0;

          # snapshots whose receive target exists, but at an unexpected path
          my $unexpected_only = [];
          foreach my $snapshot (@$snapshots)
          {
            if(get_receive_targets($droot, $snapshot, exact => 1, warn => 1, ret_unexpected_only => $unexpected_only)) {
              DEBUG "Found correlated target of: $snapshot->{PRINT}";
              next;
            }
            if(my $ff = vinfo_match(\@exclude_vf, $snapshot)) {
              INFO "Skipping backup candidate \"$snapshot->{PRINT}\": Match on $ff->{reason}";
              next;
            }

            DEBUG "Adding backup candidate: $snapshot->{PRINT}";
            push(@schedule, { value => $snapshot,
                              btrbk_date => $snapshot->{node}{BTRBK_DATE},
                              # not enforcing resuming of latest snapshot anymore (since v0.23.0)
                              # preserve => $snapshot->{node}{FORCE_PRESERVE},
                            });
          }

          # incremental=strict: refuse to duplicate data when receive
          # targets exist at foreign locations
          if(scalar @$unexpected_only && ((config_key($droot, "incremental") // "") eq "strict")) {
            ABORTED($droot, "Receive targets of backup candidates found at a foreign location, and option \"incremental\" is set to \"strict\"");
            WARN "Skipping backups of \"$snaproot->{PRINT}/${snapshot_basename}.*\": " . ABORTED_TEXT($droot),
                 "It is not possible to backup (send-receive) those subvolumes without duplicating some data.",
                 "Please check your target setup, or fix manually by running" . ($droot->{URL_PREFIX} ? " (on $droot->{URL_PREFIX}):" : ":"),
                 "`btrfs subvolume snapshot -r <found> <target>`",
                 map { "target: $droot->{PATH}/$_->{src_vol}{NAME}, found: " . _fs_path($_->{target_node}) } @$unexpected_only;
            next;
          }

          if(scalar @schedule)
          {
            DEBUG "Checking schedule for backup candidates";
            my $last_dvol_date; # oldest present archive (by btrbk_date)

            # Add all present backups as informative_only: these are needed for correct results of schedule().
            # Note that we don't filter readonly here, in order to also get garbled targets.
            foreach my $dvol (@{get_btrbk_snapshot_siblings($droot, name => $snapshot_basename)}) {
              my $btrbk_date = $dvol->{node}{BTRBK_DATE};
              push(@schedule, { informative_only => 1,
                                value => $dvol,
                                btrbk_date => $dvol->{node}{BTRBK_DATE},
                              });
              $last_dvol_date = $btrbk_date if(!defined($last_dvol_date) || cmp_date($btrbk_date, $last_dvol_date) > 0);
            }
            my $schedule_results_mixed = [];
            # ask the scheduler which candidates should be transferred
            my ($preserve, undef) = schedule(
              schedule => \@schedule,
              preserve => config_preserve_hash($droot, $action_archive ? "archive" : "target"),
              preserve_threshold_date => ($action_archive && config_key($droot, "archive_exclude_older") ? $last_dvol_date : undef),

              results => $schedule_results_mixed,
              result_hints => { topic => "backup", root_path => $snaproot->{PATH} },
              result_delete_action_text => 'REMOVE_FROM_OUTPUT',
              result_preserve_action_text => 'create',
            );
            my @resume = grep defined, @$preserve; # remove entries with no value from list (target subvolumes)
            $resume_total = scalar @resume;

            # transfer in creation-generation order (oldest first), so
            # incremental parents are present for the next transfer
            foreach my $snapshot (sort { $a->{node}{cgen} <=> $b->{node}{cgen} } @resume)
            {
              # Continue gracefully (skip instead of abort) on existing (possibly garbled) target
              if(my $err_vol = vinfo_subvol($droot, $snapshot->{NAME})) {
                my $err_msg = "Please delete stray subvolumes: \"btrbk clean $droot->{PRINT}\"";
                FIX_MANUALLY($droot, $err_msg);
                WARN "Target subvolume \"$err_vol->{PRINT}\" exists, but is not a receive target of \"$snapshot->{PRINT}\"";
                WARN $err_msg;
                WARN "Skipping backup of: $snapshot->{PRINT}";
                $droot->{SUBVOL_RECEIVED} //= [];
                push(@{$droot->{SUBVOL_RECEIVED}}, { ERROR => 1, received_subvolume => $err_vol });
                next;
              }

              # Note: strict_related does not make much sense on archive:
              # on targets, parent_uuid chain is broken after first prune.
              my ($clone_src, $target_parent_node);
              my $parent = get_best_parent($snapshot, $droot,
                                           snaproot => $snaproot,
                                           strict_related => ((config_key($droot, "incremental") // "") eq "strict") && !$action_archive,
                                           clone_src => \$clone_src,
                                           target_parent_node => \$target_parent_node,
                                          );
              if(macro_send_receive(source => $snapshot,
                                    target => $droot,
                                    parent => $parent, # this is <undef> if no suitable parent found
                                    clone_src => $clone_src,
                                    target_parent_node => $target_parent_node,
                                   ))
              {
                $resume_success++;
              }
              else {
                # note: ABORTED flag is already set by macro_send_receive()
                ERROR("Error while resuming backups, aborting");
                last;
              }
            }

            # replace results with target value
            foreach (@$schedule_results_mixed) {
              my $replace = $_->{value}{SUBVOL_SENT}{$droot->{URL}} // next;
              $_->{value} = $replace;
            }
            push @$schedule_results, @$schedule_results_mixed;
          }

          if($resume_total) {
            INFO "Created $resume_success/$resume_total missing backups";
          } else {
            INFO "No missing backups found";
          }
        }
      }
    }
  }


  #
  # remove backups following a preserve daily/weekly/monthly scheme
  #
  if($preserve_snapshots && $preserve_backups) {
    INFO "Preserving all snapshots and backups";
  }
  else
  {
    foreach my $sroot (vinfo_subsection($config, 'volume')) {
      foreach my $svol (vinfo_subsection($sroot, 'subvolume')) {
        my $snaproot = vinfo_snapshot_root($svol);
        my $snapshot_basename = config_key($svol, "snapshot_name") // die;
        # 0: all targets ok; -1: skipped by cmdline filter; 1: hard abort
        my $target_aborted = 0;
        my $snapshots = get_btrbk_snapshot_siblings($snaproot, name => $snapshot_basename, readonly => 1, sort => "desc");

        foreach my $droot (vinfo_subsection($svol, 'target', 1)) {
          if(IS_ABORTED($droot)) {
            if(IS_ABORTED($droot, "skip_cmdline_")) {
              # soft skip: don't upgrade an existing hard abort (||=)
              $target_aborted ||= -1;
            } else {
              $target_aborted = 1;
            }
            next;
          }

          # preserve latest common snapshot/backup (for incremental targets)
          if(config_key($droot, "incremental")) {
            foreach my $snapshot (@$snapshots) {
              my @receive_targets = get_receive_targets($droot, $snapshot, exact => 1);
              if(scalar(@receive_targets)) {
                DEBUG "Force preserve for latest common snapshot: $snapshot->{PRINT}";
                $snapshot->{node}{FORCE_PRESERVE} = 'preserve forced: latest common snapshot';
                foreach(@receive_targets) {
                  DEBUG "Force preserve for latest common target: $_->{PRINT}";
                  $_->{node}{FORCE_PRESERVE} = 'preserve forced: latest common target';
                }
                last;
              }
            }
          }

          if($preserve_backups) {
            INFO "Preserving all backups";
          }
          else {
            #
            # delete backups
            #
            INFO "Cleaning backups of \"$snaproot->{PRINT}/${snapshot_basename}.*\" in: $droot->{PRINT}/";
            unless(macro_delete($droot, $snapshot_basename, $droot,
                                { preserve => config_preserve_hash($droot, $action_archive ? "archive" : "target"),
                                  results => $schedule_results,
                                  result_hints => { topic => "backup_delete", root_path => $droot->{PATH} },
                                },
                                commit => config_key($droot, "btrfs_commit_delete"),
                                type => "delete_target",
                                qgroup => { destroy => config_key($droot, "target_qgroup_destroy"),
                                            type => "qgroup_destroy_target" },
                               ))
            {
              $target_aborted = 1;
            }
          }
        }

        #
        # delete snapshots
        # (only if no target of this subvolume was skipped or aborted)
        #
        if($preserve_snapshots || $action_archive) {
          INFO "Preserving all snapshots" unless($action_archive);
        }
        elsif($target_aborted) {
          if($target_aborted == -1) {
            INFO "Skipping cleanup of snapshots for subvolume \"$svol->{PRINT}\", as at least one target is skipped by command line argument";
          } else {
            WARN "Skipping cleanup of snapshots for subvolume \"$svol->{PRINT}\", as at least one target aborted earlier";
          }
        }
        else {
          INFO "Cleaning snapshots" . ($wipe_snapshots ? " (wipe)" : "") . ": $snaproot->{PRINT}/${snapshot_basename}.*";
          macro_delete($snaproot, $snapshot_basename, $svol,
                       { preserve => config_preserve_hash($svol, "snapshot", wipe => $wipe_snapshots),
                         results => $schedule_results,
                         result_hints => { topic => "snapshot_delete", root_path => $snaproot->{PATH} },
                       },
                       commit => config_key($svol, "btrfs_commit_delete"),
                       type => "delete_snapshot",
                       qgroup => { destroy => config_key($svol, "snapshot_qgroup_destroy"),
                                   type => "qgroup_destroy_snapshot" },
                      );
        }
      }
    }
  }

  my $exit_status = exit_status($config);
  my $time_elapsed = time - $start_time;
  INFO "Completed within: ${time_elapsed}s (" . localtime(time) . ")";
  action("finished",
         status => $exit_status ? "partial" : "success",
         duration => $time_elapsed,
         message => $exit_status ? "At least one backup task aborted" : undef,
        );
  close_transaction_log();


  unless($quiet)
  {
    #
    # print scheduling results
    #
    if($print_schedule && $schedule_results) {
      foreach my $topic (qw(backup backup_delete snapshot_delete)) {
        my @data = map +{ %$_, vinfo_prefixed_keys("", $_->{value}) },
                   grep { !($_->{action} && $_->{action} eq "REMOVE_FROM_OUTPUT") }
                   grep { $_->{topic} eq $topic } @$schedule_results;
        next unless(@data);
        print_formatted("schedule", \@data, title => (uc($topic =~ s/_/ /gr) . " SCHEDULE"), paragraph => 1);
      }
    }


    #
    # print summary
    #
    $output_format ||= "custom";
    if($output_format eq "custom")
    {
      my @out;
      foreach my $sroot (vinfo_subsection($config, 'volume', 1)) {
        foreach my $svol (vinfo_subsection($sroot, 'subvolume', 1)) {
          # per-subvolume summary lines, prefixed with action markers
          # (see "legend" in print_header below)
          my @subvol_out;
          if($svol->{SNAPSHOT_UP_TO_DATE}) {
            push @subvol_out, "=== $svol->{SNAPSHOT_UP_TO_DATE}->{PRINT}";
          }
          if($svol->{SNAPSHOT_CREATED}) {
            push @subvol_out, "+++ $svol->{SNAPSHOT_CREATED}->{PRINT}";
          }
          foreach(@{$svol->{SUBVOL_DELETED} // []}) {
            push @subvol_out, "--- $_->{PRINT}";
          }
          foreach my $droot (vinfo_subsection($svol, 'target', 1)) {
            if($droot->{SUBDIR_CREATED}) {
              push @subvol_out, "++. $droot->{PRINT}/";
            }
            foreach(@{$droot->{SUBVOL_RECEIVED} // []}) {
              my $create_mode = "***";
              $create_mode = ">>>" if($_->{parent});
              # substr($create_mode, 0, 1, '%') if($_->{resume});
              $create_mode = "!!!" if($_->{ERROR});
              push @subvol_out, "$create_mode $_->{received_subvolume}->{PRINT}";
            }

            foreach(@{$droot->{SUBVOL_DELETED} // []}) {
              push @subvol_out, "--- $_->{PRINT}";
            }

            if(IS_ABORTED($droot, "skip_archive_exclude")) {
              push @subvol_out, "<archive_exclude>";
            }
            if(IS_ABORTED($droot, "abort_")) {
              push @subvol_out, "!!! Target \"$droot->{PRINT}\" aborted: " . ABORTED_TEXT($droot);
            }
          }

          if(IS_ABORTED($sroot, "abort_")) {
            # repeat volume errors in subvolume context
            push @subvol_out, "!!! Volume \"$sroot->{PRINT}\" aborted: " . ABORTED_TEXT($sroot);
          }
          if(IS_ABORTED($svol, "abort_")) {
            # don't print "<no_action>" on skip_cmdline or skip_noauto
            push @subvol_out, "!!! Aborted: " . ABORTED_TEXT($svol);
          }

          unless(scalar(@subvol_out)) {
            @subvol_out = (
              # print <archive_exclude> on skip_archive_exclude
              IS_ABORTED($sroot, "skip_archive_exclude") ? "<archive_exclude>" :
              IS_ABORTED($svol, "skip_archive_exclude") ? "<archive_exclude>" :
              # print nothing if aborted by any other "skip_" tag
              IS_ABORTED($sroot, "skip_") ? () :
              IS_ABORTED($svol, "skip_") ? () :
              "<no_action>"
              # alternative: print generic aborted key
              #"<" . (IS_ABORTED($sroot, "skip_") || IS_ABORTED($svol, "skip_") || "no_action") . ">",
            );
          }

          if(@subvol_out) {
            if($action_archive) {
              my $snaproot = vinfo_snapshot_root($svol);
              my $snapshot_basename = config_key($svol, "snapshot_name");
              push @out, "$snaproot->{PRINT}/${snapshot_basename}.*";
            } else {
              push @out, "$svol->{PRINT}";
            }
            push @out, @subvol_out, "";
          }
        }
      }

      print_header(title => $action_archive ? "Archive Summary" : "Backup Summary",
                   config => $config,
                   time => $start_time,
                   options => [
                     (map $_->{reason}, @exclude_vf),
                     $skip_snapshots && "$skip_snapshots: No snapshots created",
                     $skip_backups && "$skip_backups: No backups created",
                     $preserve_snapshots && "$preserve_snapshots: Preserved all snapshots",
                     $preserve_backups && "$preserve_backups: Preserved all backups",
                   ],
                   legend => [
                     $action_archive && "++. created directory",
                     $action_run && "=== up-to-date subvolume (source snapshot)",
                     $action_run && "+++ created subvolume (source snapshot)",
                     "--- deleted subvolume",
                     "*** received subvolume (non-incremental)",
                     ">>> received subvolume (incremental)",
                   ],
                  );

      print join("\n", @out);
      print_footer($config, $exit_status);
    }
    else
    {
      # print action log (without transaction start messages)
      my @data = grep { $_->{status} !~ /starting$/ } @transaction_log;
      print_formatted("transaction", \@data, title => "TRANSACTION LOG");
    }
  }

  # fall through (exit 0) happens further below on success
  exit $exit_status if($exit_status);
}
|
|
}
|
|
|
|
|
|
# Return a true value, as required by Perl when this file is loaded via
# require/do (conventional final statement; harmless when the file is
# executed directly as a script).
1;
|