mirror of https://github.com/digint/btrbk
#!/usr/bin/perl -T

=head1 NAME

btrbk - create snapshots and remote backups of btrfs subvolumes

=head1 SYNOPSIS

btrbk --help

=head1 DESCRIPTION

Backup tool for btrfs subvolumes, taking advantage of the btrfs-specific
send-receive mechanism, allowing incremental backups at file-system
level.

The full btrbk documentation is available at L<http://digint.ch/btrbk/>.

=head1 AUTHOR

Axel Burri <axel@tty0.ch>

=head1 COPYRIGHT AND LICENSE

Copyright (c) 2014-2016 Axel Burri. All rights reserved.

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.

=cut
use strict;
use warnings FATAL => qw( all );

use Carp qw(confess);
use Getopt::Long qw(GetOptions);
use Time::Local qw( timelocal timegm timegm_nocheck );

our $VERSION = '0.24.0-dev';
our $AUTHOR = 'Axel Burri <axel@tty0.ch>';
our $PROJECT_HOME = '<http://digint.ch/btrbk/>';

our $BTRFS_PROGS_MIN = '3.18.2';  # required since btrbk-v0.23.0

my $VERSION_INFO = "btrbk command line client, version $VERSION";


my @config_src = ("/etc/btrbk.conf", "/etc/btrbk/btrbk.conf");
my %compression = (
  # NOTE: also adapt "compress_list" in ssh_filter_btrbk.sh if you change this
  gzip   => { name => 'gzip',   format => 'gz',  compress_cmd => [ 'gzip', '-c' ],   decompress_cmd => [ 'gzip', '-d', '-c' ],   level_min => 1, level_max => 9 },
  pigz   => { name => 'pigz',   format => 'gz',  compress_cmd => [ 'pigz', '-c' ],   decompress_cmd => [ 'pigz', '-d', '-c' ],   level_min => 1, level_max => 9, threads => '-p' },
  bzip2  => { name => 'bzip2',  format => 'bz2', compress_cmd => [ 'bzip2', '-c' ],  decompress_cmd => [ 'bzip2', '-d', '-c' ],  level_min => 1, level_max => 9 },
  pbzip2 => { name => 'pbzip2', format => 'bz2', compress_cmd => [ 'pbzip2', '-c' ], decompress_cmd => [ 'pbzip2', '-d', '-c' ], level_min => 1, level_max => 9, threads => '-p' },
  xz     => { name => 'xz',     format => 'xz',  compress_cmd => [ 'xz', '-c' ],     decompress_cmd => [ 'xz', '-d', '-c' ],     level_min => 0, level_max => 9, threads => '-T' },
  lzo    => { name => 'lzo',    format => 'lzo', compress_cmd => [ 'lzop', '-c' ],   decompress_cmd => [ 'lzop', '-d', '-c' ],   level_min => 1, level_max => 9 },
  lz4    => { name => 'lz4',    format => 'lz4', compress_cmd => [ 'lz4', '-c' ],    decompress_cmd => [ 'lz4', '-d', '-c' ],    level_min => 1, level_max => 9 },
);
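# Example: compress_cmd() below assembles a shell command from one of these
# entries; e.g. xz with level=6 and threads=4 yields "xz -c -6 -T4", and the
# matching decompress_cmd() yields "xz -d -c".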
my $compress_format_alt = join '|', map { $_->{format} } values %compression;  # note: this contains duplicate alternations
my $ip_addr_match = qr/(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])/;
my $host_name_match = qr/(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\-]*[A-Za-z0-9])/;
my $file_match = qr/[0-9a-zA-Z_@\+\-\.\/]+/;    # note: ubuntu uses '@' in the subvolume layout: <https://help.ubuntu.com/community/btrfs>
my $glob_match = qr/[0-9a-zA-Z_@\+\-\.\/\*]+/;  # file_match plus '*'
my $uuid_match = qr/[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}/;
my $timestamp_postfix_match = qr/\.(?<YYYY>[0-9]{4})(?<MM>[0-9]{2})(?<DD>[0-9]{2})(T(?<hh>[0-9]{2})(?<mm>[0-9]{2})((?<ss>[0-9]{2})(?<zz>(Z|[+-][0-9]{4})))?)?(_(?<NN>[0-9]+))?/;  # matches "YYYYMMDD[Thhmm[ss+0000]][_NN]"
my $raw_postfix_match = qr/--(?<received_uuid>$uuid_match)(\@(?<parent_uuid>$uuid_match))?\.btrfs?(\.(?<compress>($compress_format_alt)))?(\.(?<encrypt>gpg))?(\.(?<incomplete>part))?/;  # matches ".btrfs_<received_uuid>[@<parent_uuid>][.gz|bz2|xz][.gpg][.part]"
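# Example names matched by the postfix patterns above (values illustrative):
#   $timestamp_postfix_match: the ".20160412" or ".20160412T1230_1" part of "home.20160412T1230_1"
#   $raw_postfix_match:       the "--<received_uuid>[@<parent_uuid>].btrfs[.gz|.bz2|...][.gpg][.part]"
#                             part of a raw target file name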
my $group_match = qr/[a-zA-Z0-9_:-]+/;
my $ssh_cipher_match = qr/[a-z0-9][a-z0-9@.-]+/;

my %day_of_week_map = ( sunday => 0, monday => 1, tuesday => 2, wednesday => 3, thursday => 4, friday => 5, saturday => 6 );
my @syslog_facilities = qw( user mail daemon auth lpr news cron authpriv local0 local1 local2 local3 local4 local5 local6 local7 );
my %config_options = (
  # NOTE: the parser always maps "no" to undef
  # NOTE: keys "volume", "subvolume" and "target" are hardcoded
  # NOTE: files "." and "no" map to <undef>
  timestamp_format      => { default => "short", accept => [ "short", "long", "long-iso" ], context => [ "root", "volume", "subvolume" ] },
  snapshot_dir          => { default => undef, accept_file => { relative => 1 } },
  snapshot_name         => { default => undef, accept_file => { name_only => 1 }, context => [ "subvolume" ], deny_glob_context => 1 },  # NOTE: defaults to the subvolume name (hardcoded)
  snapshot_create       => { default => "always", accept => [ "no", "always", "ondemand", "onchange" ] },
  incremental           => { default => "yes", accept => [ "yes", "no", "strict" ] },
  preserve_day_of_week  => { default => "sunday", accept => [ (keys %day_of_week_map) ] },
  snapshot_preserve     => { default => undef, accept => [ "no" ], accept_preserve_matrix => 1, context => [ "root", "volume", "subvolume" ], },
  snapshot_preserve_min => { default => "all", accept => [ "all", "latest" ], accept_regexp => qr/^[1-9][0-9]*[hdwmy]$/, context => [ "root", "volume", "subvolume" ], },
  target_preserve       => { default => undef, accept => [ "no" ], accept_preserve_matrix => 1 },
  target_preserve_min   => { default => "all", accept => [ "all", "latest", "no" ], accept_regexp => qr/^[0-9]+[hdwmy]$/ },
  archive_preserve      => { default => undef, accept => [ "no" ], accept_preserve_matrix => 1 },
  archive_preserve_min  => { default => "all", accept => [ "all", "latest", "no" ], accept_regexp => qr/^[0-9]+[hdwmy]$/ },
  btrfs_commit_delete   => { default => undef, accept => [ "after", "each", "no" ] },
  ssh_identity          => { default => undef, accept_file => { absolute => 1 } },
  ssh_user              => { default => "root", accept_regexp => qr/^[a-z_][a-z0-9_-]*$/ },
  ssh_port              => { default => "default", accept => [ "default" ], accept_numeric => 1 },
  ssh_compression       => { default => undef, accept => [ "yes", "no" ] },
  ssh_cipher_spec       => { default => "default", accept_regexp => qr/^$ssh_cipher_match(,$ssh_cipher_match)*$/ },
  rate_limit            => { default => undef, accept => [ "no" ], accept_regexp => qr/^[0-9]+[kmgt]?$/, require_bin => 'pv' },
  transaction_log       => { default => undef, accept_file => { absolute => 1 } },
  transaction_syslog    => { default => undef, accept => \@syslog_facilities },
  lockfile              => { default => undef, accept_file => { absolute => 1 }, context => [ "root" ] },

  stream_compress         => { default => undef, accept => [ "no", (keys %compression) ] },
  stream_compress_level   => { default => "default", accept => [ "default" ], accept_numeric => 1 },
  stream_compress_threads => { default => "default", accept => [ "default" ], accept_numeric => 1 },

  raw_target_compress         => { default => undef, accept => [ "no", (keys %compression) ] },
  raw_target_compress_level   => { default => "default", accept => [ "default" ], accept_numeric => 1 },
  raw_target_compress_threads => { default => "default", accept => [ "default" ], accept_numeric => 1 },
  raw_target_encrypt          => { default => undef, accept => [ "no", "gpg" ] },
  raw_target_block_size       => { default => "128K", accept_regexp => qr/^[0-9]+(kB|k|K|KiB|MB|M|MiB)?$/ },
  gpg_keyring                 => { default => undef, accept_file => { absolute => 1 } },
  gpg_recipient               => { default => undef, accept_regexp => qr/^[0-9a-zA-Z_@\+\-\.]+$/ },

  group => { default => undef, accept_regexp => qr/^$group_match(\s*,\s*$group_match)*$/, split => qr/\s*,\s*/ },

  backend        => { default => "btrfs-progs", accept => [ "btrfs-progs", "btrfs-progs-btrbk" ] },
  backend_local  => { default => undef, accept => [ "no", "btrfs-progs", "btrfs-progs-btrbk" ] },
  backend_remote => { default => undef, accept => [ "no", "btrfs-progs", "btrfs-progs-btrbk" ] },

  # deprecated options
  btrfs_progs_compat => { default => undef, accept => [ "yes", "no" ],
                          deprecated => { DEFAULT => { ABORT => 1, warn => 'This feature has been dropped in btrbk-v0.23.0. Please update to newest btrfs-progs, AT LEAST >= $BTRFS_PROGS_MIN' } } },
  snapshot_preserve_daily   => { default => 'all', accept => [ "all" ], accept_numeric => 1, context => [ "root", "volume", "subvolume" ],
                                 deprecated => { DEFAULT => { FAILSAFE_PRESERVE => 1, warn => 'Please use "snapshot_preserve" and/or "snapshot_preserve_min"' } } },
  snapshot_preserve_weekly  => { default => 0, accept => [ "all" ], accept_numeric => 1, context => [ "root", "volume", "subvolume" ],
                                 deprecated => { DEFAULT => { FAILSAFE_PRESERVE => 1, warn => 'Please use "snapshot_preserve" and/or "snapshot_preserve_min"' } } },
  snapshot_preserve_monthly => { default => 'all', accept => [ "all" ], accept_numeric => 1, context => [ "root", "volume", "subvolume" ],
                                 deprecated => { DEFAULT => { FAILSAFE_PRESERVE => 1, warn => 'Please use "snapshot_preserve" and/or "snapshot_preserve_min"' } } },
  target_preserve_daily     => { default => 'all', accept => [ "all" ], accept_numeric => 1,
                                 deprecated => { DEFAULT => { FAILSAFE_PRESERVE => 1, warn => 'Please use "target_preserve" and/or "target_preserve_min"' } } },
  target_preserve_weekly    => { default => 0, accept => [ "all" ], accept_numeric => 1,
                                 deprecated => { DEFAULT => { FAILSAFE_PRESERVE => 1, warn => 'Please use "target_preserve" and/or "target_preserve_min"' } } },
  target_preserve_monthly   => { default => 'all', accept => [ "all" ], accept_numeric => 1,
                                 deprecated => { DEFAULT => { FAILSAFE_PRESERVE => 1, warn => 'Please use "target_preserve" and/or "target_preserve_min"' } } },
  resume_missing => { default => "yes", accept => [ "yes", "no" ],
                      deprecated => { yes => { warn => 'ignoring (missing backups are always resumed since btrbk v0.23.0)' },
                                      no  => { FAILSAFE_PRESERVE => 1, warn => 'Please use "target_preserve_min latest" and "target_preserve no" if you want to keep only the latest backup', } } },
  snapshot_create_always => { default => undef, accept => [ "yes", "no" ],
                              deprecated => { yes => { warn => "Please use \"snapshot_create always\"",
                                                       replace_key => "snapshot_create",
                                                       replace_value => "always",
                                                     },
                                              no  => { warn => "Please use \"snapshot_create no\" or \"snapshot_create ondemand\"",
                                                       replace_key => "snapshot_create",
                                                       replace_value => "ondemand",
                                                     }
                                            },
                            },
  receive_log => { default => undef, accept => [ "sidecar", "no" ], accept_file => { absolute => 1 },
                   deprecated => { DEFAULT => { warn => "ignoring" } },
                 }
);
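# Illustrative /etc/btrbk.conf fragment using some of the options above
# (sketch only; see the btrbk.conf documentation for the authoritative syntax
# and retention formats):
#
#   transaction_log        /var/log/btrbk.log
#   snapshot_preserve_min  2d
#   snapshot_preserve      14d 4w
#   volume /mnt/btr_pool
#     subvolume home
#       snapshot_create  always
#       target send-receive ssh://backuphost/mnt/btr_backup
#         target_preserve  20d 10w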
my @config_target_types = qw(send-receive raw);

my %table_formats = (
  list_volume => { table => [ qw( volume_host volume_path ) ],
                   long  => [ qw( volume_host volume_path ) ],
                   raw   => [ qw( volume_url volume_host volume_path volume_rsh ) ],
                 },
  list_source => { table => [ qw( source_host source_subvol snapshot_path snapshot_name ) ],
                   long  => [ qw( source_host source_subvol snapshot_path snapshot_name ) ],
                   raw   => [ qw( source_url source_host source_path snapshot_path snapshot_name source_rsh ) ],
                 },
  list_target => { table => [ qw( target_host target_path ) ],
                   long  => [ qw( target_host target_path ) ],
                   raw   => [ qw( target_url target_host target_path target_rsh ) ],
                 },
  list        => { table => [ qw( source_host source_subvol snapshot_path snapshot_name target_host target_path ) ],
                   long  => [ qw( source_host source_subvol snapshot_path snapshot_name snapshot_preserve target_host target_path target_preserve ) ],
                   raw   => [ qw( source_url source_host source_subvol snapshot_path snapshot_name snapshot_preserve target_url target_host target_path target_preserve source_rsh target_rsh ) ],
                 },

  resolved    => { table => [ qw( source_host source_subvol snapshot_subvol status target_host target_subvol ) ],
                   long  => [ qw( type source_host source_subvol snapshot_subvol status target_host target_subvol ) ],
                   raw   => [ qw( type source_host source_path snapshot_path snapshot_name status target_host target_path source_rsh ) ],
                 },

  schedule    => { table => [ qw( action host subvol scheme reason ) ],
                   long  => [ qw( action host root_path subvol_path scheme reason ) ],
                   raw   => [ qw( topic action url host path dow min h d w m y ) ],
                 },

  usage       => { table => [ qw( host path size used free ) ],
                   long  => [ qw( type host path size device_allocated device_unallocated device_missing used free free_min data_ratio metadata_ratio used global_reserve global_reserve_used ) ],
                   raw   => [ qw( type host path size device_allocated device_unallocated device_missing used free free_min data_ratio metadata_ratio used global_reserve global_reserve_used ) ],
                   RALIGN => { size=>1, device_allocated=>1, device_unallocated=>1, device_missing=>1, used=>1, free=>1, free_min=>1, data_ratio=>1, metadata_ratio=>1, used=>1, global_reserve=>1, global_reserve_used=>1 },
                 },

  transaction => { table  => [ qw( type status target_host target_subvol source_host source_subvol parent_subvol ) ],
                   long   => [ qw( localtime type status duration target_host target_subvol source_host source_subvol parent_subvol message ) ],
                   raw    => [ qw( time localtime type status duration target_url source_url parent_url message ) ],
                   tlog   => [ qw( localtime type status duration target_url source_url parent_url message ) ],
                   syslog => [ qw( type status duration target_url source_url parent_url message ) ],
                 },

  origin_tree => { table => [ qw( tree uuid parent_uuid received_uuid ) ],
                   long  => [ qw( tree uuid parent_uuid received_uuid recursion ) ],
                   raw   => [ qw( tree uuid parent_uuid received_uuid recursion ) ],
                 },
);
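# Each entry in %table_formats maps an output format ("table", "long", "raw",
# plus "tlog"/"syslog" for transactions) to the ordered column list used by
# print_formatted(); e.g. "btrbk usage -t" prints the columns listed under
# usage->{table}: host, path, size, used, free.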
my %backend_cmd_map = (
  "btrfs-progs-btrbk" => { "btrfs subvolume list"     => [ "btrfs-subvolume-list" ],
                           "btrfs subvolume show"     => [ "btrfs-subvolume-show" ],
                           "btrfs subvolume snapshot" => [ "btrfs-subvolume-snapshot" ],
                           "btrfs subvolume delete"   => [ "btrfs-subvolume-delete" ],
                           "btrfs send"               => [ "btrfs-send" ],
                           "btrfs receive"            => [ "btrfs-receive" ],
                         },
);
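# With "backend btrfs-progs-btrbk", the generic "btrfs <subcommand>" invocations
# are mapped to the dedicated per-command binaries listed above
# (e.g. "btrfs subvolume list ..." becomes "btrfs-subvolume-list ...").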
my %url_cache;       # map URL to btr_tree node
my %fstab_cache;     # map HOST to btrfs mount points
my %uuid_cache;      # map UUID to btr_tree node
my %realpath_cache;  # map URL to realpath (symlink target)

my $tree_inject_id = 0;  # fake subvolume id for injected nodes (negative)
my $fake_uuid_prefix = 'XXXXXXXX-XXXX-XXXX-XXXX-';  # plus 0-padded inject_id: XXXXXXXX-XXXX-XXXX-XXXX-000000000000

my $dryrun;
my $loglevel = 1;
my $do_dumper;
my $show_progress = 0;
my $err = "";
my $abrt = "";  # last ABORTED() message
my $output_format;
my $lockfile;
my $tlog_fh;
my $syslog_enabled = 0;
my $current_transaction;
my @transaction_log;
my %config_override;
my @tm_now;  # current localtime ( sec, min, hour, mday, mon, year, wday, yday, isdst )
my %warn_once;


$SIG{__DIE__} = sub {
  print STDERR "\nERROR: process died unexpectedly (btrbk v$VERSION)";
  print STDERR "\nPlease contact the author: $AUTHOR\n\n";
  print STDERR "Stack Trace:\n----------------------------------------\n";
  Carp::confess @_;
};

$SIG{INT} = sub {
  print STDERR "\nERROR: Caught SIGINT, dumping transaction log:\n";
  action("signal", status => "SIGINT");
  print_formatted("transaction", \@transaction_log, output_format => "tlog", outfile => *STDERR);
  exit 1;
};
sub VERSION_MESSAGE
{
  print STDERR $VERSION_INFO . "\n\n";
}

sub HELP_MESSAGE
{
  print STDERR "usage: btrbk [options] <command> [filter...]\n";
  print STDERR "\n";
  print STDERR "options:\n";
  # "--------------------------------------------------------------------------------"; # 80
  print STDERR "  -h, --help             display this help message\n";
  print STDERR "      --version          display version information\n";
  print STDERR "  -c, --config=FILE      specify configuration file\n";
  print STDERR "  -n, --dry-run          perform a trial run with no changes made\n";
  print STDERR "  -p, --preserve         preserve all backups (do not delete any old targets)\n";
  print STDERR "  -r, --resume-only      resume only (do not create new snapshots, only resume\n";
  print STDERR "                         missing backups)\n";
  print STDERR "  -v, --verbose          be verbose (set loglevel=info)\n";
  print STDERR "  -q, --quiet            be quiet (do not print summary for the \"run\" command)\n";
  print STDERR "  -l, --loglevel=LEVEL   set logging level (warn, info, debug, trace)\n";
  print STDERR "  -t, --table            change output to table format\n";
  print STDERR "      --format=FORMAT    change output format, FORMAT=table|long|raw\n";
  print STDERR "      --print-schedule   print scheduler details (for the \"run\" command)\n";
  print STDERR "      --progress         show progress bar on send-receive operation\n";
  print STDERR "\n";
  print STDERR "commands:\n";
  print STDERR "  run                    perform backup operations as defined in the config\n";
  print STDERR "  dryrun                 don't run btrfs commands; show what would be executed\n";
  print STDERR "  stats                  print snapshot/backup statistics\n";
  print STDERR "  list <subcommand>      available subcommands are:\n";
  print STDERR "    backups                all backups and corresponding snapshots\n";
  print STDERR "    snapshots              all snapshots and corresponding backups\n";
  print STDERR "    latest                 most recent snapshots and backups\n";
  print STDERR "    config                 configured source/snapshot/target relations\n";
  print STDERR "    source                 configured source/snapshot relations\n";
  print STDERR "    volume                 configured volume sections\n";
  print STDERR "    target                 configured targets\n";
  print STDERR "  clean                  delete incomplete (garbled) backups\n";
  print STDERR "  archive <src> <dst>    recursively copy all subvolumes (experimental)\n";
  print STDERR "  usage                  print filesystem usage\n";
  print STDERR "  origin <subvol>        print origin information for subvolume\n";
  print STDERR "  diff <from> <to>       show new files between related subvolumes\n";
  print STDERR "\n";
  print STDERR "For additional information, see $PROJECT_HOME\n";
}
sub TRACE { my $t = shift; print STDERR "... $t\n"      if($loglevel >= 4); }
sub DEBUG { my $t = shift; print STDERR "$t\n"          if($loglevel >= 3); }
sub INFO  { my $t = shift; print STDERR "$t\n"          if($loglevel >= 2); }
sub WARN  { my $t = shift; print STDERR "WARNING: $t\n" if($loglevel >= 1); }
sub ERROR { my $t = shift; print STDERR "ERROR: $t\n"; }

sub WARN_ONCE {
  my $t = shift;
  if($warn_once{$t}) { TRACE("WARNING(again): $t"); }
  else               { $warn_once{$t} = 1; WARN($t); }
}

sub VINFO {
  return undef unless($do_dumper);
  my $vinfo = shift; my $t = shift || "vinfo"; my $maxdepth = shift // 2;
  print STDERR Data::Dumper->new([$vinfo], [$t])->Maxdepth($maxdepth)->Dump();
}
sub SUBVOL_LIST {
  return undef unless($do_dumper);
  my $vol = shift; my $t = shift // "SUBVOL_LIST"; my $svl = vinfo_subvol_list($vol);
  print STDERR "$t:\n " . join("\n ", map { "$vol->{PRINT}/./$_->{SUBVOL_PATH}\t$_->{node}{id}" } @$svl) . "\n";
}
sub URL_CACHE {
  print STDERR "URL_CACHE:\n" . join("\n", (sort keys %url_cache)) . "\n";
}
sub ABORTED($;$)
{
  my $config = shift;
  $abrt = shift;
  $config = $config->{CONFIG} if($config->{CONFIG});  # accept vinfo for $config
  return $config->{ABORTED} unless(defined($abrt));

  unless($abrt eq "USER_SKIP") {
    $abrt =~ s/\n/\\\\/g;
    $abrt =~ s/\r//g;
    action("abort_" . ($config->{CONTEXT} || "undef"),
           status => "ABORT",
           vinfo_prefixed_keys("target", vinfo($config->{url}, $config)),
           message => $abrt,
          );
  }
  $abrt = 1 unless($abrt);  # make sure $abrt is always a true value
  $config->{ABORTED} = $abrt;
}

sub eval_quiet(&)
{
  local $SIG{__DIE__};
  return eval { $_[0]->() }
}

sub require_data_dumper
{
  if(eval_quiet { require Data::Dumper; }) {
    Data::Dumper->import("Dumper");
    $Data::Dumper::Sortkeys = 1;
    $Data::Dumper::Quotekeys = 0;
    $do_dumper = 1;
    # silence perl warning: Name "Data::Dumper::Sortkeys" used only once: possible typo at...
    TRACE "Successfully loaded Dumper module: sortkeys=$Data::Dumper::Sortkeys, quotekeys=$Data::Dumper::Quotekeys";
  } else {
    WARN "Perl module \"Data::Dumper\" not found: data trace dumps disabled!" if($loglevel >= 4);
  }
}
sub init_transaction_log($$)
{
  my $file = shift;
  my $config_syslog_facility = shift;
  if(defined($file) && (not $dryrun)) {
    if(open($tlog_fh, ">> $file")) {
      # print headers (disabled)
      # print_formatted("transaction", [ ], output_format => "tlog", outfile => $tlog_fh);
      INFO "Using transaction log: $file";
    } else {
      $tlog_fh = undef;
      ERROR "Failed to open transaction log '$file': $!";
    }
  }
  if(defined($config_syslog_facility) && (not $dryrun)) {
    DEBUG "Opening syslog";
    if(eval_quiet { require Sys::Syslog; }) {
      $syslog_enabled = 1;
      Sys::Syslog::openlog("btrbk", "", $config_syslog_facility);
      DEBUG "Syslog enabled";
    }
    else {
      WARN "Syslog disabled: $@";
    }
  }
  action("startup", status => "v$VERSION", message => "$VERSION_INFO");
}

sub close_transaction_log()
{
  if($tlog_fh) {
    DEBUG "Closing transaction log";
    close $tlog_fh || ERROR "Failed to close transaction log: $!";
  }
  if($syslog_enabled) {
    DEBUG "Closing syslog";
    eval_quiet { Sys::Syslog::closelog(); };
  }
}

sub action($@)
{
  my $type = shift // die;
  my $h = { @_ };
  my $time = $h->{time} // time;
  $h->{type} = $type;
  $h->{time} = $time;
  $h->{localtime} = timestamp($time, 'debug-iso');
  print_formatted("transaction", [ $h ], output_format => "tlog", no_header => 1, outfile => $tlog_fh) if($tlog_fh);
  print_formatted("transaction", [ $h ], output_format => "syslog", no_header => 1) if($syslog_enabled);  # dirty hack, this calls syslog()
  push @transaction_log, $h;
  return $h;
}
sub start_transaction($@)
{
  my $type = shift // die;
  my $time = time;
  die("start_transaction() while transaction is running") if($current_transaction);
  my @actions = (ref($_[0]) eq "HASH") ? @_ : { @_ };  # single action is not hashref
  $current_transaction = [];
  foreach (@actions) {
    push @$current_transaction, action($type, %$_, status => "starting", time => $time);
  }
}

sub end_transaction($$)
{
  my $type = shift // die;
  my $status = shift // die;
  my $time = time;
  die("end_transaction() while no transaction is running") unless($current_transaction);
  foreach (@$current_transaction) {
    die("end_transaction() has different type") unless($_->{type} eq $type);
    action($type, %$_, status => $status, duration => ($dryrun ? undef : ($time - $_->{time})));
  }
  $current_transaction = undef;
}

sub syslog($)
{
  return undef unless($syslog_enabled);
  my $line = shift;
  eval_quiet { Sys::Syslog::syslog("info", $line); };
}
sub check_exe($)
{
  my $cmd = shift // die;
  foreach my $path (split(":", $ENV{PATH})) {
    return 1 if( -x "$path/$cmd" );
  }
  return 0;
}

sub compress_cmd($;$)
{
  my $def = shift;
  my $decompress = shift;
  return undef unless(defined($def));
  my $cc = $compression{$def->{key}};
  my @cmd = $decompress ? @{$cc->{decompress_cmd}} : @{$cc->{compress_cmd}};

  if((not $decompress) && defined($def->{level}) && ($def->{level} ne "default")) {
    my $level = $def->{level};
    if($level < $cc->{level_min}) {
      WARN_ONCE "Compression level capped to minimum for '$cc->{name}': $cc->{level_min}";
      $level = $cc->{level_min};
    }
    if($level > $cc->{level_max}) {
      WARN_ONCE "Compression level capped to maximum for '$cc->{name}': $cc->{level_max}";
      $level = $cc->{level_max};
    }
    push @cmd, '-' . $level;
  }
  if(defined($def->{threads}) && ($def->{threads} ne "default")) {
    my $thread_opt = $cc->{threads};
    if($thread_opt) {
      push @cmd, $thread_opt . $def->{threads};
    }
    else {
      WARN_ONCE "Threading is not supported for '$cc->{name}', ignoring";
    }
  }
  return join(' ', @cmd);
}

sub decompress_cmd($)
{
  return compress_cmd($_[0], 1);
}
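# Example: compress_cmd({ key => 'pigz', level => 6, threads => 4 }) returns
# "pigz -c -6 -p4"; decompress_cmd() on the same definition returns
# "pigz -d -c -p4" (the level option is only added when compressing).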
sub _assemble_cmd($;$)
{
  my $cmd_pipe = shift;
  my $catch_stderr = shift;
  my $cmd = "";

  # simple single-command
  if(scalar(@$cmd_pipe) == 1) {
    $cmd = $cmd_pipe->[0]->{cmd_text};
    $cmd .= ' 2>&1' if($catch_stderr && $cmd_pipe->[0]->{catch_stderr});
    return $cmd;
  }

  # cmd result is something like this:
  # { btrfs send <src> 2>&3 | pv | btrfs receive <dst> 2>&3 ; } 3>&1
  my $pipe = "";
  $cmd = "{ " if($catch_stderr);
  foreach (@$cmd_pipe) {
    if($_->{cmd_text} =~ /^>/) {
      die unless($pipe);
      $cmd .= ' ' . $_->{cmd_text};
      $pipe = undef;  # this dies if it is not last command
    } else {
      $cmd .= $pipe . $_->{cmd_text};
      $cmd .= ' 2>&3' if($catch_stderr && $_->{catch_stderr});
      $pipe = ' | ';
    }
  }
  $cmd .= ' ; } 3>&1' if($catch_stderr);

  return $cmd;
}

sub _safe_cmd($$)
{
  # NOTE: this function alters $aref: hashes of form: "{ unsafe => 'string' }" get translated to "string"
  my $aref = shift;
  my $offending = shift;
  foreach(@$aref) {
    if(ref($_) eq 'HASH') {
      $_ = $_->{unsafe};  # replace in-place
      # NOTE: all files must be absolute
      unless(defined(check_file($_, { absolute => 1 }))) {
        push @$offending, "\"$_\"";
      }
    }
  }
  return join(' ', @$aref);
}
sub run_cmd(@)
{
  # shell-based implementation.
  # this needs some redirection magic for filter_stderr to work.
  # NOTE: multiple filters are not supported!

  my @cmd_pipe_in = (ref($_[0]) eq "HASH") ? @_ : { @_ };
  die unless(scalar(@cmd_pipe_in));
  $err = "";

  my $destructive = 0;
  my $catch_stderr = 0;
  my $filter_stderr = undef;
  my @cmd_pipe;
  my @unsafe_cmd;
  my $compressed = undef;

  foreach my $href (@cmd_pipe_in)
  {
    die if(defined($href->{cmd_text}));

    $catch_stderr = 1 if($href->{catch_stderr});
    $filter_stderr = $href->{filter_stderr} if($href->{filter_stderr});  # NOTE: last filter wins!
    $destructive = 1 unless($href->{non_destructive});

    if($href->{check_unsafe}) {
      _safe_cmd($href->{check_unsafe}, \@unsafe_cmd);
    }

    if($href->{compress}) {
      if($compressed && ($compression{$compressed->{key}}->{format} ne $compression{$href->{compress}->{key}}->{format})) {
        push @cmd_pipe, { cmd_text => decompress_cmd($compressed) };
        $compressed = undef;
      }
      unless($compressed) {
        push @cmd_pipe, { cmd_text => compress_cmd($href->{compress}) };
        $compressed = $href->{compress};
      }
    }
    else {
      if($href->{redirect_to_file}) {
        $href->{cmd_text} = _safe_cmd([ '>', $href->{redirect_to_file} ], \@unsafe_cmd);
      }
      elsif($href->{cmd}) {
        $href->{cmd_text} = _safe_cmd($href->{cmd}, \@unsafe_cmd);
      }
      return undef unless(defined($href->{cmd_text}));

      if($href->{rsh}) {
        my @rsh_cmd_pipe = ( $href );

        if($href->{rsh_compress_in}) {
          if($compressed && ($compression{$compressed->{key}}->{format} ne $compression{$href->{rsh_compress_in}->{key}}->{format}))
          {
            push @cmd_pipe, { cmd_text => decompress_cmd($compressed) };
            $compressed = undef;
          }
          unless($compressed) {
            push @cmd_pipe, { cmd_text => compress_cmd($href->{rsh_compress_in}) };
            $compressed = $href->{rsh_compress_in};
          }
        }

        if($compressed && (not ($href->{compressed_ok}))) {
          unshift @rsh_cmd_pipe, { cmd_text => decompress_cmd($compressed) };
          $compressed = undef;
        }

        if($href->{rsh_compress_out}) {
          die if($href->{redirect_to_file});
          push @rsh_cmd_pipe, { cmd_text => compress_cmd($href->{rsh_compress_out}) };
          $compressed = $href->{rsh_compress_out};
        }

        if((scalar(@rsh_cmd_pipe) == 1) && ($rsh_cmd_pipe[0]->{redirect_to_file})) {
          # NOTE: direct redirection in ssh command does not work: "ssh '> outfile'"
          # we need to assemble: "ssh 'cat > outfile'"
          unshift @rsh_cmd_pipe, { cmd_text => 'cat' };
        }

        my $rsh_text = _safe_cmd($href->{rsh}, \@unsafe_cmd);
        return undef unless(defined($rsh_text));
        $href->{cmd_text} = $rsh_text . " '" . _assemble_cmd(\@rsh_cmd_pipe) . "'";
      }
      else {
        if($compressed && (not ($href->{compressed_ok}))) {
          push @cmd_pipe, { cmd_text => decompress_cmd($compressed) };
          $compressed = undef;
        }
      }
      push @cmd_pipe, $href;
    }
  }

  my $cmd = _assemble_cmd(\@cmd_pipe, $catch_stderr);
  my $cmd_print = _assemble_cmd(\@cmd_pipe);  # hide redirection magic from debug output

  if(scalar(@unsafe_cmd)) {
    ERROR "Unsafe command `$cmd_print` (offending string: " . join(', ', @unsafe_cmd) . ')';
    return undef;
  }

  if($dryrun && $destructive) {
    DEBUG "### (dryrun) $cmd_print";
    return "";
  }
  DEBUG "### $cmd_print";

  # execute command and parse output
  TRACE "Executing command: $cmd";
  my $ret = "";
  $ret = `$cmd`;
  chomp($ret);
  TRACE "Command output:\n$ret";
  if($?) {
    my $exitcode = $? >> 8;
    my $signal = $? & 127;
    DEBUG "Command execution failed (exitcode=$exitcode" . ($signal ? ", signal=$signal" : "") . "): \"$cmd\"";

    if($catch_stderr) {
      $_ = $ret;
      &{$filter_stderr} ($cmd) if($filter_stderr);
      ERROR "[$cmd_print] $_" if($_);
    }
    return undef;
  }
  else {
    DEBUG "Command execution successful";
  }
  return $ret;
}
sub add_pv_command($@)
{
  my $cmd_pipe = shift || die;
  my %opts = @_;
  my $rate_limit = $opts{rate_limit};

  if($opts{show_progress}) {
    if($rate_limit) {
      push @$cmd_pipe, { cmd => [ 'pv', '-trab', '-L', $rate_limit ], compressed_ok => 1 };
    } else {
      push @$cmd_pipe, { cmd => [ 'pv', '-trab' ], compressed_ok => 1 };
    }
  }
  elsif($rate_limit) {
    push @$cmd_pipe, { cmd => [ 'pv', '-q', '-L', $rate_limit ], compressed_ok => 1 };
  }
}
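# pv option summary for the pipes built above: "-t" timer, "-r" current rate,
# "-a" average rate, "-b" byte counter, "-L <rate>" limits throughput (the
# "rate_limit" config value, e.g. "10m"), and "-q" suppresses progress output.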
sub btrfs_filesystem_show($)
{
  my $vol = shift || die;
  my $path = $vol->{PATH} // die;
  return run_cmd( cmd => vinfo_cmd($vol, "btrfs filesystem show", { unsafe => $path } ),
                  rsh => vinfo_rsh($vol),
                  non_destructive => 1
                );
}

sub btrfs_filesystem_df($)
{
  my $vol = shift || die;
  my $path = $vol->{PATH} // die;
  return run_cmd( cmd => vinfo_cmd($vol, "btrfs filesystem df", { unsafe => $path }),
                  rsh => vinfo_rsh($vol),
                  non_destructive => 1
                );
}

sub btrfs_filesystem_usage($)
{
  my $vol = shift || die;
  my $path = $vol->{PATH} // die;
  my $ret = run_cmd( cmd => vinfo_cmd($vol, "btrfs filesystem usage", { unsafe => $path } ),
                     rsh => vinfo_rsh($vol),
                     non_destructive => 1
                   );
  return undef unless(defined($ret));

  my %detail;
  foreach (split("\n", $ret)) {
    if(/^\s+Device size:\s+(\S+)/) {
      $detail{size} = $1;
    }
    elsif(/^\s+Device allocated:\s+(\S+)/) {
      $detail{device_allocated} = $1;
    }
    elsif(/^\s+Device unallocated:\s+(\S+)/) {
      $detail{device_unallocated} = $1;
    }
    elsif(/^\s+Device missing:\s+(\S+)/) {
      $detail{device_missing} = $1;
    }
    elsif(/^\s+Used:\s+(\S+)/) {
      $detail{used} = $1;
    }
    elsif(/^\s+Free \(estimated\):\s+(\S+)\s+\(min: (\S+)\)/) {
      $detail{free} = $1;
      $detail{free_min} = $2;
    }
    elsif(/^\s+Data ratio:\s+(\S+)/) {
      $detail{data_ratio} = $1;
    }
    elsif(/^\s+Metadata ratio:\s+(\S+)/) {
      $detail{metadata_ratio} = $1;
    }
    elsif(/^\s+Used:\s+(\S+)/) {
      $detail{used} = $1;
    }
    elsif(/^\s+Global reserve:\s+(\S+)\s+\(used: (\S+)\)/) {
      $detail{global_reserve} = $1;
      $detail{global_reserve_used} = $2;
    }
    else {
      TRACE "Failed to parse filesystem usage line \"$_\" for: $vol->{PRINT}";
    }
  }
  DEBUG "Parsed " . scalar(keys %detail) . " filesystem usage detail items: $vol->{PRINT}";
  TRACE(Data::Dumper->Dump([\%detail], ["btrfs_filesystem_usage($vol->{URL})"])) if($do_dumper);
  return \%detail;
}
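# The parser above consumes the overall section of "btrfs filesystem usage",
# whose lines look roughly like this (values are examples only):
#     Device size:                 100.00GiB
#     Device allocated:             42.03GiB
#     Free (estimated):             60.52GiB      (min: 31.27GiB)
#     Global reserve:              176.00MiB      (used: 0.00B)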
# returns hashref with keys: (name uuid parent_uuid id gen cgen top_level)
# for btrfs-progs >= 4.1, also returns key: "received_uuid"
sub btrfs_subvolume_show($)
{
  my $vol = shift || die;
  my $path = $vol->{PATH} // die;
  my $ret = run_cmd(cmd => vinfo_cmd($vol, "btrfs subvolume show", { unsafe => $path }),
                    rsh => vinfo_rsh($vol),
                    non_destructive => 1,
                    catch_stderr => 1,  # hack for shell-based run_cmd()
                    filter_stderr => sub {
                      if(/ssh command rejected/) {
                        # catch errors from ssh_filter_btrbk.sh
                        $err = "ssh command rejected (please fix ssh_filter_btrbk.sh)";
                      }
                      elsif(/^ERROR: (.*)/) {
                        # catch errors from btrfs command
                        $err = $1;
                      }
                      else {
                        DEBUG "Unparsed error: $_";
                        $err = $_;
                      }
                      # consume stderr line, as $err will be displayed as a user-friendly WARNING
                      $_ = undef;
                    }
                   );

  return undef unless(defined($ret));

  my $real_path;
  if($ret =~ /^($file_match)/) {
    $real_path = $1;
    $real_path = check_file($real_path, { absolute => 1 }, sanitize => 1);
    return undef unless(defined($real_path));
    DEBUG "Real path for subvolume \"$vol->{PRINT}\" is: $real_path" if($real_path ne $path);
    $realpath_cache{$vol->{URL}} = $real_path if($real_path ne $path);
  }
  else {
    $real_path = $path;
    WARN "No real path provided by \"btrfs subvolume show\" for subvolume \"$vol->{PRINT}\", using: $path";
  }
  my %detail = ( REAL_PATH => $real_path );

  if($ret =~ /^\Q$real_path\E is (btrfs root|toplevel subvolume)/) {
    # btrfs-progs < 4.4 prints: "<subvol> is btrfs root"
    # btrfs-progs >= 4.4 prints: "<subvol> is toplevel subvolume"
    DEBUG "found btrfs root: $vol->{PRINT}";
    $detail{id} = 5;
    $detail{is_root} = 1;
  }
  elsif($ret =~ /^\Q$real_path\E/) {
    TRACE "btr_detail: found btrfs subvolume: $vol->{PRINT}";
    # NOTE: received_uuid is not required here, as btrfs-progs < 4.1 does not give us that information.
    # no worries, we get this from btrfs_subvolume_list() for all subvols.
    my @required_keys = qw(name uuid parent_uuid id gen cgen top_level readonly);
    my %trans = (
      "Name"             => "name",
      "uuid"             => "uuid",
      "UUID"             => "uuid",           # btrfs-progs >= 4.1
      "Parent uuid"      => "parent_uuid",
      "Parent UUID"      => "parent_uuid",    # btrfs-progs >= 4.1
      "Received UUID"    => "received_uuid",  # btrfs-progs >= 4.1
      "Creation time"    => "creation_time",
      "Object ID"        => "id",
      "Subvolume ID"     => "id",             # btrfs-progs >= 4.1
      "Generation (Gen)" => "gen",
      "Generation"       => "gen",            # btrfs-progs >= 4.1
      "Gen at creation"  => "cgen",
      "Parent"           => "parent_id",
      "Parent ID"        => "parent_id",      # btrfs-progs >= 4.1
      "Top Level"        => "top_level",
      "Top level ID"     => "top_level",      # btrfs-progs >= 4.1
      "Flags"            => "flags",
    );
    foreach (split("\n", $ret)) {
      next unless /^\s+(.+):\s+(.*)$/;
      my ($key, $value) = ($1, $2);
      if($trans{$key}) {
        $detail{$trans{$key}} = $value;
      } else {
        WARN "Failed to parse subvolume detail \"$key: $value\" for: $vol->{PRINT}";
      }
    }
    DEBUG "Parsed " . scalar(keys %detail) . " subvolume detail items: $vol->{PRINT}";

    # NOTE: as of btrfs-progs v4.6.1, flags are either "-" or "readonly"
    $detail{readonly} = ($detail{flags} =~ /readonly/) ? 1 : 0;

    VINFO(\%detail, "detail") if($loglevel >= 4);
    foreach(@required_keys) {
      unless(defined($detail{$_})) {
        ERROR "Failed to parse subvolume detail (unsupported btrfs-progs) for: $vol->{PRINT}";
        return undef;
      }
    }
  }
  else {
    ERROR "Failed to parse subvolume detail (unsupported btrfs-progs) for: $vol->{PRINT}";
    return undef;
  }
  return \%detail;
}
sub btrfs_subvolume_list_readonly_flag($)
{
  my $vol = shift || die;
  my $path = $vol->{PATH} // die;

  my $ret = run_cmd(cmd => vinfo_cmd($vol, "btrfs subvolume list", '-a', '-r', { unsafe => $path } ),
                    rsh => vinfo_rsh($vol),
                    non_destructive => 1,
                   );
  return undef unless(defined($ret));

  my %ro;
  foreach (split(/\n/, $ret))
  {
    die("Failed to parse line: \"$_\"") unless(/^ID ([0-9]+) gen [0-9]+ top level [0-9]+ path /);
    $ro{$1} = 1;
  }
  DEBUG "Parsed " . scalar(keys %ro) . " readonly subvolumes for filesystem at: $vol->{PRINT}";
  return \%ro;
}
sub btrfs_subvolume_list($;@)
{
  my $vol = shift || die;
  my %opts = @_;
  my $path = $vol->{PATH} // die;  # deliberately NOT using REAL_PATH here!
  my @filter_options = ('-a');
  push(@filter_options, '-o') if($opts{subvol_only});

  # NOTE: btrfs-progs <= 3.17 do NOT support the '-R' flag.
  # NOTE: Support for btrfs-progs <= 3.17 has been dropped in
  # btrbk-0.23, as the received_uuid flag is essential!
  my @display_options = ('-c', '-u', '-q', '-R');
  my $ret = run_cmd(cmd => vinfo_cmd($vol, "btrfs subvolume list", @filter_options, @display_options, { unsafe => $path } ),
                    rsh => vinfo_rsh($vol),
                    non_destructive => 1,
                   );
  return undef unless(defined($ret));

  my @nodes;
  foreach (split(/\n/, $ret))
  {
    # ID <ID> top level <ID> path <path> where path is the relative path
    # of the subvolume to the top level subvolume. The subvolume's ID may
    # be used by the subvolume set-default command, or at mount time via
    # the subvolid= option. If -p is given, then parent <ID> is added to
    # the output between ID and top level. The parent's ID may be used at
    # mount time via the subvolrootid= option.

    # NOTE: btrfs-progs prior to v3.17 do not support the -R flag
    my %node;
    unless(/^ID ([0-9]+) gen ([0-9]+) cgen ([0-9]+) top level ([0-9]+) parent_uuid ([0-9a-z-]+) received_uuid ([0-9a-z-]+) uuid ([0-9a-z-]+) path (.+)$/) {
      ERROR "Failed to parse subvolume list (unsupported btrfs-progs) for: $vol->{PRINT}";
      DEBUG "Offending line: $_";
      return undef;
    }
    %node = (
      id            => $1,
      gen           => $2,
      cgen          => $3,
      top_level     => $4,
      parent_uuid   => $5,  # note: parent_uuid="-" if no parent
      received_uuid => $6,
      uuid          => $7,
      path          => $8,  # btrfs path, NOT filesystem path
    );

    # NOTE: "btrfs subvolume list <path>" prints <FS_TREE> prefix only if
    # the subvolume is reachable within <path>. (as of btrfs-progs-3.18.2)
    #
    # NOTE: Be prepared for this to change in btrfs-progs!
    $node{path} =~ s/^<FS_TREE>\///;  # remove "<FS_TREE>/" portion from "path".

    push @nodes, \%node;
  }
  DEBUG "Parsed " . scalar(@nodes) . " total subvolumes for filesystem at: $vol->{PRINT}";

  # fetch readonly flag
  # NOTE: the only way to get "readonly" flag is via a second call to "btrfs subvol list" with the "-r" option (as of btrfs-progs v4.3.1)
  my $ro = btrfs_subvolume_list_readonly_flag($vol);
  return undef unless(defined($ro));
  foreach (@nodes) {
    $_->{readonly} = $ro->{$_->{id}} // 0;
  }

  return \@nodes;
}
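# Example of a line in the format expected by the parser above (uuids and
# numbers illustrative):
#   ID 257 gen 1234 cgen 10 top level 5 parent_uuid - received_uuid - uuid 5f4e3d2c-1a2b-3c4d-5e6f-7a8b9c0d1e2f path <FS_TREE>/home
# The "<FS_TREE>/" prefix is stripped, and the readonly flag is merged in from
# the second "btrfs subvolume list -a -r" call.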
sub btrfs_subvolume_find_new($$;$)
{
  my $vol = shift || die;
  my $path = $vol->{PATH} // die;
  my $lastgen = shift // die;
  my $ret = run_cmd(cmd => vinfo_cmd($vol, "btrfs subvolume find-new", { unsafe => $path }, $lastgen ),
                    rsh => vinfo_rsh($vol),
                    non_destructive => 1,
                   );
  unless(defined($ret)) {
    ERROR "Failed to fetch modified files for: $vol->{PRINT}";
    return undef;
  }

  my %files;
  my $parse_errors = 0;
  my $transid_marker;
  foreach (split(/\n/, $ret))
  {
    if(/^inode \S+ file offset (\S+) len (\S+) disk start \S+ offset \S+ gen (\S+) flags (\S+) (.+)$/) {
      my $file_offset = $1;
      my $len = $2;
      my $gen = $3;
      my $flags = $4;
      my $name = $5;
      $files{$name}->{len} += $len;
      $files{$name}->{new} = 1 if($file_offset == 0);
      $files{$name}->{gen}->{$gen} = 1;  # count the generations
      if($flags eq "COMPRESS") {
        $files{$name}->{flags}->{compress} = 1;
      }
      elsif($flags eq "COMPRESS|INLINE") {
        $files{$name}->{flags}->{compress} = 1;
        $files{$name}->{flags}->{inline} = 1;
      }
      elsif($flags eq "INLINE") {
        $files{$name}->{flags}->{inline} = 1;
      }
      elsif($flags eq "NONE") {
      }
      else {
        WARN "unparsed flags: $flags";
      }
    }
    elsif(/^transid marker was (\S+)$/) {
      $transid_marker = $1;
    }
    else {
      $parse_errors++;
    }
  }

  return { files => \%files,
           transid_marker => $transid_marker,
           parse_errors => $parse_errors,
         };
}
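# Example of "btrfs subvolume find-new <path> <lastgen>" output consumed above
# (file name and numbers illustrative):
#   inode 266 file offset 0 len 4096 disk start 12582912 offset 0 gen 32 flags NONE etc/hostname
#   transid marker was 1234
# Per-file extents are aggregated into a total length, the set of generations,
# and compress/inline flags.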
# returns $target, or undef on error
sub btrfs_subvolume_snapshot($$)
{
  my $svol = shift || die;
  my $target_vol = shift // die;
  my $target_path = $target_vol->{PATH} // die;
  my $src_path = $svol->{PATH} // die;
  INFO "[snapshot] source: $svol->{PRINT}";
  INFO "[snapshot] target: $target_vol->{PRINT}";
  start_transaction("snapshot",
                    vinfo_prefixed_keys("target", $target_vol),
                    vinfo_prefixed_keys("source", $svol),
                   );
  my $ret = run_cmd(cmd => vinfo_cmd($svol, "btrfs subvolume snapshot", '-r', { unsafe => $src_path }, { unsafe => $target_path } ),
                    rsh => vinfo_rsh($svol),
                   );
  end_transaction("snapshot", ($dryrun ? "DRYRUN" : (defined($ret) ? "success" : "ERROR")));
  unless(defined($ret)) {
    ERROR "Failed to create btrfs subvolume snapshot: $svol->{PRINT} -> $target_path";
    return undef;
  }
  return $target_vol;
}

sub btrfs_subvolume_delete($@)
{
  my $targets = shift // die;
  my %opts = @_;
  my $commit = $opts{commit};
  die if($commit && ($commit ne "after") && ($commit ne "each"));
  $targets = [ $targets ] unless(ref($targets) eq "ARRAY");
  return 0 unless(scalar(@$targets));

  # assert all targets share same HOST
  # NOTE: rsh and backend command are taken from first target
  my $rsh_host_check = $targets->[0]->{HOST} || "";
  foreach (@$targets) {
    die if($rsh_host_check ne ($_->{HOST} || ""));
  }

  INFO "[delete] options: commit-$commit" if($commit);
  INFO "[delete] target: $_->{PRINT}" foreach(@$targets);
  my @options;
  @options = ("--commit-$commit") if($commit);
  start_transaction($opts{type} // "delete",
                    map( { { vinfo_prefixed_keys("target", $_) }; } @$targets)
                   );
  my @cmd_target_paths = map { { unsafe => $_->{PATH} } } @$targets;
  my $ret = run_cmd(cmd => vinfo_cmd($targets->[0], "btrfs subvolume delete", @options, @cmd_target_paths ),
                    rsh => vinfo_rsh($targets->[0]),
                   );
  end_transaction($opts{type} // "delete", ($dryrun ? "DRYRUN" : (defined($ret) ? "success" : "ERROR")));
  ERROR "Failed to delete btrfs subvolumes: " . join(' ', map( { $_->{PRINT} } @$targets)) unless(defined($ret));
  return defined($ret) ? scalar(@$targets) : undef;
}
sub btrfs_send_receive($$$$;@)
{
  my $snapshot = shift || die;
  my $target = shift || die;
  my $parent = shift;
  my $ret_vol_received = shift;
  my %opts = @_;
  my $snapshot_path = $snapshot->{PATH} // die;
  my $target_path = $target->{PATH} // die;
  my $parent_path = $parent ? $parent->{PATH} : undef;

  my $vol_received = vinfo_child($target, $snapshot->{NAME});
  $$ret_vol_received = $vol_received if(ref $ret_vol_received);

  print STDOUT "Creating backup: $vol_received->{PRINT}\n" if($show_progress && (not $dryrun));

  INFO "[send/receive] source: $snapshot->{PRINT}";
  INFO "[send/receive] parent: $parent->{PRINT}" if($parent);
  INFO "[send/receive] target: $vol_received->{PRINT}";

  my @send_options;
  my @receive_options;
  push(@send_options, '-p', { unsafe => $parent_path } ) if($parent_path);
  # push(@send_options, '-v') if($loglevel >= 3);
  # push(@receive_options, '-v') if($loglevel >= 3);

  my @cmd_pipe;
  push @cmd_pipe, {
    cmd => vinfo_cmd($snapshot, "btrfs send", @send_options, { unsafe => $snapshot_path } ),
    rsh => vinfo_rsh($snapshot, disable_compression => config_compress_hash($snapshot, "stream_compress")),
    rsh_compress_out => config_compress_hash($snapshot, "stream_compress"),
    name => "btrfs send",
    catch_stderr => 1,  # hack for shell-based run_cmd()
  };
  add_pv_command(\@cmd_pipe, show_progress => $show_progress, rate_limit => $opts{rate_limit});
  push @cmd_pipe, {
    cmd => vinfo_cmd($target, "btrfs receive", @receive_options, { unsafe => $target_path . '/' } ),
    rsh => vinfo_rsh($target, disable_compression => config_compress_hash($target, "stream_compress")),
    name => "btrfs receive",
    rsh_compress_in => config_compress_hash($target, "stream_compress"),
    catch_stderr => 1,  # hack for shell-based run_cmd()
    filter_stderr => sub { $err = $_; $_ = undef }
  };

  my $send_receive_error = 0;
  start_transaction("send-receive",
                    vinfo_prefixed_keys("target", $vol_received),
                    vinfo_prefixed_keys("source", $snapshot),
                    vinfo_prefixed_keys("parent", $parent),
                   );
  my $ret = run_cmd(@cmd_pipe);
  unless(defined($ret)) {
    $send_receive_error = 1;
    $ret = $err;  # print the errors below
  }
  if(defined($ret)) {
    # NOTE: if "btrfs send" fails, "btrfs receive" returns 0! so we need to parse the output...
    foreach(split("\n", $ret)) {
      if(/^ERROR: /) {
        ERROR $';
        $send_receive_error = 1;
      }
      elsif(/^WARNING: /) {
        WARN "[send/receive] (send=$snapshot_path, receive=$target_path) $'";
      }
      else {
        WARN "[send/receive] (send=$snapshot_path, receive=$target_path) $_" if($send_receive_error);
      }
    }
  }

  unless($send_receive_error) {
    # Read in target subvolume metadata (btrfs subvolume show):
    # Double checking the output increases robustness against exotic
    # revisions of external commands (btrfs-progs, pv, xz, lz4, ...).
    #
    # NOTE: we cannot rely on the underlying shell to have
    # "pipefail" functionality.
    #
    # NOTE (bug?) (checked with btrfs-progs v4.6.1 and earlier):
    # "cat /dev/null | btrfs receive" returns with exitcode=0 and no
    # error message, having the effect that silently no subvolume is
    # created if any command in @cmd_pipe fails.

    if($dryrun) {
      INFO "[send/receive] (dryrun, skip) checking target metadata: $vol_received->{PRINT}";
    }
    else {
      INFO "[send/receive] checking target metadata: $vol_received->{PRINT}";
      my $detail = btrfs_subvolume_show($vol_received);
      if(defined($detail)) {
        # plausibility checks on target detail
        unless($detail->{readonly}) {
          ERROR "[send/receive] target is not readonly: $vol_received->{PRINT}";
          $send_receive_error = 1;
        }
        if($detail->{received_uuid} && ($detail->{received_uuid} eq '-')) {
          # NOTE: received_uuid is not in @required_keys (needs btrfs-progs >= 4.1 (BTRFS_PROGS_MIN))
          # so we only check it if it's really present
          ERROR "[send/receive] received_uuid is not set on target: $vol_received->{PRINT}";
          $send_receive_error = 1;
        }
        if($parent && ($detail->{parent_uuid} eq '-')) {
          ERROR "[send/receive] parent_uuid is not set on target: $vol_received->{PRINT}";
          $send_receive_error = 1;
        }
        if((not $parent) && ($detail->{parent_uuid} ne '-')) {
          ERROR "[send/receive] parent_uuid is set on target: $vol_received->{PRINT}";
          $send_receive_error = 1;
        }
      }
      else {
        $send_receive_error = 1;
      }
    }
  }

  end_transaction("send-receive", ($dryrun ? "DRYRUN" : ($send_receive_error ? "ERROR" : "success")));

  if($send_receive_error) {
    ERROR "Failed to send/receive btrfs subvolume: $snapshot->{PRINT} " . ($parent_path ? "[$parent_path]" : "") . " -> $target->{PRINT}";

    # NOTE: btrfs-progs v3.19.1 does not delete garbled received subvolume,
    # we need to do this by hand.
    # TODO: remove this as soon as btrfs-progs handle receive errors correctly.
    DEBUG "send/receive failed, deleting (possibly present and garbled) received subvolume: $vol_received->{PRINT}";
    my $ret = btrfs_subvolume_delete($vol_received, commit => "after", type => "delete_garbled");
    if(defined($ret)) {
      WARN "Deleted partially received (garbled) subvolume: $vol_received->{PRINT}";
    }
    else {
      WARN "Deletion of partially received (garbled) subvolume failed, assuming clean environment: $vol_received->{PRINT}";
    }

    return undef;
  }
  return 1;
}
sub btrfs_send_to_file($$$$;@)
{
  my $source = shift || die;
  my $target = shift || die;
  my $parent = shift;
  my $ret_vol_received = shift;
  my %opts = @_;
  my $source_path = $source->{PATH} // die;
  my $target_path = $target->{PATH} // die;
  my $parent_path = $parent ? $parent->{PATH} : undef;
  my $parent_uuid = $parent ? $parent->{node}{uuid} : undef;
  my $received_uuid = $source->{node}{uuid};
  die unless($received_uuid);
  die if($parent && !$parent_uuid);

  my $target_filename = $source->{NAME} || die;
  $target_filename .= "--$received_uuid";
  $target_filename .= '@' . $parent_uuid if($parent_uuid);
  $target_filename .= ".btrfs";

  my @send_options;
  push(@send_options, '-v') if($loglevel >= 3);
  push(@send_options, '-p', $parent_path) if($parent_path);

  my @cmd_pipe;
  push @cmd_pipe, {
    cmd => vinfo_cmd($source, "btrfs send", @send_options, { unsafe => $source_path } ),
    rsh => vinfo_rsh($source, disable_compression => $opts{compress} || config_compress_hash($source, "stream_compress")),
    name => "btrfs send",
    rsh_compress_out => $opts{compress} || config_compress_hash($source, "stream_compress"),
  };
  add_pv_command(\@cmd_pipe, show_progress => $show_progress, rate_limit => $opts{rate_limit});
  if($opts{compress}) {
    $target_filename .= '.' . $compression{$opts{compress}->{key}}->{format};
    push @cmd_pipe, { compress => $opts{compress} };  # does nothing if already compressed by rsh_compress_out
  }
  if($opts{encrypt}) {
    die unless($opts{encrypt}->{type} eq "gpg");
    $target_filename .= '.gpg';
    my @gpg_options = ( '--batch', '--no-tty', '--trust-model', 'always' );
    push @gpg_options, ( '--compress-algo', 'none' ) if($opts{compress});  # NOTE: if --compress-algo is not set, gpg might still compress according to OpenPGP standard.
    push(@gpg_options, ( '--no-default-keyring', '--keyring', $opts{encrypt}->{keyring} )) if($opts{encrypt}->{keyring});
    push(@gpg_options, ( '--default-recipient', $opts{encrypt}->{recipient} )) if($opts{encrypt}->{recipient});
    push @cmd_pipe, {
      cmd => [ 'gpg', @gpg_options, '--encrypt' ],
      name => 'gpg',
      compressed_ok => ($opts{compress} ? 1 : 0),
    };
  }
  push @cmd_pipe, {
    # NOTE: We use "dd" instead of shell redirections here, as it is
    # common to have special filesystems (like NFS, SMB, FUSE) mounted
    # on $target_path. By using "dd" we make sure to write in
    # reasonably large blocks (default=128K), which is not always the
    # case when using redirections (e.g. "gpg > outfile" writes in 8K
    # blocks).
    # Another approach would be to always pipe through "cat", which
    # uses st_blksize from fstat(2) (with a minimum of 128K) to
    # determine the block size.
    cmd => [ 'dd', 'status=none', 'bs=' . config_key($target, "raw_target_block_size"), "of=${target_path}/${target_filename}.part" ],
    check_unsafe => [ { unsafe => "${target_path}/${target_filename}.part" } ],
    #redirect_to_file => { unsafe => "${target_path}/${target_filename}.part" },  # alternative (use shell redirection), less overhead on local filesystems (barely measurable)
    rsh => vinfo_rsh($target, disable_compression => $opts{compress} || config_compress_hash($target, "stream_compress")),
    rsh_compress_in => $opts{compress} || config_compress_hash($target, "stream_compress"),
    compressed_ok => ($opts{compress} ? 1 : 0),
  };

  my $vol_received = vinfo_child($target, $target_filename);
  $$ret_vol_received = $vol_received if(ref $ret_vol_received);

  print STDOUT "Creating raw backup: $vol_received->{PRINT}\n" if($show_progress && (not $dryrun));

  INFO "[send-to-raw] source: $source->{PRINT}";
  INFO "[send-to-raw] parent: $parent->{PRINT}" if($parent);
  INFO "[send-to-raw] target: $vol_received->{PRINT}";

  start_transaction("send-to-raw",
                    vinfo_prefixed_keys("target", $vol_received),
                    vinfo_prefixed_keys("source", $source),
                    vinfo_prefixed_keys("parent", $parent),
                   );
  my $ret = run_cmd(@cmd_pipe);
  if(defined($ret)) {
    # Test target file for "exists and size > 0" after writing,
    # as we can not rely on the exit status of the command pipe,
    # and the shell command always creates the target file.
    DEBUG "Testing target file (non-zero size): $target->{PRINT}.part";
    $ret = run_cmd({
      cmd => ['test', '-s', { unsafe => "${target_path}/${target_filename}.part" } ],
      rsh => vinfo_rsh($target),
      name => "test",
    });
    if(defined($ret)) {
      DEBUG "Renaming target file (remove postfix '.part'): $target->{PRINT}";
      $ret = run_cmd({
        cmd => ['mv', { unsafe => "${target_path}/${target_filename}.part" }, { unsafe => "${target_path}/${target_filename}" } ],
        rsh => vinfo_rsh($target),
        name => "mv",
      });
    }
  }
  end_transaction("send-to-raw", ($dryrun ? "DRYRUN" : (defined($ret) ? "success" : "ERROR")));
  unless(defined($ret)) {
    ERROR "Failed to send btrfs subvolume to raw file: $source->{PRINT} " . ($parent_path ? "[$parent_path]" : "") . " -> $vol_received->{PRINT}";
    return undef;
  }
  return 1;
}
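# Resulting raw target file name, as assembled above and later matched by
# $raw_postfix_match (uuids abbreviated, illustrative):
#   home.20160412--1b6e...e4f2@9c27...77ab.btrfs.xz.gpg
# The ".part" suffix is only present while the transfer is in progress or after
# an incomplete transfer; it is removed once the non-empty file test succeeds.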
sub system_list_mounts($)
{
  my $vol = shift // die;
  my $file = '/proc/self/mounts';
  my $ret = run_cmd(cmd => [ qw(cat), $file ],
                    rsh => vinfo_rsh($vol),
                    non_destructive => 1,
                    catch_stderr => 1,  # hack for shell-based run_cmd()
                   );
  return undef unless(defined($ret));

  my @mounts;
  foreach (split(/\n/, $ret))
  {
    # from fstab(5)
    unless(/^(\S+) (\S+) (\S+) (\S+) (\S+) (\S+)$/) {
      ERROR "Failed to parse \"$file\" on " . ($vol->{HOST} || "localhost");
      DEBUG "Offending line: $_";
      return undef;
    }
    my %line = (
      spec    => $1,
      file    => $2,
      vfstype => $3,
      mntops  => $4,
      freq    => $5,
      passno  => $6,
    );
    foreach (split(',', $line{mntops})) {
      if(/^(.+?)=(.+)$/) {
        $line{MNTOPS}->{$1} = $2;
      } else {
        $line{MNTOPS}->{$_} = 1;
      }
    }
    push @mounts, \%line;
  }
  # TRACE(Data::Dumper->Dump([\@mounts], ["mounts"])) if($do_dumper);
  return \@mounts;
}
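# Example /proc/self/mounts line parsed above (fstab(5) field order:
# fs_spec fs_file fs_vfstype fs_mntops fs_freq fs_passno):
#   /dev/sda1 /mnt/btr_pool btrfs rw,relatime,subvolid=5,subvol=/ 0 0
# The mount options are additionally split into the MNTOPS hash,
# e.g. MNTOPS->{subvolid} = 5.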
sub system_realpath($)
{
  my $vol = shift // die;

  my $path = $vol->{PATH} // die;
  my @quiet = ($loglevel < 3) ? ('-q') : ();
  my $ret = run_cmd(cmd => [ qw(readlink), '-e', @quiet, { unsafe => $path } ],
                    rsh => vinfo_rsh($vol),
                    non_destructive => 1,
                   );
  return undef unless(defined($ret));

  unless($ret =~ /^($file_match)$/) {
    ERROR "Failed to parse output of `realpath` for \"$vol->{PRINT}\": \"$ret\"";
    return undef;
  }
  my $realpath = $1;  # untaint argument
  DEBUG "Real path for \"$vol->{PRINT}\" is: $realpath";
  return $realpath;
}

sub system_mkdir($)
{
  my $vol = shift // die;
  my $path = $vol->{PATH} // die;
  INFO "Creating directory: $vol->{PRINT}/";
  my $ret = run_cmd(cmd => [ qw(mkdir), '-p', { unsafe => $path } ],
                    rsh => vinfo_rsh($vol),
                   );
  action("mkdir",
         vinfo_prefixed_keys("target", $vol),
         status => ($dryrun ? "DRYRUN" : (defined($ret) ? "success" : "ERROR")),
        );
  return undef unless(defined($ret));
  return 1;
}
|
sub btrfs_mountpoint($)
|
|
{
|
|
my $vol = shift // die;
|
|
|
|
DEBUG "Resolving btrfs mount point for: $vol->{PRINT}";
|
|
my $host = $vol->{HOST} || "localhost";
|
|
my $mounts = $fstab_cache{$host};
|
|
TRACE "fstab_cache " . ($mounts ? "HIT" : "MISS") . ": $host";
|
|
|
|
# get real path
|
|
my $path = $realpath_cache{$vol->{URL}};
|
|
unless($path) {
|
|
$path = system_realpath($vol);
|
|
$realpath_cache{$vol->{URL}} = $path;
|
|
}
|
|
return (undef, undef, undef) unless($path);
|
|
|
|
unless($mounts) {
|
|
$mounts = [];
|
|
my $all_mounts = system_list_mounts($vol);
|
|
|
|
foreach my $mnt (@$all_mounts) {
|
|
if($mnt->{vfstype} ne 'btrfs') {
|
|
TRACE "non-btrfs mount point: $mnt->{spec} $mnt->{file} $mnt->{vfstype}";
|
|
next;
|
|
}
|
|
my $file = $mnt->{file} // die;
|
|
unless($file =~ /^$file_match$/) {
|
|
WARN "Skipping non-parseable file in btrfs mounts of $host: \"$file\"";
|
|
next;
|
|
}
|
|
TRACE "btrfs mount point (spec=$mnt->{spec}, subvolid=" . ($mnt->{MNTOPS}->{subvolid} // '<undef>') . "): $file";
|
|
push @$mounts, $mnt;
|
|
}
|
|
$fstab_cache{$host} = $mounts;
|
|
}
|
|
|
|
# find longest match
|
|
$path .= '/' unless($path =~ /\/$/); # correctly handle root path="/"
|
|
my $len = 0;
|
|
my $longest_match;
|
|
foreach(@$mounts) {
|
|
my $mnt_path = $_->{file};
|
|
$mnt_path .= '/' unless($mnt_path =~ /\/$/); # correctly handle root path="/"
|
|
$longest_match = $_ if((length($mnt_path) > $len) && ($path =~ /^\Q$mnt_path\E/));
|
|
}
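  # e.g. (illustrative): for path "/mnt/btr_pool/data/foo" and btrfs mount points
  # "/" and "/mnt/btr_pool", the longest matching prefix "/mnt/btr_pool" wins.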
  unless($longest_match) {
    DEBUG "No btrfs mount point found for: $vol->{PRINT}";
    return (undef, undef, undef);
  }
  DEBUG "Found btrfs mount point for \"$vol->{PRINT}\": $longest_match->{file} (subvolid=" . ($longest_match->{MNTOPS}->{subvolid} // '<undef>') . ")";
  return ($longest_match->{file}, $path, $longest_match->{MNTOPS}->{subvolid});
}


sub btr_tree($$)
{
  my $vol = shift;
  my $vol_root_id = shift || die;
  die unless($vol_root_id >= 5);
  # NOTE: we need an ID (provided by btrfs_subvolume_show()) in order
  # to determine the anchor to our root path (since the subvolume path
  # output of "btrfs subvolume list" is ambiguous, and the uuid of the
  # btrfs root node cannot be resolved).

  # man btrfs-subvolume:
  # Also every btrfs filesystem has a default subvolume as its initially
  # top-level subvolume, whose subvolume id is 5(FS_TREE).
  my %tree = ( id      => 5,
               is_root => 1,
               SUBTREE => []
             );
  my %id = ( 5 => \%tree );

  $tree{TREE_ROOT} = \%tree;
  $tree{ID_HASH} = \%id;

  my $node_list = btrfs_subvolume_list($vol);
  return undef unless(ref($node_list) eq "ARRAY");
  my $vol_root;

  TRACE "btr_tree: processing subvolume list of: $vol->{PRINT}";

  # check if we already know this tree
  if((scalar @$node_list) && $uuid_cache{$node_list->[0]->{uuid}}) {
    TRACE "uuid_cache HIT: $node_list->[0]->{uuid}";
    $vol_root = $uuid_cache{$node_list->[0]->{uuid}}->{TREE_ROOT}->{ID_HASH}->{$vol_root_id};
    die "Duplicate UUID on different file systems" unless($vol_root);
    TRACE "btr_tree: returning already parsed tree at id=$vol_root->{id}";
    return $vol_root;
  }

  # fill ID_HASH and uuid_cache
  my $gen_max = 0;
  foreach my $node (@$node_list)
  {
    die unless($node->{id} >= 0);
    die if exists($id{$node->{id}});
    $node->{SUBTREE} //= [];
    $id{$node->{id}} = $node;
    $uuid_cache{$node->{uuid}} = $node;
    $gen_max = $node->{gen} if($node->{gen} > $gen_max);
  }
  $tree{GEN_MAX} = $gen_max;

  # note: it is possible that id < top_level, e.g. after restoring
  foreach my $node (@$node_list)
  {
    # set SUBTREE / TOP_LEVEL node
    die unless exists($id{$node->{top_level}});
    my $top_level = $id{$node->{top_level}};

    push(@{$top_level->{SUBTREE}}, $node);
    $node->{TOP_LEVEL} = $top_level;
    $node->{TREE_ROOT} = \%tree;

    # set REL_PATH ("path" always starts with the path of the TOP_LEVEL node)
    my $rel_path = $node->{path};
    if($node->{top_level} != 5) {
      die unless($rel_path =~ s/^$top_level->{path}\///);
    }

    $node->{REL_PATH} = $rel_path;  # relative to {TOP_LEVEL}->{path}

    add_btrbk_filename_info($node);

    $vol_root = $node if($vol_root_id == $node->{id});
  }
  unless($vol_root) {
    if($vol_root_id == 5) {
      $vol_root = \%tree;
    }
    else {
      ERROR "Failed to resolve tree root for: " . ($vol->{PRINT} // $vol->{id});
      return undef;
    }
  }

  TRACE "btr_tree: returning tree at id=$vol_root->{id}";
  VINFO($vol_root, "node") if($loglevel >= 4);

  return $vol_root;
}


sub btr_tree_inject_node
{
  my $top_node = shift;
  my $detail = shift;
  my $rel_path = shift;
  my $subtree = $top_node->{SUBTREE} // die;
  my $tree_root = $top_node->{TREE_ROOT};

  $tree_inject_id -= 1;
  $tree_root->{GEN_MAX} += 1;

  my $uuid = sprintf("${fake_uuid_prefix}%012u", -($tree_inject_id));
  my $node = {
    %$detail,  # make a copy
    TREE_ROOT => $top_node->{TREE_ROOT},
    SUBTREE   => [],
    TOP_LEVEL => $top_node,
    REL_PATH  => $rel_path,
    INJECTED  => 1,
    id        => $tree_inject_id,
    uuid      => $uuid,
    gen       => $tree_root->{GEN_MAX},
    cgen      => $tree_root->{GEN_MAX},
  };
  push(@$subtree, $node);
  $uuid_cache{$uuid} = $node;
  $tree_root->{ID_HASH}->{$tree_inject_id} = $node;
  return $node;
}


sub _fs_path
{
  my $node = shift // die;
  return '<BTRFS_ROOT>' if($node->{is_root});
  return _fs_path($node->{TOP_LEVEL}) . '/' . $node->{REL_PATH};
}


sub _is_child_of
{
  my $node = shift;
  my $uuid = shift;
  foreach(@{$node->{SUBTREE}}) {
    return 1 if($_->{uuid} eq $uuid);
    return 1 if(_is_child_of($_, $uuid));
  }
  return 0;
}


sub _fill_url_cache
{
  my $node = shift;
  my $abs_path = shift;
  my $node_subdir = shift;
  # TRACE "_fill_url_cache: $abs_path";

  # traverse tree from given node and update tree cache
  $url_cache{$abs_path} = $node unless(defined($node_subdir));
  foreach(@{$node->{SUBTREE}}) {
    my $rel_path = $_->{REL_PATH};
    if(defined($node_subdir)) {
      next unless($rel_path =~ s/^\Q$node_subdir\E\///);
    }
    _fill_url_cache($_, $abs_path . '/' . $rel_path, undef);
  }
  return undef;
}


sub _get_longest_match
{
  my $node = shift;
  my $path = shift;
  my $check_path = shift;  # MUST have a trailing slash
  $path .= '/' unless($path =~ /\/$/);  # correctly handle root path="/"

  return undef unless($check_path =~ /^\Q$path\E/);
  foreach(@{$node->{SUBTREE}}) {
    my $ret = _get_longest_match($_, $path . $_->{REL_PATH}, $check_path);
    return $ret if($ret);
  }
  return { node => $node,
           path => $path };
}


# reverse path lookup
sub get_cached_url_by_uuid($)
{
  my $uuid = shift;
  my @result;
  while(my ($url, $node) = each(%url_cache)) {
    next if($node->{is_root});
    next unless($node->{uuid} eq $uuid);
    push @result, $url;
  }
  return @result;
}


sub vinfo($;$)
{
  my $url = shift // die;
  my $config = shift;
  my %info;

  my ($url_prefix, $path) = check_url($url);
  die "invalid url: $url" unless(defined($path));
  my $print = $path;
  my $name = $path;
  $name =~ s/^.*\///;
  $name = '/' if($name eq "");

  my $host = undef;
  if($url_prefix) {
    $host = $url_prefix;
    die unless($host =~ s/^ssh:\/\///);
    $print = "$host:$path";
  }
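  # e.g. (illustrative): url "ssh://backuphost/mnt/backup" yields
  # HOST="backuphost", PATH="/mnt/backup", PRINT="backuphost:/mnt/backup",
  # URL="ssh://backuphost/mnt/backup"; a plain "/mnt/backup" leaves HOST undef.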

  return {
    HOST       => $host,
    NAME       => $name,
    PATH       => $path,
    PRINT      => $print,
    URL        => $url_prefix . $path,
    URL_PREFIX => $url_prefix,
    CONFIG     => $config,
  }
}


sub vinfo_child($$;$)
|
|
{
|
|
my $parent = shift || die;
|
|
my $rel_path = shift // die;
|
|
my $config = shift; # override parent config
|
|
my $name = $rel_path;
|
|
my $subvol_dir = "";
|
|
$subvol_dir = $1 if($name =~ s/^(.*)\///);
|
|
|
|
my $vinfo = {
|
|
HOST => $parent->{HOST},
|
|
NAME => $name,
|
|
PATH => "$parent->{PATH}/$rel_path",
|
|
PRINT => "$parent->{PRINT}/$rel_path",
|
|
URL => "$parent->{URL}/$rel_path",
|
|
URL_PREFIX => $parent->{URL_PREFIX},
|
|
SUBVOL_PATH => $rel_path,
|
|
SUBVOL_DIR => $subvol_dir, # SUBVOL_PATH=SUBVOL_DIR/NAME
|
|
CONFIG => $config // $parent->{CONFIG},
|
|
};
|
|
|
|
# TRACE "vinfo_child: created from \"$parent->{PRINT}\": $info{PRINT}";
|
|
return $vinfo;
|
|
}
|
|
|
|
|
|
sub vinfo_rsh($;@)
|
|
{
|
|
my $vinfo = shift || die;
|
|
my %opts = @_;
|
|
my $host = $vinfo->{HOST};
|
|
return undef unless(defined($host));
|
|
|
|
my $config = $vinfo->{CONFIG};
|
|
die unless($config);
|
|
|
|
my $ssh_port = config_key($config, "ssh_port");
|
|
my $ssh_user = config_key($config, "ssh_user");
|
|
my $ssh_identity = config_key($config, "ssh_identity");
|
|
my $ssh_compression = config_key($config, "ssh_compression");
|
|
my $ssh_cipher_spec = config_key($config, "ssh_cipher_spec") // "default";
|
|
my @ssh_options;
|
|
push(@ssh_options, '-p', $ssh_port) if($ssh_port ne "default");
|
|
push(@ssh_options, '-c', $ssh_cipher_spec) if($ssh_cipher_spec ne "default");
|
|
if($ssh_identity) {
|
|
push(@ssh_options, '-i', $ssh_identity);
|
|
} else {
|
|
WARN_ONCE "No SSH identity provided (option ssh_identity is not set) for: " . ($vinfo->{CONFIG}->{url} // $vinfo->{PRINT});
|
|
}
|
|
if($opts{disable_compression}) {
|
|
push(@ssh_options, '-o', 'compression=no'); # force ssh compression=no (in case it is defined in ssh_config)
|
|
} elsif($ssh_compression) {
|
|
push(@ssh_options, '-C');
|
|
}
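  # e.g. (illustrative, assuming ssh_port=2222, ssh_identity=/etc/btrbk/ssh/id_rsa,
  # ssh_user=root, host=myhost): returns
  # ['ssh', '-p', '2222', '-i', '/etc/btrbk/ssh/id_rsa', 'root@myhost']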
|
|
return ['ssh', @ssh_options, $ssh_user . '@' . $host ];
|
|
}
|
|
|
|
|
|
sub vinfo_cmd($$@)
|
|
{
|
|
my $vinfo = shift || die;
|
|
my $cmd = shift || die;
|
|
my @cmd_args = @_;
|
|
my $ret;
|
|
my $backend = $vinfo->{HOST} ? config_key($vinfo, "backend_remote") : config_key($vinfo, "backend_local");
|
|
$backend = $backend // config_key($vinfo, "backend") // die;
|
|
my $cmd_mapped = $backend_cmd_map{$backend}{$cmd};
|
|
if(defined($cmd_mapped)) {
|
|
TRACE "vinfo_cmd: found mapping for backend=$backend cmd=\"$cmd\": " . join(' ', @$cmd_mapped);
|
|
$ret = [ @$cmd_mapped, @cmd_args ];
|
|
}
|
|
else {
|
|
my @ret_cmd = split(/\s/, $cmd);
|
|
die unless($ret_cmd[0] eq 'btrfs');
|
|
TRACE "vinfo_cmd: no mapping found for backend=$backend cmd=\"$cmd\", assuming btrfs-progs: " . join(' ', @ret_cmd);
|
|
$ret = [ @ret_cmd, @cmd_args ];
|
|
}
|
|
return $ret;
|
|
}
|
|
|
|
|
|
sub add_btrbk_filename_info($;$)
|
|
{
|
|
my $node = shift;
|
|
my $btrbk_raw_file = shift;
|
|
my $name = $node->{REL_PATH};
|
|
return undef unless(defined($name));
|
|
|
|
# NOTE: unless long-iso file format is encountered, the timestamp is interpreted in local timezone.
|
|
|
|
$name =~ s/^(.*)\///;
|
|
my $btrbk_raw;
|
|
if($btrbk_raw_file && ($name =~ /^(?<name>$file_match)$timestamp_postfix_match$raw_postfix_match$/)) {
|
|
$btrbk_raw = {
|
|
received_uuid => $+{received_uuid} // die,
|
|
remote_parent_uuid => $+{parent_uuid} // '-',
|
|
encrypt => $+{encrypt} // "",
|
|
compress => $+{compress} // "",
|
|
incomplete => $+{incomplete} ? 1 : 0,
|
|
};
|
|
}
|
|
elsif((not $btrbk_raw_file) && ($name =~ /^(?<name>$file_match)$timestamp_postfix_match$/)) { ; }
|
|
else {
|
|
return undef;
|
|
}
|
|
$name = $+{name} // die;
|
|
my @tm = ( ($+{ss} // 0), ($+{mm} // 0), ($+{hh} // 0), $+{DD}, ($+{MM} - 1), ($+{YYYY} - 1900) );
|
|
my $NN = $+{NN} // 0;
|
|
my $zz = $+{zz};
|
|
|
|
my $time;
|
|
if(defined($zz)) {
|
|
eval_quiet { $time = timegm(@tm); };
|
|
} else {
|
|
eval_quiet { $time = timelocal(@tm); };
|
|
}
|
|
unless(defined($time)) {
|
|
WARN "Illegal timestamp on subvolume \"$node->{REL_PATH}\", ignoring";
|
|
# WARN "$@"; # sadly Time::Local croaks, which also prints the line number from here.
|
|
return undef;
|
|
}
|
|
|
|
# handle ISO 8601 time offset
|
|
if(defined($zz)) {
|
|
my $offset;
|
|
if($zz eq 'Z') {
|
|
$offset = 0; # Zulu time == UTC
|
|
}
|
|
elsif($zz =~ /^([+-])([0-9][0-9])([0-9][0-9])$/) {
|
|
$offset = ( $3 * 60 ) + ( $2 * 60 * 60 );
|
|
$offset *= -1 if($1 eq '-');
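# e.g. (illustrative): zz="+0200" gives offset=7200s, zz="-0330" gives offset=-12600s;
# the offset is subtracted from the timegm() result below to obtain the UTC epoch time.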
|
|
}
|
|
else {
|
|
WARN "Failed to parse time offset on subvolume \"$node->{REL_PATH}\", ignoring";
|
|
return undef;
|
|
}
|
|
$time -= $offset;
|
|
}
|
|
|
|
$node->{BTRBK_BASENAME} = $name;
|
|
$node->{BTRBK_DATE} = [ $time, $NN ];
|
|
$node->{BTRBK_RAW} = $btrbk_raw if($btrbk_raw);
|
|
return $node;
|
|
}
|
|
|
|
|
|
sub vinfo_init_root($;@)
|
|
{
|
|
my $vol = shift || die;
|
|
my %opts = @_;
|
|
my $tree_root;
|
|
my @fill_cache;
|
|
|
|
# use cached info if present
|
|
$tree_root = $url_cache{$vol->{URL}};
|
|
TRACE "url_cache " . ($tree_root ? "HIT" : "MISS") . ": URL=$vol->{URL}";
|
|
unless($tree_root) {
|
|
if(my $real_path = $realpath_cache{$vol->{URL}}) {
|
|
my $real_url = $vol->{URL_PREFIX} . $real_path;
|
|
$tree_root = $url_cache{$real_url};
|
|
TRACE "url_cache " . ($tree_root ? "HIT" : "MISS") . ": REAL_URL=$real_url";
|
|
}
|
|
}
|
|
|
|
# TODO: replace the subvolume_show part as soon as resolve_subdir stuff has stabilized
|
|
unless($tree_root) {
|
|
# url_cache miss, read the subvolume detail
|
|
my $detail = btrfs_subvolume_show($vol);
|
|
if($detail) {
|
|
my $real_path = $realpath_cache{$vol->{URL}};
|
|
push @fill_cache, $vol->{URL};
|
|
push @fill_cache, $vol->{URL_PREFIX} . $real_path if($real_path && (not $url_cache{$vol->{URL_PREFIX} . $real_path}));
|
|
|
|
# check uuid_cache
|
|
if($detail->{uuid}) {
|
|
$tree_root = $uuid_cache{$detail->{uuid}};
|
|
TRACE "uuid_cache " . ($tree_root ? "HIT" : "MISS") . ": UUID=$detail->{uuid}";
|
|
}
|
|
unless($tree_root) {
|
|
# cache miss, read the fresh tree
|
|
$tree_root = btr_tree($vol, $detail->{id});
|
|
}
|
|
}
|
|
elsif($opts{resolve_subdir}) {
|
|
# $vol is not a subvolume, read btrfs tree from mount point
|
|
my ($mnt_path, $real_path, $id) = btrfs_mountpoint($vol);
|
|
return undef unless($mnt_path && $real_path);
|
|
my $mnt_tree_root = $url_cache{$vol->{URL_PREFIX} . $mnt_path};
|
|
unless($mnt_tree_root) {
|
|
# read btrfs tree for the mount point
|
|
my $mnt_vol = vinfo($vol->{URL_PREFIX} . $mnt_path, $vol->{CONFIG});
|
|
unless($id) {
|
|
DEBUG "No subvolid provided in btrfs mounts for: $mnt_path";
|
|
unless($id) {
|
|
# old kernels don't have subvolid=NN in /proc/self/mounts, read it with btrfs-progs
|
|
my $detail = btrfs_subvolume_show($mnt_vol);
|
|
return undef unless($detail);
|
|
$id = $detail->{id} || die;
|
|
}
|
|
}
|
|
$mnt_tree_root = btr_tree($mnt_vol, $id);
|
|
TRACE "url_cache fill: $mnt_vol->{PRINT}";
|
|
_fill_url_cache($mnt_tree_root, $mnt_vol->{URL});
|
|
}
|
|
|
|
# find longest match in tree
|
|
my $ret = _get_longest_match($mnt_tree_root, $mnt_path, $real_path) // die;
|
|
my $node_subdir = $real_path;
|
|
die unless($node_subdir =~ s/^\Q$ret->{path}\E//); # NOTE: $ret->{path} has trailing slash!
|
|
$node_subdir =~ s/\/+$//;
|
|
|
|
# NODE_SUBDIR: if set, then PATH points to a regular (non-subvolume) directory.
|
|
# in other words, "PATH=<path_to_subvolume>/NODE_SUBDIR"
|
|
$vol->{NODE_SUBDIR} = $node_subdir if($node_subdir ne '');
|
|
$tree_root = $ret->{node};
|
|
|
|
TRACE "url_cache fill: $vol->{PRINT}" . ($vol->{NODE_SUBDIR} ? " (subdir=$vol->{NODE_SUBDIR})" : "");
|
|
_fill_url_cache($tree_root, $vol->{URL}, $vol->{NODE_SUBDIR});
|
|
}
|
|
else {
|
|
return undef;
|
|
}
|
|
}
|
|
return undef unless($tree_root);
|
|
|
|
# fill cache if needed
|
|
foreach (@fill_cache) {
|
|
TRACE "url_cache fill: $_";
|
|
_fill_url_cache($tree_root, $_);
|
|
}
|
|
|
|
$vol->{node} = $tree_root;
|
|
|
|
return $tree_root;
|
|
}
|
|
|
|
|
|
sub _vinfo_subtree_list
|
|
{
|
|
my $tree = shift;
|
|
my $vinfo_parent = shift;
|
|
my $node_subdir_filter = shift;
|
|
my $list = shift // [];
|
|
my $path_prefix = shift // "";
|
|
my $depth = shift // 0;
|
|
|
|
foreach my $node (@{$tree->{SUBTREE}}) {
|
|
my $rel_path = $node->{REL_PATH};
|
|
if(defined($node_subdir_filter)) {
|
|
next unless($rel_path =~ s/^\Q$node_subdir_filter\E\///);
|
|
}
|
|
my $path = $path_prefix . $rel_path;
|
|
my $vinfo = vinfo_child($vinfo_parent, $path);
|
|
$vinfo->{node} = $node;
|
|
|
|
# add some additional information to vinfo
|
|
# NOTE: make sure to also set those in raw tree readin!
|
|
$vinfo->{subtree_depth} = $depth;
|
|
if(($depth == 0) && ($rel_path !~ /\//)) {
|
|
$vinfo->{direct_leaf} = 1;
|
|
$vinfo->{btrbk_direct_leaf} = 1 if(exists($node->{BTRBK_BASENAME}));
|
|
}
|
|
|
|
push(@$list, $vinfo);
|
|
_vinfo_subtree_list($node, $vinfo_parent, undef, $list, $path . '/', $depth + 1);
|
|
}
|
|
return $list;
|
|
}
|
|
|
|
|
|
sub vinfo_subvol_list($;@)
|
|
{
|
|
my $vol = shift || die;
|
|
my %opts = @_;
|
|
|
|
# use fake subvolume list if present
|
|
my $subvol_list = $vol->{SUBVOL_LIST};
|
|
|
|
unless($subvol_list) {
|
|
# recurse into tree from $vol->{node}, returns arrayref of vinfo
|
|
$subvol_list = _vinfo_subtree_list($vol->{node}, $vol, $vol->{NODE_SUBDIR});
|
|
}
|
|
|
|
if($opts{sort}) {
|
|
if($opts{sort} eq 'path') {
|
|
my @sorted = sort { $a->{SUBVOL_PATH} cmp $b->{SUBVOL_PATH} } @$subvol_list;
|
|
$subvol_list = \@sorted;
|
|
}
|
|
else { die; }
|
|
}
|
|
return $subvol_list;
|
|
}
|
|
|
|
|
|
sub vinfo_subvol($$)
|
|
{
|
|
my $vol = shift || die;
|
|
my $subvol_path = shift // die;
|
|
foreach (@{vinfo_subvol_list($vol)}) {
|
|
return $_ if($_->{SUBVOL_PATH} eq $subvol_path);
|
|
}
|
|
return undef;
|
|
}
|
|
|
|
|
|
sub vinfo_inject_child($$$)
|
|
{
|
|
my $vinfo = shift;
|
|
my $vinfo_child = shift;
|
|
my $detail = shift;
|
|
my $node;
|
|
my $subvol_list = $vinfo->{SUBVOL_LIST};
|
|
|
|
my $node_subdir = defined($vinfo->{NODE_SUBDIR}) ? $vinfo->{NODE_SUBDIR} . '/' : "";
|
|
my $rel_path = $node_subdir . $vinfo_child->{SUBVOL_PATH};
|
|
|
|
if($subvol_list)
|
|
{
|
|
# insert to a SUBVOL_LIST (raw targets)
|
|
$tree_inject_id -= 1;
|
|
my $uuid = sprintf("${fake_uuid_prefix}%012u", -($tree_inject_id));
|
|
$node = {
|
|
%$detail,
|
|
REL_PATH => $rel_path,
|
|
INJECTED => 1,
|
|
id => $tree_inject_id,
|
|
uuid => $uuid,
|
|
};
|
|
return undef unless(add_btrbk_filename_info($node, 1));
|
|
|
|
# NOTE: make sure to have all the flags set by _vinfo_subtree_list()
|
|
$vinfo_child->{subtree_depth} = 0;
|
|
$vinfo_child->{direct_leaf} = 1;
|
|
$vinfo_child->{btrbk_direct_leaf} = 1;
|
|
$uuid_cache{$uuid} = $node;
|
|
push @$subvol_list, $vinfo_child;
|
|
}
|
|
else {
|
|
my $node_subdir = defined($vinfo->{NODE_SUBDIR}) ? $vinfo->{NODE_SUBDIR} . '/' : "";
|
|
$node = btr_tree_inject_node($vinfo->{node}, $detail, $rel_path);
|
|
return undef unless(add_btrbk_filename_info($node));
|
|
}
|
|
$vinfo_child->{node} = $node;
|
|
$url_cache{$vinfo_child->{URL}} = $node;
|
|
TRACE "vinfo_inject_child: injected child id=$node->{id} to $vinfo->{PRINT}";
|
|
return $vinfo_child;
|
|
}
|
|
|
|
|
|
# returns hash: ( $prefix_{url,path,host,name,subvol_path,rsh} => value, ... )
|
|
sub vinfo_prefixed_keys($$)
|
|
{
|
|
my $prefix = shift // die;
|
|
my $vinfo = shift;
|
|
return () unless($vinfo);
|
|
my %ret;
|
|
if($prefix) {
|
|
$ret{$prefix} = $vinfo->{PRINT};
|
|
$prefix .= '_';
|
|
}
|
|
foreach (qw( URL PATH HOST NAME SUBVOL_PATH )) {
|
|
$ret{$prefix . lc($_)} = $vinfo->{$_};
|
|
}
|
|
$ret{$prefix . "subvol"} = $vinfo->{PATH};
|
|
my $rsh = vinfo_rsh($vinfo);
|
|
$ret{$prefix . "rsh"} = ($rsh ? join(" ", @$rsh) : undef),
|
|
return %ret;
|
|
}
|
|
|
|
|
|
sub vinfo_assign_config($)
|
|
{
|
|
my $vinfo = shift || die;
|
|
my $config = $vinfo->{CONFIG} || die;
|
|
die if($config->{VINFO});
|
|
$config->{VINFO} = $vinfo;
|
|
}
|
|
|
|
|
|
sub vinfo_subsection($$;$)
|
|
{
|
|
# if config: must have SUBSECTION key
|
|
# if vinfo: must have CONFIG key
|
|
my $config_or_vinfo = shift || die;
|
|
my $context = shift || die;
|
|
my $include_aborted = shift;
|
|
my $config_list;
|
|
my $vinfo_check;
|
|
if(exists($config_or_vinfo->{SUBSECTION})) {
|
|
# config
|
|
$config_list = $config_or_vinfo->{SUBSECTION};
|
|
}
|
|
else {
|
|
# vinfo
|
|
$config_list = $config_or_vinfo->{CONFIG}->{SUBSECTION};
|
|
die unless($config_or_vinfo->{CONFIG}->{VINFO} == $config_or_vinfo); # check back reference
|
|
}
|
|
|
|
# for now be paranoid and check all contexts
|
|
my @ret;
|
|
foreach (@$config_list) {
|
|
die unless($_->{CONTEXT} eq $context);
|
|
next if((not $include_aborted) && $_->{ABORTED});
|
|
die unless($_->{VINFO});
|
|
die unless($_->{VINFO}->{CONFIG});
|
|
die unless($_->{VINFO} == $_->{VINFO}->{CONFIG}->{VINFO}); # check all back references
|
|
push @ret, $_->{VINFO};
|
|
}
|
|
return @ret;
|
|
|
|
# much simpler implementation, without checks
|
|
#return map { $_->{ABORTED} ? () : $_->{VINFO} } @$config_list;
|
|
}
|
|
|
|
|
|
sub get_snapshot_children($$;$$)
|
|
{
|
|
my $sroot = shift || die;
|
|
my $svol = shift // die;
|
|
my $subvol_dir = shift // "";
|
|
my $btrbk_basename = shift;
|
|
my @ret;
|
|
|
|
my $sroot_subvols = vinfo_subvol_list($sroot);
|
|
foreach (@$sroot_subvols) {
|
|
next unless($_->{node}{readonly});
|
|
next unless($_->{node}{parent_uuid} eq $svol->{node}{uuid});
|
|
if(defined($btrbk_basename) &&
|
|
( (not exists($_->{node}{BTRBK_BASENAME})) ||
|
|
($_->{SUBVOL_DIR} ne $subvol_dir) ||
|
|
($_->{node}{BTRBK_BASENAME} ne $btrbk_basename)) ) {
|
|
TRACE "get_snapshot_children: child does not match btrbk filename scheme, skipping: $_->{PRINT}";
|
|
next;
|
|
}
|
|
TRACE "get_snapshot_children: found: $_->{PRINT}";
|
|
push(@ret, $_);
|
|
}
|
|
$subvol_dir .= '/' if($subvol_dir);
|
|
DEBUG "Found " . scalar(@ret) . " snapshot children of \"$svol->{PRINT}\" in: $sroot->{PRINT}" . (defined($btrbk_basename) ? "/$subvol_dir$btrbk_basename.*" : "");
|
|
return @ret;
|
|
}
|
|
|
|
|
|
sub get_receive_targets($$;@)
|
|
{
|
|
my $droot = shift || die;
|
|
my $src_vol = shift || die;
|
|
my %opts = @_;
|
|
my $droot_subvols = $opts{droot_subvol_list} // vinfo_subvol_list($droot);
|
|
my @ret;
|
|
my $unexpected_count = 0;
|
|
|
|
if($src_vol->{node}{is_root}) {
|
|
DEBUG "Skip search for targets: source subvolume is btrfs root: $src_vol->{PRINT}";
|
|
return @ret;
|
|
}
|
|
unless($src_vol->{node}{readonly}) {
|
|
DEBUG "Skip search for targets: source subvolume is not read-only: $src_vol->{PRINT}";
|
|
return @ret;
|
|
}
|
|
|
|
# find matches by comparing uuid / received_uuid
|
|
my $uuid = $src_vol->{node}{uuid};
|
|
my $received_uuid = $src_vol->{node}{received_uuid};
|
|
$received_uuid = undef if($received_uuid eq '-');
|
|
TRACE "get_receive_targets: src_vol=\"$src_vol->{PRINT}\", droot=\"$droot->{PRINT}\"";
|
|
|
|
foreach (@$droot_subvols) {
|
|
next unless($_->{node}{readonly});
|
|
my $matched = undef;
|
|
if($_->{node}{received_uuid} eq $uuid) {
|
|
$matched = 'by-uuid';
|
|
}
|
|
elsif(defined($received_uuid) && ($_->{node}{received_uuid} eq $received_uuid)) {
|
|
$matched = 'by-received_uuid';
|
|
}
|
|
next unless($matched);
|
|
|
|
TRACE "get_receive_targets: $matched: Found receive target: $_->{SUBVOL_PATH}";
|
|
push(@{$opts{seen}}, $_) if($opts{seen});
|
|
if($opts{exact_match} && !exists($_->{node}{BTRBK_RAW})) {
|
|
if($_->{direct_leaf} && ($_->{NAME} eq $src_vol->{NAME})) {
|
|
TRACE "get_receive_targets: exact_match: $_->{SUBVOL_PATH}";
|
|
}
|
|
else {
|
|
TRACE "get_receive_targets: $matched: skip non-exact match: $_->{PRINT}";
|
|
WARN "Receive target of \"$src_vol->{PRINT}\" exists at unexpected location: $_->{PRINT}" if($opts{warn});
|
|
next;
|
|
}
|
|
}
|
|
push(@ret, $_);
|
|
}
|
|
TRACE "get_receive_targets: " . scalar(@ret) . " receive targets in \"$droot->{PRINT}/\" for: $src_vol->{PRINT}";
|
|
return @ret;
|
|
}
|
|
|
|
|
|
sub get_receive_targets_fsroot($$@)
|
|
{
|
|
my $droot = shift // die;
|
|
my $src_vol = shift // die;
|
|
my %opts = @_;
|
|
my $id = $src_vol->{node}{id};
|
|
my $uuid = $src_vol->{node}{uuid};
|
|
my $received_uuid = $src_vol->{node}{received_uuid};
|
|
$received_uuid = undef if(defined($received_uuid) && ($received_uuid eq '-'));
|
|
|
|
my @unexpected;
|
|
my @exclude;
|
|
@exclude = map { $_->{node}{id} } @{$opts{exclude}} if($opts{exclude});
|
|
|
|
TRACE "get_receive_target_fsroot: uuid=$uuid, received_uuid=" . ($received_uuid // '-') . " exclude id={ " . join(', ', @exclude) . " }";
|
|
|
|
# search in filesystem for matching received_uuid
|
|
foreach my $node (
|
|
grep({ (not $_->{is_root}) &&
|
|
(($_->{received_uuid} eq $uuid) ||
|
|
(defined($received_uuid) && ($_->{received_uuid} eq $received_uuid)))
|
|
} values(%{$droot->{node}{TREE_ROOT}{ID_HASH}}) ) )
|
|
{
|
|
next if(scalar grep($_ == $node->{id}, @exclude));
|
|
push @unexpected, $node;
|
|
if($opts{warn}) {
|
|
my $text;
|
|
my @url = get_cached_url_by_uuid($node->{uuid});
|
|
if(scalar(@url)) {
|
|
$text = vinfo($url[0])->{PRINT};
|
|
} else {
|
|
$text = '"' . _fs_path($node) . "\" (in filesystem at \"$droot->{PRINT}\")";
|
|
}
|
|
WARN "Receive target of \"$src_vol->{PRINT}\" exists at unexpected location: $text";
|
|
}
|
|
}
|
|
return @unexpected;
|
|
}
|
|
|
|
|
|
sub get_latest_common($$$;$)
|
|
{
|
|
my $sroot = shift || die;
|
|
my $svol = shift // die;
|
|
my $droot = shift || die;
|
|
my $snapshot_dir = shift; # if not set, skip search for btrbk basename (set to empty string to enable at current dir)
|
|
my $sroot_subvol_list = vinfo_subvol_list($sroot);
|
|
|
|
TRACE "get_latest_common: resolving latest common for subvolume: $svol->{PATH} (sroot=$sroot->{PRINT}, droot=$droot->{PRINT}, snapdir=\"" . ($snapshot_dir // '<undef>') . "\")";
|
|
my @candidate;
|
|
if($svol->{node}{readonly}) {
|
|
if($svol->{node}{parent_uuid} ne '-') {
|
|
# add readonly parent
|
|
@candidate = grep { $_->{node}{readonly} && ($_->{node}{uuid} eq $svol->{node}{parent_uuid}) } @$sroot_subvol_list;
|
|
die "multiple parents for $svol->{node}{parent_uuid}" if(scalar(@candidate) > 1);
|
|
TRACE "get_latest_common: subvolume has a read-only parent, add parent candidate" if(scalar(@candidate) > 0);
|
|
|
|
# add snapshots with same parent_uuid (siblings)
|
|
my @siblings = grep { $_->{node}{readonly} && ($_->{node}{parent_uuid} eq $svol->{node}{parent_uuid}) } @$sroot_subvol_list;
|
|
my @siblings_older = grep { $_->{node}{cgen} <= $svol->{node}{cgen} } @siblings;
|
|
my @siblings_newer = grep { $_->{node}{cgen} > $svol->{node}{cgen} } @siblings;
|
|
push @candidate, sort { $b->{node}{cgen} <=> $a->{node}{cgen} } @siblings_older; # older first, descending by cgen
|
|
push @candidate, sort { $a->{node}{cgen} <=> $b->{node}{cgen} } @siblings_newer; # then newer, ascending by cgen
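# i.e. (inferred from the sorting above) candidates are ordered by cgen proximity
# to $svol: nearest older sibling first, then the nearest newer ones.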
|
|
TRACE "get_latest_common: subvolume has siblings (same parent_uuid), add " . scalar(@siblings_older) . " older and " . scalar(@siblings_newer) . " newer (by cgen) candidates";
|
|
}
|
|
|
|
if(defined($snapshot_dir) && exists($svol->{node}{BTRBK_BASENAME})) {
|
|
# add subvolumes in same directory matching btrbk file name scheme
|
|
my @naming_match = grep { $_->{node}{readonly} && exists($_->{node}{BTRBK_BASENAME}) && ($_->{SUBVOL_DIR} eq $snapshot_dir) && ($_->{node}{BTRBK_BASENAME} eq $svol->{node}{BTRBK_BASENAME}) } @$sroot_subvol_list;
|
|
my @naming_match_older = grep { cmp_date($_->{node}{BTRBK_DATE}, $svol->{node}{BTRBK_DATE}) < 0 } @naming_match;
|
|
my @naming_match_newer = grep { cmp_date($_->{node}{BTRBK_DATE}, $svol->{node}{BTRBK_DATE}) > 0 } @naming_match;
|
|
push @candidate, sort { cmp_date($b->{node}{BTRBK_DATE}, $a->{node}{BTRBK_DATE}) } @naming_match_older;
|
|
push @candidate, sort { cmp_date($a->{node}{BTRBK_DATE}, $b->{node}{BTRBK_DATE}) } @naming_match_newer;
|
|
TRACE "get_latest_common: subvolume has btrbk naming scheme, add " . scalar(@naming_match_older) . " older and " . scalar(@naming_match_newer) . " newer (by file suffix) candidates with scheme: $sroot->{PRINT}/$snapshot_dir/$svol->{node}{BTRBK_BASENAME}.*";
|
|
}
|
|
}
|
|
else
|
|
{
|
|
@candidate = sort { $b->{node}{cgen} <=> $a->{node}{cgen} } get_snapshot_children($sroot, $svol);
|
|
TRACE "get_latest_common: subvolume is read-write, add " . scalar(@candidate) . " snapshot children, sorted by cgen: $svol->{PATH}";
|
|
|
|
if(defined($snapshot_dir)) {
|
|
# add subvolumes in same directory matching btrbk file name scheme (using $svol->{NAME} as basename)
|
|
my @naming_match = grep { $_->{node}{readonly} && exists($_->{node}{BTRBK_BASENAME}) && ($_->{SUBVOL_DIR} eq $snapshot_dir) && ($_->{node}{BTRBK_BASENAME} eq $svol->{NAME}) } @$sroot_subvol_list;
|
|
push @candidate, sort { cmp_date($b->{node}{BTRBK_DATE}, $a->{node}{BTRBK_DATE}) } @naming_match;
|
|
TRACE "get_latest_common: snapshot_dir is set, add " . scalar(@naming_match) . " candidates with scheme: $sroot->{PRINT}/$snapshot_dir/$svol->{NAME}.*";
|
|
}
|
|
}
|
|
|
|
my $droot_subvol_list = vinfo_subvol_list($droot); # cache subvol list
|
|
foreach my $child (@candidate) {
|
|
if($child->{node}{id} == $svol->{node}{id}) {
|
|
TRACE "get_latest_common: skip self: $child->{PRINT}";
|
|
next;
|
|
}
|
|
my @receive_targets = get_receive_targets($droot, $child, droot_subvol_list => $droot_subvol_list);
|
|
if(scalar @receive_targets) {
|
|
DEBUG("Latest common subvolumes for: $svol->{PRINT}: src=$child->{PRINT} target=$receive_targets[0]->{PRINT}");
|
|
return ($child, $receive_targets[0]);
|
|
}
|
|
}
|
|
DEBUG("No common subvolumes of \"$svol->{PRINT}\" found in src=\"$sroot->{PRINT}/\", target=\"$droot->{PRINT}/\"");
|
|
return (undef, undef);
|
|
}
|
|
|
|
|
|
sub get_latest_snapshot_child($$)
|
|
{
|
|
my $sroot = shift || die;
|
|
my $svol = shift // die;
|
|
my $latest = undef;
|
|
my $gen = -1;
|
|
foreach (get_snapshot_children($sroot, $svol)) {
|
|
if($_->{node}{cgen} > $gen) {
|
|
$latest = $_;
|
|
$gen = $_->{node}{cgen};
|
|
}
|
|
}
|
|
if($latest) {
|
|
DEBUG "Latest snapshot child for \"$svol->{PRINT}#$svol->{node}{gen}\" is: $latest->{PRINT}#$latest->{node}{cgen}";
|
|
} else {
|
|
DEBUG "No latest snapshots found for: $svol->{PRINT}";
|
|
}
|
|
return $latest;
|
|
}
|
|
|
|
|
|
sub check_file($$;@)
|
|
{
|
|
my $file = shift // die;
|
|
my $accept = shift || die;
|
|
my %opts = @_;
|
|
my $key = $opts{config_key}; # only for error text
|
|
my $config_file = $opts{config_file}; # only for error text
|
|
my $sanitize = $opts{sanitize};
|
|
|
|
my $match = $file_match;
|
|
$match = $glob_match if($accept->{wildcards});
|
|
|
|
if($file =~ /^($match)$/) {
|
|
$file = $1;
|
|
if($accept->{absolute}) {
|
|
unless($file =~ /^\//) {
|
|
ERROR "Only absolute files allowed for option \"$key\" in \"$config_file\" line $.: $file" if($key && $config_file);
|
|
return undef;
|
|
}
|
|
}
|
|
elsif($accept->{relative}) {
|
|
if($file =~ /^\//) {
|
|
ERROR "Only relative files allowed for option \"$key\" in \"$config_file\" line $.: $file" if($key && $config_file);
|
|
return undef;
|
|
}
|
|
}
|
|
elsif($accept->{name_only}) {
|
|
if($file =~ /\//) {
|
|
ERROR "Option \"$key\" is not a valid file name in \"$config_file\" line $.: $file" if($key && $config_file);
|
|
return undef;
|
|
}
|
|
}
|
|
else {
|
|
die("accept_type must contain either 'relative' or 'absolute'");
|
|
}
|
|
}
|
|
else {
|
|
ERROR "Ambiguous file for option \"$key\" in \"$config_file\" line $.: $file" if($key && $config_file);
|
|
return undef;
|
|
}
|
|
# check directory traversal
|
|
if(($file =~ /^\.\.$/) || ($file =~ /^\.\.\//) || ($file =~ /\/\.\.\//) || ($file =~ /\/\.\.$/)) {
|
|
ERROR "Illegal directory traversal for option \"$key\" in \"$config_file\" line $.: $file" if($key && $config_file);
|
|
return undef;
|
|
}
|
|
if($sanitize) {
|
|
$file =~ s/\/+/\//g; # sanitize multiple slash
|
|
$file =~ s/\/\.\//\//g; # sanitize "/./" -> "/"
|
|
$file =~ s/\/$// unless($file eq '/'); # remove trailing slash
|
|
}
|
|
return $file;
|
|
}
|
|
|
|
|
|
sub check_url($;$$)
|
|
{
|
|
my $url = shift // die;
|
|
my $key = shift; # only for error text
|
|
my $config_file = shift; # only for error text
|
|
my $url_prefix = "";
|
|
|
|
if($url =~ s/^(ssh:\/\/($ip_addr_match|$host_name_match))\//\//) {
|
|
$url_prefix = $1;
|
|
}
|
|
elsif($url =~ s/^($ip_addr_match|$host_name_match)://) {
|
|
# convert "my.host.com:/my/path" to ssh url
|
|
$url_prefix = "ssh://" . $1;
|
|
}
|
|
|
|
return ( $url_prefix, check_file($url, { absolute => 1 }, sanitize => 1, config_key => $key, config_file => $config_file) );
|
|
}
|
|
|
|
|
|
sub config_key($$;@)
|
|
{
|
|
my $config = shift || die;
|
|
my $key = shift || die;
|
|
my %opts = @_;
|
|
my $orig_config = $config;
|
|
$config = $config->{CONFIG} if($config->{CONFIG}); # accept vinfo for $config
|
|
|
|
if(exists($config_override{$key})) {
|
|
TRACE "config_key: OVERRIDE key=$key to value=" . ($config_override{$key} // "<undef>");
|
|
return $config_override{$key};
|
|
}
|
|
|
|
while(not exists($config->{$key})) {
|
|
# note: while all config keys exist in root context (at least with default values),
|
|
# we also allow fake configs (CONTEXT="cmdline") which have no PARENT.
|
|
return undef unless($config->{PARENT});
|
|
$config = $config->{PARENT};
|
|
}
|
|
my $retval = $config->{$key};
|
|
$retval = $opts{prefix} . $retval if(defined($opts{prefix}) && defined($retval));
|
|
$retval .= $opts{postfix} if(defined($opts{postfix}) && defined($retval));
|
|
return $retval;
|
|
}
|
|
|
|
|
|
sub config_preserve_hash($$)
|
|
{
|
|
my $config = shift || die;
|
|
my $prefix = shift || die;
|
|
my $ret = config_key($config, $prefix . "_preserve") // {};
|
|
my $preserve_min = config_key($config, $prefix . "_preserve_min");
|
|
if(defined($preserve_min)) {
|
|
$ret->{min} = $preserve_min; # used for raw schedule output
|
|
if(($preserve_min eq 'all') || ($preserve_min eq 'latest')) {
|
|
$ret->{min_q} = $preserve_min;
|
|
}
|
|
elsif($preserve_min =~ /^([0-9]+)([hdwmy])$/) {
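# e.g. (illustrative): preserve_min "14d" yields min_n=14, min_q='d'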
|
|
$ret->{min_n} = $1;
|
|
$ret->{min_q} = $2;
|
|
}
|
|
else { die; }
|
|
}
|
|
$ret->{dow} = config_key($config, "preserve_day_of_week");
|
|
return $ret;
|
|
}
|
|
|
|
|
|
sub config_compress_hash($$)
|
|
{
|
|
my $config = shift || die;
|
|
my $config_key = shift || die;
|
|
my $compress_key = config_key($config, $config_key);
|
|
return undef unless($compress_key);
|
|
return {
|
|
key => $compress_key,
|
|
level => config_key($config, $config_key . "_level"),
|
|
threads => config_key($config, $config_key . "_threads"),
|
|
};
|
|
}
|
|
|
|
|
|
sub config_dump_keys($;@)
|
|
{
|
|
my $config = shift || die;
|
|
my %opts = @_;
|
|
my @ret;
|
|
my $maxlen = 0;
|
|
$config = $config->{CONFIG} if($config->{CONFIG}); # accept vinfo for $config
|
|
|
|
foreach my $key (sort keys %config_options)
|
|
{
|
|
my $val;
|
|
next if($config_options{$key}->{deprecated});
|
|
if($opts{resolve}) {
|
|
$val = config_key($config, $key);
|
|
} else {
|
|
next unless exists($config->{$key});
|
|
$val = $config->{$key};
|
|
}
|
|
if($opts{skip_defaults}) {
|
|
if(defined($config_options{$key}->{default}) && defined($val)) {
|
|
next if($val eq $config_options{$key}->{default});
|
|
}
|
|
if((not defined($config_options{$key}->{default})) && (not (defined($val)))) {
|
|
next; # both undef, skip
|
|
}
|
|
}
|
|
if(defined($val)) {
|
|
if($config_options{$key}->{accept_preserve_matrix}) {
|
|
$val = format_preserve_matrix($val);
|
|
}
|
|
if(ref($val) eq "ARRAY") {
|
|
my $val2 = join(',', @$val);
|
|
$val = $val2;
|
|
}
|
|
}
|
|
$val //= exists($config->{$key}) ? "no" : "<unset>";
|
|
my $len = length($key);
|
|
$maxlen = $len if($len > $maxlen);
|
|
push @ret, { key => $key, val => $val, len => $len };
|
|
}
|
|
# print as table
|
|
return map { ($opts{prefix} // "") . $_->{key} . (' ' x (1 + $maxlen - $_->{len})) . ' ' . $_->{val} } @ret;
|
|
}
|
|
|
|
|
|
sub append_config_option($$$$;$)
|
|
{
|
|
my $config = shift;
|
|
my $key = shift;
|
|
my $value = shift;
|
|
my $context = shift;
|
|
my $config_file = shift; # only for error text
|
|
my $config_file_statement = $config_file ? " in \"$config_file\" line $." : "";
|
|
|
|
my $opt = $config_options{$key};
|
|
|
|
# accept only keys listed in %config_options
|
|
unless($opt) {
|
|
ERROR "Unknown option \"$key\"" . $config_file_statement;
|
|
return undef;
|
|
}
|
|
|
|
if($opt->{context} && !grep(/^$context$/, @{$opt->{context}})) {
|
|
ERROR "Option \"$key\" is only allowed in " . join(" or ", map("\"$_\"", @{$opt->{context}})) . " context" . $config_file_statement;
|
|
return undef;
|
|
}
|
|
|
|
if($opt->{deny_glob_context} && $config->{GLOB_CONTEXT}) {
|
|
ERROR "Option \"$key\" is not allowed on section with wildcards" . $config_file_statement;
|
|
return undef;
|
|
}
|
|
|
|
if(grep(/^\Q$value\E$/, @{$opt->{accept}})) {
|
|
TRACE "option \"$key=$value\" found in accept list";
|
|
}
|
|
elsif($opt->{accept_numeric} && ($value =~ /^[0-9]+$/)) {
|
|
TRACE "option \"$key=$value\" is numeric, accepted";
|
|
}
|
|
elsif($opt->{accept_file})
|
|
{
|
|
# be very strict about file options, for security's sake
|
|
$value = check_file($value, $opt->{accept_file}, sanitize => 1, config_key => $key, config_file => $config_file);
|
|
return undef unless(defined($value));
|
|
|
|
TRACE "option \"$key=$value\" is a valid file, accepted";
|
|
$value = "no" if($value eq "."); # maps to undef later
|
|
}
|
|
elsif($opt->{accept_regexp}) {
|
|
my $match = $opt->{accept_regexp};
|
|
if($value =~ m/$match/) {
|
|
TRACE "option \"$key=$value\" matched regexp, accepted";
|
|
}
|
|
else {
|
|
ERROR "Value \"$value\" failed input validation for option \"$key\"" . $config_file_statement;
|
|
return undef;
|
|
}
|
|
}
|
|
elsif($opt->{accept_preserve_matrix}) {
|
|
my %preserve;
|
|
my $s = ' ' . $value;
|
|
while($s =~ s/\s+(\*|[0-9]+)([hdwmyHDWMY])//) {
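# e.g. (illustrative): "24h 7d 4w *m" yields (h=>24, d=>7, w=>4, m=>'all')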
|
|
my $n = $1;
|
|
my $q = lc($2); # qw( h d w m y )
|
|
$n = 'all' if($n eq '*');
|
|
if(exists($preserve{$q})) {
|
|
ERROR "Value \"$value\" failed input validation for option \"$key\": multiple definitions of '$q'" . $config_file_statement;
|
|
return undef;
|
|
}
|
|
$preserve{$q} = $n;
|
|
}
|
|
unless($s eq "") {
|
|
ERROR "Value \"$value\" failed input validation for option \"$key\"" . $config_file_statement;
|
|
return undef;
|
|
}
|
|
TRACE "adding preserve matrix $context context:" . Data::Dumper->new([\%preserve], [ $key ])->Indent(0)->Pad(' ')->Quotekeys(0)->Pair('=>')->Dump() if($do_dumper);
|
|
$config->{$key} = \%preserve;
|
|
return $config;
|
|
}
|
|
else
|
|
{
|
|
ERROR "Unsupported value \"$value\" for option \"$key\"" . $config_file_statement;
|
|
return undef;
|
|
}
|
|
|
|
if($opt->{split}) {
|
|
$value = [ split($opt->{split}, $value) ];
|
|
TRACE "splitted option \"$key\": " . join(',', @$value);
|
|
}
|
|
|
|
if($opt->{require_bin} && (not check_exe($opt->{require_bin}))) {
|
|
WARN "Found option \"$key\", but required executable \"$opt->{require_bin}\" does not exist on your system. Please install \"$opt->{require_bin}\".";
|
|
WARN "Ignoring option \"$key\"" . $config_file_statement;
|
|
$value = "no";
|
|
}
|
|
|
|
if($opt->{deprecated}) {
|
|
if(my $warn_msg = ($opt->{deprecated}->{$value}->{warn} || $opt->{deprecated}->{DEFAULT}->{warn})) {
|
|
WARN "Found deprecated option \"$key $value\"" . $config_file_statement . ": " . $warn_msg;
|
|
}
|
|
if($opt->{deprecated}->{$value}->{ABORT} || $opt->{deprecated}->{DEFAULT}->{ABORT}) {
|
|
ERROR 'Deprecated (incompatible) option found, refusing to continue';
|
|
return undef;
|
|
}
|
|
if($opt->{deprecated}->{$value}->{FAILSAFE_PRESERVE} || $opt->{deprecated}->{DEFAULT}->{FAILSAFE_PRESERVE}) {
|
|
unless($config_override{FAILSAFE_PRESERVE}) { # warn only once
|
|
WARN "Entering failsafe mode:";
|
|
WARN " - preserving ALL snapshots for ALL subvolumes";
|
|
WARN " - ignoring ALL targets (skipping backup creation)";
|
|
WARN " - please read \"doc/upgrade_to_v0.23.0.md\"";
|
|
$config_override{FAILSAFE_PRESERVE} = "Failsafe mode active (deprecated configuration)";
|
|
}
|
|
$config_override{snapshot_preserve_min} = 'all';
|
|
return $config;
|
|
}
|
|
my $replace_key = $opt->{deprecated}->{$value}->{replace_key};
|
|
my $replace_value = $opt->{deprecated}->{$value}->{replace_value};
|
|
if(defined($replace_key)) {
|
|
$key = $replace_key;
|
|
$value = $replace_value;
|
|
WARN "Using \"$key $value\"";
|
|
}
|
|
}
|
|
|
|
TRACE "adding option \"$key=$value\" to $context context";
|
|
$value = undef if($value eq "no"); # we don't want to check for "no" all the time
|
|
$config->{$key} = $value;
|
|
return $config;
|
|
}
|
|
|
|
|
|
sub parse_config_line($$$$$)
|
|
{
|
|
my ($file, $root, $cur, $key, $value) = @_;
|
|
|
|
if($key eq "volume")
|
|
{
|
|
$cur = $root;
|
|
TRACE "config: context forced to: $cur->{CONTEXT}";
|
|
|
|
# be very strict about file options, for security's sake
|
|
my ($url_prefix, $path) = check_url($value, $key, $file);
|
|
return undef unless(defined($path));
|
|
TRACE "config: adding volume \"$url_prefix$path\" to root context";
|
|
die unless($cur->{CONTEXT} eq "root");
|
|
my $volume = { CONTEXT => "volume",
|
|
PARENT => $cur,
|
|
SUBSECTION => [],
|
|
url => $url_prefix . $path,
|
|
};
|
|
push(@{$cur->{SUBSECTION}}, $volume);
|
|
$cur = $volume;
|
|
}
|
|
elsif($key eq "subvolume")
|
|
{
|
|
while($cur->{CONTEXT} ne "volume") {
|
|
if(($cur->{CONTEXT} eq "root") || (not $cur->{PARENT})) {
|
|
ERROR "Subvolume keyword outside volume context, in \"$file\" line $.";
|
|
return undef;
|
|
}
|
|
$cur = $cur->{PARENT} || die;
|
|
TRACE "config: context changed to: $cur->{CONTEXT}";
|
|
}
|
|
# be very strict about file options, for security's sake
|
|
my $rel_path = check_file($value, { relative => 1, wildcards => 1 }, sanitize => 1, config_key => $key, config_file => $file);
|
|
return undef unless(defined($rel_path));
|
|
|
|
TRACE "config: adding subvolume \"$rel_path\" to volume context: $cur->{url}";
|
|
my $snapshot_name = $rel_path;
|
|
$snapshot_name =~ s/^.*\///; # snapshot_name defaults to subvolume name
|
|
die unless($cur->{CONTEXT} eq "volume");
|
|
my $subvolume = { CONTEXT => "subvolume",
|
|
PARENT => $cur,
|
|
# SUBSECTION => [], # handled by target propagation
|
|
rel_path => $rel_path,
|
|
url => $cur->{url} . '/' . $rel_path,
|
|
snapshot_name => $snapshot_name,
|
|
};
|
|
$subvolume->{GLOB_CONTEXT} = 1 if($value =~ /\*/);
|
|
push(@{$cur->{SUBSECTION}}, $subvolume);
|
|
$cur = $subvolume;
|
|
}
|
|
elsif($key eq "target")
|
|
{
|
|
if($cur->{CONTEXT} eq "target") {
|
|
$cur = $cur->{PARENT} || die;
|
|
TRACE "config: context changed to: $cur->{CONTEXT}";
|
|
}
|
|
if($value =~ /^(\S+)\s+(\S+)$/)
|
|
{
|
|
my ($target_type, $url) = ($1, $2);
|
|
unless(grep(/^\Q$target_type\E$/, @config_target_types)) {
|
|
ERROR "Unknown target type \"$target_type\" in \"$file\" line $.";
|
|
return undef;
|
|
}
|
|
# be very strict about file options, for security's sake
|
|
my ($url_prefix, $path) = check_url($url, $key, $file);
|
|
return undef unless(defined($path));
|
|
|
|
TRACE "config: adding target \"$url_prefix$path\" (type=$target_type) to $cur->{CONTEXT} context" . ($cur->{url} ? ": $cur->{url}" : "");
|
|
my $target = { CONTEXT => "target",
|
|
PARENT => $cur,
|
|
target_type => $target_type,
|
|
url => $url_prefix . $path,
|
|
};
|
|
# NOTE: target sections are propagated to the appropriate SUBSECTION in _config_propagate_target()
|
|
$cur->{TARGET} //= [];
|
|
push(@{$cur->{TARGET}}, $target);
|
|
$cur = $target;
|
|
}
|
|
else
|
|
{
|
|
ERROR "Ambiguous target configuration, in \"$file\" line $.";
|
|
return undef;
|
|
}
|
|
}
|
|
else
|
|
{
|
|
return append_config_option($cur, $key, $value, $cur->{CONTEXT}, $file);
|
|
}
|
|
|
|
return $cur;
|
|
}
|
|
|
|
|
|
sub _config_propagate_target
|
|
{
|
|
my $cur = shift;
|
|
foreach my $subsection (@{$cur->{SUBSECTION}}) {
|
|
my @propagate_target;
|
|
foreach my $target (@{$cur->{TARGET}}) {
|
|
TRACE "propagating target \"$target->{url}\" from $cur->{CONTEXT} context to: $subsection->{CONTEXT} $subsection->{url}";
|
|
die if($target->{SUBSECTION});
|
|
|
|
# don't propagate if a target of same target_type and url already exists in subsection
|
|
if($subsection->{TARGET} &&
|
|
grep({ ($_->{url} eq $target->{url}) && ($_->{target_type} eq $target->{target_type}) } @{$subsection->{TARGET}}))
|
|
{
|
|
DEBUG "Skip propagation of \"target $target->{target_type} $target->{url}\" from $cur->{CONTEXT} context to \"$subsection->{CONTEXT} $subsection->{url}\": same target already exists";
|
|
next;
|
|
}
|
|
|
|
my %copy = ( %$target, PARENT => $subsection );
|
|
push @propagate_target, \%copy;
|
|
}
|
|
$subsection->{TARGET} //= [];
|
|
unshift @{$subsection->{TARGET}}, @propagate_target; # maintain config order: propagated targets go in front of already defined targets
|
|
if($subsection->{CONTEXT} eq "subvolume") {
|
|
# finally create missing SUBSECTION in subvolume context
|
|
die if($subsection->{SUBSECTION});
|
|
$subsection->{SUBSECTION} = $subsection->{TARGET};
|
|
}
|
|
else {
|
|
# recurse into SUBSECTION
|
|
_config_propagate_target($subsection);
|
|
}
|
|
}
|
|
delete $cur->{TARGET};
|
|
return $cur;
|
|
}
|
|
|
|
|
|
sub init_config(@)
|
|
{
|
|
my %config_root = ( CONTEXT => "root", SUBSECTION => [], @_ );
|
|
|
|
# set defaults
|
|
foreach (keys %config_options) {
|
|
next if $config_options{$_}->{deprecated}; # don't pollute hash with deprecated options
|
|
$config_root{$_} = $config_options{$_}->{default};
|
|
}
|
|
return \%config_root;
|
|
}
|
|
|
|
|
|
sub parse_config(@)
|
|
{
|
|
my @config_files = @_;
|
|
my $file = undef;
|
|
foreach(@config_files) {
|
|
TRACE "config: checking for file: $_";
|
|
if(-r "$_") {
|
|
$file = $_;
|
|
last;
|
|
}
|
|
}
|
|
unless($file) {
|
|
ERROR "Configuration file not found: " . join(', ', @config_files);
|
|
return undef;
|
|
}
|
|
|
|
my $root = init_config(SRC_FILE => $file);
|
|
my $cur = $root;
|
|
|
|
INFO "Using configuration: $file";
|
|
open(FILE, '<', $file) or die $!;
|
|
while (<FILE>) {
|
|
chomp;
|
|
next if /^\s*#/; # ignore comments
|
|
next if /^\s*$/; # ignore empty lines
|
|
TRACE "config: parsing line $. with context=$cur->{CONTEXT}: \"$_\"";
|
|
if(/^(\s*)([a-zA-Z_]+)\s+(.*)$/)
|
|
{
|
|
# NOTE: we do not perform checks on indentation!
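# e.g. (illustrative): "  snapshot_preserve_min  2d" parses to
# indent=2, key="snapshot_preserve_min", value="2d"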
|
|
my ($indent, $key, $value) = (length($1), lc($2), $3);
|
|
$value =~ s/\s*$//;
|
|
$cur = parse_config_line($file, $root, $cur, $key, $value);
|
|
unless(defined($cur)) {
|
|
# error, bail out
|
|
$root = undef;
|
|
last;
|
|
}
|
|
TRACE "line processed: new context=$cur->{CONTEXT}";
|
|
}
|
|
else
|
|
{
|
|
ERROR "Parse error in \"$file\" line $.";
|
|
$root = undef;
|
|
last;
|
|
}
|
|
}
|
|
close FILE || ERROR "Failed to close configuration file: $!";
|
|
|
|
_config_propagate_target($root);
|
|
|
|
return $root;
|
|
}
|
|
|
|
|
|
# sets $target->{CONFIG}->{ABORTED} on failure
|
|
# sets $target->{SUBVOL_RECEIVED}
|
|
sub macro_send_receive(@)
|
|
{
|
|
my %info = @_;
|
|
my $source = $info{source} || die;
|
|
my $target = $info{target} || die;
|
|
my $parent = $info{parent};
|
|
my $config_target = $target->{CONFIG};
|
|
die unless($config_target->{CONTEXT} eq "target");
|
|
my $target_type = $config_target->{target_type} || die;
|
|
my $incremental = config_key($config_target, "incremental");
|
|
|
|
# check for existing target subvolume
|
|
if(my $err_vol = vinfo_subvol($target, $source->{NAME})) {
|
|
ABORTED($config_target, "Target subvolume \"$err_vol->{PRINT}\" already exists");
|
|
$config_target->{UNRECOVERABLE} = "Please delete stray subvolume (\"btrbk clean\"): $err_vol->{PRINT}";
|
|
ERROR $config_target->{ABORTED} . ", aborting send/receive of: $source->{PRINT}";
|
|
ERROR $config_target->{UNRECOVERABLE};
|
|
$info{ERROR} = 1;
|
|
return undef;
|
|
}
|
|
|
|
if($incremental)
|
|
{
|
|
# create backup from latest common
|
|
if($parent) {
|
|
INFO "Creating incremental backup...";
|
|
}
|
|
elsif($incremental ne "strict") {
|
|
INFO "No common parent subvolume present, creating full backup...";
|
|
}
|
|
else {
|
|
WARN "Backup to $target->{PRINT} failed: no common parent subvolume found for \"$source->{PRINT}\", and option \"incremental\" is set to \"strict\"";
|
|
$info{ERROR} = 1;
|
|
ABORTED($config_target, "No common parent subvolume found, and option \"incremental\" is set to \"strict\"");
|
|
return undef;
|
|
}
|
|
}
|
|
else {
|
|
INFO "Creating full backup...";
|
|
$parent = undef;
|
|
delete $info{parent};
|
|
}
|
|
|
|
my $ret;
|
|
my $vol_received;
|
|
if($target_type eq "send-receive")
|
|
{
|
|
$ret = btrfs_send_receive($source, $target, $parent, \$vol_received, rate_limit => config_key($config_target, "rate_limit"));
|
|
ABORTED($config_target, "Failed to send/receive subvolume") unless($ret);
|
|
}
|
|
elsif($target_type eq "raw")
|
|
{
|
|
unless($dryrun) {
|
|
# make sure we know the source uuid
|
|
if($source->{node}{uuid} =~ /^$fake_uuid_prefix/) {
|
|
DEBUG "Fetching uuid of new subvolume: $source->{PRINT}";
|
|
my $detail = btrfs_subvolume_show($source);
|
|
die unless($detail->{uuid});
|
|
$source->{node}{uuid} = $detail->{uuid};
|
|
$uuid_cache{$detail->{uuid}} = $source->{node};
|
|
}
|
|
}
|
|
|
|
my $encrypt = undef;
|
|
my $encrypt_type = config_key($config_target, "raw_target_encrypt");
|
|
if($encrypt_type) {
|
|
die unless($encrypt_type eq "gpg");
|
|
$encrypt = { type => $encrypt_type,
|
|
keyring => config_key($config_target, "gpg_keyring"),
|
|
recipient => config_key($config_target, "gpg_recipient"),
|
|
}
|
|
}
|
|
$ret = btrfs_send_to_file($source, $target, $parent, \$vol_received,
|
|
compress => config_compress_hash($config_target, "raw_target_compress"),
|
|
encrypt => $encrypt,
|
|
rate_limit => config_key($config_target, "rate_limit"),
|
|
);
|
|
ABORTED($config_target, "Failed to send subvolume to raw file") unless($ret);
|
|
}
|
|
else
|
|
{
|
|
die "Illegal target type \"$target_type\"";
|
|
}
|
|
|
|
# inject fake vinfo
|
|
|
|
# NOTE: it's not possible to add (and compare) correct target $detail
|
|
# from btrfs_send_receive(), as source detail also has fake uuid.
|
|
if($ret) {
|
|
vinfo_inject_child($target, $vol_received, {
|
|
# NOTE: this is not necessarily the correct parent_uuid (on
|
|
# receive, btrfs-progs picks the uuid of the first (lowest id)
|
|
# matching possible parent), whereas the target_parent is the
|
|
# first from get_receive_targets().
|
|
#
|
|
# NOTE: the parent_uuid of an injected receive target is not used
|
|
# anywhere in btrbk at the time of writing
|
|
parent_uuid => $parent ? $info{latest_common_target}->{node}{uuid} : '-',
|
|
received_uuid => $source->{node}{received_uuid} eq '-' ? $source->{node}{uuid} : $source->{node}{received_uuid},
|
|
readonly => 1,
|
|
TARGET_TYPE => $target_type,
|
|
FORCE_PRESERVE => 'preserve forced: created just now',
|
|
});
|
|
}
|
|
|
|
# add info to $config->{SUBVOL_RECEIVED}
|
|
$info{received_type} = $target_type || die;
|
|
$info{received_subvolume} = $vol_received || die;
|
|
$target->{SUBVOL_RECEIVED} //= [];
|
|
push(@{$target->{SUBVOL_RECEIVED}}, \%info);
|
|
unless($ret) {
|
|
$info{ERROR} = 1;
|
|
return undef;
|
|
}
|
|
return 1;
|
|
}
|
|
|
|
|
|
# sets $result_vinfo->{CONFIG}->{ABORTED} on failure
|
|
# sets $result_vinfo->{SUBVOL_DELETED}
|
|
sub macro_delete($$$$$;@)
|
|
{
|
|
my $root_subvol = shift || die;
|
|
my $subvol_dir = shift // die;
|
|
my $subvol_basename = shift // die;
|
|
my $result_vinfo = shift || die;
|
|
my $schedule_options = shift || die;
|
|
my %delete_options = @_;
|
|
$subvol_dir =~ s/\/+$//;
|
|
|
|
my @schedule;
|
|
foreach my $vol (@{vinfo_subvol_list($root_subvol)}) {
|
|
unless($vol->{node}{BTRBK_DATE} &&
|
|
($vol->{SUBVOL_DIR} eq $subvol_dir) &&
|
|
($vol->{node}{BTRBK_BASENAME} eq $subvol_basename)) {
|
|
TRACE "Target subvolume does not match btrbk filename scheme, skipping: $vol->{PRINT}";
|
|
next;
|
|
}
|
|
push(@schedule, { value => $vol,
|
|
# name => $vol->{PRINT}, # only for logging
|
|
btrbk_date => $vol->{node}{BTRBK_DATE},
|
|
preserve => $vol->{node}{FORCE_PRESERVE},
|
|
});
|
|
}
|
|
my (undef, $delete) = schedule(
|
|
%$schedule_options,
|
|
schedule => \@schedule,
|
|
preserve_date_in_future => 1,
|
|
);
|
|
my $ret = btrfs_subvolume_delete($delete, %delete_options);
|
|
if(defined($ret)) {
|
|
$subvol_dir .= '/' if($subvol_dir ne "");
|
|
INFO "Deleted $ret subvolumes in: $root_subvol->{PRINT}/$subvol_dir$subvol_basename.*";
|
|
$result_vinfo->{SUBVOL_DELETED} //= [];
|
|
push @{$result_vinfo->{SUBVOL_DELETED}}, @$delete;
|
|
return $delete;
|
|
}
|
|
else {
|
|
ABORTED($result_vinfo, "Failed to delete subvolume");
|
|
return undef;
|
|
}
|
|
}
|
|
|
|
|
|
sub macro_archive_target($$$;$)
|
|
{
|
|
my $sroot = shift || die;
|
|
my $droot = shift || die;
|
|
my $snapshot_name = shift // die;
|
|
my $schedule_options = shift // {};
|
|
my @schedule;
|
|
|
|
# NOTE: this is pretty much the same as "resume missing"
|
|
my @unexpected_location;
|
|
my $droot_subvol_list = vinfo_subvol_list($droot); # cache subvol list for get_receive_targets()
|
|
foreach my $svol (@{vinfo_subvol_list($sroot, sort => 'path')})
|
|
{
|
|
next unless($svol->{node}{readonly});
|
|
next unless($svol->{btrbk_direct_leaf} && ($svol->{node}{BTRBK_BASENAME} eq $snapshot_name));
|
|
|
|
my $warning_seen = [];
|
|
my @receive_targets = get_receive_targets($droot, $svol, exact_match => 1, warn => 1, seen => $warning_seen, droot_subvol_list => $droot_subvol_list );
|
|
push @unexpected_location, get_receive_targets_fsroot($droot, $svol, exclude => $warning_seen, warn => 1); # warn if unexpected on fs
|
|
|
|
next if(scalar(@receive_targets));
|
|
DEBUG "Adding archive candidate: $svol->{PRINT}";
|
|
|
|
push @schedule, { value => $svol,
|
|
btrbk_date => $svol->{node}{BTRBK_DATE},
|
|
preserve => $svol->{node}{FORCE_PRESERVE},
|
|
};
|
|
}
|
|
|
|
if(scalar(@unexpected_location)) {
|
|
ABORTED($droot, "Receive targets of archive candidates exist at unexpected location");
|
|
WARN "Skipping archiving of \"$sroot->{PRINT}/${snapshot_name}.*\": $abrt";
|
|
return undef;
|
|
}
|
|
|
|
# add all present archives as informative_only: these are needed for correct results of schedule()
|
|
foreach my $dvol (@$droot_subvol_list)
|
|
{
|
|
next unless($dvol->{btrbk_direct_leaf} && ($dvol->{node}{BTRBK_BASENAME} eq $snapshot_name));
|
|
next unless($dvol->{node}{readonly});
|
|
push @schedule, { informative_only => 1,
|
|
value => $dvol,
|
|
btrbk_date => $dvol->{node}{BTRBK_DATE},
|
|
};
|
|
}
|
|
|
|
my ($preserve, undef) = schedule(
|
|
schedule => \@schedule,
|
|
preserve => config_preserve_hash($droot, "archive"),
|
|
result_preserve_action_text => 'archive',
|
|
result_delete_action_text => '',
|
|
%$schedule_options
|
|
);
|
|
my @archive = grep defined, @$preserve; # remove entries with no value from list (archive subvolumes)
|
|
my $archive_total = scalar @archive;
|
|
my $archive_success = 0;
|
|
foreach my $svol (@archive)
|
|
{
|
|
my ($latest_common_src, $latest_common_target) = get_latest_common($sroot, $svol, $droot, "");
|
|
if(macro_send_receive(source => $svol,
|
|
target => $droot,
|
|
parent => $latest_common_src,
|
|
latest_common_target => $latest_common_target,
|
|
))
|
|
{
|
|
$archive_success++;
|
|
}
|
|
else {
|
|
ERROR("Error while cloning, aborting");
|
|
last;
|
|
}
|
|
}
|
|
|
|
if($archive_total) {
|
|
INFO "Archived $archive_success/$archive_total subvolumes";
|
|
} else {
|
|
INFO "No missing archives found";
|
|
}
|
|
|
|
return $archive_success;
|
|
}
|
|
|
|
|
|
sub cmp_date($$)
|
|
{
|
|
return (($_[0]->[0] <=> $_[1]->[0]) || # unix time
|
|
($_[0]->[1] <=> $_[1]->[1])); # NN
|
|
}
|
|
|
|
|
|
sub schedule(@)
|
|
{
|
|
my %args = @_;
|
|
my $schedule = $args{schedule} || die;
|
|
my $preserve = $args{preserve} || die;
|
|
my $preserve_date_in_future = $args{preserve_date_in_future};
|
|
my $results_list = $args{results};
|
|
my $result_hints = $args{result_hints} // {};
|
|
my $result_preserve_action_text = $args{result_preserve_action_text};
|
|
my $result_delete_action_text = $args{result_delete_action_text} // 'delete';
|
|
|
|
my $preserve_day_of_week = $preserve->{dow} || die;
|
|
my $preserve_min_n = $preserve->{min_n};
|
|
my $preserve_min_q = $preserve->{min_q};
|
|
my $preserve_hourly = $preserve->{h};
|
|
my $preserve_daily = $preserve->{d};
|
|
my $preserve_weekly = $preserve->{w};
|
|
my $preserve_monthly = $preserve->{m};
|
|
my $preserve_yearly = $preserve->{y};
|
|
|
|
DEBUG "Schedule: " . format_preserve_matrix($preserve, format => "debug_text");
|
|
|
|
# 0 1 2 3 4 5 6 7 8
|
|
# sec, min, hour, mday, mon, year, wday, yday, isdst
|
|
|
|
# sort the schedule, ascending by date
|
|
# regular entries come in front of informative_only
|
|
my @sorted_schedule = sort { cmp_date($a->{btrbk_date}, $b->{btrbk_date} ) ||
|
|
(($a->{informative_only} ? ($b->{informative_only} ? 0 : 1) : ($b->{informative_only} ? -1 : 0)))
|
|
} @$schedule;
|
|
|
|
DEBUG "Scheduler reference time: " . timestamp(\@tm_now, 'debug-iso');
|
|
|
|
# first, do our calendar calculations
|
|
# - weeks start on $preserve_day_of_week
|
|
# - leap hours are NOT taken into account for $delta_hours
|
|
my $now_h = timegm_nocheck( 0, 0, $tm_now[2], $tm_now[3], $tm_now[4], $tm_now[5] ); # use timelocal() here (and below) if you want to honor leap hours
|
|
my $now_d = timegm_nocheck( 0, 0, 0, $tm_now[3], $tm_now[4], $tm_now[5] );
|
|
|
|
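# $now_h / $now_d are the reference time truncated to the current hour / day.
# Using timegm_nocheck() on localtime fields (instead of timelocal) makes the
# deltas below independent of DST shifts: delta_hours / delta_days simply
# count wall-clock hours / days between the snapshot date and now.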
foreach my $href (@sorted_schedule)
|
|
{
|
|
my $time = $href->{btrbk_date}->[0];
|
|
my @tm = localtime($time);
|
|
my $delta_days_from_eow = $tm[6] - $day_of_week_map{$preserve_day_of_week};
|
|
$delta_days_from_eow += 7 if($delta_days_from_eow < 0);
|
|
|
|
# check timegm: ignores leap hours
|
|
my $delta_days = int(($now_d - timegm_nocheck( 0, 0, 0, $tm[3], $tm[4], $tm[5] ) ) / (60 * 60 * 24));
|
|
my $delta_hours = int(($now_h - timegm_nocheck( 0, 0, $tm[2], $tm[3], $tm[4], $tm[5] ) ) / (60 * 60));
|
|
my $delta_weeks = int(($delta_days + $delta_days_from_eow) / 7); # weeks from beginning of week
|
|
my $delta_years = ($tm_now[5] - $tm[5]);
|
|
my $delta_months = $delta_years * 12 + ($tm_now[4] - $tm[4]);
|
|
|
|
$href->{delta_hours} = $delta_hours;
|
|
$href->{delta_days} = $delta_days;
|
|
$href->{delta_weeks} = $delta_weeks;
|
|
$href->{delta_months} = $delta_months;
|
|
$href->{delta_years} = $delta_years;
|
|
|
|
# only for text output
|
|
my $year = $tm[5] + 1900;
|
|
my $year_month = "${year}-" . ($tm[4] < 9 ? '0' : "") . ($tm[4] + 1);
|
|
$href->{year_month} = $year_month;
|
|
$href->{year} = $year;
|
|
$href->{err_days} = ($delta_days_from_eow ? "+$delta_days_from_eow days after " : "on ") . "$preserve_day_of_week";
|
|
|
|
if($preserve_date_in_future && ($href->{delta_hours} < 0)) {
|
|
$href->{preserve} = "preserve forced: " . -($href->{delta_hours}) . " hours in the future";
|
|
}
|
|
}
|
|
|
|
my %first_in_delta_hours;
|
|
my %first_in_delta_days;
|
|
my %first_in_delta_weeks;
|
|
my %first_weekly_in_delta_months;
|
|
my %first_monthly_in_delta_years;
|
|
|
|
# filter "preserve all within N days/weeks/..."
|
|
foreach my $href (@sorted_schedule) {
|
|
if($preserve_min_q) {
|
|
if($preserve_min_q eq 'all') {
|
|
$href->{preserve} = "preserve min: all";
|
|
} elsif($preserve_min_q eq 'h') {
|
|
$href->{preserve} = "preserve min: $href->{delta_hours} hours ago" if($href->{delta_hours} <= $preserve_min_n);
|
|
} elsif($preserve_min_q eq 'd') {
|
|
$href->{preserve} = "preserve min: $href->{delta_days} days ago" if($href->{delta_days} <= $preserve_min_n);
|
|
} elsif($preserve_min_q eq 'w') {
|
|
$href->{preserve} = "preserve min: $href->{delta_weeks} weeks ago" if($href->{delta_weeks} <= $preserve_min_n);
|
|
} elsif($preserve_min_q eq 'm') {
|
|
$href->{preserve} = "preserve min: $href->{delta_months} months ago" if($href->{delta_months} <= $preserve_min_n);
|
|
} elsif($preserve_min_q eq 'y') {
|
|
$href->{preserve} = "preserve min: $href->{delta_years} years ago" if($href->{delta_years} <= $preserve_min_n);
|
|
}
|
|
}
|
|
$first_in_delta_hours{$href->{delta_hours}} //= $href;
|
|
}
|
|
if($preserve_min_q && ($preserve_min_q eq 'latest') && (scalar @sorted_schedule)) {
|
|
my $href = $sorted_schedule[-1];
|
|
$href->{preserve} = 'preserve min: latest';
|
|
}
|
|
|
|
# filter hourly, daily, weekly, monthly, yearly
|
|
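# The %first_in_* hashes cascade: the earliest entry of each hour is the
# candidate "first of day", the first daily is the candidate "first of week",
# the first weekly the candidate for monthly retention, and the first monthly
# the candidate for yearly retention. This is why monthly/yearly preservation
# is expressed as "first weekly of month/year" in the preserve messages.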
foreach (sort {$b <=> $a} keys %first_in_delta_hours) {
|
|
my $href = $first_in_delta_hours{$_} || die;
|
|
if($preserve_hourly && (($preserve_hourly eq 'all') || ($href->{delta_hours} <= $preserve_hourly))) {
|
|
$href->{preserve} = "preserve hourly: first of hour, $href->{delta_hours} hours ago";
|
|
}
|
|
$first_in_delta_days{$href->{delta_days}} //= $href;
|
|
}
|
|
foreach (sort {$b <=> $a} keys %first_in_delta_days) {
|
|
my $href = $first_in_delta_days{$_} || die;
|
|
if($preserve_daily && (($preserve_daily eq 'all') || ($href->{delta_days} <= $preserve_daily))) {
|
|
$href->{preserve} = "preserve daily: first of day, $href->{delta_days} days ago";
|
|
}
|
|
$first_in_delta_weeks{$href->{delta_weeks}} //= $href;
|
|
}
|
|
|
|
foreach (sort {$b <=> $a} keys %first_in_delta_weeks) {
|
|
my $href = $first_in_delta_weeks{$_} || die;
|
|
if($preserve_weekly && (($preserve_weekly eq 'all') || ($href->{delta_weeks} <= $preserve_weekly))) {
|
|
$href->{preserve} = "preserve weekly: $href->{delta_weeks} weeks ago, $href->{err_days}";
|
|
}
|
|
$first_weekly_in_delta_months{$href->{delta_months}} //= $href;
|
|
}
|
|
foreach (sort {$b <=> $a} keys %first_weekly_in_delta_months) {
|
|
my $href = $first_weekly_in_delta_months{$_} || die;
|
|
if($preserve_monthly && (($preserve_monthly eq 'all') || ($href->{delta_months} <= $preserve_monthly))) {
|
|
$href->{preserve} = "preserve monthly: first weekly of month $href->{year_month} ($href->{delta_months} months ago, $href->{err_days})";
|
|
}
|
|
$first_monthly_in_delta_years{$href->{delta_years}} //= $href;
|
|
}
|
|
foreach (sort {$b <=> $a} keys %first_monthly_in_delta_years) {
|
|
my $href = $first_monthly_in_delta_years{$_} || die;
|
|
if($preserve_yearly && (($preserve_yearly eq 'all') || ($href->{delta_years} <= $preserve_yearly))) {
|
|
$href->{preserve} = "preserve yearly: first weekly of year $href->{year} ($href->{delta_years} years ago, $href->{err_days})";
|
|
}
|
|
}
|
|
|
|
# assemble results
|
|
my @delete;
|
|
my @preserve;
|
|
my %result_base = ( %$preserve,
|
|
scheme => format_preserve_matrix($preserve),
|
|
%$result_hints,
|
|
);
|
|
my $count_defined = 0;
|
|
foreach my $href (@sorted_schedule)
|
|
{
|
|
$count_defined++ unless($href->{informative_only});
|
|
if($href->{preserve}) {
|
|
push(@preserve, $href->{value}) unless($href->{informative_only});
|
|
push @$results_list, { %result_base,
|
|
action => $href->{informative_only} ? undef : $result_preserve_action_text,
|
|
reason => $href->{preserve},
|
|
value => $href->{value},
|
|
} if($results_list);
|
|
TRACE "schedule: $href->{value}->{PRINT}: " . ($href->{informative_only} ? '(informative_only)' : '') . " $href->{preserve}" if($href->{value} && $href->{value}->{PRINT});
|
|
}
|
|
else {
|
|
push(@delete, $href->{value}) unless($href->{informative_only});
|
|
push @$results_list, { %result_base,
|
|
action => $href->{informative_only} ? undef : $result_delete_action_text,
|
|
value => $href->{value},
|
|
} if($results_list);
|
|
TRACE "schedule: $href->{value}->{PRINT}: delete ($result_delete_action_text)" if($href->{value} && $href->{value}->{PRINT});
|
|
}
|
|
}
|
|
DEBUG "Preserving " . @preserve . "/" . $count_defined . " items";
|
|
return (\@preserve, \@delete);
|
|
}
|
|
|
|
|
|
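# format_preserve_matrix() renders a preserve hash either as a human readable
# sentence (format => "debug_text") or as a compact scheme string used in the
# table output, e.g. (values illustrative):
#
#   { min_n => 2, min_q => 'd', h => 24, d => 7, w => 4, m => 6, y => 1,
#     dow => 'sunday' }   =>   "2d+ 24h 7d 4w 6m 1y (sunday)"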
sub format_preserve_matrix($@)
|
|
{
|
|
my $preserve = shift || die;
|
|
my %opts = @_;
|
|
my $format = $opts{format} // "short";
|
|
|
|
if($format eq "debug_text") {
|
|
my @out;
|
|
my %trans = ( h => 'hours', d => 'days', w => 'weeks', m => 'months', y => 'years' );
|
|
if($preserve->{min_q} && ($preserve->{min_q} eq 'all')) {
|
|
push @out, "all forever";
|
|
}
|
|
else {
|
|
push @out, "latest" if($preserve->{min_q} && ($preserve->{min_q} eq 'latest'));
|
|
push @out, "all within $preserve->{min_n} $trans{$preserve->{min_q}}" if($preserve->{min_n} && $preserve->{min_q});
|
|
push @out, "first of day for $preserve->{d} days" if($preserve->{d});
|
|
unless($preserve->{d} && ($preserve->{d} eq 'all')) {
|
|
push @out, "first daily in week (starting on $preserve->{dow}) for $preserve->{w} weeks" if($preserve->{w});
|
|
unless($preserve->{w} && ($preserve->{w} eq 'all')) {
|
|
push @out, "first weekly of month for $preserve->{m} months" if($preserve->{m});
|
|
unless($preserve->{m} && ($preserve->{m} eq 'all')) {
|
|
push @out, "first weekly of year for $preserve->{y} years" if($preserve->{y});
|
|
}
|
|
}
|
|
}
|
|
}
|
|
return 'preserving ' . join('; ', @out);
|
|
}
|
|
|
|
my $s = "";
|
|
if($preserve->{min_q} && ($preserve->{min_q} eq 'all')) {
|
|
$s = '*d+';
|
|
}
|
|
else {
|
|
# $s .= '.+' if($preserve->{min_q} && ($preserve->{min_q} eq 'latest'));
|
|
$s .= $preserve->{min_n} . $preserve->{min_q} . '+' if($preserve->{min_n} && $preserve->{min_q});
|
|
foreach (qw(h d w m y)) {
|
|
my $val = $preserve->{$_} // 0;
|
|
next unless($val);
|
|
$val = '*' if($val eq 'all');
|
|
$s .= ($s ? ' ' : '') . $val . $_;
|
|
}
|
|
$s .= " ($preserve->{dow})" if($preserve->{dow} && ($preserve->{w} || $preserve->{m} || $preserve->{y}));
|
|
}
|
|
return $s;
|
|
}
|
|
|
|
|
|
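# timestamp() formats a point in time for snapshot names and log output.
# Examples (local timezone assumed to be UTC+01:00):
#   short     -> "20160131"
#   long      -> "20160131T2359"
#   long-iso  -> "20160131T235959+0100"
#   debug-iso -> "2016-01-31T23:59:59+0100"
# The UTC offset is derived from timegm()-timelocal() of the same struct tm,
# so it reflects the offset in effect at the given time, not at "now".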
sub timestamp($$;$)
|
|
{
|
|
my $time = shift // die; # unixtime, or arrayref from localtime()
|
|
my $format = shift;
|
|
my $tm_is_utc = shift;
|
|
my @tm = ref($time) ? @$time : localtime($time);
|
|
my $ts;
|
|
# NOTE: can't use POSIX::strftime(), as "%z" always prints offset of local timezone!
|
|
|
|
if($format eq "short") {
|
|
return sprintf('%04u%02u%02u', $tm[5] + 1900, $tm[4] + 1, $tm[3]);
|
|
}
|
|
elsif($format eq "long") {
|
|
return sprintf('%04u%02u%02uT%02u%02u', $tm[5] + 1900, $tm[4] + 1, $tm[3], $tm[2], $tm[1]);
|
|
}
|
|
elsif($format eq "long-iso") {
|
|
$ts = sprintf('%04u%02u%02uT%02u%02u%02u', $tm[5] + 1900, $tm[4] + 1, $tm[3], $tm[2], $tm[1], $tm[0]);
|
|
}
|
|
elsif($format eq "debug-iso") {
|
|
$ts = sprintf('%04u-%02u-%02uT%02u:%02u:%02u', $tm[5] + 1900, $tm[4] + 1, $tm[3], $tm[2], $tm[1], $tm[0]);
|
|
}
|
|
else { die; }
|
|
|
|
if($tm_is_utc) {
|
|
$ts .= '+0000'; # or 'Z'
|
|
} else {
|
|
my $offset = timegm(@tm) - timelocal(@tm);
|
|
if($offset < 0) { $ts .= '-'; $offset = -$offset; } else { $ts .= '+'; }
|
|
my $h = int($offset / (60 * 60));
|
|
die if($h > 24); # sanity check, something went really wrong
|
|
$ts .= sprintf('%02u%02u', $h, int($offset / 60) % 60);
|
|
}
|
|
return $ts;
|
|
}
|
|
|
|
|
|
sub print_header(@)
|
|
{
|
|
my %args = @_;
|
|
my $config = $args{config};
|
|
|
|
print "--------------------------------------------------------------------------------\n";
|
|
print "$args{title} ($VERSION_INFO)\n\n";
|
|
if($args{time}) {
|
|
print " Date: " . localtime($args{time}) . "\n";
|
|
}
|
|
if($config) {
|
|
print " Config: $config->{SRC_FILE}\n";
|
|
}
|
|
if($dryrun) {
|
|
print " Dryrun: YES\n";
|
|
}
|
|
if($config && $config->{CMDLINE_FILTER_LIST}) {
|
|
my @list = sort @{$config->{CMDLINE_FILTER_LIST}};
|
|
my @sorted = ( grep(/^group/, @list),
|
|
grep(/^volume/, @list),
|
|
grep(/^subvolume/, @list),
|
|
grep(/^target/, @list) );
|
|
die unless(scalar(@list) == scalar(@sorted));
|
|
print " Filter: ";
|
|
print join("\n ", @sorted);
|
|
print "\n";
|
|
}
|
|
if($args{info}) {
|
|
print "\n" . join("\n", grep(defined, @{$args{info}})) . "\n";
|
|
}
|
|
if($args{legend}) {
|
|
print "\nLegend:\n ";
|
|
print join("\n ", @{$args{legend}});
|
|
print "\n";
|
|
}
|
|
print "--------------------------------------------------------------------------------\n";
|
|
}
|
|
|
|
|
|
sub print_table($;$)
|
|
{
|
|
my $data = shift;
|
|
my $spacing = shift // " ";
|
|
my $maxlen = 0;
|
|
foreach (@$data) {
|
|
$maxlen = length($_->[0]) if($maxlen < length($_->[0]));
|
|
}
|
|
foreach (@$data) {
|
|
print $_->[0] . ((' ' x ($maxlen - length($_->[0]))) . $spacing) . $_->[1] . "\n";
|
|
}
|
|
}
|
|
|
|
|
|
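# print_formatted() prints a list of row hashes in one of the formats defined
# in %table_formats (defined elsewhere): "raw" (key="value" pairs, prefixed
# with format="<format_key>"), "tlog" / "syslog" (space separated values, "-"
# for empty fields, syslog lines sent via syslog()), or an aligned table
# (default). Unknown formats fall back to "table" with a warning.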
sub print_formatted(@)
|
|
{
|
|
my $format_key = shift || die;
|
|
my $data = shift || die;
|
|
my $default_format = "table";
|
|
my %args = @_;
|
|
my $title = $args{title};
|
|
my $format = $args{output_format} || $output_format || $default_format;
|
|
my $keys = $table_formats{$format_key}->{$format};
|
|
my $ralign = $table_formats{$format_key}->{RALIGN} // {};
|
|
my $fh = $args{outfile} // *STDOUT;
|
|
my $table_spacing = 2;
|
|
|
|
unless($keys) {
|
|
WARN "Unsupported output format \"$format\", defaulting to \"$default_format\" format.";
|
|
$keys = $table_formats{$format_key}->{$default_format} || die;
|
|
$format = $default_format;
|
|
}
|
|
|
|
print $fh "$title\n" if($title);
|
|
if($format eq "raw")
|
|
{
|
|
# output: key0="value0" key1="value1" ...
|
|
foreach my $row (@$data) {
|
|
print $fh "format=\"$format_key\" ";
|
|
print $fh join(' ', map { "$_=\"" . ($row->{$_} // "") . "\""; } @$keys) . "\n";
|
|
}
|
|
}
|
|
elsif(($format eq "tlog") || ($format eq "syslog"))
|
|
{
|
|
# output: value0 value1, ...
|
|
unless($args{no_header}) {
|
|
print $fh join(' ', @$keys) . "\n";
|
|
}
|
|
foreach my $row (@$data) {
|
|
my $line = join(' ', map { ((defined($row->{$_}) && ($_ eq "message")) ? '# ' : '') . ($row->{$_} // "-") } @$keys);
|
|
if($format eq "syslog") { # dirty hack, ignore outfile on syslog format
|
|
syslog($line);
|
|
} else {
|
|
print $fh ($line . "\n");
|
|
}
|
|
}
|
|
}
|
|
else
|
|
{
|
|
# sanitize and calculate maxlen for each column
|
|
# NOTE: this is destructive on data!
|
|
my %maxlen;
|
|
my @sane_data;
|
|
foreach my $key (@$keys) {
|
|
$maxlen{$key} = length($key); # initialize with size of key
|
|
}
|
|
foreach my $row (@$data) {
|
|
foreach my $key (@$keys) {
|
|
my $val = $row->{$key};
|
|
if(ref $val eq "ARRAY") {
|
|
$val = join(',', @{$val});
|
|
}
|
|
$val //= "-";
|
|
$val = "-" if($val eq "");
|
|
$row->{$key} = $val; # write back the sanitized value
|
|
$maxlen{$key} = length($val) if($maxlen{$key} < length($val));
|
|
}
|
|
}
|
|
|
|
# print keys (headings)
|
|
unless($args{no_header}) {
|
|
my $fill = 0;
|
|
foreach (@$keys) {
|
|
print $fh ' ' x $fill;
|
|
$fill = $maxlen{$_} - length($_);
|
|
if($ralign->{$_}) {
|
|
print $fh ' ' x $fill;
|
|
$fill = 0;
|
|
}
|
|
print $fh $_;
|
|
$fill += $table_spacing;
|
|
}
|
|
print $fh "\n";
|
|
print $fh join(' ' x $table_spacing, map { '-' x ($maxlen{$_}) } @$keys) . "\n";
|
|
}
|
|
|
|
# print values
|
|
foreach my $row (@$data) {
|
|
my $fill = 0;
|
|
foreach (@$keys) {
|
|
my $val = $row->{$_};
|
|
print $fh ' ' x $fill;
|
|
$fill = $maxlen{$_} - length($val);
|
|
if($ralign->{$_}) {
|
|
print $fh ' ' x $fill;
|
|
$fill = 0;
|
|
}
|
|
print $fh $val;
|
|
$fill += $table_spacing;
|
|
}
|
|
print $fh "\n";
|
|
}
|
|
}
|
|
}
|
|
|
|
|
|
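# _origin_tree() recursively builds the line list for the "origin" command:
# it follows parent_uuid upwards (printed as "^--") and, for received
# subvolumes, also lists subvolumes related via received_uuid (printed on
# separate lines, identical content).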
sub _origin_tree
|
|
{
|
|
my $prefix = shift;
|
|
my $node = shift // die;
|
|
my $lines = shift;
|
|
my $nodelist = shift;
|
|
my $depth = shift // 0;
|
|
my $seen = shift // [];
|
|
my $norecurse = shift;
|
|
my $uuid = $node->{uuid} || die;
|
|
|
|
# cache a bit, this might be large
|
|
$nodelist //= [ (sort { $a->{REL_PATH} cmp $b->{REL_PATH} } values %uuid_cache) ];
|
|
|
|
my @url = get_cached_url_by_uuid($uuid);
|
|
my $out_path;
|
|
if(scalar @url) {
|
|
$out_path = join(" === ", sort map { vinfo($_)->{PRINT} } @url);
|
|
} else {
|
|
$out_path = _fs_path($node);
|
|
}
|
|
my $prefix_spaces = ' ' x (($depth * 4) - ($prefix ? 4 : 0));
|
|
push(@$lines, { tree => "${prefix_spaces}${prefix}$out_path",
|
|
uuid => $node->{uuid},
|
|
parent_uuid => $node->{parent_uuid},
|
|
received_uuid => $node->{received_uuid},
|
|
});
|
|
|
|
# handle deep recursion
|
|
return 0 if(grep /^$uuid$/, @$seen);
|
|
|
|
if($node->{parent_uuid} ne '-') {
|
|
my $parent_node = $uuid_cache{$node->{parent_uuid}};
|
|
if($parent_node) {
|
|
if($norecurse) {
|
|
push(@$lines,{ tree => "${prefix_spaces} ^-- ...",
|
|
uuid => $parent_node->{uuid},
|
|
parent_uuid => $parent_node->{parent_uuid},
|
|
received_uuid => $parent_node->{received_uuid},
|
|
recursion => 'stop_recursion',
|
|
});
|
|
return 0;
|
|
}
|
|
if($parent_node->{readonly}) {
|
|
_origin_tree("^-- ", $parent_node, $lines, $nodelist, $depth + 1, undef, 1); # end recursion
|
|
}
|
|
else {
|
|
_origin_tree("^-- ", $parent_node, $lines, $nodelist, $depth + 1);
|
|
}
|
|
}
|
|
else {
|
|
push(@$lines,{ tree => "${prefix_spaces} ^-- <unknown>" });
|
|
}
|
|
}
|
|
|
|
return 0 if($norecurse);
|
|
push(@$seen, $uuid);
|
|
|
|
if($node->{received_uuid} ne '-') {
|
|
my $received_uuid = $node->{received_uuid};
|
|
my @receive_parents; # there should be only one!
|
|
my @receive_twins;
|
|
|
|
foreach (@$nodelist) {
|
|
next if($_->{uuid} eq $uuid);
|
|
if($received_uuid eq $_->{uuid} && $_->{readonly}) {
|
|
_origin_tree("", $_, \@receive_parents, $nodelist, $depth, $seen);
|
|
}
|
|
elsif(($_->{received_uuid} ne '-') && ($received_uuid eq $_->{received_uuid}) && $_->{readonly}) {
|
|
_origin_tree("", $_, \@receive_twins, $nodelist, $depth, $seen, 1); # end recursion
|
|
}
|
|
}
|
|
push @$lines, @receive_twins;
|
|
push @$lines, @receive_parents;
|
|
}
|
|
|
|
return 0;
|
|
}
|
|
|
|
|
|
sub exit_status
|
|
{
|
|
my $config = shift;
|
|
foreach my $subsection (@{$config->{SUBSECTION}}) {
|
|
return 10 if($subsection->{ABORTED} && ($subsection->{ABORTED} ne "USER_SKIP"));
|
|
return 10 if(exit_status($subsection));
|
|
}
|
|
return 0;
|
|
}
|
|
|
|
|
|
|
|
MAIN:
|
|
{
|
|
# set PATH instead of using absolute "/sbin/btrfs" (for now), as
|
|
# different distros (and even different versions of btrfs-progs)
|
|
# install the "btrfs" executable to different locations.
|
|
$ENV{PATH} = '/sbin:/bin:/usr/sbin:/usr/bin';
|
|
|
|
Getopt::Long::Configure qw(gnu_getopt);
|
|
my $start_time = time;
|
|
@tm_now = localtime($start_time);
|
|
|
|
my %config_override_cmdline;
|
|
my ($config_cmdline, $quiet, $verbose, $preserve_backups, $resume_only, $print_schedule, $lockfile_cmdline);
|
|
unless(GetOptions(
|
|
'help|h' => sub { VERSION_MESSAGE(); HELP_MESSAGE(0); exit 0; },
|
|
'version' => sub { VERSION_MESSAGE(); exit 0; },
|
|
'config|c=s' => \$config_cmdline,
|
|
'dry-run|n' => \$dryrun,
|
|
'preserve|p' => \$preserve_backups,
|
|
'resume-only|r' => \$resume_only,
|
|
'quiet|q' => \$quiet,
|
|
'verbose|v' => sub { $loglevel = 2; },
|
|
'loglevel|l=s' => \$loglevel,
|
|
'progress' => \$show_progress,
|
|
'table|t' => sub { $output_format = "table" },
|
|
'format=s' => \$output_format,
|
|
'print-schedule' => \$print_schedule,
|
|
'lockfile=s' => \$lockfile_cmdline,
|
|
'override=s' => \%config_override_cmdline, # e.g. --override=incremental=no
|
|
))
|
|
{
|
|
VERSION_MESSAGE();
|
|
HELP_MESSAGE(0);
|
|
exit 2;
|
|
}
|
|
my $command = shift @ARGV;
|
|
unless($command) {
|
|
VERSION_MESSAGE();
|
|
HELP_MESSAGE(0);
|
|
exit 2;
|
|
}
|
|
|
|
# assign command line options
|
|
@config_src = ( $config_cmdline ) if($config_cmdline);
|
|
if (lc($loglevel) eq "warn") { $loglevel = 1; }
|
|
elsif(lc($loglevel) eq "info") { $loglevel = 2; }
|
|
elsif(lc($loglevel) eq "debug") { $loglevel = 3; }
|
|
elsif(lc($loglevel) eq "trace") { $loglevel = 4; }
|
|
elsif($loglevel =~ /^[0-9]+$/) { ; }
|
|
else { $loglevel = 1; }
|
|
require_data_dumper() if(($loglevel >= 4) || ($VERSION =~ /-dev$/));
|
|
|
|
# check command line options
|
|
if($show_progress && (not check_exe('pv'))) {
|
|
WARN 'Found option "--progress", but required executable "pv" does not exist on your system. Please install "pv".';
|
|
$show_progress = 0;
|
|
}
|
|
my ($action_run, $action_usage, $action_resolve, $action_diff, $action_origin, $action_config_print, $action_list, $action_clean, $action_archive);
|
|
my @filter_args;
|
|
my $args_allow_group = 1;
|
|
my $args_expected_min = 0;
|
|
my $args_expected_max = 9999;
|
|
if(($command eq "run") || ($command eq "dryrun")) {
|
|
$action_run = 1;
|
|
$dryrun = 1 if($command eq "dryrun");
|
|
$args_allow_group = 1;
|
|
@filter_args = @ARGV;
|
|
}
|
|
elsif ($command eq "clean") {
|
|
$action_clean = 1;
|
|
@filter_args = @ARGV;
|
|
}
|
|
elsif ($command eq "archive") {
|
|
$action_archive = 1;
|
|
$args_expected_min = $args_expected_max = 2;
|
|
$args_allow_group = 0;
|
|
@filter_args = @ARGV;
|
|
}
|
|
elsif ($command eq "usage") {
|
|
$action_usage = 1;
|
|
@filter_args = @ARGV;
|
|
}
|
|
elsif ($command eq "diff") {
|
|
$action_diff = 1;
|
|
$args_expected_min = $args_expected_max = 2;
|
|
$args_allow_group = 0;
|
|
@filter_args = @ARGV;
|
|
}
|
|
elsif ($command eq "origin") {
|
|
$action_origin = 1;
|
|
$args_expected_min = $args_expected_max = 1;
|
|
$args_allow_group = 0;
|
|
@filter_args = @ARGV;
|
|
}
|
|
elsif($command eq "list") {
|
|
my $subcommand = shift @ARGV // "";
|
|
if(($subcommand eq "config") ||
|
|
($subcommand eq "volume") ||
|
|
($subcommand eq "source") ||
|
|
($subcommand eq "target"))
|
|
{
|
|
$action_list = $subcommand;
|
|
}
|
|
elsif(($subcommand eq "snapshots") ||
|
|
($subcommand eq "backups") ||
|
|
($subcommand eq "latest"))
|
|
{
|
|
$action_resolve = $subcommand;
|
|
}
|
|
else {
|
|
$action_list = "config";
|
|
unshift @ARGV, $subcommand if($subcommand ne "");
|
|
}
|
|
@filter_args = @ARGV;
|
|
}
|
|
elsif($command eq "stats") {
|
|
$action_resolve = "stats";
|
|
@filter_args = @ARGV;
|
|
}
|
|
elsif ($command eq "config") {
|
|
my $subcommand = shift @ARGV // "";
|
|
@filter_args = @ARGV;
|
|
if(($subcommand eq "print") || ($subcommand eq "print-all")) {
|
|
$action_config_print = $subcommand;
|
|
}
|
|
elsif($subcommand eq "list") {
|
|
$action_list = "config";
|
|
}
|
|
else {
|
|
ERROR "Unknown subcommand for \"config\" command: $subcommand";
|
|
HELP_MESSAGE(0);
|
|
exit 2;
|
|
}
|
|
}
|
|
else {
|
|
ERROR "Unrecognized command: $command";
|
|
HELP_MESSAGE(0);
|
|
exit 2;
|
|
}
|
|
if(($args_expected_min > scalar(@ARGV)) || ($args_expected_max < scalar(@ARGV))) {
|
|
ERROR "Incorrect number of arguments";
|
|
HELP_MESSAGE(0);
|
|
exit 2;
|
|
}
|
|
|
|
# input validation
|
|
foreach (@filter_args) {
|
|
if($args_allow_group && /^($group_match)$/) { # matches group
|
|
$_ = $1; # untaint argument
|
|
next;
|
|
}
|
|
else {
|
|
my ($url_prefix, $path) = check_url($_);
|
|
if(defined($path)) {
|
|
$_ = $url_prefix . $path;
|
|
next;
|
|
}
|
|
}
|
|
ERROR "Bad argument: not a subvolume" . ($args_allow_group ? "/group" : "") . " declaration: $_";
|
|
HELP_MESSAGE(0);
|
|
exit 2;
|
|
}
|
|
foreach my $key (keys %config_override_cmdline) {
|
|
DEBUG "config_override: \"$key=$config_override_cmdline{$key}\"";
|
|
unless(append_config_option(\%config_override, $key, $config_override_cmdline{$key}, "root")) {
|
|
HELP_MESSAGE(0);
|
|
exit 2;
|
|
}
|
|
}
|
|
if(defined($lockfile_cmdline)) {
|
|
if($lockfile_cmdline =~ /^($file_match)$/) {
|
|
$lockfile = $1; # untaint argument
|
|
} else {
|
|
ERROR "Option \"--lockfile\" is not a valid file name: \"$lockfile_cmdline\"";
|
|
HELP_MESSAGE(0);
|
|
exit 2;
|
|
}
|
|
}
|
|
|
|
|
|
INFO "$VERSION_INFO (" . localtime($start_time) . ")";
|
|
|
|
if($action_diff)
|
|
{
|
|
#
|
|
# print snapshot diff
|
|
#
|
|
my $src_url = $filter_args[0] || die;
|
|
my $target_url = $filter_args[1] || die;
|
|
my $default_config = init_config();
|
|
# NOTE: ssh://{src,target} uses default config
|
|
|
|
my $src_vol = vinfo($src_url, $default_config);
|
|
unless(vinfo_init_root($src_vol)) { ERROR "Failed to fetch subvolume detail for '$src_vol->{PRINT}'" . ($err ? ": $err" : ""); exit 1; }
|
|
if($src_vol->{node}{is_root}) { ERROR "Subvolume is btrfs root: $src_vol->{PRINT}"; exit 1; }
|
|
|
|
my $target_vol = vinfo($target_url, $default_config);
|
|
unless(vinfo_init_root($target_vol)) { ERROR "Failed to fetch subvolume detail for '$target_vol->{PRINT}'" . ($err ? ": $err" : ""); exit 1; }
|
|
if($target_vol->{node}{is_root}) { ERROR "Subvolume is btrfs root: $target_vol->{PRINT}"; exit 1; }
|
|
|
|
unless(_is_child_of($src_vol->{node}->{TREE_ROOT}, $target_vol->{node}{uuid})) {
|
|
ERROR "Subvolumes are not on the same btrfs filesystem!";
|
|
exit 1;
|
|
}
|
|
|
|
# NOTE: in some cases "cgen" differs from "gen", even for read-only snapshots (observed: gen=cgen+1)
|
|
my $lastgen = $src_vol->{node}{gen} + 1;
|
|
|
|
# dump files, sorted and unique
|
|
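# btrfs_subvolume_find_new() is assumed to wrap "btrfs subvolume find-new
# <path> <last_gen>", returning the list of files modified since generation
# $lastgen, the transid marker, and any parse errors.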
my $ret = btrfs_subvolume_find_new($target_vol, $lastgen);
|
|
exit 1 unless(ref($ret));
|
|
|
|
print_header(title => "Subvolume Diff",
|
|
time => $start_time,
|
|
info => [
|
|
"Showing changed files for subvolume:",
|
|
" $target_vol->{PRINT} (gen=$target_vol->{node}{gen})",
|
|
"",
|
|
"Starting at creation generation of subvolume:",
|
|
" $src_vol->{PRINT} (cgen=$src_vol->{node}{cgen})",
|
|
"",
|
|
"This will show all files modified within generation range: [$lastgen..$target_vol->{node}{gen}]",
|
|
"Newest file generation (transid marker) was: $ret->{transid_marker}",
|
|
($ret->{parse_errors} ? "Parse errors: $ret->{parse_errors}" : undef),
|
|
],
|
|
legend => [
|
|
"+.. file accessed at offset 0 (at least once)",
|
|
".c. flags COMPRESS or COMPRESS|INLINE set (at least once)",
|
|
"..i flags INLINE or COMPRESS|INLINE set (at least once)",
|
|
"<count> file was modified in <count> generations",
|
|
"<size> file was modified for a total of <size> bytes",
|
|
]
|
|
);
|
|
|
|
my $files = $ret->{files};
|
|
|
|
# calculate the character offsets
|
|
my $total_len = 0;
|
|
my $len_charlen = 0;
|
|
my $gen_charlen = 0;
|
|
foreach (values %$files) {
|
|
my $len = length($_->{len});
|
|
my $gen = length(scalar(keys(%{$_->{gen}})));
|
|
$len_charlen = $len if($len > $len_charlen);
|
|
$gen_charlen = $gen if($gen > $gen_charlen);
|
|
$total_len += $_->{len};
|
|
}
|
|
|
|
# finally print the output
|
|
foreach my $name (sort keys %$files) {
|
|
print ($files->{$name}->{new} ? '+' : '.');
|
|
print ($files->{$name}->{flags}->{compress} ? 'c' : '.');
|
|
print ($files->{$name}->{flags}->{inline} ? 'i' : '.');
|
|
|
|
# make nice table
|
|
my $gens = scalar(keys(%{$files->{$name}->{gen}}));
|
|
my $len = $files->{$name}->{len};
|
|
print " " . (' ' x ($gen_charlen - length($gens))) . $gens;
|
|
print " " . (' ' x ($len_charlen - length($len))) . $len;
|
|
|
|
print " $name\n";
|
|
}
|
|
print "\nTotal size: $total_len bytes\n";
|
|
exit 0;
|
|
}
|
|
|
|
|
|
#
|
|
# parse config file
|
|
#
|
|
my $config = parse_config(@config_src);
|
|
unless($config) {
|
|
ERROR "Failed to parse configuration file";
|
|
exit 2;
|
|
}
|
|
unless(ref($config->{SUBSECTION}) eq "ARRAY") {
|
|
ERROR "No volumes defined in configuration file";
|
|
exit 2;
|
|
}
|
|
|
|
#
|
|
# try exclusive lock if set in config or command-line option
|
|
#
|
|
$lockfile //= config_key($config, "lockfile");
|
|
if(defined($lockfile) && (not $dryrun)) {
|
|
unless(open(LOCKFILE, ">>$lockfile")) {
|
|
# NOTE: the lockfile is never deleted by design
|
|
ERROR "Failed to open lock file '$lockfile': $!";
|
|
exit 3;
|
|
}
|
|
unless(flock(LOCKFILE, 6)) { # exclusive, non-blocking (LOCK_EX | LOCK_NB)
|
|
ERROR "Failed to take lock (another btrbk instance is running): $lockfile";
|
|
exit 3;
|
|
}
|
|
}
|
|
|
|
|
|
if($action_archive)
|
|
{
|
|
#
|
|
# archive (clone) tree
|
|
#
|
|
# NOTE: This is intended to work without a config file! The only
|
|
# thing used from the configuration is the SSH and transaction log
|
|
# stuff.
|
|
#
|
|
init_transaction_log(config_key($config, "transaction_log"),
|
|
config_key($config, "transaction_syslog"));
|
|
|
|
my $src_url = $filter_args[0] || die;
|
|
my $archive_url = $filter_args[1] || die;
|
|
|
|
# FIXME: add command line options for preserve logic
|
|
$config->{SUBSECTION} = []; # clear configured subsections, we build them dynamically
|
|
|
|
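# For every distinct <subvol_dir>/<snapshot_name> found below the source, a
# synthetic "archive_source" section with a single send-receive "target"
# subsection is appended to the configuration, so the regular send/receive
# and preserve machinery below can operate on it.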
my $src_root = vinfo($src_url, $config);
|
|
unless(vinfo_init_root($src_root, resolve_subdir => 1)) {
|
|
ERROR "Failed to fetch subvolume detail for '$src_root->{PRINT}'" . ($err ? ": $err" : "");
|
|
exit 1;
|
|
}
|
|
my $archive_root = vinfo($archive_url, $config);
|
|
unless(vinfo_init_root($archive_root, resolve_subdir => 1)) {
|
|
ERROR "Failed to fetch subvolume detail for '$archive_root->{PRINT}'" . ($err ? ": $err" : "");
|
|
exit 1;
|
|
}
|
|
|
|
my %name_uniq;
|
|
my @subvol_list = @{vinfo_subvol_list($src_root)};
|
|
my @sorted = sort { ($a->{subtree_depth} <=> $b->{subtree_depth}) || ($a->{SUBVOL_DIR} cmp $b->{SUBVOL_DIR}) } @subvol_list;
|
|
foreach my $vol (@sorted) {
|
|
next unless($vol->{node}{readonly});
|
|
my $snapshot_name = $vol->{node}{BTRBK_BASENAME};
|
|
unless(defined($snapshot_name)) {
|
|
WARN "Skipping subvolume (not a btrbk subvolume): $vol->{PRINT}";
|
|
next;
|
|
}
|
|
my $subvol_dir = $vol->{SUBVOL_DIR};
|
|
next if($name_uniq{"$subvol_dir/$snapshot_name"});
|
|
$name_uniq{"$subvol_dir/$snapshot_name"} = 1;
|
|
my $droot_url = $archive_url . ($subvol_dir eq "" ? "" : "/$subvol_dir");
|
|
my $sroot_url = $src_url . ($subvol_dir eq "" ? "" : "/$subvol_dir");
|
|
my $config_sroot = { CONTEXT => "archive_source",
|
|
PARENT => $config,
|
|
url => $sroot_url, # ABORTED() needs this
|
|
snapshot_name => $snapshot_name,
|
|
};
|
|
my $config_droot = { CONTEXT => "target",
|
|
PARENT => $config_sroot,
|
|
target_type => "send-receive", # macro_send_receive checks this
|
|
url => $droot_url, # ABORTED() needs this
|
|
};
|
|
$config_sroot->{SUBSECTION} = [ $config_droot ];
|
|
push(@{$config->{SUBSECTION}}, $config_sroot);
|
|
|
|
my $sroot = vinfo($sroot_url, $config_sroot);
|
|
vinfo_assign_config($sroot);
|
|
unless(vinfo_init_root($sroot, resolve_subdir => 1)) {
|
|
ABORTED($sroot, "Failed to fetch subvolume detail" . ($err ? ": $err" : ""));
|
|
WARN "Skipping archive source \"$sroot->{PRINT}\": $abrt";
|
|
next;
|
|
}
|
|
|
|
my $droot = vinfo($droot_url, $config_droot);
|
|
vinfo_assign_config($droot);
|
|
unless(vinfo_init_root($droot, resolve_subdir => 1)) {
|
|
DEBUG("Failed to fetch subvolume detail" . ($err ? ": $err" : ""));
|
|
unless(system_mkdir($droot)) {
|
|
ABORTED($droot, "Failed to create directory: $droot->{PRINT}/");
|
|
WARN "Skipping archive target \"$droot->{PRINT}\": $abrt";
|
|
next;
|
|
}
|
|
$droot->{SUBDIR_CREATED} = 1;
|
|
if($dryrun) {
|
|
# we need to fake this directory on dryrun
|
|
$droot->{node} = $archive_root->{node};
|
|
$droot->{NODE_SUBDIR} = $subvol_dir;
|
|
}
|
|
else {
|
|
# after directory is created, try to init again
|
|
unless(vinfo_init_root($droot, resolve_subdir => 1)) {
|
|
ABORTED($droot, "Failed to fetch subvolume detail" . ($err ? ": $err" : ""));
|
|
WARN "Skipping archive target \"$droot->{PRINT}\": $abrt";
|
|
next;
|
|
}
|
|
}
|
|
}
|
|
if(_is_child_of($droot->{node}->{TREE_ROOT}, $vol->{node}{uuid})) {
|
|
ERROR "Source and target subvolumes are on the same btrfs filesystem!";
|
|
exit 1;
|
|
}
|
|
}
|
|
|
|
my $schedule_results = [];
|
|
my $aborted;
|
|
foreach my $sroot (vinfo_subsection($config, 'archive_source')) {
|
|
if($aborted) {
|
|
# abort all subsequent sources on any abort (we don't want to keep hammering on "disk full" errors)
|
|
ABORTED($sroot, $aborted);
|
|
next;
|
|
}
|
|
foreach my $droot (vinfo_subsection($sroot, 'target')) {
|
|
my $snapshot_name = config_key($droot, "snapshot_name") // die;
|
|
INFO "Archiving subvolumes: $sroot->{PRINT}/${snapshot_name}.*";
|
|
macro_archive_target($sroot, $droot, $snapshot_name, { results => $schedule_results });
|
|
if(ABORTED($droot)) {
|
|
# also abort $sroot
|
|
$aborted = "At least one target aborted earlier";
|
|
ABORTED($sroot, $aborted);
|
|
WARN "Skipping archiving of \"$sroot->{PRINT}/\": $abrt";
|
|
last;
|
|
}
|
|
}
|
|
}
|
|
|
|
|
|
my $del_schedule_results = [];
|
|
if($preserve_backups || $resume_only) {
|
|
INFO "Preserving all archives (option \"-p\" or \"-r\" present)";
|
|
}
|
|
else
|
|
{
|
|
foreach my $sroot (vinfo_subsection($config, 'archive_source')) {
|
|
foreach my $droot (vinfo_subsection($sroot, 'target')) {
|
|
my $snapshot_name = config_key($droot, "snapshot_name") // die;
|
|
INFO "Cleaning archive: $droot->{PRINT}/${snapshot_name}.*";
|
|
macro_delete($droot, "", $snapshot_name, $droot,
|
|
{ preserve => config_preserve_hash($droot, "archive"),
|
|
results => $del_schedule_results,
|
|
result_hints => { topic => "archive", root_path => $droot->{PATH} },
|
|
},
|
|
commit => config_key($droot, "btrfs_commit_delete"),
|
|
type => "delete_archive",
|
|
);
|
|
}
|
|
}
|
|
}
|
|
|
|
|
|
my $exit_status = exit_status($config);
|
|
my $time_elapsed = time - $start_time;
|
|
INFO "Completed within: ${time_elapsed}s (" . localtime(time) . ")";
|
|
action("finished",
|
|
status => $exit_status ? "partial" : "success",
|
|
duration => $time_elapsed,
|
|
message => $exit_status ? "At least one backup task aborted" : undef,
|
|
);
|
|
close_transaction_log();
|
|
|
|
unless($quiet)
|
|
{
|
|
# print scheduling results
|
|
if($print_schedule) {
|
|
my @data = map { { %$_, vinfo_prefixed_keys("", $_->{value}) }; } @$schedule_results;
|
|
print_formatted("schedule", \@data, title => "ARCHIVE SCHEDULE");
|
|
print "\n";
|
|
}
|
|
|
|
if($print_schedule && not ($preserve_backups || $resume_only)) {
|
|
my @data = map { { %$_, vinfo_prefixed_keys("", $_->{value}) }; } @$del_schedule_results;
|
|
print_formatted("schedule", \@data, title => "DELETE SCHEDULE");
|
|
print "\n";
|
|
}
|
|
|
|
# print summary
|
|
$output_format ||= "custom";
|
|
if($output_format eq "custom")
|
|
{
|
|
my @unrecoverable;
|
|
my @out;
|
|
foreach my $sroot (vinfo_subsection($config, 'archive_source', 1)) {
|
|
foreach my $droot (vinfo_subsection($sroot, 'target', 1)) {
|
|
my @subvol_out;
|
|
if($droot->{SUBDIR_CREATED}) {
|
|
push @subvol_out, "++. $droot->{PRINT}/";
|
|
}
|
|
foreach(@{$droot->{SUBVOL_RECEIVED} // []}) {
|
|
my $create_mode = "***";
|
|
$create_mode = ">>>" if($_->{parent});
|
|
$create_mode = "!!!" if($_->{ERROR});
|
|
push @subvol_out, "$create_mode $_->{received_subvolume}->{PRINT}";
|
|
}
|
|
foreach(sort { $a->{PATH} cmp $b->{PATH} } @{$droot->{SUBVOL_DELETED} // []}) {
|
|
push @subvol_out, "--- $_->{PRINT}";
|
|
}
|
|
if((ABORTED($droot) && (ABORTED($droot) ne "USER_SKIP")) ||
|
|
(ABORTED($sroot) && (ABORTED($sroot) ne "USER_SKIP"))) {
|
|
push @subvol_out, "!!! Target \"$droot->{PRINT}\" aborted: " . (ABORTED($droot) || ABORTED($sroot));
|
|
}
|
|
if($droot->{CONFIG}->{UNRECOVERABLE}) {
|
|
push(@unrecoverable, $droot->{CONFIG}->{UNRECOVERABLE});
|
|
}
|
|
if(@subvol_out) {
|
|
push @out, "$sroot->{PRINT}/$sroot->{CONFIG}->{snapshot_name}.*", @subvol_out, "";
|
|
}
|
|
}
|
|
}
|
|
|
|
print_header(title => "Archive Summary",
|
|
time => $start_time,
|
|
legend => [
|
|
"++. created directory",
|
|
"--- deleted subvolume",
|
|
"*** received subvolume (non-incremental)",
|
|
">>> received subvolume (incremental)",
|
|
],
|
|
);
|
|
|
|
print join("\n", @out);
|
|
|
|
if($exit_status || scalar(@unrecoverable)) {
|
|
print "\nNOTE: Some errors occurred, which may result in missing backups!\n";
|
|
print "Please check warning and error messages above.\n";
|
|
print join("\n", @unrecoverable) . "\n" if(@unrecoverable);
|
|
}
|
|
if($dryrun) {
|
|
print "\nNOTE: Dryrun was active, none of the operations above were actually executed!\n";
|
|
}
|
|
}
|
|
else
|
|
{
|
|
# print action log (without transaction start messages)
|
|
my @data = grep { $_->{status} ne "starting" } @transaction_log;
|
|
print_formatted("transaction", \@data, title => "TRANSACTION LOG");
|
|
}
|
|
}
|
|
|
|
exit $exit_status;
|
|
}
|
|
|
|
|
|
#
|
|
# expand subvolume globs (wildcards)
|
|
#
|
|
foreach my $config_vol (@{$config->{SUBSECTION}}) {
|
|
die unless($config_vol->{CONTEXT} eq "volume");
|
|
|
|
# read-in subvolume list (and expand globs) only if needed
|
|
next unless(grep defined($_->{GLOB_CONTEXT}), @{$config_vol->{SUBSECTION}});
|
|
my $sroot = vinfo($config_vol->{url}, $config_vol);
|
|
unless(vinfo_init_root($sroot)) {
|
|
ABORTED($sroot, "Failed to fetch subvolume detail" . ($err ? ": $err" : ""));
|
|
WARN "Skipping volume \"$sroot->{PRINT}\": $abrt";
|
|
next;
|
|
}
|
|
|
|
my @vol_subsection_expanded;
|
|
foreach my $config_subvol (@{$config_vol->{SUBSECTION}}) {
|
|
die unless($config_subvol->{CONTEXT} eq "subvolume");
|
|
if($config_subvol->{GLOB_CONTEXT}) {
|
|
my $globs = $config_subvol->{rel_path};
|
|
INFO "Expanding wildcards: $sroot->{PRINT}/$globs";
|
|
|
|
# support "*some*file*", "*/*"
|
|
my $match = join('[^\/]*', map(quotemeta($_), split(/\*+/, $globs, -1)));
|
|
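# e.g. "data/*"       becomes  "data\/[^\/]*"
#      "*some*file*"  becomes  "[^\/]*some[^\/]*file[^\/]*"
# (a "*" never matches across a "/" path separator)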
TRACE "translated globs \"$globs\" to regex \"$match\"";
|
|
my $expand_count = 0;
|
|
foreach my $vol (@{vinfo_subvol_list($sroot, sort => 'path')})
|
|
{
|
|
if($vol->{node}{readonly}) {
|
|
TRACE "skipping readonly subvolume: $vol->{PRINT}";
|
|
next;
|
|
}
|
|
unless($vol->{SUBVOL_PATH} =~ /^$match$/) {
|
|
TRACE "skipping non-matching subvolume: $vol->{PRINT}";
|
|
next;
|
|
}
|
|
INFO "Found source subvolume: $vol->{PRINT}";
|
|
my %conf = ( %$config_subvol,
|
|
rel_path_glob => $globs,
|
|
rel_path => $vol->{SUBVOL_PATH},
|
|
url => $vol->{URL},
|
|
snapshot_name => $vol->{NAME}, # snapshot_name defaults to subvolume name
|
|
);
|
|
# deep copy of target subsection
|
|
my @subsection_copy = map { { %$_, PARENT => \%conf }; } @{$config_subvol->{SUBSECTION}};
|
|
$conf{SUBSECTION} = \@subsection_copy;
|
|
push @vol_subsection_expanded, \%conf;
|
|
$expand_count += 1;
|
|
}
|
|
unless($expand_count) {
|
|
WARN "No subvolumes found matching: $sroot->{PRINT}/$globs";
|
|
}
|
|
}
|
|
else {
|
|
push @vol_subsection_expanded, $config_subvol;
|
|
}
|
|
}
|
|
$config_vol->{SUBSECTION} = \@vol_subsection_expanded;
|
|
}
|
|
TRACE(Data::Dumper->Dump([$config], ["config"])) if($do_dumper);
|
|
|
|
|
|
#
|
|
# create vinfo nodes (btrfs trees are not read in yet)
|
|
#
|
|
foreach my $config_vol (@{$config->{SUBSECTION}}) {
|
|
die unless($config_vol->{CONTEXT} eq "volume");
|
|
my $sroot = vinfo($config_vol->{url}, $config_vol);
|
|
vinfo_assign_config($sroot);
|
|
foreach my $config_subvol (@{$config_vol->{SUBSECTION}}) {
|
|
die unless($config_subvol->{CONTEXT} eq "subvolume");
|
|
my $svol = vinfo_child($sroot, $config_subvol->{rel_path}, $config_subvol);
|
|
vinfo_assign_config($svol);
|
|
foreach my $config_target (@{$config_subvol->{SUBSECTION}}) {
|
|
die unless($config_target->{CONTEXT} eq "target");
|
|
my $droot = vinfo($config_target->{url}, $config_target);
|
|
vinfo_assign_config($droot);
|
|
}
|
|
}
|
|
}
|
|
|
|
|
|
#
|
|
# filter subvolumes matching command line arguments
|
|
#
|
|
if(($action_run || $action_clean || $action_resolve || $action_usage || $action_list || $action_config_print) && scalar(@filter_args))
|
|
{
|
|
my %match;
|
|
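# A filter argument may name a volume URL, a subvolume URL, a target URL
# (optionally suffixed with "/<snapshot_name>"), or a configured group.
# Sections without any matching argument are marked ABORTED("USER_SKIP") and
# skipped by the subsequent actions; arguments matching nothing at all are a
# hard error.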
foreach my $sroot (vinfo_subsection($config, 'volume', 1)) {
|
|
my $vol_url = $sroot->{URL};
|
|
my $found_vol = 0;
|
|
foreach my $filter (@filter_args) {
|
|
if(($vol_url eq $filter) || (map { ($filter eq $_) || () } @{$sroot->{CONFIG}->{group}})) {
|
|
TRACE "filter argument \"$filter\" matches volume: $vol_url";
|
|
$match{$filter} = ($vol_url eq $filter) ? "volume=$sroot->{PRINT}" : "group=$filter";
|
|
$found_vol = 1;
|
|
# last; # need to cycle through all filter_args for correct %match
|
|
}
|
|
}
|
|
next if($found_vol);
|
|
|
|
my @filter_subvol;
|
|
foreach my $svol (vinfo_subsection($sroot, 'subvolume', 1)) {
|
|
my $subvol_url = $svol->{URL};
|
|
my $found_subvol = 0;
|
|
foreach my $filter (@filter_args) {
|
|
if(($subvol_url eq $filter) || (map { ($filter eq $_) || () } @{$svol->{CONFIG}->{group}})) {
|
|
TRACE "filter argument \"$filter\" matches subvolume: $subvol_url";
|
|
$match{$filter} = ($subvol_url eq $filter) ? "subvolume=$svol->{PRINT}" : "group=$filter";
|
|
$found_subvol = 1;
|
|
$found_vol = 1;
|
|
# last; # need to cycle through all filter_args for correct %match
|
|
}
|
|
}
|
|
next if($found_subvol);
|
|
|
|
my $snapshot_name = config_key($svol, "snapshot_name") // die;
|
|
foreach my $droot (vinfo_subsection($svol, 'target', 1)) {
|
|
my $target_url = $droot->{URL};
|
|
my $found_target = 0;
|
|
foreach my $filter (@filter_args) {
|
|
if(($filter eq $target_url) ||
|
|
($filter eq "$target_url/$snapshot_name") ||
|
|
(map { ($filter eq $_) || () } @{$droot->{CONFIG}->{group}})) {
|
|
TRACE "filter argument \"$filter\" matches target: $target_url";
|
|
$match{$filter} = ($target_url eq $filter) ? "target=$droot->{PRINT}" : "group=$filter";
|
|
$found_target = 1;
|
|
$found_subvol = 1;
|
|
$found_vol = 1;
|
|
# last; # need to cycle through all filter_args for correct %match
|
|
}
|
|
}
|
|
unless($found_target) {
|
|
DEBUG "No match on filter command line argument, skipping target: $target_url";
|
|
ABORTED($droot, "USER_SKIP");
|
|
}
|
|
}
|
|
unless($found_subvol) {
|
|
DEBUG "No match on filter command line argument, skipping subvolume: $subvol_url";
|
|
ABORTED($svol, "USER_SKIP");
|
|
}
|
|
}
|
|
unless($found_vol) {
|
|
DEBUG "No match on filter command line argument, skipping volume: $vol_url";
|
|
ABORTED($sroot, "USER_SKIP");
|
|
}
|
|
}
|
|
# make sure all args have a match
|
|
my @nomatch = map { $match{$_} ? () : $_ } @filter_args;
|
|
if(@nomatch) {
|
|
foreach(@nomatch) {
|
|
ERROR "Command line argument does not match any volume, subvolume, target or group declaration: $_";
|
|
}
|
|
exit 2;
|
|
}
|
|
$config->{CMDLINE_FILTER_LIST} = [ values %match ];
|
|
}
|
|
|
|
|
|
if($action_usage)
|
|
{
|
|
#
|
|
# print filesystem information
|
|
#
|
|
my @data;
|
|
my %processed;
|
|
foreach my $sroot (vinfo_subsection($config, 'volume')) {
|
|
unless($processed{$sroot->{URL}}) {
|
|
my $usage = btrfs_filesystem_usage($sroot) // {};
|
|
push @data, { %$usage,
|
|
type => "source",
|
|
vinfo_prefixed_keys("", $sroot),
|
|
};
|
|
$processed{$sroot->{URL}} = 1;
|
|
}
|
|
}
|
|
|
|
foreach my $sroot (vinfo_subsection($config, 'volume')) {
|
|
foreach my $svol (vinfo_subsection($sroot, 'subvolume')) {
|
|
foreach my $droot (vinfo_subsection($svol, 'target')) {
|
|
unless($processed{$droot->{URL}}) {
|
|
my $usage = btrfs_filesystem_usage($droot) // {};
|
|
push @data, { %$usage,
|
|
type => "target",
|
|
vinfo_prefixed_keys("", $droot),
|
|
};
|
|
$processed{$droot->{URL}} = 1;
|
|
}
|
|
}
|
|
}
|
|
}
|
|
print_formatted("usage", \@data);
|
|
exit exit_status($config);
|
|
}
|
|
|
|
|
|
if($action_config_print)
|
|
{
|
|
my $resolve = ($action_config_print eq "print-all");
|
|
#
|
|
# print configuration lines, machine readable
|
|
#
|
|
my @out;
|
|
push @out, config_dump_keys($config, skip_defaults => 1);
|
|
foreach my $sroot (vinfo_subsection($config, 'volume')) {
|
|
push @out, "\nvolume $sroot->{URL}";
|
|
push @out, config_dump_keys($sroot, prefix => "\t", resolve => $resolve);
|
|
foreach my $svol (vinfo_subsection($sroot, 'subvolume')) {
|
|
push @out, ""; # newline
|
|
push @out, "\t# subvolume $svol->{CONFIG}->{rel_path_glob}" if(defined($svol->{CONFIG}->{rel_path_glob}));
|
|
push @out, "\tsubvolume $svol->{SUBVOL_PATH}";
|
|
push @out, config_dump_keys($svol, prefix => "\t\t", resolve => $resolve);
|
|
foreach my $droot (vinfo_subsection($svol, 'target')) {
|
|
push @out, "\n\t\ttarget $droot->{CONFIG}->{target_type} $droot->{URL}";
|
|
push @out, config_dump_keys($droot, prefix => "\t\t\t", resolve => $resolve);
|
|
}
|
|
}
|
|
}
|
|
|
|
print_header(title => "Configuration Dump",
|
|
config => $config,
|
|
time => $start_time,
|
|
);
|
|
|
|
print join("\n", @out) . "\n";
|
|
exit exit_status($config);
|
|
}
|
|
|
|
|
|
if($action_list)
|
|
{
|
|
my @vol_data;
|
|
my @subvol_data;
|
|
my @target_data;
|
|
my @mixed_data;
|
|
my %target_uniq;
|
|
|
|
#
|
|
# print configuration lines, machine readable
|
|
#
|
|
foreach my $sroot (vinfo_subsection($config, 'volume')) {
|
|
my $volh = { vinfo_prefixed_keys("volume", $sroot) };
|
|
push @vol_data, $volh;
|
|
|
|
foreach my $svol (vinfo_subsection($sroot, 'subvolume')) {
|
|
my $subvolh = { %$volh,
|
|
vinfo_prefixed_keys("source", $svol),
|
|
snapshot_path => $sroot->{PATH} . (config_key($svol, "snapshot_dir", prefix => '/') // ""),
|
|
snapshot_name => config_key($svol, "snapshot_name"),
|
|
snapshot_preserve => format_preserve_matrix(config_preserve_hash($svol, "snapshot")),
|
|
};
|
|
push @subvol_data, $subvolh;
|
|
|
|
my $found = 0;
|
|
foreach my $droot (vinfo_subsection($svol, 'target')) {
|
|
my $targeth = { %$subvolh,
|
|
vinfo_prefixed_keys("target", $droot),
|
|
target_preserve => format_preserve_matrix(config_preserve_hash($droot, "target")),
|
|
};
|
|
if($action_list eq "target") {
|
|
next if($target_uniq{$droot->{URL}});
|
|
$target_uniq{$droot->{URL}} = 1;
|
|
}
|
|
push @target_data, $targeth;
|
|
push @mixed_data, $targeth;
|
|
$found = 1;
|
|
}
|
|
# make sure the subvol is always printed (even if no targets around)
|
|
push @mixed_data, $subvolh unless($found);
|
|
}
|
|
}
|
|
if($action_list eq "volume") {
|
|
print_formatted("list_volume", \@vol_data);
|
|
}
|
|
elsif($action_list eq "source") {
|
|
print_formatted("list_source", \@subvol_data);
|
|
}
|
|
elsif($action_list eq "target") {
|
|
print_formatted("list_target", \@target_data);
|
|
}
|
|
else {
|
|
# default format
|
|
print_formatted("list", \@mixed_data);
|
|
}
|
|
exit exit_status($config);
|
|
}
|
|
|
|
|
|
#
|
|
# fill vinfo hash, basic checks on configuration
|
|
#
|
|
|
|
# read volume btrfs trees, and make sure the configured subvolumes exist
|
|
foreach my $sroot (vinfo_subsection($config, 'volume')) {
|
|
DEBUG "Initializing volume section: $sroot->{PRINT}";
|
|
unless(vinfo_init_root($sroot)) {
|
|
ABORTED($sroot, "Failed to fetch subvolume detail" . ($err ? ": $err" : ""));
|
|
WARN "Skipping volume \"$sroot->{PRINT}\": $abrt";
|
|
next;
|
|
}
|
|
foreach my $svol (vinfo_subsection($sroot, 'subvolume')) {
|
|
DEBUG "Initializing subvolume section: $svol->{PRINT}";
|
|
unless(vinfo_init_root($svol)) {
|
|
ABORTED($svol, "Failed to fetch subvolume detail" . ($err ? ": $err" : ""));
|
|
WARN "Skipping subvolume \"$svol->{PRINT}\": $abrt";
|
|
next;
|
|
}
|
|
if($svol->{node}{uuid} && _is_child_of($sroot->{node}, $svol->{node}{uuid})) {
|
|
DEBUG "Found \"$svol->{PRINT}\" (id=$svol->{node}{id}) in btrfs subtree of: $sroot->{PRINT}";
|
|
} else {
|
|
ABORTED($svol, "Not a child subvolume of: $sroot->{PRINT}");
|
|
WARN "Skipping subvolume \"$svol->{PRINT}\": $abrt";
|
|
next;
|
|
}
|
|
}
|
|
}
|
|
|
|
# read target btrfs tree
|
|
foreach my $sroot (vinfo_subsection($config, 'volume')) {
|
|
foreach my $svol (vinfo_subsection($sroot, 'subvolume')) {
|
|
foreach my $droot (vinfo_subsection($svol, 'target')) {
|
|
DEBUG "Initializing target section: $droot->{PRINT}";
|
|
my $target_type = $droot->{CONFIG}->{target_type} || die;
|
|
if($target_type eq "send-receive")
|
|
{
|
|
unless(vinfo_init_root($droot, resolve_subdir => 1)) {
|
|
ABORTED($droot, "Failed to fetch subvolume detail" . ($err ? ": $err" : ""));
|
|
WARN "Skipping target \"$droot->{PRINT}\": $abrt";
|
|
next;
|
|
}
|
|
}
|
|
elsif($target_type eq "raw")
|
|
{
|
|
DEBUG "Creating raw subvolume list: $droot->{PRINT}";
|
|
$droot->{SUBVOL_LIST} = [];
|
|
my $ret = run_cmd(
|
|
cmd => [ 'find', { unsafe => $droot->{PATH} . '/' }, '-maxdepth', '1', '-type', 'f' ],
|
|
rsh => vinfo_rsh($droot),
|
|
# note: use something like this to get the real (link resolved) path
|
|
# cmd => [ "find", $droot->{PATH} . '/', "-maxdepth", "1", "-name", "$snapshot_basename.\*.raw\*", '-printf', '%f\0', '-exec', 'realpath', '-z', '{}', ';' ],
|
|
non_destructive => 1,
|
|
);
|
|
unless(defined($ret)) {
|
|
ABORTED($droot, "Failed to list files from: $droot->{PATH}");
|
|
WARN "Skipping target \"$droot->{PRINT}\": $abrt";
|
|
next;
|
|
}
|
|
|
|
my $snapshot_basename = config_key($svol, "snapshot_name") // die;
|
|
my %child_uuid_list;
|
|
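# %child_uuid_list maps a raw image's remote parent uuid to the raw backups
# created incrementally from it; it is used further below to force-preserve
# complete parent/child chains, so that an incremental chain of raw image
# files is never broken by the delete scheduler.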
foreach (split("\n", $ret))
|
|
{
|
|
unless(/^($file_match)$/) {
|
|
DEBUG "Skipping non-parseable file: \"$_\"";
|
|
next;
|
|
}
|
|
my $file = $1; # untaint argument
|
|
unless($file =~ s/^\Q$droot->{PATH}\E\///) {
|
|
ABORTED($droot, "Unexpected result from 'find': file \"$file\" is not under \"$droot->{PATH}\"");
|
|
last;
|
|
}
|
|
|
|
# Set btrfs subvolume information (received_uuid, parent_uuid) from filename info.
|
|
#
|
|
# NOTE: remote_parent_uuid in BTRBK_RAW is the "parent of the source subvolume", NOT the
|
|
# "parent of the received subvolume".
|
|
my $subvol = vinfo_child($droot, $file);
|
|
unless(vinfo_inject_child($droot, $subvol, { TARGET_TYPE => 'raw' }))
|
|
{
|
|
DEBUG "Skipping file (filename scheme mismatch): \"$file\"";
|
|
next;
|
|
}
|
|
unless(defined($subvol->{node}{BTRBK_RAW}) &&
|
|
($snapshot_basename eq $subvol->{node}{BTRBK_BASENAME}))
|
|
{
|
|
# vinfo_inject_child() pushes all "valid" subvols to $droot->{SUBVOL_LIST},
|
|
# remove the non-matching ones again.
|
|
# If we don't remove them from the list, they will also
|
|
# be taken into account for incremental backups!
|
|
pop @{$droot->{SUBVOL_LIST}};
|
|
DEBUG "Skipping file (base name != \"$snapshot_basename\"): \"$file\"";
|
|
next;
|
|
}
|
|
|
|
# incomplete raw backups (fake nodes) get the same semantics as real subvolumes (readonly=0, received_uuid='-')
|
|
$subvol->{node}{received_uuid} = ($subvol->{node}{BTRBK_RAW}->{incomplete} ? '-' : $subvol->{node}{BTRBK_RAW}->{received_uuid});
|
|
$subvol->{node}{parent_uuid} = undef; # correct value gets inserted below
|
|
$subvol->{node}{readonly} = ($subvol->{node}{BTRBK_RAW}->{incomplete} ? 0 : 1);
|
|
|
|
if($subvol->{node}{BTRBK_RAW}->{remote_parent_uuid} ne '-') {
|
|
$child_uuid_list{$subvol->{node}{BTRBK_RAW}->{remote_parent_uuid}} //= [];
|
|
push @{$child_uuid_list{$subvol->{node}{BTRBK_RAW}->{remote_parent_uuid}}}, $subvol;
|
|
}
|
|
}
|
|
if(ABORTED($droot)) {
|
|
WARN "Skipping target \"$droot->{PRINT}\": " . ABORTED($droot);
|
|
next;
|
|
}
|
|
my @subvol_list = @{vinfo_subvol_list($droot, sort => 'path')};
|
|
DEBUG "Found " . scalar(@subvol_list) . " raw subvolume backups of: $svol->{PRINT}";
|
|
|
|
# Make sure that incremental backup chains are never broken:
|
|
foreach my $subvol (@subvol_list)
|
|
{
|
|
# When backups are created as raw btrfs images with incremental send
|
|
# ("incremental yes|strict", i.e. "btrfs send -p parent source > svol.btrfs"),
|
|
# restoring them requires the complete image chain: the backups on the
|
|
# target become corrupted (unusable!) as soon as any file in the chain
|
|
# gets deleted.
|
|
#
|
|
# We need to make sure btrbk will NEVER delete those:
|
|
# - svol.<timestamp>--<received_uuid_0>.btrfs : root (full) image
|
|
# - svol.<timestamp>--<received_uuid-n>[@<received_uuid_n-1>].btrfs : incremental image
|
|
|
|
foreach my $child (@{$child_uuid_list{$subvol->{node}{received_uuid}}}) {
|
|
$child->{node}{parent_uuid} = $subvol->{node}{uuid};
|
|
|
|
DEBUG "Found parent/child partners, forcing preserve of: \"$subvol->{PRINT}\", \"$child->{PRINT}\"";
|
|
$subvol->{node}{FORCE_PRESERVE} = "preserve forced: parent of another raw target";
|
|
$child->{node}{FORCE_PRESERVE} ||= "preserve forced: child of another raw target";
|
|
}
|
|
|
|
# For now, always preserve all raw files.
|
|
# TODO: remove this line as soon as incremental rotation is implemented.
|
|
$subvol->{node}{FORCE_PRESERVE} = "preserve forced: raw target";
|
|
}
|
|
# TRACE(Data::Dumper->Dump([\@subvol_list], ["vinfo_raw_subvol_list{$droot}"]));
|
|
}
|
|
|
|
if($config_override{FAILSAFE_PRESERVE}) {
|
|
ABORTED($droot, $config_override{FAILSAFE_PRESERVE});
|
|
WARN "Skipping target \"$droot->{PRINT}\": $abrt";
|
|
}
|
|
}
|
|
}
|
|
}
|
|
|
|
# check for duplicate snapshot locations
|
|
my %snapshot_check;
|
|
my %backup_check;
|
|
foreach my $sroot (vinfo_subsection($config, 'volume')) {
|
|
foreach my $svol (vinfo_subsection($sroot, 'subvolume')) {
|
|
# check for duplicate snapshot locations
|
|
my $snapdir_ts = config_key($svol, "snapshot_dir", postfix => '/') // "";
|
|
my $snapshot_basename = config_key($svol, "snapshot_name") // die;
|
|
my $snapshot_target = $sroot->{URL_PREFIX} . ($realpath_cache{$sroot->{URL}} // $sroot->{PATH}) . '/' . $snapdir_ts . $snapshot_basename;
|
|
if(my $prev = $snapshot_check{$snapshot_target}) {
|
|
ERROR "Subvolume \"$prev\" and \"$svol->{PRINT}\" will create same snapshot: $snapshot_target";
|
|
ERROR "Please fix \"snapshot_name\" configuration options!";
|
|
exit 1;
|
|
}
|
|
$snapshot_check{$snapshot_target} = $svol->{PRINT};
|
|
|
|
foreach my $droot (vinfo_subsection($svol, 'target')) {
|
|
# check for duplicate backup target locations
|
|
my $snapshot_backup_target = $droot->{URL_PREFIX} . ($realpath_cache{$droot->{URL}} // $droot->{PATH}) . '/' . $snapshot_basename;
|
|
if(my $prev = $backup_check{$snapshot_backup_target}) {
|
|
ERROR "Subvolume \"$prev\" and \"$svol->{PRINT}\" will create same backup target: $snapshot_backup_target";
|
|
ERROR "Please fix \"snapshot_name\" or \"target\" configuration options!";
|
|
exit 1;
|
|
}
|
|
$backup_check{$snapshot_backup_target} = $svol->{PRINT};
|
|
}
|
|
}
|
|
}
|
|
|
|
|
|
if($action_origin)
|
|
{
|
|
#
|
|
# print origin information
|
|
#
|
|
my $url = $filter_args[0] || die;
|
|
my $vol = vinfo($url, $config);
|
|
unless(vinfo_init_root($vol)) {
|
|
ERROR "Failed to fetch subvolume detail for: $url" . ($err ? ": $err" : "");
|
|
exit 1;
|
|
}
|
|
if($vol->{node}{is_root}) {
|
|
ERROR "Subvolume is btrfs root: $url\n";
|
|
exit 1;
|
|
}
|
|
|
|
my $lines = [];
|
|
_origin_tree("", $vol->{node}, $lines);
|
|
|
|
$output_format ||= "custom";
|
|
if($output_format eq "custom") {
|
|
print_header(title => "Origin Tree",
|
|
config => $config,
|
|
time => $start_time,
|
|
legend => [
|
|
"^-- : parent subvolume",
|
|
"newline : received-from relationship with subvolume (identical content)",
|
|
]
|
|
);
|
|
print join("\n", map { $_->{tree} } @$lines) . "\n";
|
|
}
|
|
else {
|
|
print_formatted('origin_tree', $lines );
|
|
}
|
|
exit 0;
|
|
}
|
|
|
|
|
|
if($action_resolve)
|
|
{
|
|
my @data;
|
|
my @stats_data;
|
|
my $stats_snapshots_total = 0;
|
|
my $stats_backups_total = 0;
|
|
my $stats_backups_total_incomplete = 0;
|
|
my $stats_backups_total_orphaned = 0;
|
|
if($action_resolve eq "snapshots")
|
|
{
|
|
#
|
|
# print all snapshots and their receive targets
|
|
#
|
|
foreach my $sroot (vinfo_subsection($config, 'volume')) {
|
|
foreach my $svol (vinfo_subsection($sroot, 'subvolume')) {
|
|
my $snapshot_name = config_key($svol, "snapshot_name") // die;
|
|
foreach my $snapshot (sort { $a->{node}{cgen} <=> $b->{node}{cgen} } get_snapshot_children($sroot, $svol)) {
|
|
my $snapshot_data = { type => "snapshot",
|
|
status => ($snapshot->{node}{cgen} == $svol->{node}{gen}) ? "up-to-date" : undef,
|
|
vinfo_prefixed_keys("source", $svol),
|
|
vinfo_prefixed_keys("snapshot", $snapshot),
|
|
snapshot_name => $snapshot_name,
|
|
};
|
|
my $found = 0;
|
|
foreach my $droot (vinfo_subsection($svol, 'target')) {
|
|
foreach (sort { $a->{SUBVOL_PATH} cmp $b->{SUBVOL_PATH} } get_receive_targets($droot, $snapshot)) {
|
|
push @data, { %$snapshot_data,
|
|
type => "received",
|
|
vinfo_prefixed_keys("target", $_),
|
|
};
|
|
$found = 1;
|
|
}
|
|
}
|
|
push @data, $snapshot_data unless($found);
|
|
}
|
|
}
|
|
}
|
|
}
|
|
elsif(($action_resolve eq "backups") || ($action_resolve eq "stats"))
|
|
{
|
|
#
|
|
# print all targets and their corresponding source snapshots
|
|
#
|
|
foreach my $sroot (vinfo_subsection($config, 'volume')) {
|
|
foreach my $svol (vinfo_subsection($sroot, 'subvolume')) {
|
|
my $snapshot_name = config_key($svol, "snapshot_name") // die;
|
|
my @snapshot_children = get_snapshot_children($sroot, $svol);
|
|
my $stats_snapshot_uptodate = "";
|
|
foreach my $snapshot (@snapshot_children) {
|
|
if($snapshot->{node}{cgen} == $svol->{node}{gen}) {
|
|
$stats_snapshot_uptodate = " (up-to-date)";
|
|
last;
|
|
}
|
|
}
|
|
push @stats_data, [ $svol->{PRINT}, sprintf("%4u snapshots$stats_snapshot_uptodate", scalar(@snapshot_children)) ];
|
|
$stats_snapshots_total += scalar(@snapshot_children); # NOTE: this adds ALL snapshot children under $sroot (not only the ones created by btrbk!)
|
|
|
|
foreach my $droot (vinfo_subsection($svol, 'target')) {
|
|
my $stats_received = 0;
|
|
my $stats_orphaned = 0;
|
|
my $stats_incomplete = 0;
|
|
foreach my $target_vol (@{vinfo_subvol_list($droot, sort => 'path')}) {
|
|
my $parent_snapshot;
|
|
my $incomplete_backup;
|
|
foreach (@snapshot_children) {
|
|
if($target_vol->{node}{received_uuid} eq '-') {
|
|
# incomplete received (garbled) subvolumes have no received_uuid (as of btrfs-progs v4.3.1).
|
|
# a subvolume in droot matching our naming is considered incomplete if received_uuid is not set!
|
|
$parent_snapshot = undef;
|
|
$incomplete_backup = 1;
|
|
last;
|
|
}
|
|
if($_->{node}{uuid} eq $target_vol->{node}{received_uuid}) {
|
|
$parent_snapshot = $_;
|
|
last;
|
|
}
|
|
}
|
|
if($parent_snapshot) {
|
|
$stats_received++;
|
|
push @data, { type => "received",
|
|
vinfo_prefixed_keys("target", $target_vol),
|
|
vinfo_prefixed_keys("snapshot", $parent_snapshot),
|
|
vinfo_prefixed_keys("source", $svol),
|
|
status => ($parent_snapshot->{node}{cgen} == $svol->{node}{gen}) ? "up-to-date" : undef,
|
|
};
|
|
}
|
|
else {
|
|
# don't display all subvolumes in $droot, only the ones matching snapshot_name
|
|
if($target_vol->{btrbk_direct_leaf} && ($target_vol->{node}{BTRBK_BASENAME} eq $snapshot_name)) {
|
|
if($incomplete_backup) { $stats_incomplete++; } else { $stats_orphaned++; }
|
|
push @data, { type => "received",
|
|
# suppress "orphaned" status here (snapshot column is empty anyways)
|
|
# status => ($incomplete_backup ? "incomplete" : "orphaned"),
|
|
status => ($incomplete_backup ? "incomplete" : undef),
|
|
vinfo_prefixed_keys("target", $target_vol),
|
|
vinfo_prefixed_keys("source", $svol),
|
|
};
|
|
}
|
|
else {
|
|
DEBUG "ignoring subvolume with non-matching snapshot_name";
|
|
}
|
|
}
|
|
}
|
|
my $stats_total = $stats_received + $stats_incomplete + $stats_orphaned;
|
|
$stats_backups_total += $stats_total;
|
|
$stats_backups_total_incomplete += $stats_incomplete;
|
|
$stats_backups_total_orphaned += $stats_orphaned;
|
|
my @stats_detail;
|
|
push @stats_detail, "$stats_orphaned orphaned" if($stats_orphaned);
|
|
push @stats_detail, "$stats_incomplete incomplete" if($stats_incomplete);
|
|
my $stats_detail_print = join(', ', @stats_detail);
|
|
$stats_detail_print = " ($stats_detail_print)" if($stats_detail_print);
|
|
push @stats_data, [ "^-- $droot->{PRINT}/$snapshot_name.*", sprintf("%4u backups$stats_detail_print", $stats_total) ];
|
|
}
|
|
}
|
|
}
|
|
}
|
|
elsif($action_resolve eq "latest")
|
|
{
|
|
#
|
|
# print latest common
|
|
#
|
|
foreach my $sroot (vinfo_subsection($config, 'volume')) {
|
|
foreach my $svol (vinfo_subsection($sroot, 'subvolume')) {
|
|
my $found = 0;
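          # "latest common" pair: the most recent snapshot that also has a
          # corresponding subvolume on the target (usable as parent for an
          # incremental send-receive)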
          foreach my $droot (vinfo_subsection($svol, 'target')) {
            my ($latest_common_src, $latest_common_target) = get_latest_common($sroot, $svol, $droot);
            if ($latest_common_src && $latest_common_target) {
              push @data, { type => "latest_common",
                            status => ($latest_common_src->{node}{cgen} == $svol->{node}{gen}) ? "up-to-date" : undef,
                            vinfo_prefixed_keys("source", $svol),
                            vinfo_prefixed_keys("snapshot", $latest_common_src),
                            vinfo_prefixed_keys("target", $latest_common_target),
                          };
              $found = 1;
            }
          }
          unless($found) {
            my $latest_snapshot = get_latest_snapshot_child($sroot, $svol);
            push @data, { type => "latest_snapshot",
                          status => ($latest_snapshot->{node}{cgen} == $svol->{node}{gen}) ? "up-to-date" : undef,
                          vinfo_prefixed_keys("source", $svol),
                          vinfo_prefixed_keys("snapshot", $latest_snapshot), # all unset if no $latest_snapshot
                        };
          }
        }
      }
    }
    else {
      die;
    }

    if($action_resolve eq "stats") {
      print_header(title => "Statistics",
                   config => $config,
                   time => $start_time,
                   legend => [
                     "up-to-date: latest snapshot is up to date with parent subvolume",
                     "orphaned : parent snapshot was deleted (by snapshot_preserve policy)",
                   ],
                  );

      print_table(\@stats_data, "  ");
      print "\n";
      my $stats_filter = $config->{CMDLINE_FILTER_LIST} ? join("; ", @{$config->{CMDLINE_FILTER_LIST}}) : "";
      my @stats_total_detail;
      push @stats_total_detail, "$stats_backups_total_orphaned orphaned" if($stats_backups_total_orphaned);
      push @stats_total_detail, "$stats_backups_total_incomplete incomplete" if($stats_backups_total_incomplete);
      my $stats_total_detail_print = join(', ', @stats_total_detail);
      $stats_total_detail_print = " ($stats_total_detail_print)" if($stats_total_detail_print);
      print "Total" . ($stats_filter ? " ($stats_filter)" : "") . ":\n";
      my $maxlen = ($stats_snapshots_total > $stats_backups_total) ? length($stats_snapshots_total) : length($stats_backups_total);
      printf("%" . $maxlen . "u snapshots\n", $stats_snapshots_total);
      printf("%" . $maxlen . "u backups$stats_total_detail_print\n", $stats_backups_total);
    }
    else {
      print_formatted("resolved", \@data);
    }

    exit exit_status($config);
  }


  if($action_clean)
  {
    #
    # identify and delete incomplete backups
    #
    init_transaction_log(config_key($config, "transaction_log"),
                         config_key($config, "transaction_syslog"));

    my @out;
    foreach my $sroot (vinfo_subsection($config, 'volume')) {
      foreach my $svol (vinfo_subsection($sroot, 'subvolume')) {
        my $snapshot_name = config_key($svol, "snapshot_name") // die;
        foreach my $droot (vinfo_subsection($svol, 'target')) {
          my $target_type = $droot->{CONFIG}->{target_type} || die;

          INFO "Cleaning incomplete backups in: $droot->{PRINT}/$snapshot_name.*";
          push @out, "$droot->{PRINT}/$snapshot_name.*";
          my @delete;
          foreach my $target_vol (@{vinfo_subvol_list($droot, sort => 'path')}) {
            # incomplete received (garbled) subvolumes have no received_uuid (as of btrfs-progs v4.3.1).
            # a subvolume in droot matching our naming is considered incomplete if received_uuid is not set!
            next unless($target_vol->{btrbk_direct_leaf} && ($target_vol->{node}{BTRBK_BASENAME} eq $snapshot_name));
            if($target_vol->{node}{received_uuid} eq '-') {
              DEBUG "Found incomplete target subvolume: $target_vol->{PRINT}";
              push(@delete, $target_vol);
              push @out, "--- $target_vol->{PRINT}";
            }
          }
my $ret;
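          # raw targets hold plain files (deleted with "rm"); everything else
          # is a btrfs subvolume (deleted via btrfs_subvolume_delete)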
          if($target_type eq "raw") {
            if(scalar(@delete)) {
              DEBUG "[raw] delete:";
              DEBUG "[raw] file: $_->{PRINT}" foreach(@delete);
              $ret = run_cmd({
                cmd => ['rm', (map { { unsafe => $_->{PATH} } } @delete) ],
                rsh => vinfo_rsh($droot),
              });
            } else {
              $ret = 0;
            }
          }
          else {
            $ret = btrfs_subvolume_delete(\@delete, commit => config_key($droot, "btrfs_commit_delete"), type => "delete_garbled");
          }
          if(defined($ret)) {
            INFO "Deleted $ret incomplete backups in: $droot->{PRINT}/$snapshot_name.*";
            $droot->{SUBVOL_DELETED} //= [];
            push @{$droot->{SUBVOL_DELETED}}, @delete;
          }
          else {
            ABORTED($droot, "Failed to delete incomplete target subvolume");
            push @out, "!!! Target \"$droot->{PRINT}\" aborted: $abrt";
          }
          push(@out, "<no_action>") unless(scalar(@delete));
          push(@out, "");
        }
      }
    }

    my $exit_status = exit_status($config);
    my $time_elapsed = time - $start_time;
    INFO "Completed within: ${time_elapsed}s (" . localtime(time) . ")";
    action("finished",
           status => $exit_status ? "partial" : "success",
           duration => $time_elapsed,
           message => $exit_status ? "At least one delete operation failed" : undef,
          );
    close_transaction_log();

    #
    # print summary
    #
    unless($quiet)
    {
      $output_format ||= "custom";
      if($output_format eq "custom")
      {
        print_header(title => "Cleanup Summary",
                     config => $config,
                     time => $start_time,
                     legend => [
                       "--- deleted subvolume (incomplete backup)",
                     ],
                    );
        print join("\n", @out);
        if($dryrun) {
          print "\nNOTE: Dryrun was active, none of the operations above were actually executed!\n";
        }
      }
      else
      {
        # print action log (without transaction start messages)
        my @data = grep { $_->{status} ne "starting" } @transaction_log;
        print_formatted("transaction", \@data, title => "TRANSACTION LOG");
      }
    }

    exit $exit_status;
  }


  if($action_run)
  {
    init_transaction_log(config_key($config, "transaction_log"),
                         config_key($config, "transaction_syslog"));

    if($resume_only) {
      INFO "Skipping snapshot creation (option \"-r\" present)";
    }
    else
    {
      #
      # create snapshots
      #
      foreach my $sroot (vinfo_subsection($config, 'volume')) {
        foreach my $svol (vinfo_subsection($sroot, 'subvolume')) {
          my $snapdir_ts = config_key($svol, "snapshot_dir", postfix => '/') // "";
          my $snapshot_basename = config_key($svol, "snapshot_name") // die;

          # check if we need to create a snapshot
my $snapshot_create = config_key($svol, "snapshot_create");
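          # snapshot_create modes: unset/"no" (never), "always", "onchange" (only
          # if the source generation changed), "ondemand" (only if at least one
          # target is configured)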
          if(not $snapshot_create) {
            DEBUG "Snapshot creation disabled (snapshot_create=no)";
            next;
          }
          elsif($snapshot_create eq "always") {
            DEBUG "Snapshot creation enabled (snapshot_create=always)";
          }
          elsif($snapshot_create eq "onchange") {
            # check if latest snapshot is up-to-date with source subvolume (by generation)
            my $latest = get_latest_snapshot_child($sroot, $svol);
            if($latest) {
              if($latest->{node}{cgen} == $svol->{node}{gen}) {
                INFO "Snapshot creation skipped: snapshot_create=onchange, snapshot is up-to-date: $latest->{PRINT}";
                $svol->{SNAPSHOT_UP_TO_DATE} = $latest;
                next;
              }
              DEBUG "Snapshot creation enabled: snapshot_create=onchange, gen=$svol->{node}{gen} > snapshot_cgen=$latest->{node}{cgen}";
            }
            else {
              DEBUG "Snapshot creation enabled: snapshot_create=onchange, no snapshots found";
            }
          }
          elsif($snapshot_create eq "ondemand") {
            # check if at least one target is present
            if(scalar vinfo_subsection($svol, 'target')) {
              DEBUG "Snapshot creation enabled (snapshot_create=ondemand): at least one target is present";
            }
            else {
              INFO "Snapshot creation skipped: snapshot_create=ondemand, and no target is present for: $svol->{PRINT}";
              next;
            }
          }
          else {
            die "illegal value for snapshot_create configuration option: $snapshot_create";
          }

          # find unique snapshot name
          my $timestamp = timestamp(\@tm_now, config_key($svol, "timestamp_format"));
          my @unconfirmed_target_name;
          my @lookup = map { $_->{SUBVOL_PATH} } @{vinfo_subvol_list($sroot)};
          @lookup = grep s/^\Q$snapdir_ts\E// , @lookup;
          foreach my $droot (vinfo_subsection($svol, 'target', 1)) {
            if(ABORTED($droot)) {
              push(@unconfirmed_target_name, $droot);
              next;
            }
            push(@lookup, map { $_->{SUBVOL_PATH} } @{vinfo_subvol_list($droot)});
          }
          @lookup = grep /^\Q$snapshot_basename.$timestamp\E(_[0-9]+)?$/ ,@lookup;
          TRACE "Present snapshot names for \"$svol->{PRINT}\": " . join(', ', @lookup);
          @lookup = map { /_([0-9]+)$/ ? $1 : 0 } @lookup;
          @lookup = sort { $b <=> $a } @lookup;
          my $postfix_counter = $lookup[0] // -1;
          $postfix_counter++;
my $snapshot_name = $snapshot_basename . '.' . $timestamp . ($postfix_counter ? "_$postfix_counter" : "");
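          # the resulting name is e.g. "home.20160101", or "home.20160101_1",
          # "home.20160101_2", ... if that name is already taken in any source or
          # target location (exact form depends on timestamp_format)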

          if(@unconfirmed_target_name) {
            INFO "Assuming non-present subvolume \"$snapshot_name\" in skipped targets: " . join(", ", map { "\"$_->{PRINT}\"" } @unconfirmed_target_name);
          }

          # finally create the snapshot
          INFO "Creating subvolume snapshot for: $svol->{PRINT}";
          my $snapshot = vinfo_child($sroot, "$snapdir_ts$snapshot_name");
          if(btrfs_subvolume_snapshot($svol, $snapshot))
          {
            vinfo_inject_child($sroot, $snapshot, {
              parent_uuid => $svol->{node}{uuid},
              received_uuid => '-',
              readonly => 1,
              FORCE_PRESERVE => 'preserve forced: created just now',
            });
            $svol->{SNAPSHOT_CREATED} = $snapshot;
          }
          else {
            ABORTED($svol, "Failed to create snapshot: $svol->{PRINT} -> $sroot->{PRINT}/$snapdir_ts$snapshot_name");
            WARN "Skipping subvolume section: $abrt";
          }
        }
      }
    }

    #
    # create backups
    #
    foreach my $sroot (vinfo_subsection($config, 'volume')) {
      foreach my $svol (vinfo_subsection($sroot, 'subvolume')) {
        my $snapdir = config_key($svol, "snapshot_dir") // "";
        my $snapshot_basename = config_key($svol, "snapshot_name") // die;
        my @snapshot_children = sort({ cmp_date($a->{node}{BTRBK_DATE}, $b->{node}{BTRBK_DATE}) }
                                     get_snapshot_children($sroot, $svol, $snapdir, $snapshot_basename));

        foreach my $droot (vinfo_subsection($svol, 'target')) {
          INFO "Checking for missing backups of subvolume \"$svol->{PRINT}\" in \"$droot->{PRINT}/\"";
          my @schedule;
          my $resume_total = 0;
          my $resume_success = 0;

my $droot_subvol_list = vinfo_subvol_list($droot); # cache subvol list for get_receive_targets()
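          # collect snapshots that have no receive target yet; schedule() below
          # decides which of these candidates actually need to be transferred
          # (according to the target preserve policy)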
          foreach my $child (@snapshot_children)
          {
            my $warning_seen = [];
            my @receive_targets = get_receive_targets($droot, $child, exact_match => 1, warn => 1, seen => $warning_seen, droot_subvol_list => $droot_subvol_list);
            get_receive_targets_fsroot($droot, $child, exclude => $warning_seen, warn => 1); # warn on unexpected subvolumes on the filesystem
            if(scalar(@receive_targets)){
              DEBUG "Found receive target of: $child->{PRINT}";
              next;
            }

            DEBUG "Adding backup candidate: $child->{PRINT}";
            push(@schedule, { value => $child,
                              btrbk_date => $child->{node}{BTRBK_DATE},
                              # no longer enforcing resume of the latest snapshot (since v0.23.0)
                              # preserve => $child->{node}{FORCE_PRESERVE},
                            });
          }

          if(scalar @schedule)
          {
            DEBUG "Checking schedule for backup candidates";
            # add all present backups as informative_only: these are needed for correct results of schedule()
            foreach my $vol (@$droot_subvol_list) {
              unless($vol->{btrbk_direct_leaf} && ($vol->{node}{BTRBK_BASENAME} eq $snapshot_basename)) {
                TRACE "Receive target does not match btrbk filename scheme, skipping: $vol->{PRINT}";
                next;
              }
              push(@schedule, { informative_only => 1,
                                value => $vol,
                                btrbk_date => $vol->{node}{BTRBK_DATE},
                              });
            }
            my ($preserve, undef) = schedule(
              schedule => \@schedule,
              preserve => config_preserve_hash($droot, "target"),
            );
            my @resume = grep defined, @$preserve; # remove entries with no value from list (target subvolumes)
            $resume_total = scalar @resume;

            foreach my $child (sort { $a->{node}{cgen} <=> $b->{node}{cgen} } @resume)
            {
              # Continue gracefully (skip instead of abort) on existing (possibly garbled) target
              my $err_vol = vinfo_subvol($droot, $child->{NAME});
              if($err_vol) {
                my $status_msg = "Please delete stray subvolume (\"btrbk clean\"): $err_vol->{PRINT}";
                WARN "Target subvolume \"$err_vol->{PRINT}\" exists, but is not a receive target of \"$child->{PRINT}\"";
                WARN $status_msg;
                WARN "Skipping backup of: $child->{PRINT}";
                $droot->{SUBVOL_RECEIVED} //= [];
                push(@{$droot->{SUBVOL_RECEIVED}}, { ERROR => $status_msg, received_subvolume => $err_vol });
                $droot->{CONFIG}->{UNRECOVERABLE} = $status_msg;
                next;
              }

INFO "Creating subvolume backup (send-receive) for: $child->{PRINT}";
|
|
my ($latest_common_src, $latest_common_target) = get_latest_common($sroot, $child, $droot, $snapdir);
|
|
if(macro_send_receive(source => $child,
|
|
target => $droot,
|
|
parent => $latest_common_src, # this is <undef> if no common found
|
|
latest_common_target => $latest_common_target,
|
|
))
|
|
{
|
|
$resume_success++;
|
|
}
|
|
else {
|
|
# note: ABORTED flag is already set by macro_send_receive()
|
|
ERROR("Error while resuming backups, aborting");
|
|
last;
|
|
}
|
|
}
|
|
}
|
|
|
|
if($resume_total) {
|
|
INFO "Created $resume_success/$resume_total missing backups";
|
|
} else {
|
|
INFO "No missing backups found";
|
|
}
|
|
}
|
|
}
|
|
}
|
|
|
|
|
|
#
|
|
# remove backups following a preserve daily/weekly/monthly scheme
|
|
#
|
|
my $schedule_results = [];
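    # scheduling results from both backup and snapshot cleanup (macro_delete)
    # are collected here for the schedule printout below (if $print_schedule is set)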
    if($preserve_backups || $resume_only) {
      INFO "Preserving all snapshots and backups (option \"-p\" or \"-r\" present)";
    }
    else
    {
      foreach my $sroot (vinfo_subsection($config, 'volume')) {
        foreach my $svol (vinfo_subsection($sroot, 'subvolume')) {
          my $snapdir = config_key($svol, "snapshot_dir") // "";
          my $snapdir_ts = config_key($svol, "snapshot_dir", postfix => '/') // "";
          my $snapshot_basename = config_key($svol, "snapshot_name") // die;
          my $target_aborted = 0;
          my @snapshot_children = sort({ cmp_date($b->{node}{BTRBK_DATE}, $a->{node}{BTRBK_DATE}) } # sort descending
                                       get_snapshot_children($sroot, $svol, $snapdir, $snapshot_basename));

          foreach my $droot (vinfo_subsection($svol, 'target', 1)) {
            if(ABORTED($droot)) {
              if(ABORTED($droot) eq "USER_SKIP") {
                $target_aborted ||= -1;
              } else {
                $target_aborted = 1;
              }
              next;
            }

# always preserve latest common snapshot/backup pair
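            # (the latest common pair serves as parent for the next incremental
            # send-receive, so it must not be expired by the preserve policy)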
            my $droot_subvol_list = vinfo_subvol_list($droot); # cache subvol list for get_receive_targets()
            foreach my $child (@snapshot_children) {
              my @receive_targets = get_receive_targets($droot, $child, droot_subvol_list => $droot_subvol_list);
              if(scalar(@receive_targets)) {
                DEBUG "Force preserve for latest common snapshot: $child->{PRINT}";
                $child->{node}{FORCE_PRESERVE} = 'preserve forced: latest common snapshot';
                foreach(@receive_targets) {
                  DEBUG "Force preserve for latest common target: $_->{PRINT}";
                  $_->{node}{FORCE_PRESERVE} = 'preserve forced: latest common target';
                }
                last;
              }
            }

            #
            # delete backups
            #
            INFO "Cleaning backups of subvolume \"$svol->{PRINT}\": $droot->{PRINT}/$snapshot_basename.*";
            unless(macro_delete($droot, "", $snapshot_basename, $droot,
                                { preserve => config_preserve_hash($droot, "target"),
                                  results => $schedule_results,
                                  result_hints => { topic => "backup", root_path => $droot->{PATH} },
                                },
                                commit => config_key($droot, "btrfs_commit_delete"),
                                type => "delete_target",
                               ))
            {
              $target_aborted = -1;
            }
          }

          #
          # delete snapshots
          #
          if($target_aborted) {
            if($target_aborted == -1) {
              INFO "Skipping cleanup of snapshots for subvolume \"$svol->{PRINT}\", as at least one target is skipped by command line argument";
            } else {
              WARN "Skipping cleanup of snapshots for subvolume \"$svol->{PRINT}\", as at least one target aborted earlier";
            }
            next;
          }
          INFO "Cleaning snapshots: $sroot->{PRINT}/$snapdir_ts$snapshot_basename.*";
          macro_delete($sroot, $snapdir, $snapshot_basename, $svol,
                       { preserve => config_preserve_hash($svol, "snapshot"),
                         results => $schedule_results,
                         result_hints => { topic => "snapshot", root_path => $sroot->{PATH} },
                       },
                       commit => config_key($svol, "btrfs_commit_delete"),
                       type => "delete_snapshot",
                      );
        }
      }
    }

    my $exit_status = exit_status($config);
    my $time_elapsed = time - $start_time;
    INFO "Completed within: ${time_elapsed}s (" . localtime(time) . ")";
    action("finished",
           status => $exit_status ? "partial" : "success",
           duration => $time_elapsed,
           message => $exit_status ? "At least one backup task aborted" : undef,
          );
    close_transaction_log();


    unless($quiet)
    {
      #
      # print scheduling results
      #
      if($print_schedule) {
        my @data = map { { %$_, vinfo_prefixed_keys("", $_->{value}) }; } @$schedule_results;
        my @data_snapshot = grep { $_->{topic} eq "snapshot" } @data;
        my @data_backup = grep { $_->{topic} eq "backup" } @data;

        if(scalar(@data_snapshot)) {
          print_formatted("schedule", \@data_snapshot, title => "SNAPSHOT SCHEDULE");
          print "\n";
        }
        if(scalar(@data_backup)) {
          print_formatted("schedule", \@data_backup, title => "BACKUP SCHEDULE");
          print "\n";
        }
      }


      #
      # print summary
      #
      $output_format ||= "custom";
      if($output_format eq "custom")
      {
        my @unrecoverable;
        my @out;
        foreach my $sroot (vinfo_subsection($config, 'volume', 1)) {
          foreach my $svol (vinfo_subsection($sroot, 'subvolume', 1)) {
            my @subvol_out;
            if($svol->{SNAPSHOT_UP_TO_DATE}) {
              push @subvol_out, "=== $svol->{SNAPSHOT_UP_TO_DATE}->{PRINT}";
            }
            if($svol->{SNAPSHOT_CREATED}) {
              push @subvol_out, "+++ $svol->{SNAPSHOT_CREATED}->{PRINT}";
            }
            foreach(sort { $a->{PATH} cmp $b->{PATH} } @{$svol->{SUBVOL_DELETED} // []}) {
              push @subvol_out, "--- $_->{PRINT}";
            }
            foreach my $droot (vinfo_subsection($svol, 'target', 1)) {
foreach(@{$droot->{SUBVOL_RECEIVED} // []}) {
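                # markers: "***" non-incremental, ">>>" incremental (has parent),
                # "!!!" error (see legend in print_header below)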
                my $create_mode = "***";
                $create_mode = ">>>" if($_->{parent});
                # substr($create_mode, 0, 1, '%') if($_->{resume});
                $create_mode = "!!!" if($_->{ERROR});
                push @subvol_out, "$create_mode $_->{received_subvolume}->{PRINT}";
              }

              foreach(sort { $a->{PATH} cmp $b->{PATH} } @{$droot->{SUBVOL_DELETED} // []}) {
                push @subvol_out, "--- $_->{PRINT}";
              }

              if(ABORTED($droot) && (ABORTED($droot) ne "USER_SKIP")) {
                push @subvol_out, "!!! Target \"$droot->{PRINT}\" aborted: " . ABORTED($droot);
              }

              if($droot->{CONFIG}->{UNRECOVERABLE}) {
                push(@unrecoverable, $droot->{CONFIG}->{UNRECOVERABLE});
              }
            }
            if(ABORTED($sroot) && (ABORTED($sroot) ne "USER_SKIP")) {
              # repeat volume errors in subvolume context
              push @subvol_out, "!!! Volume \"$sroot->{PRINT}\" aborted: " . ABORTED($sroot);
            }
            if(ABORTED($svol) && (ABORTED($svol) ne "USER_SKIP")) {
              push @subvol_out, "!!! Aborted: " . ABORTED($svol);
            }

            if(@subvol_out) {
              push @out, "$svol->{PRINT}", @subvol_out, "";
            }
            elsif(ABORTED($svol) && (ABORTED($svol) eq "USER_SKIP")) {
              # don't print "<no_action>" on USER_SKIP
            }
            else {
              push @out, "$svol->{PRINT}", "<no_action>", "";
            }
          }
        }

        print_header(title => "Backup Summary",
                     config => $config,
                     time => $start_time,
                     legend => [
                       "=== up-to-date subvolume (source snapshot)",
                       "+++ created subvolume (source snapshot)",
                       "--- deleted subvolume",
                       "*** received subvolume (non-incremental)",
                       ">>> received subvolume (incremental)",
                     ],
                    );

        print join("\n", @out);

        if($resume_only) {
          print "\nNOTE: No snapshots created (option -r present)\n";
        }
        if($preserve_backups || $resume_only) {
          print "\nNOTE: Preserved all snapshots and backups (option -p or -r present)\n";
        }
        if($exit_status || scalar(@unrecoverable)) {
          print "\nNOTE: Some errors occurred, which may result in missing backups!\n";
          print "Please check warning and error messages above.\n";
          print join("\n", @unrecoverable) . "\n" if(@unrecoverable);
        }
        if($dryrun) {
          print "\nNOTE: Dryrun was active, none of the operations above were actually executed!\n";
        }
      }
      else
      {
        # print action log (without transaction start messages)
        my @data = grep { $_->{status} ne "starting" } @transaction_log;
        print_formatted("transaction", \@data, title => "TRANSACTION LOG");
      }
    }

    exit $exit_status if($exit_status);
  }
}


1;