#!/usr/bin/perl -T
=head1 NAME
btrbk - create snapshots and remote backups of btrfs subvolumes
=head1 SYNOPSIS
btrbk --help
=head1 DESCRIPTION
Backup tool for btrfs subvolumes, taking advantage of btrfs specific
send-receive mechanism, allowing incremental backups at file-system
level.
The full btrbk documentation is available at L<http://digint.ch/btrbk/>.
=head1 AUTHOR
Axel Burri <axel@tty0.ch>
=head1 COPYRIGHT AND LICENSE
Copyright (c) 2014-2016 Axel Burri. All rights reserved.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
=cut
use strict;
use warnings FATAL => qw( all );
use Carp qw(confess);
use Getopt::Long qw(GetOptions);
use Time::Local qw( timelocal timegm timegm_nocheck );
our $VERSION = '0.24.0-dev';
our $AUTHOR = 'Axel Burri <axel@tty0.ch>';
our $PROJECT_HOME = '<http://digint.ch/btrbk/>';
our $BTRFS_PROGS_MIN = '3.18.2'; # required since btrbk-v0.23.0
my $VERSION_INFO = "btrbk command line client, version $VERSION";
my @config_src = ("/etc/btrbk.conf", "/etc/btrbk/btrbk.conf");
my %compression = (
gzip => { name => 'gzip', format => 'gz', compress_cmd => [ 'gzip', '-c' ], decompress_cmd => [ 'gzip', '-d', '-c' ], level_min => 1, level_max => 9 },
pigz => { name => 'pigz', format => 'gz', compress_cmd => [ 'pigz', '-c' ], decompress_cmd => [ 'pigz', '-d', '-c' ], level_min => 1, level_max => 9, threads => '-p' },
bzip2 => { name => 'bzip2', format => 'bz2', compress_cmd => [ 'bzip2', '-c' ], decompress_cmd => [ 'bzip2', '-d', '-c' ], level_min => 1, level_max => 9 },
pbzip2 => { name => 'pbzip2', format => 'bz2', compress_cmd => [ 'pbzip2', '-c' ], decompress_cmd => [ 'pbzip2', '-d', '-c' ], level_min => 1, level_max => 9, threads => '-p' },
xz => { name => 'xz', format => 'xz', compress_cmd => [ 'xz', '-c' ], decompress_cmd => [ 'xz', '-d', '-c' ], level_min => 0, level_max => 9, threads => '-T' },
lzo => { name => 'lzo', format => 'lzo', compress_cmd => [ 'lzop', '-c' ], decompress_cmd => [ 'lzop', '-d', '-c' ], level_min => 1, level_max => 9 },
);
my $compress_format_alt = join '|', map { $_->{format} } values %compression; # note: this contains duplicate alternations
my $ip_addr_match = qr/(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])/;
my $host_name_match = qr/(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\-]*[A-Za-z0-9])/;
my $file_match = qr/[0-9a-zA-Z_@\+\-\.\/]+/; # note: ubuntu uses '@' in the subvolume layout: <https://help.ubuntu.com/community/btrfs>
my $glob_match = qr/[0-9a-zA-Z_@\+\-\.\/\*]+/; # file_match plus '*'
my $uuid_match = qr/[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}/;
my $timestamp_postfix_match = qr/\.(?<YYYY>[0-9]{4})(?<MM>[0-9]{2})(?<DD>[0-9]{2})(T(?<hh>[0-9]{2})(?<mm>[0-9]{2})((?<ss>[0-9]{2})(?<zz>(Z|[+-][0-9]{4})))?)?(_(?<NN>[0-9]+))?/; # matches "YYYYMMDD[Thhmm[ss+0000]][_NN]"
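# Illustrative examples of postfixes accepted by $timestamp_postfix_match (values made up):
# ".20160423", ".20160423T1458", ".20160423T145808+0200_1" -- presumably corresponding to
# the "short", "long" and "long-iso" timestamp_format settings, plus the optional "_NN" counter.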
my $raw_postfix_match = qr/--(?<received_uuid>$uuid_match)(\@(?<parent_uuid>$uuid_match))?\.btrfs?(\.(?<compress>($compress_format_alt)))?(\.(?<encrypt>gpg))?(\.(?<incomplete>part))?/; # matches "--<received_uuid>[@<parent_uuid>].btrfs[.gz|.bz2|.xz|.lzo][.gpg][.part]"
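# Illustrative raw target file name matched by $raw_postfix_match (UUID is made up):
# "home.20160423T1458--01234567-89ab-cdef-0123-456789abcdef.btrfs.gz.part"
# i.e. an incomplete (".part"), gzip-compressed raw backup without a parent UUID.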
my $group_match = qr/[a-zA-Z0-9_:-]+/;
my $ssh_cipher_match = qr/[a-z0-9][a-z0-9@.-]+/;
my $safe_cmd_match = $file_match; # good enough
my %day_of_week_map = ( sunday => 0, monday => 1, tuesday => 2, wednesday => 3, thursday => 4, friday => 5, saturday => 6 );
my @syslog_facilities = qw( user mail daemon auth lpr news cron authpriv local0 local1 local2 local3 local4 local5 local6 local7 );
my %config_options = (
# NOTE: the parser always maps "no" to undef
# NOTE: keys "volume", "subvolume" and "target" are hardcoded
# NOTE: files "." and "no" map to <undef>
timestamp_format => { default => "short", accept => [ "short", "long", "long-iso" ], context => [ "root", "volume", "subvolume" ] },
snapshot_dir => { default => undef, accept_file => { relative => 1 } },
snapshot_name => { default => undef, accept_file => { name_only => 1 }, context => [ "subvolume" ], deny_glob_context => 1 }, # NOTE: defaults to the subvolume name (hardcoded)
snapshot_create => { default => "always", accept => [ "no", "always", "ondemand", "onchange" ] },
incremental => { default => "yes", accept => [ "yes", "no", "strict" ] },
preserve_day_of_week => { default => "sunday", accept => [ (keys %day_of_week_map) ] },
snapshot_preserve => { default => undef, accept => [ "no" ], accept_preserve_matrix => 1, context => [ "root", "volume", "subvolume" ], },
snapshot_preserve_min => { default => "all", accept => [ "all", "latest" ], accept_regexp => qr/^[1-9][0-9]*[hdwmy]$/, context => [ "root", "volume", "subvolume" ], },
target_preserve => { default => undef, accept => [ "no" ], accept_preserve_matrix => 1 },
target_preserve_min => { default => "all", accept => [ "all", "latest", "no" ], accept_regexp => qr/^[0-9]+[hdwmy]$/ },
archive_preserve => { default => undef, accept => [ "no" ], accept_preserve_matrix => 1 },
archive_preserve_min => { default => "all", accept => [ "all", "latest", "no" ], accept_regexp => qr/^[0-9]+[hdwmy]$/ },
btrfs_commit_delete => { default => undef, accept => [ "after", "each", "no" ] },
ssh_identity => { default => undef, accept_file => { absolute => 1 } },
ssh_user => { default => "root", accept_regexp => qr/^[a-z_][a-z0-9_-]*$/ },
ssh_port => { default => "default", accept => [ "default" ], accept_numeric => 1 },
ssh_compression => { default => undef, accept => [ "yes", "no" ] },
ssh_cipher_spec => { default => "default", accept_regexp => qr/^$ssh_cipher_match(,$ssh_cipher_match)*$/ },
rate_limit => { default => undef, accept => [ "no" ], accept_regexp => qr/^[0-9]+[kmgt]?$/, require_bin => 'pv' },
transaction_log => { default => undef, accept_file => { absolute => 1 } },
transaction_syslog => { default => undef, accept => \@syslog_facilities },
lockfile => { default => undef, accept_file => { absolute => 1 }, context => [ "root" ] },
stream_compress => { default => undef, accept => [ "no", (keys %compression) ] },
stream_compress_level => { default => "default", accept => [ "default" ], accept_numeric => 1 },
stream_compress_threads => { default => "default", accept => [ "default" ], accept_numeric => 1 },
raw_target_compress => { default => undef, accept => [ "no", (keys %compression) ] },
raw_target_compress_level => { default => "default", accept => [ "default" ], accept_numeric => 1 },
raw_target_compress_threads => { default => "default", accept => [ "default" ], accept_numeric => 1 },
raw_target_encrypt => { default => undef, accept => [ "no", "gpg" ] },
gpg_keyring => { default => undef, accept_file => { absolute => 1 } },
gpg_recipient => { default => undef, accept_regexp => qr/^[0-9a-zA-Z_@\+\-\.]+$/ },
group => { default => undef, accept_regexp => qr/^$group_match(\s*,\s*$group_match)*$/, split => qr/\s*,\s*/ },
# deprecated options
btrfs_progs_compat => { default => undef, accept => [ "yes", "no" ],
deprecated => { DEFAULT => { ABORT => 1, warn => "This feature has been dropped in btrbk-v0.23.0. Please update to newest btrfs-progs, AT LEAST >= $BTRFS_PROGS_MIN" } } },
snapshot_preserve_daily => { default => 'all', accept => [ "all" ], accept_numeric => 1, context => [ "root", "volume", "subvolume" ],
deprecated => { DEFAULT => { FAILSAFE_PRESERVE => 1, warn => 'Please use "snapshot_preserve" and/or "snapshot_preserve_min"' } } },
snapshot_preserve_weekly => { default => 0, accept => [ "all" ], accept_numeric => 1, context => [ "root", "volume", "subvolume" ],
deprecated => { DEFAULT => { FAILSAFE_PRESERVE => 1, warn => 'Please use "snapshot_preserve" and/or "snapshot_preserve_min"' } } },
snapshot_preserve_monthly => { default => 'all', accept => [ "all" ], accept_numeric => 1, context => [ "root", "volume", "subvolume" ],
deprecated => { DEFAULT => { FAILSAFE_PRESERVE => 1, warn => 'Please use "snapshot_preserve" and/or "snapshot_preserve_min"' } } },
target_preserve_daily => { default => 'all', accept => [ "all" ], accept_numeric => 1,
deprecated => { DEFAULT => { FAILSAFE_PRESERVE => 1, warn => 'Please use "target_preserve" and/or "target_preserve_min"' } } },
target_preserve_weekly => { default => 0, accept => [ "all" ], accept_numeric => 1,
deprecated => { DEFAULT => { FAILSAFE_PRESERVE => 1, warn => 'Please use "target_preserve" and/or "target_preserve_min"' } } },
target_preserve_monthly => { default => 'all', accept => [ "all" ], accept_numeric => 1,
deprecated => { DEFAULT => { FAILSAFE_PRESERVE => 1, warn => 'Please use "target_preserve" and/or "target_preserve_min"' } } },
resume_missing => { default => "yes", accept => [ "yes", "no" ],
deprecated => { yes => { warn => 'ignoring (missing backups are always resumed since btrbk v0.23.0)' },
no => { FAILSAFE_PRESERVE => 1, warn => 'Please use "target_preserve_min latest" and "target_preserve no" if you want to keep only the latest backup', } } },
snapshot_create_always => { default => undef, accept => [ "yes", "no" ],
deprecated => { yes => { warn => "Please use \"snapshot_create always\"",
replace_key => "snapshot_create",
replace_value => "always",
},
no => { warn => "Please use \"snapshot_create no\" or \"snapshot_create ondemand\"",
replace_key => "snapshot_create",
replace_value => "ondemand",
}
},
},
receive_log => { default => undef, accept => [ "sidecar", "no" ], accept_file => { absolute => 1 },
deprecated => { DEFAULT => { warn => "ignoring" } },
}
);
my @config_target_types = qw(send-receive raw);
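# A minimal btrbk.conf using the options above might look like this (illustration only,
# paths and values are examples, not defaults):
#
#   snapshot_dir           btrbk_snapshots
#   snapshot_preserve_min  2d
#   volume /mnt/btr_pool
#     subvolume home
#       target send-receive /mnt/btr_backup
#
# "volume", "subvolume" and "target" are the hardcoded section keys noted above,
# and "send-receive" is one of @config_target_types.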
my %table_formats = (
list_volume => { table => [ qw( volume_host volume_path ) ],
long => [ qw( volume_host volume_path ) ],
raw => [ qw( volume_url volume_host volume_path volume_rsh ) ],
},
list_source => { table => [ qw( source_host source_subvol snapshot_path snapshot_name ) ],
long => [ qw( source_host source_subvol snapshot_path snapshot_name ) ],
raw => [ qw( source_url source_host source_path snapshot_path snapshot_name source_rsh ) ],
},
list_target => { table => [ qw( target_host target_path ) ],
long => [ qw( target_host target_path ) ],
raw => [ qw( target_url target_host target_path target_rsh ) ],
},
list => { table => [ qw( source_host source_subvol snapshot_path snapshot_name target_host target_path ) ],
long => [ qw( source_host source_subvol snapshot_path snapshot_name snapshot_preserve target_host target_path target_preserve ) ],
raw => [ qw( source_url source_host source_subvol snapshot_path snapshot_name snapshot_preserve target_url target_host target_path target_preserve source_rsh target_rsh ) ],
},
resolved => { table => [ qw( source_host source_subvol snapshot_subvol status target_host target_subvol ) ],
long => [ qw( type source_host source_subvol snapshot_subvol status target_host target_subvol ) ],
raw => [ qw( type source_host source_path snapshot_path snapshot_name status target_host target_path source_rsh ) ],
},
schedule => { table => [ qw( action host subvol scheme reason ) ],
long => [ qw( action host root_path subvol_path scheme reason ) ],
raw => [ qw( topic action url host path dow min h d w m y) ],
},
usage => { table => [ qw( host path size used free ) ],
long => [ qw( type host path size device_allocated device_unallocated device_missing used free free_min data_ratio metadata_ratio used global_reserve global_reserve_used ) ],
raw => [ qw( type host path size device_allocated device_unallocated device_missing used free free_min data_ratio metadata_ratio used global_reserve global_reserve_used ) ],
RALIGN => { size=>1, device_allocated=>1, device_unallocated=>1, device_missing=>1, used=>1, free=>1, free_min=>1, data_ratio=>1, metadata_ratio=>1, used=>1, global_reserve=>1, global_reserve_used=>1 },
},
transaction => { table => [ qw( type status target_host target_subvol source_host source_subvol parent_subvol ) ],
long => [ qw( localtime type status duration target_host target_subvol source_host source_subvol parent_subvol message ) ],
raw => [ qw( time localtime type status duration target_url source_url parent_url message ) ],
tlog => [ qw( localtime type status duration target_url source_url parent_url message ) ],
syslog => [ qw( type status duration target_url source_url parent_url message ) ],
},
origin_tree => { table => [ qw( tree uuid parent_uuid received_uuid ) ],
long => [ qw( tree uuid parent_uuid received_uuid recursion ) ],
raw => [ qw( tree uuid parent_uuid received_uuid recursion ) ],
},
);
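# The "table", "long" and "raw" column sets above correspond to the --format=table|long|raw
# command line option; "tlog" and "syslog" are internal patterns used for transaction logging.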
my %url_cache; # map URL to btr_tree node
my %fstab_cache; # map HOST to btrfs mount points
my %uuid_cache; # map UUID to btr_tree node
my %realpath_cache; # map URL to realpath (symlink target)
my $tree_inject_id = 0; # fake subvolume id for injected nodes (negative)
my $fake_uuid_prefix = 'XXXXXXXX-XXXX-XXXX-XXXX-'; # plus 0-padded inject_id: XXXXXXXX-XXXX-XXXX-XXXX-000000000000
my $dryrun;
my $loglevel = 1;
my $do_dumper;
my $show_progress = 0;
my $err = "";
my $abrt = ""; # last ABORTED() message
my $output_format;
my $lockfile;
my $tlog_fh;
my $syslog_enabled = 0;
my $current_transaction;
my @transaction_log;
my %config_override;
my @tm_now; # current localtime ( sec, min, hour, mday, mon, year, wday, yday, isdst )
my %warn_once;
$SIG{__DIE__} = sub {
print STDERR "\nERROR: process died unexpectedly (btrbk v$VERSION)";
print STDERR "\nPlease contact the author: $AUTHOR\n\n";
print STDERR "Stack Trace:\n----------------------------------------\n";
Carp::confess @_;
};
$SIG{INT} = sub {
print STDERR "\nERROR: Cought SIGINT, dumping transaction log:\n";
action("signal", status => "SIGINT");
print_formatted("transaction", \@transaction_log, output_format => "tlog", outfile => *STDERR);
exit 1;
};
sub VERSION_MESSAGE
{
print STDERR $VERSION_INFO . "\n\n";
}
sub HELP_MESSAGE
{
print STDERR "usage: btrbk [options] <command> [filter...]\n";
print STDERR "\n";
print STDERR "options:\n";
# "--------------------------------------------------------------------------------"; # 80
print STDERR " -h, --help display this help message\n";
print STDERR " --version display version information\n";
print STDERR " -c, --config=FILE specify configuration file\n";
print STDERR " -n, --dry-run perform a trial run with no changes made\n";
print STDERR " -p, --preserve preserve all backups (do not delete any old targets)\n";
print STDERR " -r, --resume-only resume only (do not create new snapshots, only resume\n";
print STDERR " missing backups)\n";
print STDERR " -v, --verbose be verbose (set loglevel=info)\n";
print STDERR " -q, --quiet be quiet (do not print summary for the \"run\" command)\n";
print STDERR " -l, --loglevel=LEVEL set logging level (warn, info, debug, trace)\n";
print STDERR " -t, --table change output to table format\n";
print STDERR " --format=FORMAT change output format, FORMAT=table|long|raw\n";
print STDERR " --print-schedule print scheduler details (for the \"run\" command)\n";
print STDERR " --progress show progress bar on send-receive operation\n";
print STDERR "\n";
print STDERR "commands:\n";
print STDERR " run perform backup operations as defined in the config\n";
print STDERR " dryrun don't run btrfs commands; show what would be executed\n";
print STDERR " stats print snapshot/backup statistics\n";
print STDERR " list <subcommand> available subcommands are:\n";
print STDERR " backups all backups and corresponding snapshots\n";
print STDERR " snapshots all snapshots and corresponding backups\n";
print STDERR " latest most recent snapshots and backups\n";
print STDERR " config configured source/snapshot/target relations\n";
print STDERR " source configured source/snapshot relations\n";
print STDERR " volume configured volume sections\n";
print STDERR " target configured targets\n";
print STDERR " clean delete incomplete (garbled) backups\n";
print STDERR " archive <src> <dst> recursively copy all subvolumes (experimental)\n";
print STDERR " usage print filesystem usage\n";
print STDERR " origin <subvol> print origin information for subvolume\n";
print STDERR " diff <from> <to> shows new files between related subvolumes\n";
print STDERR "\n";
print STDERR "For additional information, see $PROJECT_HOME\n";
}
sub TRACE { my $t = shift; print STDERR "... $t\n" if($loglevel >= 4); }
sub DEBUG { my $t = shift; print STDERR "$t\n" if($loglevel >= 3); }
sub INFO { my $t = shift; print STDERR "$t\n" if($loglevel >= 2); }
sub WARN { my $t = shift; print STDERR "WARNING: $t\n" if($loglevel >= 1); }
sub ERROR { my $t = shift; print STDERR "ERROR: $t\n"; }
sub WARN_ONCE {
my $t = shift;
if($warn_once{$t}) { DEBUG("WARN AGAIN: " . $t); }
else { $warn_once{$t} = 1; WARN($t); }
}
sub VINFO {
return undef unless($do_dumper);
my $vinfo = shift; my $t = shift || "vinfo"; my $maxdepth = shift // 2;
print STDERR Data::Dumper->new([$vinfo], [$t])->Maxdepth($maxdepth)->Dump();
}
sub SUBVOL_LIST {
return undef unless($do_dumper);
my $vol = shift; my $t = shift // "SUBVOL_LIST"; my $svl = vinfo_subvol_list($vol);
print STDERR "$t:\n " . join("\n ", map { "$vol->{PRINT}/./$_->{SUBVOL_PATH}\t$_->{node}{id}" } @$svl) . "\n";
}
sub URL_CACHE {
print STDERR "URL_CACHE:\n" . join("\n", (sort keys %url_cache)) . "\n";
}
sub ABORTED($;$)
{
my $config = shift;
$abrt = shift;
$config = $config->{CONFIG} if($config->{CONFIG}); # accept vinfo for $config
return $config->{ABORTED} unless(defined($abrt));
unless($abrt eq "USER_SKIP") {
$abrt =~ s/\n/\\\\/g;
$abrt =~ s/\r//g;
action("abort_" . ($config->{CONTEXT} || "undef"),
status => "ABORT",
2015-10-12 23:58:38 +02:00
vinfo_prefixed_keys("target", vinfo($config->{url}, $config)),
message => $abrt,
);
}
$abrt = 1 unless($abrt); # make sure $abrt is always a true value
$config->{ABORTED} = $abrt;
2015-10-12 22:26:36 +02:00
}
sub eval_quiet(&)
{
local $SIG{__DIE__};
return eval { $_[0]->() }
}
sub require_data_dumper
{
if(eval_quiet { require Data::Dumper; }) {
Data::Dumper->import("Dumper");
$Data::Dumper::Sortkeys = 1;
$Data::Dumper::Quotekeys = 0;
$do_dumper = 1;
# silence perl warning: Name "Data::Dumper::Sortkeys" used only once: possible typo at...
TRACE "Successfully loaded Dumper module: sortkeys=$Data::Dumper::Sortkeys, quotekeys=$Data::Dumper::Quotekeys";
} else {
WARN "Perl module \"Data::Dumper\" not found: data trace dumps disabled!" if($loglevel >=4);
}
}
sub init_transaction_log($$)
{
my $file = shift;
my $config_syslog_facility = shift;
if(defined($file) && (not $dryrun)) {
if(open($tlog_fh, ">> $file")) {
# print headers (disabled)
# print_formatted("transaction", [ ], output_format => "tlog", outfile => $tlog_fh);
INFO "Using transaction log: $file";
} else {
$tlog_fh = undef;
ERROR "Failed to open transaction log '$file': $!";
}
}
if(defined($config_syslog_facility) && (not $dryrun)) {
DEBUG "Opening syslog";
if(eval_quiet { require Sys::Syslog; }) {
$syslog_enabled = 1;
Sys::Syslog::openlog("btrbk", "", $config_syslog_facility);
DEBUG "Syslog enabled";
}
else {
WARN "Syslog disabled: $@";
}
}
action("startup", status => "v$VERSION", message => "$VERSION_INFO");
}
sub close_transaction_log()
{
if($tlog_fh) {
DEBUG "Closing transaction log";
close $tlog_fh || ERROR "Failed to close transaction log: $!";
}
if($syslog_enabled) {
DEBUG "Closing syslog";
eval_quiet { Sys::Syslog::closelog(); };
}
2015-10-13 18:24:30 +02:00
}
sub action($@)
{
my $type = shift // die;
my $h = { @_ };
my $time = $h->{time} // time;
$h->{type} = $type;
$h->{time} = $time;
$h->{localtime} = timestamp($time, 'debug-iso');
print_formatted("transaction", [ $h ], output_format => "tlog", no_header => 1, outfile => $tlog_fh) if($tlog_fh);
print_formatted("transaction", [ $h ], output_format => "syslog", no_header => 1) if($syslog_enabled); # dirty hack, this calls syslog()
push @transaction_log, $h;
return $h;
}
sub start_transaction($@)
{
my $type = shift // die;
my $time = time;
die("start_transaction() while transaction is running") if($current_transaction);
my @actions = (ref($_[0]) eq "HASH") ? @_ : { @_ }; # single action is not hashref
$current_transaction = [];
foreach (@actions) {
push @$current_transaction, action($type, %$_, status => "starting", time => $time);
}
}
sub end_transaction($$)
{
my $type = shift // die;
my $status = shift // die;
my $time = time;
die("end_transaction() while no transaction is running") unless($current_transaction);
foreach (@$current_transaction) {
die("end_transaction() has different type") unless($_->{type} eq $type);
action($type, %$_, status => $status, duration => ($dryrun ? undef : ($time - $_->{time})));
}
$current_transaction = undef;
}
sub syslog($)
{
return undef unless($syslog_enabled);
my $line = shift;
eval_quiet { Sys::Syslog::syslog("info", $line); };
}
sub check_exe($)
{
my $cmd = shift // die;
foreach my $path (split(":", $ENV{PATH})) {
return 1 if( -x "$path/$cmd" );
}
return 0;
}
sub safe_cmd($)
{
my $aref = shift;
foreach(@$aref) {
unless(/^$safe_cmd_match$/) {
ERROR "Unsafe command `" . join(' ', @$aref) . "` (offending string: \"$_\")";
return undef;
}
}
return join(' ', @$aref);
}
sub compress_cmd($;$)
{
my $def = shift;
my $decompress = shift;
return undef unless(defined($def));
my $cc = $compression{$def->{key}};
my @cmd = $decompress ? @{$cc->{decompress_cmd}} : @{$cc->{compress_cmd}};
if((not $decompress) && defined($def->{level}) && ($def->{level} ne "default")) {
my $level = $def->{level};
if($level < $cc->{level_min}) {
WARN_ONCE "Compression level capped to minimum for '$cc->{name}': $cc->{level_min}";
$level = $cc->{level_min};
}
if($level > $cc->{level_max}) {
WARN_ONCE "Compression level capped to maximum for '$cc->{name}': $cc->{level_max}";
$level = $cc->{level_max};
}
push @cmd, '-' . $level;
}
if(defined($def->{threads}) && ($def->{threads} ne "default")) {
my $thread_opt = $cc->{threads};
if($thread_opt) {
push @cmd, $thread_opt . $def->{threads};
}
else {
WARN_ONCE "Threading is not supported for '$cc->{name}', ignoring";
}
}
return join(' ', @cmd);
}
sub decompress_cmd($)
{
return compress_cmd($_[0], 1);
}
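# Example (illustration): for a definition { key => 'pigz', level => 6, threads => 4 },
# compress_cmd() returns "pigz -c -6 -p4" and decompress_cmd() returns "pigz -d -c -p4",
# as built from the %compression table above.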
sub run_cmd(@)
{
# shell-based implementation.
# this needs some redirection magic for filter_stderr to work.
# NOTE: multiple filters are not supported!
my @cmd_pipe = (ref($_[0]) eq "HASH") ? @_ : { @_ };
die unless(scalar(@cmd_pipe));
$err = "";
my $destructive = 0;
my $catch_stderr = 0;
my $filter_stderr = undef;
my @cmd_text;
my $compressed = undef;
foreach my $href (@cmd_pipe)
{
$catch_stderr = 1 if($href->{catch_stderr});
$filter_stderr = $href->{filter_stderr} if($href->{filter_stderr}); # NOTE: last filter wins!
$destructive = 1 unless($href->{non_destructive});
if($href->{compress}) {
if($compressed && ($compression{$compressed->{key}}->{format} ne $compression{$href->{compress}->{key}}->{format})) {
push @cmd_text, { text => decompress_cmd($compressed) };
$compressed = undef;
}
unless($compressed) {
push @cmd_text, { text => compress_cmd($href->{compress}) };
$compressed = $href->{compress};
}
}
elsif($href->{redirect}) {
my $file = safe_cmd([ $href->{redirect} ]);
return undef unless(defined($file));
if($href->{rsh}) {
my $rsh_text = safe_cmd($href->{rsh});
return undef unless(defined($rsh_text));
push @cmd_text, { text => $rsh_text . " 'cat > " . $file . "'" };
}
else {
push @cmd_text, { redirect => $file };
}
}
elsif($href->{cmd}) {
my $sc = safe_cmd($href->{cmd});
return undef unless(defined($sc));
if($href->{rsh}) {
my $rsh_text = safe_cmd($href->{rsh});
return undef unless(defined($rsh_text));
if($href->{rsh_compress_in}) {
if($compressed && ((not $href->{compressed_ok}) ||
($compression{$compressed->{key}}->{format} ne $compression{$href->{rsh_compress_in}->{key}}->{format})))
{
push @cmd_text, { text => decompress_cmd($compressed) };
$compressed = undef;
}
unless($compressed) {
push @cmd_text, { text => compress_cmd($href->{rsh_compress_in}) };
$compressed = $href->{rsh_compress_in};
}
unless($href->{compressed_ok}) {
$sc = decompress_cmd($href->{rsh_compress_in}) . ' | ' . $sc;
$compressed = undef;
}
}
if($href->{rsh_compress_out}) {
$sc .= ' | ' . compress_cmd($href->{rsh_compress_out});
$compressed = $href->{rsh_compress_out};
}
$sc = $rsh_text . " '" . $sc . "'";
}
else {
if($compressed && (not ($href->{compressed_ok}))) {
push @cmd_text, { text => decompress_cmd($compressed) };
$compressed = undef;
}
}
push @cmd_text, { text => $sc,
catch_stderr => $href->{catch_stderr} };
}
}
# cmd result is something like this:
# { btrfs send <src> 2>&3 | pv | btrfs receive <dst> 2>&3 ; } 3>&1
my $cmd_print = "";
my $cmd = "{ ";
my $pipe = "";
foreach (@cmd_text) {
if($_->{redirect}) {
die unless($pipe);
$cmd_print .= ' > ' . $_->{redirect};
$cmd .= ' > ' . $_->{redirect};
$pipe = undef; # this dies if it is not last command
} else {
$cmd_print .= $pipe . $_->{text};
$cmd .= $pipe . $_->{text};
$cmd .= ' 2>&3' if($_->{catch_stderr});
$pipe = ' | ';
}
}
$cmd .= ' ; } 3>&1';
if($catch_stderr) {
if(scalar(@cmd_text) == 1) {
# no pipes, simply redirect stderr to stdout
$cmd = $cmd_print . ' 2>&1';
}
}
else {
$cmd = $cmd_print;
}
# hide redirection magic from debug output
if($dryrun && $destructive) {
DEBUG "### (dryrun) $cmd_print";
return "";
}
DEBUG "### $cmd_print";
# execute command and parse output
TRACE "Executing command: $cmd";
my $ret = "";
$ret = `$cmd`;
chomp($ret);
TRACE "Command output:\n$ret";
if($?) {
my $exitcode= $? >> 8;
my $signal = $? & 127;
DEBUG "Command execution failed (exitcode=$exitcode" . ($signal ? ", signal=$signal" : "") . "): \"$cmd\"";
if($catch_stderr) {
$_ = $ret;
&{$filter_stderr} ($cmd) if($filter_stderr);
ERROR "[$cmd_print] $_" if($_);
}
return undef;
}
else {
DEBUG "Command execution successful";
}
return $ret;
}
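# Illustration of the pipe assembly above (hypothetical paths): a cmd_pipe of
#   { cmd => [qw(btrfs send /src/snap)], catch_stderr => 1 },
#   { cmd => [qw(btrfs receive /dst)], catch_stderr => 1 }
# is executed as: { btrfs send /src/snap 2>&3 | btrfs receive /dst 2>&3 ; } 3>&1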
sub add_pv_command($@)
{
my $cmd_pipe = shift || die;
my %opts = @_;
my $rate_limit = $opts{rate_limit};
if($opts{show_progress}) {
if($rate_limit) {
push @$cmd_pipe, { cmd => [ 'pv', '-trab', '-L', $rate_limit ], compressed_ok => 1 };
} else {
push @$cmd_pipe, { cmd => [ 'pv', '-trab' ], compressed_ok => 1 };
}
}
elsif($rate_limit) {
push @$cmd_pipe, { cmd => [ 'pv', '-q', '-L', $rate_limit ], compressed_ok => 1 };
}
}
sub btrfs_filesystem_show_all_local()
{
return run_cmd( cmd => [ qw(btrfs filesystem show) ],
non_destructive => 1
);
}
sub btrfs_filesystem_show($)
{
my $vol = shift || die;
my $path = $vol->{PATH} // die;
return run_cmd( cmd => [ qw(btrfs filesystem show), $path ],
rsh => vinfo_rsh($vol),
non_destructive => 1
);
}
sub btrfs_filesystem_df($)
{
my $vol = shift || die;
my $path = $vol->{PATH} // die;
return run_cmd( cmd => [qw(btrfs filesystem df), $path],
rsh => vinfo_rsh($vol),
non_destructive => 1
);
}
sub btrfs_filesystem_usage($)
{
my $vol = shift || die;
my $path = $vol->{PATH} // die;
my $ret = run_cmd( cmd => [ qw(btrfs filesystem usage), $path ],
rsh => vinfo_rsh($vol),
non_destructive => 1
);
return undef unless(defined($ret));
my %detail;
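# The parser below expects "btrfs filesystem usage" lines of the form (values illustrative):
#   Device size:          100.00GiB
#   Free (estimated):      48.12GiB  (min: 24.06GiB)
#   Global reserve:        16.00MiB  (used: 0.00B)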
foreach (split("\n", $ret)) {
if(/^\s+Device size:\s+(\S+)/) {
$detail{size} = $1;
}
elsif(/^\s+Device allocated:\s+(\S+)/) {
$detail{device_allocated} = $1;
}
elsif(/^\s+Device unallocated:\s+(\S+)/) {
$detail{device_unallocated} = $1;
}
elsif(/^\s+Device missing:\s+(\S+)/) {
$detail{device_missing} = $1;
}
elsif(/^\s+Used:\s+(\S+)/) {
$detail{used} = $1;
}
elsif(/^\s+Free \(estimated\):\s+(\S+)\s+\(min: (\S+)\)/) {
$detail{free} = $1;
$detail{free_min} = $2;
}
elsif(/^\s+Data ratio:\s+(\S+)/) {
$detail{data_ratio} = $1;
}
elsif(/^\s+Metadata ratio:\s+(\S+)/) {
$detail{metadata_ratio} = $1;
}
elsif(/^\s+Used:\s+(\S+)/) {
$detail{used} = $1;
}
elsif(/^\s+Global reserve:\s+(\S+)\s+\(used: (\S+)\)/) {
$detail{global_reserve} = $1;
$detail{global_reserve_used} = $2;
}
else {
TRACE "Failed to parse filesystem usage line \"$_\" for: $vol->{PRINT}";
}
}
DEBUG "Parsed " . scalar(keys %detail) . " filesystem usage detail items: $vol->{PRINT}";
TRACE(Data::Dumper->Dump([\%detail], ["btrfs_filesystem_usage($vol->{URL})"])) if($do_dumper);
return \%detail;
}
# returns hashref with keys: (name uuid parent_uuid id gen cgen top_level)
# for btrfs-progs >= 4.1, also returns key: "received_uuid"
sub btrfs_subvolume_show($)
{
my $vol = shift || die;
my $path = $vol->{PATH} // die;
my $ret = run_cmd(cmd => [ qw(btrfs subvolume show), $path],
rsh => vinfo_rsh($vol),
non_destructive => 1,
catch_stderr => 1, # hack for shell-based run_cmd()
filter_stderr => sub {
if(/ssh command rejected/) {
# catch errors from ssh_filter_btrbk.sh
$err = "ssh command rejected (please fix ssh_filter_btrbk.sh)";
}
elsif(/^ERROR: (.*)/) {
# catch errors from btrfs command
$err = $1;
}
else {
DEBUG "Unparsed error: $_";
$err = $_;
}
# consume stderr line, as $err will be displayed as a user-friendly WARNING
$_ = undef;
}
);
return undef unless(defined($ret));
my $real_path;
if($ret =~ /^($file_match)/) {
$real_path = $1;
$real_path = check_file($real_path, { absolute => 1 });
return undef unless(defined($real_path));
DEBUG "Real path for subvolume \"$vol->{PRINT}\" is: $real_path" if($real_path ne $path);
$realpath_cache{$vol->{URL}} = $real_path if($real_path ne $path);
}
else {
$real_path = $path;
WARN "No real path provided by \"btrfs subvolume show\" for subvolume \"$vol->{PRINT}\", using: $path";
}
my %detail = ( REAL_PATH => $real_path );
if($ret =~ /^\Q$real_path\E is (btrfs root|toplevel subvolume)/) {
# btrfs-progs < 4.4 prints: "<subvol> is btrfs root"
# btrfs-progs >= 4.4 prints: "<subvol> is toplevel subvolume"
DEBUG "found btrfs root: $vol->{PRINT}";
$detail{id} = 5;
$detail{is_root} = 1;
}
elsif($ret =~ /^\Q$real_path\E/) {
TRACE "btr_detail: found btrfs subvolume: $vol->{PRINT}";
# NOTE: received_uuid is not required here, as btrfs-progs < 4.1 does not give us that information.
# no worries, we get this from btrfs_subvolume_list() for all subvols.
my @required_keys = qw(name uuid parent_uuid id gen cgen top_level);
my %trans = (
"Name" => "name",
"uuid" => "uuid",
"UUID" => "uuid", # btrfs-progs >= 4.1
"Parent uuid" => "parent_uuid",
"Parent UUID" => "parent_uuid", # btrfs-progs >= 4.1
"Received UUID" => "received_uuid", # btrfs-progs >= 4.1
"Creation time" => "creation_time",
"Object ID" => "id",
"Subvolume ID" => "id", # btrfs-progs >= 4.1
"Generation (Gen)" => "gen",
"Generation" => "gen", # btrfs-progs >= 4.1
"Gen at creation" => "cgen",
"Parent" => "parent_id",
"Parent ID" => "parent_id", # btrfs-progs >= 4.1
"Top Level" => "top_level",
"Top level ID" => "top_level", # btrfs-progs >= 4.1
"Flags" => "flags",
);
foreach (split("\n", $ret)) {
next unless /^\s+(.+):\s+(.*)$/;
my ($key, $value) = ($1, $2);
if($trans{$key}) {
$detail{$trans{$key}} = $value;
} else {
WARN "Failed to parse subvolume detail \"$key: $value\" for: $vol->{PRINT}";
}
}
DEBUG "Parsed " . scalar(keys %detail) . " subvolume detail items: $vol->{PRINT}";
VINFO(\%detail, "detail") if($loglevel >=4);
foreach(@required_keys) {
unless(defined($detail{$_})) {
ERROR "Failed to parse subvolume detail (unsupported btrfs-progs) for: $vol->{PRINT}";
return undef;
}
}
}
else {
ERROR "Failed to parse subvolume detail (unsupported btrfs-progs) for: $vol->{PRINT}";
return undef;
}
return \%detail;
}
sub btrfs_subvolume_list_readonly_flag($)
{
my $vol = shift || die;
my $path = $vol->{PATH} // die;
my $ret = run_cmd(cmd => [ qw(btrfs subvolume list), '-a', '-r', $path ],
rsh => vinfo_rsh($vol),
non_destructive => 1,
);
return undef unless(defined($ret));
my %ro;
foreach (split(/\n/, $ret))
{
die("Failed to parse line: \"$_\"") unless(/^ID ([0-9]+) gen [0-9]+ top level [0-9]+ path /);
$ro{$1} = 1;
}
DEBUG "Parsed " . scalar(keys %ro) . " readonly subvolumes for filesystem at: $vol->{PRINT}";
return \%ro;
}
sub btrfs_subvolume_list($;@)
{
my $vol = shift || die;
my %opts = @_;
my $path = $vol->{PATH} // die; # deliberately NOT using REAL_PATH here!
my @filter_options = ('-a');
push(@filter_options, '-o') if($opts{subvol_only});
# NOTE: btrfs-progs <= 3.17 do NOT support the '-R' flag.
# NOTE: Support for btrfs-progs <= 3.17 has been dropped in
# btrbk-0.23, as the received_uuid flag is essential!
my @display_options = ('-c', '-u', '-q', '-R');
my $ret = run_cmd(cmd => [ qw(btrfs subvolume list), @filter_options, @display_options, $path ],
rsh => vinfo_rsh($vol),
non_destructive => 1,
);
return undef unless(defined($ret));
my @nodes;
foreach (split(/\n/, $ret))
{
# ID <ID> top level <ID> path <path> where path is the relative path
# of the subvolume to the top level subvolume. The subvolume's ID may
# be used by the subvolume set-default command, or at mount time via
# the subvolid= option. If -p is given, then parent <ID> is added to
# the output between ID and top level. The parent's ID may be used at
# mount time via the subvolrootid= option.
# NOTE: btrfs-progs prior to v3.17 do not support the -R flag (support for these versions was dropped in btrbk-0.23, see above).
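# A typical line parsed below looks like this (IDs and UUID are made up):
# "ID 257 gen 123 cgen 120 top level 5 parent_uuid - received_uuid - uuid 01234567-89ab-cdef-0123-456789abcdef path <FS_TREE>/home"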
my %node;
unless(/^ID ([0-9]+) gen ([0-9]+) cgen ([0-9]+) top level ([0-9]+) parent_uuid ([0-9a-z-]+) received_uuid ([0-9a-z-]+) uuid ([0-9a-z-]+) path (.+)$/) {
ERROR "Failed to parse subvolume list (unsupported btrfs-progs) for: $vol->{PRINT}";
DEBUG "Offending line: $_";
return undef;
}
%node = (
id => $1,
gen => $2,
cgen => $3,
top_level => $4,
parent_uuid => $5, # note: parent_uuid="-" if no parent
received_uuid => $6,
uuid => $7,
path => $8 # btrfs path, NOT filesystem path
);
# NOTE: "btrfs subvolume list <path>" prints <FS_TREE> prefix only if
# the subvolume is reachable within <path>. (as of btrfs-progs-3.18.2)
#
# NOTE: Be prepared for this to change in btrfs-progs!
$node{path} =~ s/^<FS_TREE>\///; # remove "<FS_TREE>/" portion from "path".
push @nodes, \%node;
}
DEBUG "Parsed " . scalar(@nodes) . " total subvolumes for filesystem at: $vol->{PRINT}";
# fetch readonly flag
# NOTE: the only way to get "readonly" flag is via a second call to "btrfs subvol list" with the "-r" option (as of btrfs-progs v4.3.1)
my $ro = btrfs_subvolume_list_readonly_flag($vol);
return undef unless(defined($ro));
foreach (@nodes) {
$_->{readonly} = $ro->{$_->{id}} // 0;
}
return \@nodes;
}
sub btrfs_subvolume_find_new($$;$)
{
my $vol = shift || die;
my $path = $vol->{PATH} // die;
my $lastgen = shift // die;
my $ret = run_cmd(cmd => [ qw(btrfs subvolume find-new), $path, $lastgen ],
rsh => vinfo_rsh($vol),
non_destructive => 1,
);
unless(defined($ret)) {
ERROR "Failed to fetch modified files for: $vol->{PRINT}";
return undef;
}
my %files;
my $parse_errors = 0;
my $transid_marker;
foreach (split(/\n/, $ret))
{
if(/^inode \S+ file offset (\S+) len (\S+) disk start \S+ offset \S+ gen (\S+) flags (\S+) (.+)$/) {
my $file_offset = $1;
my $len = $2;
my $gen = $3;
my $flags = $4;
my $name = $5;
$files{$name}->{len} += $len;
$files{$name}->{new} = 1 if($file_offset == 0);
$files{$name}->{gen}->{$gen} = 1; # count the generations
if($flags eq "COMPRESS") {
$files{$name}->{flags}->{compress} = 1;
}
elsif($flags eq "COMPRESS|INLINE") {
$files{$name}->{flags}->{compress} = 1;
$files{$name}->{flags}->{inline} = 1;
}
elsif($flags eq "INLINE") {
$files{$name}->{flags}->{inline} = 1;
}
elsif($flags eq "NONE") {
}
else {
WARN "unparsed flags: $flags";
}
}
elsif(/^transid marker was (\S+)$/) {
$transid_marker = $1;
}
else {
$parse_errors++;
}
}
return { files => \%files,
transid_marker => $transid_marker,
parse_errors => $parse_errors,
};
}
# returns $target, or undef on error
sub btrfs_subvolume_snapshot($$)
{
my $svol = shift || die;
my $target_vol = shift // die;
my $target_path = $target_vol->{PATH} // die;
my $src_path = $svol->{PATH} // die;
INFO "[snapshot] source: $svol->{PRINT}";
INFO "[snapshot] target: $target_vol->{PRINT}";
start_transaction("snapshot",
vinfo_prefixed_keys("target", $target_vol),
vinfo_prefixed_keys("source", $svol),
);
my $ret = run_cmd(cmd => [ qw(btrfs subvolume snapshot), '-r', $src_path, $target_path ],
rsh => vinfo_rsh($svol),
);
end_transaction("snapshot", ($dryrun ? "DRYRUN" : (defined($ret) ? "success" : "ERROR")));
unless(defined($ret)) {
ERROR "Failed to create btrfs subvolume snapshot: $svol->{PRINT} -> $target_path";
return undef;
}
return $target_vol;
}
sub btrfs_subvolume_delete($@)
{
my $targets = shift // die;
my %opts = @_;
my $commit = $opts{commit};
die if($commit && ($commit ne "after") && ($commit ne "each"));
$targets = [ $targets ] unless(ref($targets) eq "ARRAY");
return 0 unless(scalar(@$targets));
my $rsh = vinfo_rsh($targets->[0]);
my $rsh_host_check = $targets->[0]->{HOST} || "";
foreach (@$targets) {
# make sure all targets share same HOST
my $host = $_->{HOST} || "";
die if($rsh_host_check ne $host);
2015-01-09 18:09:32 +01:00
}
INFO "[delete] options: commit-$commit" if($commit);
INFO "[delete] target: $_->{PRINT}" foreach(@$targets);
my @options;
@options = ("--commit-$commit") if($commit);
my @target_paths = map( { $_->{PATH} } @$targets);
start_transaction($opts{type} // "delete",
map( { { vinfo_prefixed_keys("target", $_) }; } @$targets)
);
my $ret = run_cmd(cmd => [ qw(btrfs subvolume delete), @options, @target_paths ],
rsh => $rsh,
);
end_transaction($opts{type} // "delete", ($dryrun ? "DRYRUN" : (defined($ret) ? "success" : "ERROR")));
ERROR "Failed to delete btrfs subvolumes: " . join(' ', map( { $_->{PRINT} } @$targets)) unless(defined($ret));
return defined($ret) ? scalar(@$targets) : undef;
}
sub btrfs_send_receive($$$$;@)
{
my $snapshot = shift || die;
my $target = shift || die;
my $parent = shift;
my $ret_vol_received = shift;
my %opts = @_;
my $snapshot_path = $snapshot->{PATH} // die;
my $target_path = $target->{PATH} // die;
my $parent_path = $parent ? $parent->{PATH} : undef;
my $vol_received = vinfo_child($target, $snapshot->{NAME});
$$ret_vol_received = $vol_received if(ref $ret_vol_received);
print STDOUT "Creating backup: $vol_received->{PRINT}\n" if($show_progress && (not $dryrun));
INFO "[send/receive] source: $snapshot->{PRINT}";
INFO "[send/receive] parent: $parent->{PRINT}" if($parent);
INFO "[send/receive] target: $vol_received->{PRINT}";
my @send_options;
my @receive_options;
push(@send_options, '-p', $parent_path) if($parent_path);
# push(@send_options, '-v') if($loglevel >= 3);
# push(@receive_options, '-v') if($loglevel >= 3);
my @cmd_pipe;
push @cmd_pipe, {
cmd => [ qw(btrfs send), @send_options, $snapshot_path ],
rsh => vinfo_rsh($snapshot, disable_compression => config_compress_hash($snapshot, "stream_compress")),
rsh_compress_out => config_compress_hash($snapshot, "stream_compress"),
name => "btrfs send",
catch_stderr => 1, # hack for shell-based run_cmd()
};
add_pv_command(\@cmd_pipe, show_progress => $show_progress, rate_limit => $opts{rate_limit});
push @cmd_pipe, {
cmd => [ qw(btrfs receive), @receive_options, $target_path . '/' ],
rsh => vinfo_rsh($target, disable_compression => config_compress_hash($target, "stream_compress")),
name => "btrfs receive",
rsh_compress_in => config_compress_hash($target, "stream_compress"),
catch_stderr => 1, # hack for shell-based run_cmd()
filter_stderr => sub { $err = $_; $_ = undef }
};
my $send_receive_error = 0;
start_transaction("send-receive",
vinfo_prefixed_keys("target", $vol_received),
vinfo_prefixed_keys("source", $snapshot),
vinfo_prefixed_keys("parent", $parent),
);
my $ret = run_cmd(@cmd_pipe);
unless(defined($ret)) {
$send_receive_error = 1;
$ret = $err; # print the errors below
}
if(defined($ret)) {
# NOTE: if "btrfs send" fails, "btrfs receive" returns 0! so we need to parse the output...
foreach(split("\n", $ret)) {
if(/^ERROR: /) {
ERROR $';
$send_receive_error = 1;
}
elsif(/^WARNING: /) {
WARN "[send/receive] (send=$snapshot_path, receive=$target_path) $'";
}
else {
WARN "[send/receive] (send=$snapshot_path, receive=$target_path) $_" if($send_receive_error);
}
}
}
end_transaction("send-receive", ($dryrun ? "DRYRUN" : ($send_receive_error ? "ERROR" : "success")));
if($send_receive_error) {
ERROR "Failed to send/receive btrfs subvolume: $snapshot->{PRINT} " . ($parent_path ? "[$parent_path]" : "") . " -> $target->{PRINT}";
# NOTE: btrfs-progs v3.19.1 does not delete garbled received subvolume,
# we need to do this by hand.
# TODO: remove this as soon as btrfs-progs handle receive errors correctly.
DEBUG "send/received failed, deleting (possibly present and garbled) received subvolume: $vol_received->{PRINT}";
my $ret = btrfs_subvolume_delete($vol_received, commit => "after", type => "delete_garbled");
if(defined($ret)) {
WARN "Deleted partially received (garbled) subvolume: $vol_received->{PRINT}";
}
else {
WARN "Deletion of partially received (garbled) subvolume failed, assuming clean environment: $vol_received->{PRINT}";
}
return undef;
}
return 1;
}
sub btrfs_send_to_file($$$$;@)
{
my $source = shift || die;
my $target = shift || die;
my $parent = shift;
my $ret_vol_received = shift;
my %opts = @_;
my $source_path = $source->{PATH} // die;
my $target_path = $target->{PATH} // die;
my $parent_path = $parent ? $parent->{PATH} : undef;
my $parent_uuid = $parent ? $parent->{node}{uuid} : undef ;
my $received_uuid = $source->{node}{uuid};
die unless($received_uuid);
die if($parent && !$parent_uuid);
my $target_filename = $source->{NAME} || die;
$target_filename .= "--$received_uuid";
$target_filename .= '@' . $parent_uuid if($parent_uuid);
$target_filename .= ".btrfs";
my @send_options;
push(@send_options, '-v') if($loglevel >= 3);
push(@send_options, '-p', $parent_path) if($parent_path);
my @cmd_pipe;
push @cmd_pipe, {
cmd => [ qw(btrfs send), @send_options, $source_path ],
rsh => vinfo_rsh($source, disable_compression => config_compress_hash($source, "stream_compress")),
name => "btrfs send",
rsh_compress_out => config_compress_hash($source, "stream_compress"),
};
add_pv_command(\@cmd_pipe, show_progress => $show_progress, rate_limit => $opts{rate_limit});
if($opts{compress}) {
$target_filename .= '.' . $compression{$opts{compress}->{key}}->{format};
push @cmd_pipe, { compress => $opts{compress} };
}
if($opts{encrypt}) {
die unless($opts{encrypt}->{type} eq "gpg");
$target_filename .= '.gpg';
my @gpg_options = ( '--batch', '--no-tty', '--trust-model', 'always', '--compress-algo', 'none' );
push(@gpg_options, ( '--no-default-keyring', '--keyring', $opts{encrypt}->{keyring} )) if($opts{encrypt}->{keyring});
push(@gpg_options, ( '--default-recipient', $opts{encrypt}->{recipient} )) if($opts{encrypt}->{recipient});
push @cmd_pipe, {
cmd => [ 'gpg', @gpg_options, '--encrypt' ],
name => 'gpg',
compressed_ok => 1,
};
}
push @cmd_pipe, {
redirect => "${target_path}/${target_filename}.part",
rsh => vinfo_rsh($target),
compressed_ok => 1,
};
my $vol_received = vinfo_child($target, $target_filename);
$$ret_vol_received = $vol_received if(ref $ret_vol_received);
print STDOUT "Creating raw backup: $vol_received->{PRINT}\n" if($show_progress && (not $dryrun));
INFO "[send-to-raw] source: $source->{PRINT}";
INFO "[send-to-raw] parent: $parent->{PRINT}" if($parent);
INFO "[send-to-raw] target: $vol_received->{PRINT}";
start_transaction("send-to-raw",
vinfo_prefixed_keys("target", $vol_received),
vinfo_prefixed_keys("source", $source),
vinfo_prefixed_keys("parent", $parent),
);
my $ret = run_cmd(@cmd_pipe);
if(defined($ret)) {
# Test target file for "exists and size > 0" after writing,
# as we can not rely on the exit status of the command pipe,
# and the shell command always creates the target file.
DEBUG "Testing target file (non-zero size): $target->{PRINT}.part";
$ret = run_cmd({
cmd => ['test', '-s', "${target_path}/${target_filename}.part"],
2016-05-10 15:51:44 +02:00
rsh => vinfo_rsh($target),
2016-03-15 11:21:59 +01:00
name => "test",
});
2016-03-22 19:05:12 +01:00
if(defined($ret)) {
DEBUG "Renaming target file (remove postfix '.part'): $target->{PRINT}";
$ret = run_cmd({
cmd => ['mv', "${target_path}/${target_filename}.part", "${target_path}/${target_filename}"],
2016-05-10 15:51:44 +02:00
rsh => vinfo_rsh($target),
2016-03-22 19:05:12 +01:00
name => "mv",
});
}
2016-03-15 11:21:59 +01:00
}
end_transaction("send-to-raw", ($dryrun ? "DRYRUN" : (defined($ret) ? "success" : "ERROR")));
unless(defined($ret)) {
ERROR "Failed to send btrfs subvolume to raw file: $source->{PRINT} " . ($parent_path ? "[$parent_path]" : "") . " -> $vol_received->{PRINT}";
2015-10-23 21:28:58 +02:00
return undef;
}
2015-04-07 11:52:45 +02:00
return 1;
2015-01-16 17:29:04 +01:00
}
2016-03-30 15:32:28 +02:00
sub system_list_mounts($)
{
my $vol = shift // die;
my $file = '/proc/self/mounts';
my $ret = run_cmd(cmd => [ qw(cat), $file ],
2016-05-10 15:51:44 +02:00
rsh => vinfo_rsh($vol),
2016-03-30 15:32:28 +02:00
non_destructive => 1,
catch_stderr => 1, # hack for shell-based run_cmd()
);
return undef unless(defined($ret));
my @mounts;
foreach (split(/\n/, $ret))
{
# from fstab(5)
unless(/^(\S+) (\S+) (\S+) (\S+) (\S+) (\S+)$/) {
ERROR "Failed to parse \"$file\" on " . ($vol->{HOST} || "localhost");
DEBUG "Offending line: $_";
return undef;
}
my %line = (
spec => $1,
file => $2,
vfstype => $3,
mntops => $4,
freq => $5,
passno => $6,
);
foreach (split(',', $line{mntops})) {
if(/^(.+?)=(.+)$/) {
$line{MNTOPS}->{$1} = $2;
} else {
$line{MNTOPS}->{$_} = 1;
}
}
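# Example: a /proc/self/mounts line like
#   /dev/sda1 /mnt/btr_pool btrfs rw,relatime,subvolid=5,subvol=/ 0 0
# is parsed into spec="/dev/sda1", file="/mnt/btr_pool", vfstype="btrfs",
# with MNTOPS = { rw => 1, relatime => 1, subvolid => 5, subvol => '/' }.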
push @mounts, \%line;
}
2016-04-18 16:40:49 +02:00
# TRACE(Data::Dumper->Dump([\@mounts], ["mounts"])) if($do_dumper);
2016-03-30 15:32:28 +02:00
return \@mounts;
}
sub system_realpath($)
{
my $vol = shift // die;
my $path = $vol->{PATH} // die;
my @quiet = ($loglevel < 3) ? ('-q') : ();
2016-06-18 13:04:43 +02:00
my $ret = run_cmd(cmd => [ qw(readlink), '-e', @quiet, $path ],
2016-05-10 15:51:44 +02:00
rsh => vinfo_rsh($vol),
2016-03-30 15:32:28 +02:00
non_destructive => 1,
);
return undef unless(defined($ret));
2016-04-02 14:10:35 +02:00
unless($ret =~ /^($file_match)$/) {
2016-03-30 15:32:28 +02:00
ERROR "Failed to parse output of `realpath` for \"$vol->{PRINT}\": \"$ret\"";
return undef;
}
2016-04-02 14:10:35 +02:00
my $realpath = $1; # untaint argument
DEBUG "Real path for \"$vol->{PRINT}\" is: $realpath";
return $realpath;
2016-03-30 15:32:28 +02:00
}
2016-04-14 18:46:35 +02:00
sub system_mkdir($)
{
my $vol = shift // die;
my $path = $vol->{PATH} // die;
INFO "Creating directory: $vol->{PRINT}/";
my $ret = run_cmd(cmd => [ qw(mkdir), '-p', $path ],
2016-05-10 15:51:44 +02:00
rsh => vinfo_rsh($vol),
2016-04-14 18:46:35 +02:00
);
action("mkdir",
vinfo_prefixed_keys("target", $vol),
status => ($dryrun ? "DRYRUN" : (defined($ret) ? "success" : "ERROR")),
);
return undef unless(defined($ret));
return 1;
}
2016-03-30 15:32:28 +02:00
sub btrfs_mountpoint($)
{
my $vol = shift // die;
DEBUG "Resolving btrfs mount point for: $vol->{PRINT}";
my $host = $vol->{HOST} || "localhost";
my $mounts = $fstab_cache{$host};
TRACE "fstab_cache " . ($mounts ? "HIT" : "MISS") . ": $host";
# get real path
2016-03-30 23:43:41 +02:00
my $path = $realpath_cache{$vol->{URL}};
unless($path) {
2016-03-30 15:32:28 +02:00
$path = system_realpath($vol);
2016-03-30 23:43:41 +02:00
$realpath_cache{$vol->{URL}} = $path;
2016-03-30 15:32:28 +02:00
}
return (undef, undef, undef) unless($path);
unless($mounts) {
$mounts = [];
my $all_mounts = system_list_mounts($vol);
foreach my $mnt (@$all_mounts) {
if($mnt->{vfstype} ne 'btrfs') {
TRACE "non-btrfs mount point: $mnt->{spec} $mnt->{file} $mnt->{vfstype}";
next;
}
my $file = $mnt->{file} // die;
unless($file =~ /^$file_match$/) {
WARN "Skipping non-parseable file in btrfs mounts of $host: \"$file\"";
next;
}
2016-04-14 18:21:00 +02:00
TRACE "btrfs mount point (spec=$mnt->{spec}, subvolid=" . ($mnt->{MNTOPS}->{subvolid} // '<undef>') . "): $file";
2016-03-30 15:32:28 +02:00
push @$mounts, $mnt;
}
$fstab_cache{$host} = $mounts;
}
# find longest match
$path .= '/' unless($path =~ /\/$/); # correctly handle root path="/"
my $len = 0;
my $longest_match;
foreach(@$mounts) {
my $mnt_path = $_->{file};
$mnt_path .= '/' unless($mnt_path =~ /\/$/); # correctly handle root path="/"
$longest_match = $_ if((length($mnt_path) > $len) && ($path =~ /^\Q$mnt_path\E/));
}
unless($longest_match) {
DEBUG "No btrfs mount point found for: $vol->{PRINT}";
return (undef, undef, undef);
}
2016-04-14 18:21:00 +02:00
DEBUG "Found btrfs mount point for \"$vol->{PRINT}\": $longest_match->{file} (subvolid=" . ($longest_match->{MNTOPS}->{subvolid} // '<undef>') . ")";
2016-03-30 15:32:28 +02:00
return ($longest_match->{file}, $path, $longest_match->{MNTOPS}->{subvolid});
}
2016-03-15 11:21:59 +01:00
sub btr_tree($$)
2015-10-23 14:43:36 +02:00
{
2016-03-15 11:21:59 +01:00
my $vol = shift;
my $vol_root_id = shift || die;
2016-04-14 18:21:00 +02:00
die unless($vol_root_id >= 5);
2016-03-15 11:21:59 +01:00
# NOTE: we need an ID (provided by btrfs_subvolume_show()) in order
# to determine the anchor to our root path (since the subvolume path
# output of "btrfs subvolume list" is ambigous, and the uuid of the
# btrfs root node cannot be resolved).
2015-10-23 14:43:36 +02:00
2016-03-15 11:21:59 +01:00
# man btrfs-subvolume:
# Also every btrfs filesystem has a default subvolume as its initially
# top-level subvolume, whose subvolume id is 5(FS_TREE).
my %tree = ( id => 5,
is_root => 1,
SUBTREE => []
);
my %id = ( 5 => \%tree );
2015-10-23 14:43:36 +02:00
2016-03-15 11:21:59 +01:00
$tree{TREE_ROOT} = \%tree;
2016-04-05 16:37:23 +02:00
$tree{ID_HASH} = \%id;
2015-10-23 14:43:36 +02:00
2016-03-15 11:21:59 +01:00
my $node_list = btrfs_subvolume_list($vol);
return undef unless(ref($node_list) eq "ARRAY");
2016-04-05 16:37:23 +02:00
my $vol_root;
2016-02-29 23:19:55 +01:00
2016-03-15 11:21:59 +01:00
TRACE "btr_tree: processing subvolume list of: $vol->{PRINT}";
2016-02-29 23:19:55 +01:00
2016-04-05 16:37:23 +02:00
# check if we already know this tree
if((scalar @$node_list) && $uuid_cache{$node_list->[0]->{uuid}}) {
TRACE "uuid_cache HIT: $node_list->[0]->{uuid}";
$vol_root = $uuid_cache{$node_list->[0]->{uuid}}->{TREE_ROOT}->{ID_HASH}->{$vol_root_id};
die "Duplicate UUID on different file systems" unless($vol_root);
TRACE "btr_tree: returning already parsed tree at id=$vol_root->{id}";
return $vol_root;
}
# fill ID_HASH and uuid_cache
2016-04-12 17:50:12 +02:00
my $gen_max = 0;
2016-03-15 11:21:59 +01:00
foreach my $node (@$node_list)
{
2016-04-12 17:50:12 +02:00
die unless($node->{id} >= 0);
2016-03-15 11:21:59 +01:00
die if exists($id{$node->{id}});
$node->{SUBTREE} //= [];
$id{$node->{id}} = $node;
$uuid_cache{$node->{uuid}} = $node;
2016-04-12 17:50:12 +02:00
$gen_max = $node->{gen} if($node->{gen} > $gen_max);
2015-10-23 14:43:36 +02:00
}
2016-04-12 17:50:12 +02:00
$tree{GEN_MAX} = $gen_max;
2016-03-15 11:21:59 +01:00
# note: it is possible that id < top_level, e.g. after restoring
foreach my $node (@$node_list)
2015-10-23 14:43:36 +02:00
{
2016-03-15 11:21:59 +01:00
# set SUBTREE / TOP_LEVEL node
die unless exists($id{$node->{top_level}});
my $top_level = $id{$node->{top_level}};
2015-10-23 14:43:36 +02:00
2016-03-15 11:21:59 +01:00
push(@{$top_level->{SUBTREE}}, $node);
$node->{TOP_LEVEL} = $top_level;
$node->{TREE_ROOT} = \%tree;
# "path" always starts with set REL_PATH
my $rel_path = $node->{path};
if($node->{top_level} != 5) {
die unless($rel_path =~ s/^$top_level->{path}\///);
}
$node->{REL_PATH} = $rel_path; # relative to {TOP_LEVEL}->{path}
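# Example: a node with path="rootfs/snapshots/home.20160410" whose top_level
# node has path="rootfs" gets REL_PATH="snapshots/home.20160410"; nodes
# directly below id 5 keep their full "path" as REL_PATH.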
2016-04-19 13:06:31 +02:00
add_btrbk_filename_info($node);
2016-03-15 11:21:59 +01:00
$vol_root = $node if($vol_root_id == $node->{id});
2015-10-23 14:43:36 +02:00
}
2016-03-15 11:21:59 +01:00
unless($vol_root) {
if($vol_root_id == 5) {
$vol_root = \%tree;
2015-10-23 14:43:36 +02:00
}
else {
2016-03-15 11:21:59 +01:00
ERROR "Failed to resolve tree root for: " . ($vol->{PRINT} // $vol->{id});
2015-10-23 14:43:36 +02:00
return undef;
}
}
2016-03-15 11:21:59 +01:00
TRACE "btr_tree: returning tree at id=$vol_root->{id}";
2016-04-28 13:03:15 +02:00
VINFO($vol_root, "node") if($loglevel >=4);
2015-10-23 14:43:36 +02:00
2016-03-15 11:21:59 +01:00
return $vol_root;
2015-10-23 14:43:36 +02:00
}
2016-04-12 17:50:12 +02:00
sub btr_tree_inject_node
{
my $top_node = shift;
my $detail = shift;
my $rel_path = shift;
my $subtree = $top_node->{SUBTREE} // die;
my $tree_root = $top_node->{TREE_ROOT};
$tree_inject_id -= 1;
$tree_root->{GEN_MAX} += 1;
2016-04-13 22:04:53 +02:00
my $uuid = sprintf("${fake_uuid_prefix}%012u", -($tree_inject_id));
2016-04-12 17:50:12 +02:00
my $node = {
%$detail, # make a copy
TREE_ROOT => $top_node->{TREE_ROOT},
SUBTREE => [],
TOP_LEVEL => $top_node,
REL_PATH => $rel_path,
INJECTED => 1,
id => $tree_inject_id,
uuid => $uuid,
gen => $tree_root->{GEN_MAX},
cgen => $tree_root->{GEN_MAX},
};
push(@$subtree, $node);
$uuid_cache{$uuid} = $node;
$tree_root->{ID_HASH}->{$tree_inject_id} = $node;
return $node;
}
2016-04-07 14:34:51 +02:00
sub _fs_path
{
my $node = shift // die;
return '<BTRFS_ROOT>' if($node->{is_root});
return _fs_path($node->{TOP_LEVEL}) . '/' . $node->{REL_PATH};
}
2016-03-15 11:21:59 +01:00
sub _is_child_of
2015-10-20 19:07:08 +02:00
{
2016-03-15 11:21:59 +01:00
my $node = shift;
my $uuid = shift;
foreach(@{$node->{SUBTREE}}) {
return 1 if($_->{uuid} eq $uuid);
return 1 if(_is_child_of($_, $uuid));
}
return 0;
}
2015-10-20 19:07:08 +02:00
2016-03-15 11:21:59 +01:00
sub _fill_url_cache
{
my $node = shift;
my $abs_path = shift;
2016-04-03 16:24:38 +02:00
my $node_subdir = shift;
2016-03-15 11:21:59 +01:00
# TRACE "_fill_url_cache: $abs_path";
2015-10-20 19:07:08 +02:00
2016-03-15 11:21:59 +01:00
# traverse tree from given node and update tree cache
2016-03-30 21:55:02 +02:00
$url_cache{$abs_path} = $node unless(defined($node_subdir));
2016-03-15 11:21:59 +01:00
foreach(@{$node->{SUBTREE}}) {
2016-03-30 21:55:02 +02:00
my $rel_path = $_->{REL_PATH};
if(defined($node_subdir)) {
2016-04-03 16:24:38 +02:00
next unless($rel_path =~ s/^\Q$node_subdir\E\///);
2016-03-30 21:55:02 +02:00
}
_fill_url_cache($_, $abs_path . '/' . $rel_path, undef);
2015-10-20 19:07:08 +02:00
}
2016-03-15 11:21:59 +01:00
return undef;
}
2015-10-20 19:07:08 +02:00
2016-03-30 21:55:02 +02:00
sub _get_longest_match
{
my $node = shift;
my $path = shift;
my $check_path = shift; # MUST have a trailing slash
$path .= '/' unless($path =~ /\/$/); # correctly handle root path="/"
return undef unless($check_path =~ /^\Q$path\E/);
foreach(@{$node->{SUBTREE}}) {
my $ret = _get_longest_match($_, $path . $_->{REL_PATH}, $check_path);
return $ret if($ret);
}
return { node => $node,
path => $path };
}
2016-03-15 11:21:59 +01:00
# reverse path lookup
sub get_cached_url_by_uuid($)
{
my $uuid = shift;
my @result;
while(my ($url, $node) = each(%url_cache)) {
next if($node->{is_root});
next unless($node->{uuid} eq $uuid);
push @result, $url;
}
return @result;
2015-10-20 19:07:08 +02:00
}
2016-03-15 11:21:59 +01:00
sub vinfo($;$)
2014-12-12 12:32:04 +01:00
{
2016-03-15 11:21:59 +01:00
my $url = shift // die;
my $config = shift;
my %info;
2014-12-13 13:52:43 +01:00
2016-04-25 15:10:00 +02:00
my ($url_prefix, $path) = check_url($url);
die "invalid url: $url" unless(defined($path));
2016-05-10 15:51:44 +02:00
my $print = $path;
2016-04-25 15:10:00 +02:00
my $name = $path;
2016-03-15 11:21:59 +01:00
$name =~ s/^.*\///;
2016-04-25 15:10:00 +02:00
$name = '/' if($name eq "");
2015-01-17 14:55:46 +01:00
2016-05-10 15:51:44 +02:00
my $host = undef;
2016-04-25 15:10:00 +02:00
if($url_prefix) {
2016-05-10 15:51:44 +02:00
$host = $url_prefix;
2016-04-25 15:10:00 +02:00
die unless($host =~ s/^ssh:\/\///);
2016-05-10 15:51:44 +02:00
$print = "$host:$path";
2014-12-12 12:32:04 +01:00
}
2016-05-10 15:51:44 +02:00
return {
HOST => $host,
NAME => $name,
PATH => $path,
PRINT => $print,
URL => $url_prefix . $path,
URL_PREFIX => $url_prefix,
CONFIG => $config,
2016-03-30 21:55:02 +02:00
}
}
2016-05-10 15:51:44 +02:00
sub vinfo_child($$;$)
2015-01-20 19:18:38 +01:00
{
2016-03-15 11:21:59 +01:00
my $parent = shift || die;
my $rel_path = shift // die;
2016-05-10 15:51:44 +02:00
my $config = shift; # override parent config
2016-03-15 11:21:59 +01:00
my $name = $rel_path;
2016-04-03 20:46:29 +02:00
my $subvol_dir = "";
$subvol_dir = $1 if($name =~ s/^(.*)\///);
2016-03-30 21:55:02 +02:00
my $vinfo = {
2016-05-10 15:51:44 +02:00
HOST => $parent->{HOST},
2016-03-15 11:21:59 +01:00
NAME => $name,
PATH => "$parent->{PATH}/$rel_path",
PRINT => "$parent->{PRINT}/$rel_path",
2016-05-10 15:51:44 +02:00
URL => "$parent->{URL}/$rel_path",
2016-03-30 21:55:02 +02:00
URL_PREFIX => $parent->{URL_PREFIX},
2016-03-15 11:21:59 +01:00
SUBVOL_PATH => $rel_path,
2016-04-03 20:46:29 +02:00
SUBVOL_DIR => $subvol_dir, # SUBVOL_PATH=SUBVOL_DIR/NAME
2016-05-10 15:51:44 +02:00
CONFIG => $config // $parent->{CONFIG},
2016-03-30 21:55:02 +02:00
};
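# Example: vinfo_child($parent, "btrbk_snapshots/home.20160410") yields
# NAME="home.20160410", SUBVOL_DIR="btrbk_snapshots",
# SUBVOL_PATH="btrbk_snapshots/home.20160410", while PATH/PRINT/URL are the
# parent values with "/btrbk_snapshots/home.20160410" appended.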
2015-01-20 19:18:38 +01:00
2016-04-19 13:06:31 +02:00
# TRACE "vinfo_child: created from \"$parent->{PRINT}\": $info{PRINT}";
return $vinfo;
}
2016-05-11 20:15:46 +02:00
sub vinfo_rsh($;@)
2016-05-10 15:51:44 +02:00
{
my $vinfo = shift || die;
2016-05-11 20:15:46 +02:00
my %opts = @_;
2016-05-10 15:51:44 +02:00
my $host = $vinfo->{HOST};
return undef unless(defined($host));
my $config = $vinfo->{CONFIG};
die unless($config);
my $ssh_port = config_key($config, "ssh_port");
my $ssh_user = config_key($config, "ssh_user");
my $ssh_identity = config_key($config, "ssh_identity");
2016-05-11 20:15:46 +02:00
my $ssh_compression = $opts{disable_compression} ? undef : config_key($config, "ssh_compression");
2016-05-10 15:51:44 +02:00
my $ssh_cipher_spec = config_key($config, "ssh_cipher_spec") // "default";
my @ssh_options;
push(@ssh_options, '-p', $ssh_port) if($ssh_port ne "default");
push(@ssh_options, '-C') if($ssh_compression);
push(@ssh_options, '-c', $ssh_cipher_spec) if($ssh_cipher_spec ne "default");
if($ssh_identity) {
push(@ssh_options, '-i', $ssh_identity);
} else {
WARN_ONCE "No SSH identity provided (option ssh_identity is not set) for: " . ($vinfo->{CONFIG}->{url} // $vinfo->{PRINT});
}
return ['ssh', @ssh_options, $ssh_user . '@' . $host ];
}
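# Example (hostname and identity file are assumed): for HOST="backup.example.com"
# with ssh_identity="/etc/btrbk/ssh/id_rsa" and all other ssh options left at
# their defaults, this returns something like:
#   [ 'ssh', '-i', '/etc/btrbk/ssh/id_rsa', 'root@backup.example.com' ]
# assuming ssh_user defaults to "root"; -p/-C/-c are only added for
# non-default port, compression and cipher settings.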
2016-04-19 13:06:31 +02:00
sub add_btrbk_filename_info($;$)
{
my $node = shift;
my $btrbk_raw_file = shift;
my $name = $node->{REL_PATH};
return undef unless(defined($name));
2016-04-21 13:27:54 +02:00
# NOTE: unless the long-iso file format is encountered, the timestamp is interpreted in the local timezone.
2016-04-20 22:45:11 +02:00
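# Illustrative filenames (basenames assumed): "home.20160410" and
# "home.20160410T0800" carry no zone information and are parsed with
# timelocal() below, while a long-iso name like "home.20160410T080000+0200"
# carries an offset ($+{zz}) and is parsed with timegm(), then corrected in
# the offset handling further down.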
2016-04-19 13:06:31 +02:00
$name =~ s/^(.*)\///;
2016-04-25 20:49:17 +02:00
my $btrbk_raw;
2016-04-20 22:45:11 +02:00
if($btrbk_raw_file && ($name =~ /^(?<name>$file_match)$timestamp_postfix_match$raw_postfix_match$/)) {
2016-04-25 20:49:17 +02:00
$btrbk_raw = {
2016-04-20 22:45:11 +02:00
received_uuid => $+{received_uuid} // die,
remote_parent_uuid => $+{parent_uuid} // '-',
encrypt => $+{encrypt} // "",
compress => $+{compress} // "",
incomplete => $+{incomplete} ? 1 : 0,
};
2016-04-06 15:24:47 +02:00
}
2016-04-20 22:45:11 +02:00
elsif((not $btrbk_raw_file) && ($name =~ /^(?<name>$file_match)$timestamp_postfix_match$/)) { ; }
2016-04-06 15:24:47 +02:00
else {
2016-04-20 22:45:11 +02:00
return undef;
2016-04-06 15:24:47 +02:00
}
2016-04-20 22:45:11 +02:00
$name = $+{name} // die;
2016-04-21 13:27:54 +02:00
my @tm = ( ($+{ss} // 0), ($+{mm} // 0), ($+{hh} // 0), $+{DD}, ($+{MM} - 1), ($+{YYYY} - 1900) );
my $NN = $+{NN} // 0;
my $zz = $+{zz};
2016-04-20 22:45:11 +02:00
my $time;
2016-04-25 21:05:46 +02:00
if(defined($zz)) {
eval_quiet { $time = timegm(@tm); };
} else {
eval_quiet { $time = timelocal(@tm); };
}
unless(defined($time)) {
2016-04-20 22:45:11 +02:00
WARN "Illegal timestamp on subvolume \"$node->{REL_PATH}\", ignoring";
2016-04-21 13:27:54 +02:00
# WARN "$@"; # sadly Time::Local croaks, which also prints the line number from here.
2016-04-20 22:45:11 +02:00
return undef;
}
2016-04-21 13:27:54 +02:00
# handle ISO 8601 time offset
if(defined($zz)) {
my $offset;
if($zz eq 'Z') {
$offset = 0; # Zulu time == UTC
}
elsif($zz =~ /^([+-])([0-9][0-9])([0-9][0-9])$/) {
$offset = ( $3 * 60 ) + ( $2 * 60 * 60 );
$offset *= -1 if($1 eq '-');
}
else {
WARN "Failed to parse time offset on subvolume \"$node->{REL_PATH}\", ignoring";
return undef;
}
$time -= $offset;
}
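# Example: for zz="+0200", offset = 00*60 + 02*60*60 = 7200 seconds;
# subtracting it from the timegm() result converts e.g.
# 2016-04-10T08:00:00+0200 to the correct UTC epoch (06:00:00Z).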
2016-04-20 22:45:11 +02:00
$node->{BTRBK_BASENAME} = $name;
$node->{BTRBK_DATE} = [ $time, $NN ];
2016-04-25 20:49:17 +02:00
$node->{BTRBK_RAW} = $btrbk_raw if($btrbk_raw);
2016-04-21 13:27:54 +02:00
return $node;
2015-01-20 19:18:38 +01:00
}
2016-03-30 21:55:02 +02:00
sub vinfo_init_root($;@)
2015-01-25 13:36:07 +01:00
{
2015-04-16 12:00:04 +02:00
my $vol = shift || die;
2016-03-30 21:55:02 +02:00
my %opts = @_;
2016-03-15 11:21:59 +01:00
my $tree_root;
my @fill_cache;
2015-10-19 22:10:08 +02:00
2016-03-15 11:21:59 +01:00
# use cached info if present
$tree_root = $url_cache{$vol->{URL}};
2016-03-16 19:21:48 +01:00
TRACE "url_cache " . ($tree_root ? "HIT" : "MISS") . ": URL=$vol->{URL}";
2016-03-15 11:21:59 +01:00
unless($tree_root) {
2016-03-30 23:43:41 +02:00
if(my $real_path = $realpath_cache{$vol->{URL}}) {
my $real_url = $vol->{URL_PREFIX} . $real_path;
2016-03-15 11:21:59 +01:00
$tree_root = $url_cache{$real_url};
2016-03-16 19:21:48 +01:00
TRACE "url_cache " . ($tree_root ? "HIT" : "MISS") . ": REAL_URL=$real_url";
2015-10-19 22:10:08 +02:00
}
}
2015-01-25 13:36:07 +01:00
2016-03-30 21:55:02 +02:00
# TODO: replace the subvolume_show part as soon as resolve_subdir stuff has stabilized
2016-03-15 11:21:59 +01:00
unless($tree_root) {
# url_cache miss, read the subvolume detail
my $detail = btrfs_subvolume_show($vol);
2016-03-30 21:55:02 +02:00
if($detail) {
2016-03-30 23:43:41 +02:00
my $real_path = $realpath_cache{$vol->{URL}};
2016-03-30 21:55:02 +02:00
push @fill_cache, $vol->{URL};
2016-03-30 23:43:41 +02:00
push @fill_cache, $vol->{URL_PREFIX} . $real_path if($real_path && (not $url_cache{$vol->{URL_PREFIX} . $real_path}));
2016-03-30 21:55:02 +02:00
# check uuid_cache
if($detail->{uuid}) {
$tree_root = $uuid_cache{$detail->{uuid}};
TRACE "uuid_cache " . ($tree_root ? "HIT" : "MISS") . ": UUID=$detail->{uuid}";
}
unless($tree_root) {
# cache miss, read the fresh tree
$tree_root = btr_tree($vol, $detail->{id});
}
2016-03-15 11:21:59 +01:00
}
2016-03-30 21:55:02 +02:00
elsif($opts{resolve_subdir}) {
# $vol is not a subvolume, read btrfs tree from mount point
my ($mnt_path, $real_path, $id) = btrfs_mountpoint($vol);
2016-04-14 18:21:00 +02:00
return undef unless($mnt_path && $real_path);
2016-03-30 21:55:02 +02:00
my $mnt_tree_root = $url_cache{$vol->{URL_PREFIX} . $mnt_path};
unless($mnt_tree_root) {
# read btrfs tree for the mount point
2016-05-10 15:51:44 +02:00
my $mnt_vol = vinfo($vol->{URL_PREFIX} . $mnt_path, $vol->{CONFIG});
2016-04-14 18:21:00 +02:00
unless($id) {
DEBUG "No subvolid provided in btrfs mounts for: $mnt_path";
# old kernels don't have subvolid=NN in /proc/self/mounts, read it with btrfs-progs
my $detail = btrfs_subvolume_show($mnt_vol);
return undef unless($detail);
$id = $detail->{id} || die;
}
2016-03-30 21:55:02 +02:00
$mnt_tree_root = btr_tree($mnt_vol, $id);
TRACE "url_cache fill: $mnt_vol->{PRINT}";
_fill_url_cache($mnt_tree_root, $mnt_vol->{URL});
}
2015-04-14 02:17:17 +02:00
2016-03-30 21:55:02 +02:00
# find longest match in tree
my $ret = _get_longest_match($mnt_tree_root, $mnt_path, $real_path) // die;
my $node_subdir = $real_path;
2016-04-03 16:24:38 +02:00
die unless($node_subdir =~ s/^\Q$ret->{path}\E//); # NOTE: $ret->{path} has trailing slash!
$node_subdir =~ s/\/+$//;
# NODE_SUBDIR: if set, then PATH points to a regular (non-subvolume) directory.
# in other words, "PATH=<path_to_subvolume>/NODE_SUBDIR"
$vol->{NODE_SUBDIR} = $node_subdir if($node_subdir ne '');
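# Example: if "/mnt/btr_pool/data" is the closest containing subvolume of
# PATH="/mnt/btr_pool/data/sub/dir", then NODE_SUBDIR becomes "sub/dir".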
2016-03-30 21:55:02 +02:00
$tree_root = $ret->{node};
TRACE "url_cache fill: $vol->{PRINT}" . ($vol->{NODE_SUBDIR} ? " (subdir=$vol->{NODE_SUBDIR})" : "");
_fill_url_cache($tree_root, $vol->{URL}, $vol->{NODE_SUBDIR});
}
else {
return undef;
2016-03-15 11:21:59 +01:00
}
2015-06-07 11:52:39 +02:00
}
2016-03-15 11:21:59 +01:00
return undef unless($tree_root);
2015-06-07 11:52:39 +02:00
2016-03-15 11:21:59 +01:00
# fill cache if needed
foreach (@fill_cache) {
2016-03-16 19:21:48 +01:00
TRACE "url_cache fill: $_";
2016-03-15 11:21:59 +01:00
_fill_url_cache($tree_root, $_);
2015-04-23 16:19:34 +02:00
}
2016-03-15 11:21:59 +01:00
$vol->{node} = $tree_root;
return $tree_root;
2015-01-26 17:23:37 +01:00
}
2016-03-15 11:21:59 +01:00
sub _vinfo_subtree_list
2016-01-13 14:29:44 +01:00
{
2016-03-15 11:21:59 +01:00
my $tree = shift;
my $vinfo_parent = shift;
2016-04-03 16:24:38 +02:00
my $node_subdir_filter = shift;
2016-03-15 11:21:59 +01:00
my $list = shift // [];
my $path_prefix = shift // "";
2016-04-03 20:46:29 +02:00
my $depth = shift // 0;
2016-01-13 14:29:44 +01:00
2016-04-03 20:46:29 +02:00
foreach my $node (@{$tree->{SUBTREE}}) {
my $rel_path = $node->{REL_PATH};
2016-04-03 16:24:38 +02:00
if(defined($node_subdir_filter)) {
next unless($rel_path =~ s/^\Q$node_subdir_filter\E\///);
2016-03-30 21:55:02 +02:00
}
my $path = $path_prefix . $rel_path;
2016-03-15 11:21:59 +01:00
my $vinfo = vinfo_child($vinfo_parent, $path);
2016-04-03 20:46:29 +02:00
$vinfo->{node} = $node;
# add some additional information to vinfo
# NOTE: make sure to also set these flags in the raw tree read-in!
$vinfo->{subtree_depth} = $depth;
2016-04-06 15:24:47 +02:00
if(($depth == 0) && ($rel_path !~ /\//)) {
$vinfo->{direct_leaf} = 1;
2016-04-19 13:06:31 +02:00
$vinfo->{btrbk_direct_leaf} = 1 if(exists($node->{BTRBK_BASENAME}));
2016-04-03 20:46:29 +02:00
}
2016-01-13 14:29:44 +01:00
2016-04-03 20:46:29 +02:00
push(@$list, $vinfo);
_vinfo_subtree_list($node, $vinfo_parent, undef, $list, $path . '/', $depth + 1);
2016-01-13 14:29:44 +01:00
}
2016-03-15 11:21:59 +01:00
return $list;
2016-01-13 14:29:44 +01:00
}
2016-04-03 20:46:29 +02:00
sub vinfo_subvol_list($;@)
2014-12-12 10:39:40 +01:00
{
2015-04-16 12:00:04 +02:00
my $vol = shift || die;
2016-04-03 20:46:29 +02:00
my %opts = @_;
2016-03-15 14:46:25 +01:00
2016-04-12 17:50:12 +02:00
# use fake subvolume list if present
2016-04-03 20:46:29 +02:00
my $subvol_list = $vol->{SUBVOL_LIST};
2016-03-15 14:46:25 +01:00
2016-04-03 20:46:29 +02:00
unless($subvol_list) {
# recurse into tree from $vol->{node}, returns arrayref of vinfo
$subvol_list = _vinfo_subtree_list($vol->{node}, $vol, $vol->{NODE_SUBDIR});
}
if($opts{sort}) {
if($opts{sort} eq 'path') {
2016-04-12 17:50:12 +02:00
my @sorted = sort { $a->{SUBVOL_PATH} cmp $b->{SUBVOL_PATH} } @$subvol_list;
$subvol_list = \@sorted;
2016-04-03 20:46:29 +02:00
}
2016-04-12 17:50:12 +02:00
else { die; }
2016-04-03 20:46:29 +02:00
}
return $subvol_list;
2016-03-15 11:21:59 +01:00
}
2015-03-13 12:12:37 +01:00
2016-01-13 14:29:44 +01:00
2016-03-15 11:21:59 +01:00
sub vinfo_subvol($$)
{
my $vol = shift || die;
my $subvol_path = shift // die;
foreach (@{vinfo_subvol_list($vol)}) {
return $_ if($_->{SUBVOL_PATH} eq $subvol_path);
2016-01-13 14:29:44 +01:00
}
2016-03-15 11:21:59 +01:00
return undef;
2014-12-14 19:23:02 +01:00
}
2015-01-14 14:10:41 +01:00
2016-04-19 13:06:31 +02:00
sub vinfo_inject_child($$$)
2016-04-12 17:50:12 +02:00
{
my $vinfo = shift;
my $vinfo_child = shift;
my $detail = shift;
2016-04-13 22:04:53 +02:00
my $node;
my $subvol_list = $vinfo->{SUBVOL_LIST};
2016-04-19 13:06:31 +02:00
my $node_subdir = defined($vinfo->{NODE_SUBDIR}) ? $vinfo->{NODE_SUBDIR} . '/' : "";
my $rel_path = $node_subdir . $vinfo_child->{SUBVOL_PATH};
2016-04-13 22:04:53 +02:00
if($subvol_list)
{
# insert to a SUBVOL_LIST (raw targets)
$tree_inject_id -= 1;
my $uuid = sprintf("${fake_uuid_prefix}%012u", -($tree_inject_id));
$node = {
%$detail,
2016-04-19 13:06:31 +02:00
REL_PATH => $rel_path,
2016-04-13 22:04:53 +02:00
INJECTED => 1,
id => $tree_inject_id,
uuid => $uuid,
};
2016-04-25 20:49:17 +02:00
return undef unless(add_btrbk_filename_info($node, 1));
2016-04-19 13:06:31 +02:00
2016-04-13 22:04:53 +02:00
# NOTE: make sure to have all the flags set by _vinfo_subtree_list()
$vinfo_child->{subtree_depth} = 0;
$vinfo_child->{direct_leaf} = 1;
$vinfo_child->{btrbk_direct_leaf} = 1;
$uuid_cache{$uuid} = $node;
push @$subvol_list, $vinfo_child;
}
else {
2016-04-14 18:24:11 +02:00
my $node_subdir = defined($vinfo->{NODE_SUBDIR}) ? $vinfo->{NODE_SUBDIR} . '/' : "";
2016-04-19 13:06:31 +02:00
$node = btr_tree_inject_node($vinfo->{node}, $detail, $rel_path);
2016-04-22 20:25:30 +02:00
return undef unless(add_btrbk_filename_info($node));
2016-04-13 22:04:53 +02:00
}
2016-04-12 17:50:12 +02:00
$vinfo_child->{node} = $node;
$url_cache{$vinfo_child->{URL}} = $node;
2016-04-14 18:24:11 +02:00
TRACE "vinfo_inject_child: injected child id=$node->{id} to $vinfo->{PRINT}";
2016-04-12 17:50:12 +02:00
return $vinfo_child;
}
2016-03-15 11:21:59 +01:00
# returns hash: ( $prefix_{url,path,host,name,subvol_path,rsh} => value, ... )
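# Example (values assumed): vinfo_prefixed_keys("target", $vinfo) for an ssh
# target roughly returns:
#   ( target             => 'backup.example.com:/mnt/backup',
#     target_url         => 'ssh://backup.example.com/mnt/backup',
#     target_path        => '/mnt/backup',
#     target_host        => 'backup.example.com',
#     target_name        => 'backup',
#     target_subvol_path => undef,
#     target_subvol      => '/mnt/backup',
#     target_rsh         => 'ssh -i ... root@backup.example.com' )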
sub vinfo_prefixed_keys($$)
2014-12-14 21:29:22 +01:00
{
2016-03-15 11:21:59 +01:00
my $prefix = shift // die;
my $vinfo = shift;
return () unless($vinfo);
my %ret;
if($prefix) {
$ret{$prefix} = $vinfo->{PRINT};
$prefix .= '_';
2015-01-03 21:25:46 +01:00
}
2016-03-15 11:21:59 +01:00
foreach (qw( URL PATH HOST NAME SUBVOL_PATH )) {
$ret{$prefix . lc($_)} = $vinfo->{$_};
2015-01-03 21:25:46 +01:00
}
2016-03-15 11:21:59 +01:00
$ret{$prefix . "subvol"} = $vinfo->{PATH};
2016-05-10 15:51:44 +02:00
my $rsh = vinfo_rsh($vinfo);
$ret{$prefix . "rsh"} = ($rsh ? join(" ", @$rsh) : undef),
2016-03-15 11:21:59 +01:00
return %ret;
2014-12-14 21:29:22 +01:00
}
2014-12-14 19:23:02 +01:00
2016-05-10 15:51:44 +02:00
sub vinfo_assign_config($)
2015-04-23 16:19:34 +02:00
{
2016-03-15 11:21:59 +01:00
my $vinfo = shift || die;
2016-05-10 15:51:44 +02:00
my $config = $vinfo->{CONFIG} || die;
2016-03-15 11:21:59 +01:00
die if($config->{VINFO});
$config->{VINFO} = $vinfo;
2015-04-23 16:19:34 +02:00
}
2016-03-15 11:21:59 +01:00
sub vinfo_subsection($$;$)
2015-04-23 16:19:34 +02:00
{
2016-03-15 11:21:59 +01:00
# if config: must have SUBSECTION key
# if vinfo: must have CONFIG key
my $config_or_vinfo = shift || die;
my $context = shift || die;
my $include_aborted = shift;
my $config_list;
my $vinfo_check;
if(exists($config_or_vinfo->{SUBSECTION})) {
# config
$config_list = $config_or_vinfo->{SUBSECTION};
2015-04-23 16:19:34 +02:00
}
2016-03-15 11:21:59 +01:00
else {
# vinfo
$config_list = $config_or_vinfo->{CONFIG}->{SUBSECTION};
die unless($config_or_vinfo->{CONFIG}->{VINFO} == $config_or_vinfo); # check back reference
}
# for now be paranoid and check all contexts
my @ret;
foreach (@$config_list) {
die unless($_->{CONTEXT} eq $context);
next if((not $include_aborted) && $_->{ABORTED});
die unless($_->{VINFO});
die unless($_->{VINFO}->{CONFIG});
die unless($_->{VINFO} == $_->{VINFO}->{CONFIG}->{VINFO}); # check all back references
push @ret, $_->{VINFO};
}
return @ret;
# much simpler implementation, without checks
#return map { $_->{ABORTED} ? () : $_->{VINFO} } @$config_list;
2015-04-23 16:19:34 +02:00
}
2016-04-13 14:47:38 +02:00
sub get_snapshot_children($$;$$)
2015-04-23 16:19:34 +02:00
{
2016-03-15 11:21:59 +01:00
my $sroot = shift || die;
my $svol = shift // die;
2016-04-13 14:47:38 +02:00
my $subvol_dir = shift // "";
my $btrbk_basename = shift;
2016-03-15 11:21:59 +01:00
my @ret;
2015-04-23 16:19:34 +02:00
2016-03-15 11:21:59 +01:00
my $sroot_subvols = vinfo_subvol_list($sroot);
foreach (@$sroot_subvols) {
next unless($_->{node}{readonly});
next unless($_->{node}{parent_uuid} eq $svol->{node}{uuid});
2016-04-13 14:47:38 +02:00
if(defined($btrbk_basename) &&
2016-04-19 13:06:31 +02:00
( (not exists($_->{node}{BTRBK_BASENAME})) ||
2016-04-13 14:47:38 +02:00
($_->{SUBVOL_DIR} ne $subvol_dir) ||
2016-04-19 13:06:31 +02:00
($_->{node}{BTRBK_BASENAME} ne $btrbk_basename)) ) {
2016-04-13 14:47:38 +02:00
TRACE "get_snapshot_children: child does not match btrbk filename scheme, skipping: $_->{PRINT}";
next;
}
2016-03-15 11:21:59 +01:00
TRACE "get_snapshot_children: found: $_->{PRINT}";
push(@ret, $_);
}
2016-04-13 14:47:38 +02:00
$subvol_dir .= '/' if($subvol_dir);
DEBUG "Found " . scalar(@ret) . " snapshot children of \"$svol->{PRINT}\" in: $sroot->{PRINT}" . (defined($btrbk_basename) ? "/$subvol_dir$btrbk_basename.*" : "");
2016-03-15 11:21:59 +01:00
return @ret;
}
2015-04-23 16:19:34 +02:00
2016-04-07 14:34:51 +02:00
sub get_receive_targets($$;@)
2016-03-15 11:21:59 +01:00
{
my $droot = shift || die;
my $src_vol = shift || die;
2016-04-07 14:34:51 +02:00
my %opts = @_;
2016-04-19 13:42:18 +02:00
my $droot_subvols = $opts{droot_subvol_list} // vinfo_subvol_list($droot);
2016-04-16 16:05:57 +02:00
my @ret;
2016-04-07 14:34:51 +02:00
my $unexpected_count = 0;
2015-10-20 22:05:02 +02:00
2016-04-05 16:11:46 +02:00
if($src_vol->{node}{is_root}) {
DEBUG "Skip search for targets: source subvolume is btrfs root: $src_vol->{PRINT}";
2016-04-16 16:05:57 +02:00
return @ret;
2016-04-05 16:11:46 +02:00
}
unless($src_vol->{node}{readonly}) {
DEBUG "Skip search for targets: source subvolume is not read-only: $src_vol->{PRINT}";
2016-04-16 16:05:57 +02:00
return @ret;
2016-04-05 16:11:46 +02:00
}
2016-04-15 01:22:19 +02:00
# find matches by comparing uuid / received_uuid
my $uuid = $src_vol->{node}{uuid};
2016-04-16 16:05:57 +02:00
my $received_uuid = $src_vol->{node}{received_uuid};
$received_uuid = undef if($received_uuid eq '-');
TRACE "get_receive_targets: src_vol=\"$src_vol->{PRINT}\", droot=\"$droot->{PRINT}\"";
2016-04-15 01:22:19 +02:00
foreach (@$droot_subvols) {
next unless($_->{node}{readonly});
my $matched = undef;
if($_->{node}{received_uuid} eq $uuid) {
$matched = 'by-uuid';
}
elsif(defined($received_uuid) && ($_->{node}{received_uuid} eq $received_uuid)) {
$matched = 'by-received_uuid';
}
2016-04-16 16:05:57 +02:00
next unless($matched);
TRACE "get_receive_targets: $matched: Found receive target: $_->{SUBVOL_PATH}";
push(@{$opts{seen}}, $_) if($opts{seen});
2016-04-19 13:06:31 +02:00
if($opts{exact_match} && !exists($_->{node}{BTRBK_RAW})) {
2016-04-16 16:05:57 +02:00
if($_->{direct_leaf} && ($_->{NAME} eq $src_vol->{NAME})) {
TRACE "get_receive_targets: exact_match: $_->{SUBVOL_PATH}";
2016-04-05 16:11:46 +02:00
}
2016-04-16 16:05:57 +02:00
else {
TRACE "get_receive_targets: $matched: skip non-exact match: $_->{PRINT}";
WARN "Receive target of \"$src_vol->{PRINT}\" exists at unexpected location: $_->{PRINT}" if($opts{warn});
next;
}
}
push(@ret, $_);
}
2016-04-16 19:25:46 +02:00
TRACE "get_receive_targets: " . scalar(@ret) . " receive targets in \"$droot->{PRINT}/\" for: $src_vol->{PRINT}";
2016-04-16 16:05:57 +02:00
return @ret;
}
sub get_receive_targets_fsroot($$@)
{
my $droot = shift // die;
my $src_vol = shift // die;
my %opts = @_;
my $id = $src_vol->{node}{id};
my $uuid = $src_vol->{node}{uuid};
my $received_uuid = $src_vol->{node}{received_uuid};
$received_uuid = undef if(defined($received_uuid) && ($received_uuid eq '-'));
my @unexpected;
my @exclude;
@exclude = map { $_->{node}{id} } @{$opts{exclude}} if($opts{exclude});
TRACE "get_receive_target_fsroot: uuid=$uuid, received_uuid=" . ($received_uuid // '-') . " exclude id={ " . join(', ', @exclude) . " }";
# search in filesystem for matching received_uuid
foreach my $node (
grep({ (not $_->{is_root}) &&
(($_->{received_uuid} eq $uuid) ||
(defined($received_uuid) && ($_->{received_uuid} eq $received_uuid)))
} values(%{$droot->{node}{TREE_ROOT}{ID_HASH}}) ) )
{
next if(scalar grep($_ == $node->{id}, @exclude));
push @unexpected, $node;
if($opts{warn}) {
my $text;
my @url = get_cached_url_by_uuid($node->{uuid});
if(scalar(@url)) {
$text = vinfo($url[0])->{PRINT};
} else {
$text = '"' . _fs_path($node) . "\" (in filesystem at \"$droot->{PRINT}\")";
2016-04-07 14:34:51 +02:00
}
2016-04-16 16:05:57 +02:00
WARN "Receive target of \"$src_vol->{PRINT}\" exists at unexpected location: $text";
2016-04-07 14:34:51 +02:00
}
2016-03-15 11:21:59 +01:00
}
2016-04-19 18:57:22 +02:00
return @unexpected;
2016-03-15 11:21:59 +01:00
}
2015-10-20 22:05:02 +02:00
2016-03-15 11:21:59 +01:00
sub get_latest_common($$$;$)
{
my $sroot = shift || die;
my $svol = shift // die;
my $droot = shift || die;
2016-04-05 23:37:08 +02:00
my $snapshot_dir = shift; # if not set, skip search for btrbk basename (set to empty string to enable at current dir)
my $sroot_subvol_list = vinfo_subvol_list($sroot);
2015-10-20 22:05:02 +02:00
2016-04-05 23:37:08 +02:00
TRACE "get_latest_common: resolving latest common for subvolume: $svol->{PATH} (sroot=$sroot->{PRINT}, droot=$droot->{PRINT}, snapdir=\"" . ($snapshot_dir // '<undef>') . "\")";
2016-04-05 22:01:17 +02:00
my @candidate;
if($svol->{node}{readonly}) {
2016-04-05 23:37:08 +02:00
if($svol->{node}{parent_uuid} ne '-') {
# add readonly parent
@candidate = grep { $_->{node}{readonly} && ($_->{node}{uuid} eq $svol->{node}{parent_uuid}) } @$sroot_subvol_list;
die "multiple parents for $svol->{node}{parent_uuid}" if(scalar(@candidate) > 1);
TRACE "get_latest_common: subvolume has a read-only parent, add parent candidate" if(scalar(@candidate) > 0);
2016-05-03 13:19:42 +02:00
# add snapshots with same parent_uuid (siblings)
my @siblings = grep { $_->{node}{readonly} && ($_->{node}{parent_uuid} eq $svol->{node}{parent_uuid}) } @$sroot_subvol_list;
my @siblings_older = grep { $_->{node}{cgen} <= $svol->{node}{cgen} } @siblings;
my @siblings_newer = grep { $_->{node}{cgen} > $svol->{node}{cgen} } @siblings;
push @candidate, sort { $b->{node}{cgen} <=> $a->{node}{cgen} } @siblings_older; # older first, descending by cgen
push @candidate, sort { $a->{node}{cgen} <=> $b->{node}{cgen} } @siblings_newer; # then newer, ascending by cgen
TRACE "get_latest_common: subvolume has siblings (same parent_uuid), add " . scalar(@siblings_older) . " older and " . scalar(@siblings_newer) . " newer (by cgen) candidates";
2016-04-05 23:37:08 +02:00
}
2016-04-19 13:06:31 +02:00
if(defined($snapshot_dir) && exists($svol->{node}{BTRBK_BASENAME})) {
2016-04-05 23:37:08 +02:00
# add subvolumes in same directory matching btrbk file name scheme
2016-04-19 13:06:31 +02:00
my @naming_match = grep { $_->{node}{readonly} && exists($_->{node}{BTRBK_BASENAME}) && ($_->{SUBVOL_DIR} eq $snapshot_dir) && ($_->{node}{BTRBK_BASENAME} eq $svol->{node}{BTRBK_BASENAME}) } @$sroot_subvol_list;
my @naming_match_older = grep { cmp_date($_->{node}{BTRBK_DATE}, $svol->{node}{BTRBK_DATE}) < 0 } @naming_match;
my @naming_match_newer = grep { cmp_date($_->{node}{BTRBK_DATE}, $svol->{node}{BTRBK_DATE}) > 0 } @naming_match;
push @candidate, sort { cmp_date($b->{node}{BTRBK_DATE}, $a->{node}{BTRBK_DATE}) } @naming_match_older;
push @candidate, sort { cmp_date($a->{node}{BTRBK_DATE}, $b->{node}{BTRBK_DATE}) } @naming_match_newer;
TRACE "get_latest_common: subvolume has btrbk naming scheme, add " . scalar(@naming_match_older) . " older and " . scalar(@naming_match_newer) . " newer (by file suffix) candidates with scheme: $sroot->{PRINT}/$snapshot_dir/$svol->{node}{BTRBK_BASENAME}.*";
2016-04-05 22:01:17 +02:00
}
}
2016-04-05 23:37:08 +02:00
else
{
2016-04-05 22:01:17 +02:00
@candidate = sort { $b->{node}{cgen} <=> $a->{node}{cgen} } get_snapshot_children($sroot, $svol);
TRACE "get_latest_common: subvolume is read-write, add " . scalar(@candidate) . " snapshot children, sorted by cgen: $svol->{PATH}";
2016-04-05 23:37:08 +02:00
if(defined($snapshot_dir)) {
2016-04-06 15:24:47 +02:00
# add subvolumes in same directory matching btrbk file name scheme (using $svol->{NAME} as basename)
2016-04-19 13:06:31 +02:00
my @naming_match = grep { $_->{node}{readonly} && exists($_->{node}{BTRBK_BASENAME}) && ($_->{SUBVOL_DIR} eq $snapshot_dir) && ($_->{node}{BTRBK_BASENAME} eq $svol->{NAME}) } @$sroot_subvol_list;
push @candidate, sort { cmp_date($b->{node}{BTRBK_DATE}, $a->{node}{BTRBK_DATE}) } @naming_match;
2016-04-05 23:37:08 +02:00
TRACE "get_latest_common: snapshot_dir is set, add " . scalar(@naming_match) . " candidates with scheme: $sroot->{PRINT}/$snapshot_dir/$svol->{NAME}.*";
}
2016-04-05 22:01:17 +02:00
}
2016-04-19 13:42:18 +02:00
my $droot_subvol_list = vinfo_subvol_list($droot); # cache subvol list
2016-04-05 22:01:17 +02:00
foreach my $child (@candidate) {
2016-04-05 23:37:08 +02:00
if($child->{node}{id} == $svol->{node}{id}) {
TRACE "get_latest_common: skip self: $child->{PRINT}";
2016-03-15 11:21:59 +01:00
next;
2015-06-02 22:16:33 +02:00
}
2016-04-19 13:42:18 +02:00
my @receive_targets = get_receive_targets($droot, $child, droot_subvol_list => $droot_subvol_list);
2016-04-12 17:50:12 +02:00
if(scalar @receive_targets) {
DEBUG("Latest common subvolumes for: $svol->{PRINT}: src=$child->{PRINT} target=$receive_targets[0]->{PRINT}");
return ($child, $receive_targets[0]);
2016-03-15 11:21:59 +01:00
}
2015-06-02 22:16:33 +02:00
}
2016-04-05 23:37:08 +02:00
DEBUG("No common subvolumes of \"$svol->{PRINT}\" found in src=\"$sroot->{PRINT}/\", target=\"$droot->{PRINT}/\"");
2016-03-15 11:21:59 +01:00
return (undef, undef);
2015-06-02 22:16:33 +02:00
}
2016-03-15 11:21:59 +01:00
sub get_latest_snapshot_child($$)
2015-06-02 22:16:33 +02:00
{
2016-03-15 11:21:59 +01:00
my $sroot = shift || die;
my $svol = shift // die;
my $latest = undef;
my $gen = -1;
foreach (get_snapshot_children($sroot, $svol)) {
if($_->{node}{cgen} > $gen) {
$latest = $_;
$gen = $_->{node}{cgen};
}
}
if($latest) {
DEBUG "Latest snapshot child for \"$svol->{PRINT}#$svol->{node}{gen}\" is: $latest->{PRINT}#$latest->{node}{cgen}";
} else {
DEBUG "No latest snapshots found for: $svol->{PRINT}";
}
return $latest;
}
2015-06-02 22:16:33 +02:00
2016-03-15 11:21:59 +01:00
sub check_file($$;$$)
{
my $file = shift // die;
my $accept = shift || die;
my $key = shift; # only for error text
my $config_file = shift; # only for error text
2015-06-02 22:16:33 +02:00
2016-04-25 14:23:15 +02:00
my $match = $file_match;
$match = $glob_match if($accept->{wildcards});
2016-03-16 13:25:19 +01:00
2016-04-25 14:23:15 +02:00
if($file =~ /^($match)$/) {
$file = $1;
2016-03-15 11:21:59 +01:00
if($accept->{absolute}) {
2016-04-25 14:23:15 +02:00
unless($file =~ /^\//) {
2016-03-15 11:21:59 +01:00
ERROR "Only absolute files allowed for option \"$key\" in \"$config_file\" line $.: $file" if($key && $config_file);
return undef;
2016-01-14 18:02:53 +01:00
}
2016-03-15 11:21:59 +01:00
}
elsif($accept->{relative}) {
2016-04-25 14:23:15 +02:00
if($file =~ /^\//) {
2016-03-15 11:21:59 +01:00
ERROR "Only relative files allowed for option \"$key\" in \"$config_file\" line $.: $file" if($key && $config_file);
return undef;
2016-01-14 18:02:53 +01:00
}
}
2016-03-15 11:21:59 +01:00
elsif($accept->{name_only}) {
2016-04-25 14:23:15 +02:00
if($file =~ /\//) {
2016-03-15 11:21:59 +01:00
ERROR "Option \"$key\" is not a valid file name in \"$config_file\" line $.: $file" if($key && $config_file);
return undef;
}
}
else {
die("accept_type must contain either 'relative' or 'absolute'");
}
2015-06-02 22:16:33 +02:00
}
2016-03-15 11:21:59 +01:00
else {
ERROR "Ambiguous file for option \"$key\" in \"$config_file\" line $.: $file" if($key && $config_file);
return undef;
2015-06-02 22:16:33 +02:00
}
2016-03-15 11:21:59 +01:00
# check directory traversal
if(($file =~ /^\.\.$/) || ($file =~ /^\.\.\//) || ($file =~ /\/\.\.\//) || ($file =~ /\/\.\.$/)) {
ERROR "Illegal directory traversal for option \"$key\" in \"$config_file\" line $.: $file" if($key && $config_file);
return undef;
}
2016-04-25 14:23:15 +02:00
$file =~ s/\/+/\//g; # sanitize multiple slash
$file =~ s/\/\.\//\//g; # sanitize "/./" -> "/"
$file =~ s/\/$// unless($file eq '/'); # remove trailing slash
return $file;
}
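# Example: check_file("/mnt//backup/./btrbk/", { absolute => 1 }) returns the
# sanitized path "/mnt/backup/btrbk" (assuming the input passes $file_match),
# while a relative or traversal path such as "../evil" yields undef.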
sub check_url($;$$)
{
my $url = shift // die;
my $key = shift; # only for error text
my $config_file = shift; # only for error text
my $url_prefix = "";
2016-04-25 16:07:40 +02:00
if($url =~ s/^(ssh:\/\/($ip_addr_match|$host_name_match))\//\//) {
$url_prefix = $1;
}
elsif($url =~ s/^($ip_addr_match|$host_name_match)://) {
# convert "my.host.com:/my/path" to ssh url
$url_prefix = "ssh://" . $1;
}
2016-04-25 14:23:15 +02:00
return ( $url_prefix, check_file($url, { absolute => 1 }, $key, $config_file) );
2016-03-15 11:21:59 +01:00
}
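# Examples (host and path assumed): check_url("ssh://backup.example.com/mnt/backup")
# and check_url("backup.example.com:/mnt/backup") both return
# ("ssh://backup.example.com", "/mnt/backup"); a plain local path
# "/mnt/backup" returns ("", "/mnt/backup").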
2015-09-29 19:43:11 +02:00
2015-06-02 22:16:33 +02:00
2016-03-15 11:21:59 +01:00
sub config_key($$;@)
2014-12-14 19:23:02 +01:00
{
2016-03-15 11:21:59 +01:00
my $config = shift || die;
my $key = shift || die;
my %opts = @_;
my $orig_config = $config;
$config = $config->{CONFIG} if($config->{CONFIG}); # accept vinfo for $config
2015-04-21 14:53:31 +02:00
2016-03-15 11:21:59 +01:00
if(exists($config_override{$key})) {
TRACE "config_key: OVERRIDE key=$key to value=" . ($config_override{$key} // "<undef>");
return $config_override{$key};
}
2015-04-21 14:53:31 +02:00
2016-03-15 11:21:59 +01:00
while(not exists($config->{$key})) {
# note: while all config keys exist in root context (at least with default values),
# we also allow fake configs (CONTEXT="cmdline") which have no PARENT.
return undef unless($config->{PARENT});
$config = $config->{PARENT};
}
my $retval = $config->{$key};
$retval = $opts{prefix} . $retval if(defined($opts{prefix}) && defined($retval));
$retval .= $opts{postfix} if(defined($opts{postfix}) && defined($retval));
return $retval;
}
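# Example: config_key($subvolume_vinfo, "ssh_port") first honors a
# command-line override in %config_override, then looks in the subvolume
# section, then walks up via PARENT to the volume and root sections (where
# the defaults are set by init_config()).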
2016-03-11 14:55:22 +01:00
2015-03-13 17:54:08 +01:00
2016-03-15 11:21:59 +01:00
sub config_preserve_hash($$)
{
my $config = shift || die;
my $prefix = shift || die;
2016-04-12 11:47:28 +02:00
my $ret = config_key($config, $prefix . "_preserve") // {};
2016-04-12 19:55:29 +02:00
my $preserve_min = config_key($config, $prefix . "_preserve_min");
if(defined($preserve_min)) {
$ret->{min} = $preserve_min; # used for raw schedule output
2016-04-12 20:35:57 +02:00
if(($preserve_min eq 'all') || ($preserve_min eq 'latest')) {
$ret->{min_q} = $preserve_min;
2016-04-12 11:47:28 +02:00
}
2016-04-12 19:55:29 +02:00
elsif($preserve_min =~ /^([0-9]+)([hdwmy])$/) {
$ret->{min_n} = $1;
$ret->{min_q} = $2;
2016-04-12 11:47:28 +02:00
}
else { die; }
}
$ret->{dow} = config_key($config, "preserve_day_of_week");
return $ret;
2016-03-15 11:21:59 +01:00
}
2015-03-13 17:54:08 +01:00
2016-03-15 11:21:59 +01:00
2016-05-11 20:15:46 +02:00
sub config_compress_hash($$)
{
my $config = shift || die;
my $config_key = shift || die;
my $compress_key = config_key($config, $config_key);
return undef unless($compress_key);
return {
key => $compress_key,
level => config_key($config, $config_key . "_level"),
threads => config_key($config, $config_key . "_threads"),
};
}
2016-03-15 11:21:59 +01:00
sub config_dump_keys($;@)
{
my $config = shift || die;
my %opts = @_;
my @ret;
my $maxlen = 0;
$config = $config->{CONFIG} if($config->{CONFIG}); # accept vinfo for $config
foreach my $key (sort keys %config_options)
2014-12-14 19:23:02 +01:00
{
2016-03-15 11:21:59 +01:00
my $val;
2016-04-18 20:42:53 +02:00
next if($config_options{$key}->{deprecated});
2016-03-15 11:21:59 +01:00
if($opts{resolve}) {
$val = config_key($config, $key);
} else {
next unless exists($config->{$key});
$val = $config->{$key};
}
if($opts{skip_defaults}) {
if(defined($config_options{$key}->{default}) && defined($val)) {
next if($val eq $config_options{$key}->{default});
}
if((not defined($config_options{$key}->{default})) && (not (defined($val)))) {
next; # both undef, skip
}
}
2016-04-18 20:42:53 +02:00
if(defined($val)) {
if($config_options{$key}->{accept_preserve_matrix}) {
$val = format_preserve_matrix($val);
}
if(ref($val) eq "ARRAY") {
my $val2 = join(',', @$val);
$val = $val2;
}
2016-03-15 11:21:59 +01:00
}
2016-04-18 20:42:53 +02:00
$val //= exists($config->{$key}) ? "no" : "<unset>";
2016-03-15 11:21:59 +01:00
my $len = length($key);
$maxlen = $len if($len > $maxlen);
push @ret, { key => $key, val => $val, len => $len };
2015-06-17 12:42:29 +02:00
}
2016-03-15 11:21:59 +01:00
# print as table
return map { ($opts{prefix} // "") . $_->{key} . (' ' x (1 + $maxlen - $_->{len})) . ' ' . $_->{val} } @ret;
}
2014-12-14 15:34:55 +01:00
2015-03-13 11:20:47 +01:00
2016-03-15 11:21:59 +01:00
sub append_config_option($$$$;$)
{
my $config = shift;
my $key = shift;
my $value = shift;
my $context = shift;
my $config_file = shift; # only for error text
my $config_file_statement = $config_file ? " in \"$config_file\" line $." : "";
2015-03-13 11:20:47 +01:00
2016-03-15 11:21:59 +01:00
my $opt = $config_options{$key};
# accept only keys listed in %config_options
unless($opt) {
ERROR "Unknown option \"$key\"" . $config_file_statement;
return undef;
}
if($opt->{context} && !grep(/^$context$/, @{$opt->{context}})) {
ERROR "Option \"$key\" is only allowed in " . join(" or ", map("\"$_\"", @{$opt->{context}})) . " context" . $config_file_statement;
return undef;
}
2016-03-16 13:25:19 +01:00
if($opt->{deny_glob_context} && $config->{GLOB_CONTEXT}) {
ERROR "Option \"$key\" is not allowed on section with wildcards" . $config_file_statement;
return undef;
}
2016-03-15 11:21:59 +01:00
if(grep(/^\Q$value\E$/, @{$opt->{accept}})) {
TRACE "option \"$key=$value\" found in accept list";
}
elsif($opt->{accept_numeric} && ($value =~ /^[0-9]+$/)) {
TRACE "option \"$key=$value\" is numeric, accepted";
}
elsif($opt->{accept_file})
{
# be very strict about file options, for security's sake
2016-04-25 14:23:15 +02:00
$value = check_file($value, $opt->{accept_file}, $key, $config_file);
return undef unless(defined($value));
2015-03-13 11:20:47 +01:00
2016-03-15 11:21:59 +01:00
TRACE "option \"$key=$value\" is a valid file, accepted";
$value = "no" if($value eq "."); # maps to undef later
2015-04-21 14:53:31 +02:00
}
2016-03-15 11:21:59 +01:00
elsif($opt->{accept_regexp}) {
my $match = $opt->{accept_regexp};
if($value =~ m/$match/) {
TRACE "option \"$key=$value\" matched regexp, accepted";
2016-03-10 05:26:43 +01:00
}
else {
2016-03-15 11:21:59 +01:00
ERROR "Value \"$value\" failed input validation for option \"$key\"" . $config_file_statement;
2016-03-10 05:26:43 +01:00
return undef;
}
2014-12-14 22:03:31 +01:00
}
2016-04-14 14:15:12 +02:00
elsif($opt->{accept_preserve_matrix}) {
my %preserve;
my $s = ' ' . $value;
while($s =~ s/\s+(\*|[0-9]+)([hdwmyHDWMY])//) {
my $n = $1;
my $q = lc($2); # qw( h d w m y )
$n = 'all' if($n eq '*');
if(exists($preserve{$q})) {
ERROR "Value \"$value\" failed input validation for option \"$key\": multiple definitions of '$q'" . $config_file_statement;
return undef;
}
$preserve{$q} = $n;
}
unless($s eq "") {
ERROR "Value \"$value\" failed input validation for option \"$key\"" . $config_file_statement;
return undef;
}
2016-04-18 16:40:49 +02:00
TRACE "adding preserve matrix $context context:" . Data::Dumper->new([\%preserve], [ $key ])->Indent(0)->Pad(' ')->Quotekeys(0)->Pair('=>')->Dump() if($do_dumper);
2016-04-14 14:15:12 +02:00
$config->{$key} = \%preserve;
return $config;
}
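# Example: a value of "20d 10w *m" yields %preserve = ( d => 20, w => 10, m => 'all' ).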
2016-03-15 11:21:59 +01:00
else
{
ERROR "Unsupported value \"$value\" for option \"$key\"" . $config_file_statement;
return undef;
2016-03-14 16:39:13 +01:00
}
2016-03-15 11:21:59 +01:00
if($opt->{split}) {
$value = [ split($opt->{split}, $value) ];
TRACE "splitted option \"$key\": " . join(',', @$value);
}
2016-03-14 16:39:13 +01:00
2016-05-03 14:34:04 +02:00
if($opt->{require_bin} && (not check_exe($opt->{require_bin}))) {
WARN "Found option \"$key\", but required executable \"$opt->{require_bin}\" does not exist on your system. Please install \"$opt->{require_bin}\".";
WARN "Ignoring option \"$key\"" . $config_file_statement;
2016-03-23 11:58:23 +01:00
$value = "no";
}
2016-03-15 11:21:59 +01:00
if($opt->{deprecated}) {
2016-04-14 13:01:28 +02:00
if(my $warn_msg = ($opt->{deprecated}->{$value}->{warn} || $opt->{deprecated}->{DEFAULT}->{warn})) {
WARN "Found deprecated option \"$key $value\"" . $config_file_statement . ": " . $warn_msg;
}
2016-04-13 17:13:03 +02:00
if($opt->{deprecated}->{$value}->{ABORT} || $opt->{deprecated}->{DEFAULT}->{ABORT}) {
ERROR 'Deprecated (incompatible) option found, refusing to continue';
return undef;
}
2016-04-14 13:01:28 +02:00
if($opt->{deprecated}->{$value}->{FAILSAFE_PRESERVE} || $opt->{deprecated}->{DEFAULT}->{FAILSAFE_PRESERVE}) {
unless($config_override{FAILSAFE_PRESERVE}) { # warn only once
WARN "Entering failsafe mode:";
WARN " - preserving ALL snapshots for ALL subvolumes";
WARN " - ignoring ALL targets (skipping backup creation)";
2016-04-16 21:08:07 +02:00
WARN " - please read \"doc/upgrade_to_v0.23.0.md\"";
2016-04-14 13:01:28 +02:00
$config_override{FAILSAFE_PRESERVE} = "Failsafe mode active (deprecated configuration)";
}
$config_override{snapshot_preserve_min} = 'all';
return $config;
}
2016-03-15 11:21:59 +01:00
my $replace_key = $opt->{deprecated}->{$value}->{replace_key};
my $replace_value = $opt->{deprecated}->{$value}->{replace_value};
if(defined($replace_key)) {
$key = $replace_key;
$value = $replace_value;
WARN "Using \"$key $value\"";
}
2016-03-11 14:55:22 +01:00
}
2016-03-15 11:21:59 +01:00
TRACE "adding option \"$key=$value\" to $context context";
$value = undef if($value eq "no"); # we don't want to check for "no" all the time
$config->{$key} = $value;
return $config;
2016-03-11 14:55:22 +01:00
}
2016-03-15 11:21:59 +01:00
sub parse_config_line($$$$$)
2014-12-14 19:23:02 +01:00
{
2016-03-15 11:21:59 +01:00
my ($file, $root, $cur, $key, $value) = @_;
2016-03-09 19:52:45 +01:00
2016-03-15 11:21:59 +01:00
if($key eq "volume")
{
$cur = $root;
TRACE "config: context forced to: $cur->{CONTEXT}";
2016-03-14 12:24:32 +01:00
2016-03-15 11:21:59 +01:00
# be very strict about file options, for security's sake
2016-04-25 14:23:15 +02:00
my ($url_prefix, $path) = check_url($value, $key, $file);
return undef unless(defined($path));
TRACE "config: adding volume \"$url_prefix$path\" to root context";
2016-03-15 11:21:59 +01:00
die unless($cur->{CONTEXT} eq "root");
2016-03-17 14:02:22 +01:00
my $volume = { CONTEXT => "volume",
PARENT => $cur,
SUBSECTION => [],
2016-04-25 14:23:15 +02:00
url => $url_prefix . $path,
2016-03-15 11:21:59 +01:00
};
push(@{$cur->{SUBSECTION}}, $volume);
$cur = $volume;
}
elsif($key eq "subvolume")
{
while($cur->{CONTEXT} ne "volume") {
if(($cur->{CONTEXT} eq "root") || (not $cur->{PARENT})) {
ERROR "Subvolume keyword outside volume context, in \"$file\" line $.";
return undef;
}
$cur = $cur->{PARENT} || die;
TRACE "config: context changed to: $cur->{CONTEXT}";
}
# be very strict about file options, for security's sake
2016-04-25 14:23:15 +02:00
my $rel_path = check_file($value, { relative => 1, wildcards => 1 }, $key, $file);
return undef unless(defined($rel_path));
2016-03-14 15:55:57 +01:00
2016-04-25 14:23:15 +02:00
TRACE "config: adding subvolume \"$rel_path\" to volume context: $cur->{url}";
my $snapshot_name = $rel_path;
2016-03-15 11:21:59 +01:00
$snapshot_name =~ s/^.*\///; # snapshot_name defaults to subvolume name
die unless($cur->{CONTEXT} eq "volume");
2016-03-17 14:02:22 +01:00
my $subvolume = { CONTEXT => "subvolume",
PARENT => $cur,
# SUBSECTION => [], # handled by target propagation
2016-04-25 14:23:15 +02:00
rel_path => $rel_path,
url => $cur->{url} . '/' . $rel_path,
2016-03-15 11:21:59 +01:00
snapshot_name => $snapshot_name,
};
2016-03-16 13:25:19 +01:00
$subvolume->{GLOB_CONTEXT} = 1 if($value =~ /\*/);
2016-03-15 11:21:59 +01:00
push(@{$cur->{SUBSECTION}}, $subvolume);
$cur = $subvolume;
}
elsif($key eq "target")
{
if($cur->{CONTEXT} eq "target") {
$cur = $cur->{PARENT} || die;
TRACE "config: context changed to: $cur->{CONTEXT}";
2016-03-14 15:55:57 +01:00
}
2016-03-15 11:21:59 +01:00
if($value =~ /^(\S+)\s+(\S+)$/)
{
2016-04-25 14:23:15 +02:00
my ($target_type, $url) = ($1, $2);
2016-03-15 11:21:59 +01:00
unless(grep(/^\Q$target_type\E$/, @config_target_types)) {
ERROR "Unknown target type \"$target_type\" in \"$file\" line $.";
return undef;
}
# be very strict about file options, for security's sake
2016-04-25 14:23:15 +02:00
my ($url_prefix, $path) = check_url($url, $key, $file);
return undef unless(defined($path));
2016-03-14 15:55:57 +01:00
2016-04-25 14:23:15 +02:00
TRACE "config: adding target \"$url_prefix$path\" (type=$target_type) to $cur->{CONTEXT} context" . ($cur->{url} ? ": $cur->{url}" : "");
2016-03-15 11:21:59 +01:00
my $target = { CONTEXT => "target",
PARENT => $cur,
target_type => $target_type,
2016-04-25 14:23:15 +02:00
url => $url_prefix . $path,
2016-03-15 11:21:59 +01:00
};
2016-03-17 14:02:22 +01:00
# NOTE: target sections are propagated to the appropriate SUBSECTION in _config_propagate_target()
$cur->{TARGET} //= [];
push(@{$cur->{TARGET}}, $target);
2016-03-15 11:21:59 +01:00
$cur = $target;
}
else
{
ERROR "Ambiguous target configuration, in \"$file\" line $.";
return undef;
2016-03-14 15:55:57 +01:00
}
2016-03-14 12:24:32 +01:00
}
2016-03-15 11:21:59 +01:00
else
{
return append_config_option($cur, $key, $value, $cur->{CONTEXT}, $file);
2016-03-10 05:26:43 +01:00
}
2015-03-13 11:44:04 +01:00
2016-03-15 11:21:59 +01:00
return $cur;
2016-03-14 16:39:13 +01:00
}
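# Illustrative configuration snippet (paths and host are assumed) showing how
# the volume/subvolume/target keywords handled above nest:
#
#   volume /mnt/btr_pool
#     subvolume home
#       target send-receive ssh://backup.example.com/mnt/btr_backup
#     subvolume data
#       target raw /mnt/raw_backup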
2016-03-17 14:02:22 +01:00
sub _config_propagate_target
{
my $cur = shift;
foreach my $subsection (@{$cur->{SUBSECTION}}) {
my @propagate_target;
foreach my $target (@{$cur->{TARGET}}) {
TRACE "propagating target \"$target->{url}\" from $cur->{CONTEXT} context to: $subsection->{CONTEXT} $subsection->{url}";
die if($target->{SUBSECTION});
# don't propagate if a target of same target_type and url already exists in subsection
if($subsection->{TARGET} &&
grep({ ($_->{url} eq $target->{url}) && ($_->{target_type} eq $target->{target_type}) } @{$subsection->{TARGET}}))
{
DEBUG "Skip propagation of \"target $target->{target_type} $target->{url}\" from $cur->{CONTEXT} context to \"$subsection->{CONTEXT} $subsection->{url}\": same target already exists";
next;
}
2016-04-24 15:59:17 +02:00
my %copy = ( %$target, PARENT => $subsection );
2016-03-17 14:02:22 +01:00
push @propagate_target, \%copy;
}
$subsection->{TARGET} //= [];
unshift @{$subsection->{TARGET}}, @propagate_target; # maintain config order: propagated targets go in front of already defined targets
if($subsection->{CONTEXT} eq "subvolume") {
# finally create missing SUBSECTION in subvolume context
die if($subsection->{SUBSECTION});
$subsection->{SUBSECTION} = $subsection->{TARGET};
}
else {
# recurse into SUBSECTION
_config_propagate_target($subsection);
}
}
delete $cur->{TARGET};
return $cur;
}
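# Example: with the snippet shown before this sub, a "target" line placed
# directly under "volume /mnt/btr_pool" would be copied into both subvolume
# subsections here, unless a target with the same type and url is already
# defined in that subsection.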
2016-03-15 16:54:54 +01:00
sub init_config(@)
{
2016-03-17 14:02:22 +01:00
my %config_root = ( CONTEXT => "root", SUBSECTION => [], @_ );
2016-03-15 16:54:54 +01:00
# set defaults
foreach (keys %config_options) {
next if $config_options{$_}->{deprecated}; # don't pollute hash with deprecated options
$config_root{$_} = $config_options{$_}->{default};
}
return \%config_root;
}
2016-03-15 11:21:59 +01:00
sub parse_config(@)
2016-03-14 16:39:13 +01:00
{
2016-03-15 11:21:59 +01:00
my @config_files = @_;
my $file = undef;
foreach(@config_files) {
TRACE "config: checking for file: $_";
if(-r "$_") {
$file = $_;
last;
}
2016-03-14 16:39:13 +01:00
}
2016-03-15 11:21:59 +01:00
unless($file) {
ERROR "Configuration file not found: " . join(', ', @config_files);
return undef;
2016-03-10 19:10:57 +01:00
}
2016-03-15 16:54:54 +01:00
my $root = init_config(SRC_FILE => $file);
2016-03-15 11:21:59 +01:00
my $cur = $root;
2016-03-10 19:10:57 +01:00
2016-03-15 11:21:59 +01:00
INFO "Using configuration: $file";
open(FILE, '<', $file) or die $!;
while (<FILE>) {
chomp;
next if /^\s*#/; # ignore comments
next if /^\s*$/; # ignore empty lines
TRACE "config: parsing line $. with context=$cur->{CONTEXT}: \"$_\"";
if(/^(\s*)([a-zA-Z_]+)\s+(.*)$/)
{
# NOTE: we do not perform checks on indentation!
my ($indent, $key, $value) = (length($1), lc($2), $3);
$value =~ s/\s*$//;
$cur = parse_config_line($file, $root, $cur, $key, $value);
unless(defined($cur)) {
# error, bail out
$root = undef;
last;
}
TRACE "line processed: new context=$cur->{CONTEXT}";
}
else
{
ERROR "Parse error in \"$file\" line $.";
$root = undef;
last;
}
2016-03-14 16:39:13 +01:00
}
2016-03-15 11:21:59 +01:00
close FILE || ERROR "Failed to close configuration file: $!";
2016-03-17 14:02:22 +01:00
_config_propagate_target($root) if($root);
2016-03-15 11:21:59 +01:00
return $root;
2015-04-21 14:53:31 +02:00
}
2016-03-08 15:25:35 +01:00
# sets $target->{CONFIG}->{ABORTED} on failure
2016-03-07 21:45:12 +01:00
# sets $target->{SUBVOL_RECEIVED}
2016-03-07 20:47:24 +01:00
sub macro_send_receive(@)
2015-03-31 19:07:33 +02:00
{
my %info = @_;
2016-03-01 21:49:59 +01:00
my $source = $info{source} || die;
2015-04-16 12:00:04 +02:00
my $target = $info{target} || die;
my $parent = $info{parent};
2016-03-07 20:47:24 +01:00
my $config_target = $target->{CONFIG};
die unless($config_target->{CONTEXT} eq "target");
2015-06-02 22:16:33 +02:00
my $target_type = $config_target->{target_type} || die;
2015-04-19 11:36:40 +02:00
my $incremental = config_key($config_target, "incremental");
2015-03-31 19:07:33 +02:00
2015-05-15 16:06:36 +02:00
# check for existing target subvolume
2016-03-01 21:49:59 +01:00
if(my $err_vol = vinfo_subvol($target, $source->{NAME})) {
2015-10-12 22:56:52 +02:00
ABORTED($config_target, "Target subvolume \"$err_vol->{PRINT}\" already exists");
2016-01-14 15:52:33 +01:00
$config_target->{UNRECOVERABLE} = "Please delete stray subvolume (\"btrbk clean\"): $err_vol->{PRINT}";
2016-03-01 21:49:59 +01:00
ERROR $config_target->{ABORTED} . ", aborting send/receive of: $source->{PRINT}";
2015-05-15 16:06:36 +02:00
ERROR $config_target->{UNRECOVERABLE};
$info{ERROR} = 1;
return undef;
}
2015-03-31 19:07:33 +02:00
if($incremental)
{
# create backup from latest common
2015-04-16 12:00:04 +02:00
if($parent) {
2016-04-19 19:36:58 +02:00
INFO "Creating incremental backup...";
2015-03-31 19:07:33 +02:00
}
elsif($incremental ne "strict") {
2016-04-19 19:36:58 +02:00
INFO "No common parent subvolume present, creating full backup...";
2015-03-31 19:07:33 +02:00
}
else {
2016-04-19 19:36:58 +02:00
WARN "Backup to $target->{PRINT} failed: no common parent subvolume found for \"$source->{PRINT}\", and option \"incremental\" is set to \"strict\"";
2015-03-31 20:36:10 +02:00
$info{ERROR} = 1;
2015-10-12 22:56:52 +02:00
ABORTED($config_target, "No common parent subvolume found, and option \"incremental\" is set to \"strict\"");
2015-03-31 19:07:33 +02:00
return undef;
}
}
else {
2016-04-19 19:36:58 +02:00
INFO "Creating full backup...";
2015-09-26 19:51:38 +02:00
$parent = undef;
2015-03-31 19:07:33 +02:00
delete $info{parent};
}
2015-06-02 22:16:33 +02:00
my $ret;
my $vol_received;
if($target_type eq "send-receive")
{
2016-03-23 11:58:23 +01:00
$ret = btrfs_send_receive($source, $target, $parent, \$vol_received, rate_limit => config_key($config_target, "rate_limit"));
2015-10-12 22:56:52 +02:00
ABORTED($config_target, "Failed to send/receive subvolume") unless($ret);
2015-06-02 22:16:33 +02:00
}
elsif($target_type eq "raw")
{
unless($dryrun) {
2016-03-01 21:49:59 +01:00
# make sure we know the source uuid
2016-04-13 22:04:53 +02:00
if($source->{node}{uuid} =~ /^$fake_uuid_prefix/) {
2016-03-01 21:49:59 +01:00
DEBUG "Fetching uuid of new subvolume: $source->{PRINT}";
2016-03-14 15:55:57 +01:00
my $detail = btrfs_subvolume_show($source);
2015-06-02 22:16:33 +02:00
die unless($detail->{uuid});
2016-03-15 14:46:25 +01:00
$source->{node}{uuid} = $detail->{uuid};
2016-04-15 02:38:41 +02:00
$uuid_cache{$detail->{uuid}} = $source->{node};
2015-06-02 22:16:33 +02:00
}
2015-05-09 16:00:41 +02:00
}
2015-06-02 22:16:33 +02:00
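    # Illustrative btrbk.conf snippet for encrypted raw targets (option
    # names as referenced below; keyring path and recipient are made-up
    # examples):
    #
    #   raw_target_encrypt  gpg
    #   gpg_keyring         /etc/btrbk/gpg/pubring.gpg
    #   gpg_recipient       backup@example.com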
my $encrypt = undef;
my $encrypt_type = config_key($config_target, "raw_target_encrypt");
if($encrypt_type) {
die unless($encrypt_type eq "gpg");
$encrypt = { type => $encrypt_type,
keyring => config_key($config_target, "gpg_keyring"),
recipient => config_key($config_target, "gpg_recipient"),
}
2015-05-09 16:00:41 +02:00
}
2016-03-01 21:49:59 +01:00
$ret = btrfs_send_to_file($source, $target, $parent, \$vol_received,
2016-05-11 20:15:46 +02:00
compress => config_compress_hash($config_target, "raw_target_compress"),
encrypt => $encrypt,
rate_limit => config_key($config_target, "rate_limit"),
2016-01-14 18:02:53 +01:00
);
2015-10-12 22:56:52 +02:00
ABORTED($config_target, "Failed to send subvolume to raw file") unless($ret);
2015-06-02 22:16:33 +02:00
}
else
{
die "Illegal target type \"$target_type\"";
}
2016-04-12 17:50:12 +02:00
# inject fake vinfo
2016-04-22 20:25:30 +02:00
if($ret) {
vinfo_inject_child($target, $vol_received, {
# NOTE: this is not necessarily the correct parent_uuid (on
# receive, btrfs-progs picks the uuid of the first (lowest id)
# matching possible parent), whereas the target_parent is the
# first from get_receive_targets().
#
# NOTE: the parent_uuid of an injected receive target is not used
# anywhere in btrbk at the time of writing
parent_uuid => $parent ? $info{latest_common_target}->{node}{uuid} : '-',
received_uuid => $source->{node}{received_uuid} eq '-' ? $source->{node}{uuid} : $source->{node}{received_uuid},
readonly => 1,
TARGET_TYPE => $target_type,
FORCE_PRESERVE => 'preserve forced: created just now',
});
}
2016-04-12 17:50:12 +02:00
2015-06-02 22:16:33 +02:00
# add info to $config->{SUBVOL_RECEIVED}
$info{received_type} = $target_type || die;
$info{received_subvolume} = $vol_received || die;
2016-03-07 21:45:12 +01:00
$target->{SUBVOL_RECEIVED} //= [];
push(@{$target->{SUBVOL_RECEIVED}}, \%info);
2015-06-02 22:16:33 +02:00
unless($ret) {
$info{ERROR} = 1;
2015-03-31 20:36:10 +02:00
return undef;
2015-03-31 19:07:33 +02:00
}
2015-06-02 22:16:33 +02:00
return 1;
2015-03-31 19:07:33 +02:00
}
2016-03-08 15:25:35 +01:00
# sets $result_vinfo->{CONFIG}->{ABORTED} on failure
# sets $result_vinfo->{SUBVOL_DELETED}
2016-04-03 20:46:29 +02:00
sub macro_delete($$$$$;@)
2016-03-02 00:03:54 +01:00
{
my $root_subvol = shift || die;
2016-04-03 20:46:29 +02:00
my $subvol_dir = shift // die;
my $subvol_basename = shift // die;
2016-03-08 15:25:35 +01:00
my $result_vinfo = shift || die;
2016-03-02 00:03:54 +01:00
my $schedule_options = shift || die;
2016-03-08 15:25:35 +01:00
my %delete_options = @_;
2016-04-03 20:46:29 +02:00
$subvol_dir =~ s/\/+$//;
2016-03-02 00:03:54 +01:00
my @schedule;
2016-03-10 05:26:43 +01:00
foreach my $vol (@{vinfo_subvol_list($root_subvol)}) {
2016-04-19 13:06:31 +02:00
unless($vol->{node}{BTRBK_DATE} &&
2016-04-03 20:46:29 +02:00
($vol->{SUBVOL_DIR} eq $subvol_dir) &&
2016-04-19 13:06:31 +02:00
($vol->{node}{BTRBK_BASENAME} eq $subvol_basename)) {
2016-03-02 00:03:54 +01:00
TRACE "Target subvolume does not match btrbk filename scheme, skipping: $vol->{PRINT}";
next;
}
push(@schedule, { value => $vol,
2016-03-08 18:22:58 +01:00
# name => $vol->{PRINT}, # only for logging
2016-04-19 13:06:31 +02:00
btrbk_date => $vol->{node}{BTRBK_DATE},
2016-04-12 17:50:12 +02:00
preserve => $vol->{node}{FORCE_PRESERVE},
2016-03-02 00:03:54 +01:00
});
}
my (undef, $delete) = schedule(
2016-04-22 20:51:31 +02:00
%$schedule_options,
2016-03-08 18:22:58 +01:00
schedule => \@schedule,
2016-04-22 20:51:31 +02:00
preserve_date_in_future => 1,
2016-03-02 00:03:54 +01:00
);
2016-03-08 15:25:35 +01:00
my $ret = btrfs_subvolume_delete($delete, %delete_options);
2016-03-02 00:03:54 +01:00
if(defined($ret)) {
2016-04-03 20:46:29 +02:00
$subvol_dir .= '/' if($subvol_dir ne "");
INFO "Deleted $ret subvolumes in: $root_subvol->{PRINT}/$subvol_dir$subvol_basename.*";
2016-03-08 15:25:35 +01:00
$result_vinfo->{SUBVOL_DELETED} //= [];
push @{$result_vinfo->{SUBVOL_DELETED}}, @$delete;
2016-03-02 00:03:54 +01:00
return $delete;
}
else {
2016-03-08 15:25:35 +01:00
ABORTED($result_vinfo, "Failed to delete subvolume");
2016-03-02 00:03:54 +01:00
return undef;
}
}
2016-04-16 01:09:17 +02:00
sub macro_archive_target($$$;$)
2016-04-07 15:33:32 +02:00
{
my $sroot = shift || die;
my $droot = shift || die;
my $snapshot_name = shift // die;
my $schedule_options = shift // {};
my @schedule;
# NOTE: this is pretty much the same as "resume missing"
2016-04-16 16:05:57 +02:00
my @unexpected_location;
2016-04-19 13:42:18 +02:00
my $droot_subvol_list = vinfo_subvol_list($droot); # cache subvol list for get_receive_targets()
2016-04-07 15:33:32 +02:00
foreach my $svol (@{vinfo_subvol_list($sroot, sort => 'path')})
{
next unless($svol->{node}{readonly});
2016-04-19 13:06:31 +02:00
next unless($svol->{btrbk_direct_leaf} && ($svol->{node}{BTRBK_BASENAME} eq $snapshot_name));
2016-04-07 15:33:32 +02:00
2016-04-16 16:05:57 +02:00
my $warning_seen = [];
2016-04-19 13:42:18 +02:00
my @receive_targets = get_receive_targets($droot, $svol, exact_match => 1, warn => 1, seen => $warning_seen, droot_subvol_list => $droot_subvol_list );
2016-04-16 16:05:57 +02:00
push @unexpected_location, get_receive_targets_fsroot($droot, $svol, exclude => $warning_seen, warn => 1); # warn if unexpected on fs
2016-04-07 15:33:32 +02:00
2016-04-16 16:05:57 +02:00
next if(scalar(@receive_targets));
2016-04-15 22:33:19 +02:00
DEBUG "Adding archive candidate: $svol->{PRINT}";
2016-04-07 15:33:32 +02:00
push @schedule, { value => $svol,
2016-04-19 13:06:31 +02:00
btrbk_date => $svol->{node}{BTRBK_DATE},
2016-04-15 22:33:19 +02:00
preserve => $svol->{node}{FORCE_PRESERVE},
2016-04-07 15:33:32 +02:00
};
}
2016-04-16 01:32:25 +02:00
2016-04-19 18:57:22 +02:00
if(scalar(@unexpected_location)) {
ABORTED($droot, "Receive targets of archive candidates exist at unexpected location");
WARN "Skipping archiving of \"$sroot->{PRINT}/${snapshot_name}.*\": $abrt";
return undef;
}
2016-04-07 15:33:32 +02:00
2016-04-16 19:25:46 +02:00
# add all present archives as informative_only: these are needed for correct results of schedule()
2016-04-19 13:42:18 +02:00
foreach my $dvol (@$droot_subvol_list)
2016-04-15 22:33:19 +02:00
{
2016-04-19 13:06:31 +02:00
next unless($dvol->{btrbk_direct_leaf} && ($dvol->{node}{BTRBK_BASENAME} eq $snapshot_name));
2016-04-15 22:33:19 +02:00
next unless($dvol->{node}{readonly});
push @schedule, { informative_only => 1,
value => $dvol,
2016-04-19 13:06:31 +02:00
btrbk_date => $dvol->{node}{BTRBK_DATE},
2016-04-15 22:33:19 +02:00
};
}
2016-04-07 15:33:32 +02:00
my ($preserve, undef) = schedule(
schedule => \@schedule,
preserve => config_preserve_hash($droot, "archive"),
result_preserve_action_text => 'archive',
result_delete_action_text => '',
%$schedule_options
);
my @archive = grep defined, @$preserve; # remove entries with no value from list (archive subvolumes)
my $archive_total = scalar @archive;
my $archive_success = 0;
foreach my $svol (@archive)
{
my ($latest_common_src, $latest_common_target) = get_latest_common($sroot, $svol, $droot, "");
if(macro_send_receive(source => $svol,
target => $droot,
parent => $latest_common_src,
latest_common_target => $latest_common_target,
))
{
$archive_success++;
}
else {
ERROR("Error while cloning, aborting");
last;
}
}
if($archive_total) {
INFO "Archived $archive_success/$archive_total subvolumes";
} else {
2016-04-19 19:36:58 +02:00
INFO "No missing archives found";
2016-04-07 15:33:32 +02:00
}
return $archive_success;
}
2016-04-05 22:01:17 +02:00
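# Compares two btrbk date arrayrefs of the form [ unix_time, NN ]
# (NN presumably being the counter distinguishing snapshots sharing the
# same timestamp); e.g. cmp_date([1461369600, 0], [1461369600, 1])
# returns -1 (illustrative values).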
sub cmp_date($$)
{
2016-04-20 22:45:11 +02:00
return (($_[0]->[0] <=> $_[1]->[0]) || # unix time
($_[0]->[1] <=> $_[1]->[1])); # NN
2016-04-05 22:01:17 +02:00
}
2015-04-02 15:53:53 +02:00
sub schedule(@)
2015-01-04 21:26:48 +01:00
{
my %args = @_;
2016-04-12 11:47:28 +02:00
my $schedule = $args{schedule} || die;
my $preserve = $args{preserve} || die;
2016-04-22 20:51:31 +02:00
my $preserve_date_in_future = $args{preserve_date_in_future};
2016-04-12 11:47:28 +02:00
my $results_list = $args{results};
my $result_hints = $args{result_hints} // {};
2016-04-14 15:39:50 +02:00
my $result_preserve_action_text = $args{result_preserve_action_text};
my $result_delete_action_text = $args{result_delete_action_text} // 'delete';
2016-04-12 11:47:28 +02:00
my $preserve_day_of_week = $preserve->{dow} || die;
2016-04-12 19:55:29 +02:00
my $preserve_min_n = $preserve->{min_n};
my $preserve_min_q = $preserve->{min_q};
2016-04-12 11:47:28 +02:00
my $preserve_hourly = $preserve->{h};
my $preserve_daily = $preserve->{d};
my $preserve_weekly = $preserve->{w};
my $preserve_monthly = $preserve->{m};
my $preserve_yearly = $preserve->{y};
DEBUG "Schedule: " . format_preserve_matrix($preserve, format => "debug_text");
2015-01-13 12:38:01 +01:00
2016-04-20 22:45:11 +02:00
# 0 1 2 3 4 5 6 7 8
# sec, min, hour, mday, mon, year, wday, yday, isdst
2015-04-02 16:24:13 +02:00
# sort the schedule, ascending by date
2016-04-15 22:33:19 +02:00
# regular entries come in front of informative_only
my @sorted_schedule = sort { cmp_date($a->{btrbk_date}, $b->{btrbk_date} ) ||
(($a->{informative_only} ? ($b->{informative_only} ? 0 : 1) : ($b->{informative_only} ? -1 : 0)))
} @$schedule;
2015-04-02 16:24:13 +02:00
2016-04-21 13:27:54 +02:00
DEBUG "Scheduler reference time: " . timestamp(\@tm_now, 'debug-iso');
2015-01-25 18:05:52 +01:00
# first, do our calendar calculations
2016-04-20 22:45:11 +02:00
# - weeks start on $preserve_day_of_week
2016-04-21 13:27:54 +02:00
# - leap hours are NOT taken into account for $delta_hours
my $now_h = timegm_nocheck( 0, 0, $tm_now[2], $tm_now[3], $tm_now[4], $tm_now[5] ); # use timelocal() here (and below) if you want to honor leap hours
my $now_d = timegm_nocheck( 0, 0, 0, $tm_now[3], $tm_now[4], $tm_now[5] );
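  # Illustrative example (assuming preserve_day_of_week = "sunday",
  # i.e. weekday 0): for a snapshot created on a Tuesday with the
  # reference time on the following Monday, $delta_days = 6 and
  # $delta_days_from_eow = 2 (Tuesday is 2 days after Sunday), so
  # $delta_weeks = int((6 + 2) / 7) = 1, i.e. the snapshot counts as
  # one week old.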
2016-04-20 22:45:11 +02:00
2015-04-02 16:24:13 +02:00
foreach my $href (@sorted_schedule)
2015-01-13 12:38:01 +01:00
{
2016-04-21 13:27:54 +02:00
my $time = $href->{btrbk_date}->[0];
my @tm = localtime($time);
my $delta_days_from_eow = $tm[6] - $day_of_week_map{$preserve_day_of_week};
2016-04-20 22:45:11 +02:00
$delta_days_from_eow += 7 if($delta_days_from_eow < 0);
2016-04-21 13:27:54 +02:00
    # NOTE: timegm_nocheck() (as above) ignores leap hours
my $delta_days = int(($now_d - timegm_nocheck( 0, 0, 0, $tm[3], $tm[4], $tm[5] ) ) / (60 * 60 * 24));
my $delta_hours = int(($now_h - timegm_nocheck( 0, 0, $tm[2], $tm[3], $tm[4], $tm[5] ) ) / (60 * 60));
2016-04-20 22:45:11 +02:00
my $delta_weeks = int(($delta_days + $delta_days_from_eow) / 7); # weeks from beginning of week
2016-04-21 13:27:54 +02:00
my $delta_years = ($tm_now[5] - $tm[5]);
my $delta_months = $delta_years * 12 + ($tm_now[4] - $tm[4]);
2016-04-20 22:45:11 +02:00
$href->{delta_hours} = $delta_hours;
$href->{delta_days} = $delta_days;
$href->{delta_weeks} = $delta_weeks;
$href->{delta_months} = $delta_months;
$href->{delta_years} = $delta_years;
# only for text output
2016-04-21 13:27:54 +02:00
my $year = $tm[5] + 1900;
my $year_month = "${year}-" . ($tm[4] < 9 ? '0' : "") . ($tm[4] + 1);
2016-04-20 22:45:11 +02:00
$href->{year_month} = $year_month;
$href->{year} = $year;
$href->{err_days} = ($delta_days_from_eow ? "+$delta_days_from_eow days after " : "on ") . "$preserve_day_of_week";
2016-04-22 20:51:31 +02:00
if($preserve_date_in_future && ($href->{delta_hours} < 0)) {
$href->{preserve} = "preserve forced: " . -($href->{delta_hours}) . " hours in the future";
}
2015-01-04 21:26:48 +01:00
}
2016-04-11 19:54:56 +02:00
my %first_in_delta_hours;
my %first_in_delta_days;
2015-01-25 18:05:52 +01:00
my %first_in_delta_weeks;
2016-04-11 19:54:56 +02:00
my %first_weekly_in_delta_months;
my %first_monthly_in_delta_years;
2016-04-12 11:47:28 +02:00
# filter "preserve all within N days/weeks/..."
2015-04-02 16:24:13 +02:00
foreach my $href (@sorted_schedule) {
2016-04-12 20:35:57 +02:00
if($preserve_min_q) {
if($preserve_min_q eq 'all') {
2016-04-12 21:06:46 +02:00
$href->{preserve} = "preserve min: all";
2016-04-12 19:55:29 +02:00
} elsif($preserve_min_q eq 'h') {
2016-04-12 21:06:46 +02:00
$href->{preserve} = "preserve min: $href->{delta_hours} hours ago" if($href->{delta_hours} <= $preserve_min_n);
2016-04-12 19:55:29 +02:00
} elsif($preserve_min_q eq 'd') {
2016-04-12 21:06:46 +02:00
$href->{preserve} = "preserve min: $href->{delta_days} days ago" if($href->{delta_days} <= $preserve_min_n);
2016-04-12 19:55:29 +02:00
} elsif($preserve_min_q eq 'w') {
2016-04-12 21:06:46 +02:00
$href->{preserve} = "preserve min: $href->{delta_weeks} weeks ago" if($href->{delta_weeks} <= $preserve_min_n);
2016-04-12 19:55:29 +02:00
} elsif($preserve_min_q eq 'm') {
2016-04-12 21:06:46 +02:00
$href->{preserve} = "preserve min: $href->{delta_months} months ago" if($href->{delta_months} <= $preserve_min_n);
2016-04-12 19:55:29 +02:00
} elsif($preserve_min_q eq 'y') {
2016-04-12 21:06:46 +02:00
$href->{preserve} = "preserve min: $href->{delta_years} years ago" if($href->{delta_years} <= $preserve_min_n);
2016-04-12 11:47:28 +02:00
}
}
2016-04-11 19:54:56 +02:00
$first_in_delta_hours{$href->{delta_hours}} //= $href;
2016-04-12 11:47:28 +02:00
}
2016-04-12 20:35:57 +02:00
if($preserve_min_q && ($preserve_min_q eq 'latest') && (scalar @sorted_schedule)) {
my $href = $sorted_schedule[-1];
2016-04-12 21:06:46 +02:00
$href->{preserve} = 'preserve min: latest';
2016-04-12 20:35:57 +02:00
}
2016-04-12 11:47:28 +02:00
# filter hourly, daily, weekly, monthly, yearly
2016-04-11 19:54:56 +02:00
foreach (sort {$b <=> $a} keys %first_in_delta_hours) {
my $href = $first_in_delta_hours{$_} || die;
2016-04-12 21:06:46 +02:00
if($preserve_hourly && (($preserve_hourly eq 'all') || ($href->{delta_hours} <= $preserve_hourly))) {
$href->{preserve} = "preserve hourly: first of hour, $href->{delta_hours} hours ago";
2016-04-12 11:47:28 +02:00
}
2016-04-11 19:54:56 +02:00
$first_in_delta_days{$href->{delta_days}} //= $href;
2016-04-12 11:47:28 +02:00
}
2016-04-11 19:54:56 +02:00
foreach (sort {$b <=> $a} keys %first_in_delta_days) {
my $href = $first_in_delta_days{$_} || die;
2016-04-12 11:47:28 +02:00
if($preserve_daily && (($preserve_daily eq 'all') || ($href->{delta_days} <= $preserve_daily))) {
2016-04-12 21:06:46 +02:00
$href->{preserve} = "preserve daily: first of day, $href->{delta_days} days ago";
2015-01-25 18:05:52 +01:00
}
$first_in_delta_weeks{$href->{delta_weeks}} //= $href;
}
2016-04-12 11:47:28 +02:00
2015-12-17 19:00:45 +01:00
foreach (sort {$b <=> $a} keys %first_in_delta_weeks) {
2015-01-20 16:53:35 +01:00
my $href = $first_in_delta_weeks{$_} || die;
2016-04-12 11:47:28 +02:00
if($preserve_weekly && (($preserve_weekly eq 'all') || ($href->{delta_weeks} <= $preserve_weekly))) {
2016-04-20 22:45:11 +02:00
$href->{preserve} = "preserve weekly: $href->{delta_weeks} weeks ago, $href->{err_days}";
2015-01-04 21:26:48 +01:00
}
2016-04-11 19:54:56 +02:00
$first_weekly_in_delta_months{$href->{delta_months}} //= $href;
2015-01-04 21:26:48 +01:00
}
2016-04-11 19:54:56 +02:00
foreach (sort {$b <=> $a} keys %first_weekly_in_delta_months) {
my $href = $first_weekly_in_delta_months{$_} || die;
2016-04-12 11:47:28 +02:00
if($preserve_monthly && (($preserve_monthly eq 'all') || ($href->{delta_months} <= $preserve_monthly))) {
2016-04-20 22:45:11 +02:00
$href->{preserve} = "preserve monthly: first weekly of month $href->{year_month} ($href->{delta_months} months ago, $href->{err_days})";
2015-01-13 12:38:01 +01:00
}
2016-04-11 19:54:56 +02:00
$first_monthly_in_delta_years{$href->{delta_years}} //= $href;
2016-02-29 18:00:55 +01:00
}
2016-04-11 19:54:56 +02:00
foreach (sort {$b <=> $a} keys %first_monthly_in_delta_years) {
my $href = $first_monthly_in_delta_years{$_} || die;
2016-04-12 11:47:28 +02:00
if($preserve_yearly && (($preserve_yearly eq 'all') || ($href->{delta_years} <= $preserve_yearly))) {
2016-04-20 22:45:11 +02:00
$href->{preserve} = "preserve yearly: first weekly of year $href->{year} ($href->{delta_years} years ago, $href->{err_days})";
2016-02-29 18:00:55 +01:00
}
2015-01-04 21:26:48 +01:00
}
2015-01-25 18:05:52 +01:00
# assemble results
2015-01-13 12:38:01 +01:00
my @delete;
2015-04-02 15:53:53 +02:00
my @preserve;
2016-04-12 11:47:28 +02:00
my %result_base = ( %$preserve,
scheme => format_preserve_matrix($preserve),
2015-10-12 20:46:05 +02:00
%$result_hints,
);
2016-03-08 18:22:58 +01:00
my $count_defined = 0;
2015-04-02 16:24:13 +02:00
foreach my $href (@sorted_schedule)
2015-01-13 12:38:01 +01:00
{
2016-04-15 22:33:19 +02:00
$count_defined++ unless($href->{informative_only});
2015-01-13 12:38:01 +01:00
if($href->{preserve}) {
2016-04-15 22:33:19 +02:00
push(@preserve, $href->{value}) unless($href->{informative_only});
2015-10-12 20:46:05 +02:00
push @$results_list, { %result_base,
2016-04-19 18:53:44 +02:00
action => $href->{informative_only} ? undef : $result_preserve_action_text,
2015-10-12 20:46:05 +02:00
reason => $href->{preserve},
value => $href->{value},
} if($results_list);
2016-04-16 19:25:46 +02:00
TRACE "schedule: $href->{value}->{PRINT}: " . ($href->{informative_only} ? '(informative_only)' : '') . " $href->{preserve}" if($href->{value} && $href->{value}->{PRINT});
2015-01-13 12:38:01 +01:00
}
else {
2016-04-15 22:33:19 +02:00
push(@delete, $href->{value}) unless($href->{informative_only});
2015-10-12 20:46:05 +02:00
push @$results_list, { %result_base,
2016-04-19 18:53:44 +02:00
action => $href->{informative_only} ? undef : $result_delete_action_text,
2015-10-12 20:46:05 +02:00
value => $href->{value},
2016-04-12 21:06:46 +02:00
} if($results_list);
2016-04-16 19:25:46 +02:00
TRACE "schedule: $href->{value}->{PRINT}: delete ($result_delete_action_text)" if($href->{value} && $href->{value}->{PRINT});
2015-01-13 12:38:01 +01:00
}
}
2016-03-08 18:22:58 +01:00
DEBUG "Preserving " . @preserve . "/" . $count_defined . " items";
2015-04-02 15:53:53 +02:00
return (\@preserve, \@delete);
2015-01-04 21:26:48 +01:00
}
2016-04-12 11:47:28 +02:00
sub format_preserve_matrix($@)
2015-10-11 19:01:59 +02:00
{
2016-04-12 11:47:28 +02:00
my $preserve = shift || die;
my %opts = @_;
my $format = $opts{format} // "short";
2016-03-08 18:22:58 +01:00
if($format eq "debug_text") {
2016-04-12 11:47:28 +02:00
my @out;
my %trans = ( h => 'hours', d => 'days', w => 'weeks', m => 'months', y => 'years' );
2016-04-12 20:35:57 +02:00
if($preserve->{min_q} && ($preserve->{min_q} eq 'all')) {
2016-04-12 21:06:46 +02:00
push @out, "all forever";
2016-04-12 11:47:28 +02:00
}
else {
2016-04-12 21:06:46 +02:00
push @out, "latest" if($preserve->{min_q} && ($preserve->{min_q} eq 'latest'));
push @out, "all within $preserve->{min_n} $trans{$preserve->{min_q}}" if($preserve->{min_n} && $preserve->{min_q});
push @out, "first of day for $preserve->{d} days" if($preserve->{d});
2016-04-12 11:47:28 +02:00
unless($preserve->{d} && ($preserve->{d} eq 'all')) {
push @out, "first daily in week (starting on $preserve->{dow}) for $preserve->{w} weeks" if($preserve->{w});
unless($preserve->{w} && ($preserve->{w} eq 'all')) {
2016-04-11 19:54:56 +02:00
push @out, "first weekly of month for $preserve->{m} months" if($preserve->{m});
2016-04-12 11:47:28 +02:00
unless($preserve->{m} && ($preserve->{m} eq 'all')) {
2016-04-11 19:54:56 +02:00
push @out, "first weekly of year for $preserve->{y} years" if($preserve->{y});
2016-04-12 11:47:28 +02:00
}
2016-03-08 18:22:58 +01:00
}
}
}
2016-04-12 21:06:46 +02:00
return 'preserving ' . join('; ', @out);
2016-03-08 18:22:58 +01:00
}
2016-04-12 11:47:28 +02:00
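  # Illustrative short format: e.g. min_n=2, min_q="d", d=14, w=10,
  # m=6, dow="sunday" yields "2d+ 14d 10w 6m (sunday)"; "all" values
  # are rendered as "*".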
my $s = "";
2016-04-12 20:35:57 +02:00
if($preserve->{min_q} && ($preserve->{min_q} eq 'all')) {
2016-04-12 11:47:28 +02:00
$s = '*d+';
2015-10-11 19:01:59 +02:00
}
2016-04-12 11:47:28 +02:00
else {
2016-04-12 21:06:46 +02:00
# $s .= '.+' if($preserve->{min_q} && ($preserve->{min_q} eq 'latest'));
2016-04-12 19:55:29 +02:00
$s .= $preserve->{min_n} . $preserve->{min_q} . '+' if($preserve->{min_n} && $preserve->{min_q});
2016-04-12 11:47:28 +02:00
foreach (qw(h d w m y)) {
my $val = $preserve->{$_} // 0;
next unless($val);
$val = '*' if($val eq 'all');
$s .= ($s ? ' ' : '') . $val . $_;
}
$s .= " ($preserve->{dow})" if($preserve->{dow} && ($preserve->{w} || $preserve->{m} || $preserve->{y}));
}
return $s;
2015-10-11 19:01:59 +02:00
}
2016-04-21 13:27:54 +02:00
sub timestamp($$;$)
{
my $time = shift // die; # unixtime, or arrayref from localtime()
my $format = shift;
my $tm_is_utc = shift;
my @tm = ref($time) ? @$time : localtime($time);
my $ts;
# NOTE: can't use POSIX::strftime(), as "%z" always prints offset of local timezone!
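  # Example outputs (illustrative, for 2016-04-23 14:58:02 in UTC+2):
  #   short:     20160423
  #   long:      20160423T1458
  #   long-iso:  20160423T145802+0200
  #   debug-iso: 2016-04-23T14:58:02+0200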
if($format eq "short") {
return sprintf('%04u%02u%02u', $tm[5] + 1900, $tm[4] + 1, $tm[3]);
}
elsif($format eq "long") {
return sprintf('%04u%02u%02uT%02u%02u', $tm[5] + 1900, $tm[4] + 1, $tm[3], $tm[2], $tm[1]);
}
elsif($format eq "long-iso") {
$ts = sprintf('%04u%02u%02uT%02u%02u%02u', $tm[5] + 1900, $tm[4] + 1, $tm[3], $tm[2], $tm[1], $tm[0]);
}
elsif($format eq "debug-iso") {
$ts = sprintf('%04u-%02u-%02uT%02u:%02u:%02u', $tm[5] + 1900, $tm[4] + 1, $tm[3], $tm[2], $tm[1], $tm[0]);
}
else { die; }
if($tm_is_utc) {
$ts .= '+0000'; # or 'Z'
} else {
my $offset = timegm(@tm) - timelocal(@tm);
if($offset < 0) { $ts .= '-'; $offset = -$offset; } else { $ts .= '+'; }
2016-04-23 14:37:24 +02:00
my $h = int($offset / (60 * 60));
die if($h > 24); # sanity check, something went really wrong
$ts .= sprintf('%02u%02u', $h, int($offset / 60) % 60);
2016-04-21 13:27:54 +02:00
}
return $ts;
}
2015-09-20 14:25:20 +02:00
sub print_header(@)
{
2015-05-26 20:05:40 +02:00
my %args = @_;
my $config = $args{config};
print "--------------------------------------------------------------------------------\n";
2016-04-15 01:22:19 +02:00
print "$args{title} ($VERSION_INFO)\n\n";
2015-05-26 20:05:40 +02:00
if($args{time}) {
print " Date: " . localtime($args{time}) . "\n";
}
if($config) {
print " Config: $config->{SRC_FILE}\n";
2015-10-10 15:13:32 +02:00
}
if($dryrun) {
print " Dryrun: YES\n";
}
if($config && $config->{CMDLINE_FILTER_LIST}) {
my @list = sort @{$config->{CMDLINE_FILTER_LIST}};
my @sorted = ( grep(/^group/, @list),
grep(/^volume/, @list),
grep(/^subvolume/, @list),
grep(/^target/, @list) );
die unless(scalar(@list) == scalar(@sorted));
print " Filter: ";
print join("\n ", @sorted);
print "\n";
2015-05-26 20:05:40 +02:00
}
if($args{info}) {
print "\n" . join("\n", grep(defined, @{$args{info}})) . "\n";
}
if($args{legend}) {
print "\nLegend:\n ";
print join("\n ", @{$args{legend}});
print "\n";
}
print "--------------------------------------------------------------------------------\n";
}
2016-01-15 02:06:03 +01:00
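# Prints a simple two-column table from an arrayref of [left, right]
# pairs, padding the left column to the longest entry. Illustrative
# usage (hypothetical values):
#
#   print_table([ [ "Date:",   "Sat Apr 23 14:58:02 2016" ],
#                 [ "Config:", "/etc/btrbk/btrbk.conf" ] ]);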
sub print_table($;$)
{
my $data = shift;
my $spacing = shift // " ";
my $maxlen = 0;
foreach (@$data) {
$maxlen = length($_->[0]) if($maxlen < length($_->[0]));
}
foreach (@$data) {
print $_->[0] . ((' ' x ($maxlen - length($_->[0]))) . $spacing) . $_->[1] . "\n";
}
}
2015-10-11 19:01:59 +02:00
sub print_formatted(@)
2015-10-11 01:44:13 +02:00
{
2015-10-13 01:10:06 +02:00
my $format_key = shift || die;
my $data = shift || die;
my $default_format = "table";
2015-10-11 19:01:59 +02:00
my %args = @_;
2015-10-12 20:46:05 +02:00
my $title = $args{title};
2015-10-13 01:10:06 +02:00
my $format = $args{output_format} || $output_format || $default_format;
my $keys = $table_formats{$format_key}->{$format};
2015-10-19 22:10:08 +02:00
my $ralign = $table_formats{$format_key}->{RALIGN} // {};
2015-10-20 20:16:34 +02:00
my $fh = $args{outfile} // *STDOUT;
2015-10-19 22:10:08 +02:00
my $table_spacing = 2;
2015-10-11 01:44:13 +02:00
2015-10-11 15:38:43 +02:00
unless($keys) {
2015-10-13 01:10:06 +02:00
WARN "Unsupported output format \"$format\", defaulting to \"$default_format\" format.";
$keys = $table_formats{$format_key}->{$default_format} || die;
$format = $default_format;
2015-10-11 15:38:43 +02:00
}
2015-10-20 20:16:34 +02:00
print $fh "$title\n" if($title);
2015-10-11 19:01:59 +02:00
if($format eq "raw")
{
# output: key0="value0" key1="value1" ...
foreach my $row (@$data) {
2015-10-20 20:16:34 +02:00
print $fh "format=\"$format_key\" ";
print $fh join(' ', map { "$_=\"" . ($row->{$_} // "") . "\""; } @$keys) . "\n";
2015-10-11 19:01:59 +02:00
}
}
2016-04-25 19:40:11 +02:00
elsif(($format eq "tlog") || ($format eq "syslog"))
2015-10-13 18:24:30 +02:00
{
# output: value0 value1, ...
2015-10-20 20:16:34 +02:00
unless($args{no_header}) {
print $fh join(' ', @$keys) . "\n";
}
foreach my $row (@$data) {
2016-04-25 19:40:11 +02:00
my $line = join(' ', map { ((defined($row->{$_}) && ($_ eq "message")) ? '# ' : '') . ($row->{$_} // "-") } @$keys);
if($format eq "syslog") { # dirty hack, ignore outfile on syslog format
syslog($line);
} else {
print $fh ($line . "\n");
}
2015-10-13 18:24:30 +02:00
}
}
2015-10-11 19:01:59 +02:00
else
2015-10-11 01:44:13 +02:00
{
2015-10-11 15:38:43 +02:00
# sanitize and calculate maxlen for each column
# NOTE: this is destructive on data!
2015-10-11 01:44:13 +02:00
my %maxlen;
2015-10-11 15:38:43 +02:00
my @sane_data;
foreach my $key (@$keys) {
$maxlen{$key} = length($key); # initialize with size of key
}
2015-10-11 01:44:13 +02:00
foreach my $row (@$data) {
2015-10-11 15:38:43 +02:00
foreach my $key (@$keys) {
my $val = $row->{$key};
if(ref $val eq "ARRAY") {
$val = join(',', @{$val});
}
$val //= "-";
$val = "-" if($val eq "");
$row->{$key} = $val; # write back the sanitized value
$maxlen{$key} = length($val) if($maxlen{$key} < length($val));
2015-10-11 01:44:13 +02:00
}
}
2015-10-11 15:38:43 +02:00
# print keys (headings)
2016-04-15 21:42:38 +02:00
unless($args{no_header}) {
my $fill = 0;
foreach (@$keys) {
2015-10-20 20:16:34 +02:00
print $fh ' ' x $fill;
2016-04-15 21:42:38 +02:00
$fill = $maxlen{$_} - length($_);
if($ralign->{$_}) {
print $fh ' ' x $fill;
$fill = 0;
}
print $fh $_;
$fill += $table_spacing;
2015-10-19 22:10:08 +02:00
}
2016-04-15 21:42:38 +02:00
print $fh "\n";
print $fh join(' ' x $table_spacing, map { '-' x ($maxlen{$_}) } @$keys) . "\n";
2015-10-13 01:39:58 +02:00
}
2015-10-11 01:44:13 +02:00
# print values
foreach my $row (@$data) {
2015-10-19 22:10:08 +02:00
my $fill = 0;
2015-10-11 01:44:13 +02:00
foreach (@$keys) {
2015-10-11 15:38:43 +02:00
my $val = $row->{$_};
2015-10-20 20:16:34 +02:00
print $fh ' ' x $fill;
2015-10-19 22:10:08 +02:00
$fill = $maxlen{$_} - length($val);
if($ralign->{$_}) {
2015-10-20 20:16:34 +02:00
print $fh ' ' x $fill;
2015-10-19 22:10:08 +02:00
$fill = 0;
}
2015-10-20 20:16:34 +02:00
print $fh $val;
2015-10-19 22:10:08 +02:00
$fill += $table_spacing;
2015-10-11 01:44:13 +02:00
}
2015-10-20 20:16:34 +02:00
print $fh "\n";
2015-10-11 01:44:13 +02:00
}
}
}
2016-03-15 11:21:59 +01:00
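# Recursively collects the origin tree of a subvolume node by following
# parent_uuid (and received_uuid) relations, pushing one line per node
# onto @$lines. Illustrative output shape (paths are made up):
#
#   /mnt/btr_pool/data
#   ^-- /mnt/btr_pool/_btrbk_snap/data.20160423
#       ^-- <unknown>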
sub _origin_tree
2015-10-14 16:51:39 +02:00
{
2016-03-15 11:21:59 +01:00
my $prefix = shift;
2016-04-15 22:00:10 +02:00
my $node = shift // die;
2016-03-15 11:21:59 +01:00
my $lines = shift;
2016-04-15 22:00:10 +02:00
my $nodelist = shift;
my $depth = shift // 0;
my $seen = shift // [];
my $norecurse = shift;
my $uuid = $node->{uuid} || die;
# cache a bit, this might be large
$nodelist //= [ (sort { $a->{REL_PATH} cmp $b->{REL_PATH} } values %uuid_cache) ];
2015-10-14 16:51:39 +02:00
2016-03-15 11:21:59 +01:00
my @url = get_cached_url_by_uuid($uuid);
2016-04-15 22:00:10 +02:00
my $out_path;
2016-03-15 11:21:59 +01:00
if(scalar @url) {
2016-04-15 22:00:10 +02:00
$out_path = join(" === ", sort map { vinfo($_)->{PRINT} } @url);
2016-03-15 11:21:59 +01:00
} else {
2016-04-15 22:00:10 +02:00
$out_path = _fs_path($node);
2016-03-15 11:21:59 +01:00
}
2016-04-15 22:00:10 +02:00
my $prefix_spaces = ' ' x (($depth * 4) - ($prefix ? 4 : 0));
push(@$lines, { tree => "${prefix_spaces}${prefix}$out_path",
uuid => $node->{uuid},
parent_uuid => $node->{parent_uuid},
received_uuid => $node->{received_uuid},
});
# handle deep recursion
return 0 if(grep /^$uuid$/, @$seen);
2015-10-14 16:51:39 +02:00
2016-03-15 11:21:59 +01:00
if($node->{parent_uuid} ne '-') {
2016-04-15 22:00:10 +02:00
my $parent_node = $uuid_cache{$node->{parent_uuid}};
if($parent_node) {
if($norecurse) {
push(@$lines,{ tree => "${prefix_spaces} ^-- ...",
uuid => $parent_node->{uuid},
parent_uuid => $parent_node->{parent_uuid},
received_uuid => $parent_node->{received_uuid},
recursion => 'stop_recursion',
});
return 0;
}
if($parent_node->{readonly}) {
_origin_tree("^-- ", $parent_node, $lines, $nodelist, $depth + 1, undef, 1); # end recursion
}
else {
_origin_tree("^-- ", $parent_node, $lines, $nodelist, $depth + 1);
}
}
else {
push(@$lines,{ tree => "${prefix_spaces} ^-- <unknown>" });
}
2016-03-07 19:20:15 +01:00
}
2016-04-15 22:00:10 +02:00
return 0 if($norecurse);
push(@$seen, $uuid);
if($node->{received_uuid} ne '-') {
my $received_uuid = $node->{received_uuid};
my @receive_parents; # there should be only one!
my @receive_twins;
foreach (@$nodelist) {
next if($_->{uuid} eq $uuid);
if($received_uuid eq $_->{uuid} && $_->{readonly}) {
_origin_tree("", $_, \@receive_parents, $nodelist, $depth, $seen);
}
elsif(($_->{received_uuid} ne '-') && ($received_uuid eq $_->{received_uuid}) && $_->{readonly}) {
_origin_tree("", $_, \@receive_twins, $nodelist, $depth, $seen, 1); # end recursion
}
}
push @$lines, @receive_twins;
push @$lines, @receive_parents;
}
return 0;
2016-03-15 11:21:59 +01:00
}
2016-03-07 19:20:15 +01:00
2016-03-15 11:21:59 +01:00
sub exit_status
{
my $config = shift;
foreach my $subsection (@{$config->{SUBSECTION}}) {
return 10 if($subsection->{ABORTED} && ($subsection->{ABORTED} ne "USER_SKIP"));
return 10 if(exit_status($subsection));
}
return 0;
2016-03-07 17:35:17 +01:00
}
2014-12-11 18:03:10 +01:00
MAIN:
{
2015-05-18 21:18:57 +02:00
# set PATH instead of using absolute "/sbin/btrfs" (for now), as
# different distros (and even different versions of btrfs-progs)
# install the "btrfs" executable to different locations.
$ENV{PATH} = '/sbin:/bin:/usr/sbin:/usr/bin';
2015-08-15 17:51:00 +02:00
Getopt::Long::Configure qw(gnu_getopt);
2015-01-17 14:55:46 +01:00
my $start_time = time;
2016-04-21 13:27:54 +02:00
@tm_now = localtime($start_time);
2014-12-11 18:03:10 +01:00
2016-04-18 15:22:14 +02:00
my %config_override_cmdline;
2016-06-07 16:17:02 +02:00
my ($config_cmdline, $quiet, $verbose, $preserve_backups, $resume_only, $print_schedule, $lockfile_cmdline);
2015-08-15 17:51:00 +02:00
unless(GetOptions(
2016-04-09 14:16:14 +02:00
'help|h' => sub { VERSION_MESSAGE(); HELP_MESSAGE(0); exit 0; },
'version' => sub { VERSION_MESSAGE(); exit 0; },
'config|c=s' => \$config_cmdline,
'dry-run|n' => \$dryrun,
'preserve|p' => \$preserve_backups,
'resume-only|r' => \$resume_only,
'quiet|q' => \$quiet,
'verbose|v' => sub { $loglevel = 2; },
'loglevel|l=s' => \$loglevel,
'progress' => \$show_progress,
'table|t' => sub { $output_format = "table" },
'format=s' => \$output_format,
'print-schedule' => \$print_schedule,
2016-06-07 16:17:02 +02:00
'lockfile=s' => \$lockfile_cmdline,
2016-04-18 15:22:14 +02:00
'override=s' => \%config_override_cmdline, # e.g. --override=incremental=no
2015-08-15 17:51:00 +02:00
))
{
2015-01-10 16:02:35 +01:00
VERSION_MESSAGE();
HELP_MESSAGE(0);
2015-09-30 14:00:39 +02:00
exit 2;
2015-01-10 16:02:35 +01:00
}
2014-12-13 15:15:58 +01:00
my $command = shift @ARGV;
2015-08-15 17:51:00 +02:00
unless($command) {
VERSION_MESSAGE();
HELP_MESSAGE(0);
2015-09-30 14:00:39 +02:00
exit 2;
2015-08-15 17:51:00 +02:00
}
2014-12-12 12:32:04 +01:00
# assign command line options
2016-04-18 16:40:49 +02:00
@config_src = ( $config_cmdline ) if($config_cmdline);
2014-12-13 19:34:03 +01:00
if (lc($loglevel) eq "warn") { $loglevel = 1; }
elsif(lc($loglevel) eq "info") { $loglevel = 2; }
elsif(lc($loglevel) eq "debug") { $loglevel = 3; }
elsif(lc($loglevel) eq "trace") { $loglevel = 4; }
2015-08-15 17:51:00 +02:00
elsif($loglevel =~ /^[0-9]+$/) { ; }
else { $loglevel = 1; }
2016-04-28 13:03:15 +02:00
require_data_dumper() if(($loglevel >= 4) || ($VERSION =~ /-dev$/));
2014-12-11 18:03:10 +01:00
2014-12-12 12:32:04 +01:00
# check command line options
2016-05-03 14:34:04 +02:00
if($show_progress && (not check_exe('pv'))) {
WARN 'Found option "--progress", but required executable "pv" does not exist on your system. Please install "pv".';
2015-08-15 18:23:48 +02:00
$show_progress = 0;
}
2016-04-16 01:09:17 +02:00
my ($action_run, $action_usage, $action_resolve, $action_diff, $action_origin, $action_config_print, $action_list, $action_clean, $action_archive);
2015-09-02 11:04:22 +02:00
my @filter_args;
2015-10-22 17:45:27 +02:00
my $args_allow_group = 1;
my $args_expected_min = 0;
my $args_expected_max = 9999;
2015-02-08 13:47:31 +01:00
if(($command eq "run") || ($command eq "dryrun")) {
$action_run = 1;
2014-12-13 15:15:58 +01:00
$dryrun = 1 if($command eq "dryrun");
2015-09-02 11:04:22 +02:00
$args_allow_group = 1;
@filter_args = @ARGV;
2014-12-13 15:15:58 +01:00
}
2016-01-14 15:52:33 +01:00
elsif ($command eq "clean") {
$action_clean = 1;
@filter_args = @ARGV;
}
2016-04-16 01:09:17 +02:00
elsif ($command eq "archive") {
$action_archive = 1;
2016-04-07 15:33:32 +02:00
$args_expected_min = $args_expected_max = 2;
$args_allow_group = 0;
@filter_args = @ARGV;
}
2015-10-19 22:10:08 +02:00
elsif ($command eq "usage") {
$action_usage = 1;
2015-09-02 11:04:22 +02:00
@filter_args = @ARGV;
2015-01-20 19:18:38 +01:00
}
2015-01-04 19:30:41 +01:00
elsif ($command eq "diff") {
2015-01-03 21:25:46 +01:00
$action_diff = 1;
2015-03-01 14:28:26 +01:00
$args_expected_min = $args_expected_max = 2;
2015-10-22 17:45:27 +02:00
$args_allow_group = 0;
2015-09-02 11:04:22 +02:00
@filter_args = @ARGV;
2014-12-14 21:29:22 +01:00
}
2015-01-26 17:31:18 +01:00
elsif ($command eq "origin") {
$action_origin = 1;
2015-03-01 14:28:26 +01:00
$args_expected_min = $args_expected_max = 1;
2015-10-22 17:45:27 +02:00
$args_allow_group = 0;
2015-09-02 11:04:22 +02:00
@filter_args = @ARGV;
2015-01-26 17:31:18 +01:00
}
2015-10-11 02:02:45 +02:00
elsif($command eq "list") {
2015-10-22 17:45:27 +02:00
my $subcommand = shift @ARGV // "";
if(($subcommand eq "config") ||
($subcommand eq "volume") ||
($subcommand eq "source") ||
($subcommand eq "target"))
{
$action_list = $subcommand;
}
elsif(($subcommand eq "snapshots") ||
($subcommand eq "backups") ||
($subcommand eq "latest"))
{
$action_resolve = $subcommand;
}
else {
$action_list = "config";
unshift @ARGV, $subcommand if($subcommand ne "");
2015-10-12 14:59:02 +02:00
}
2015-10-11 02:02:45 +02:00
@filter_args = @ARGV;
}
2016-01-15 02:06:03 +01:00
elsif($command eq "stats") {
$action_resolve = "stats";
@filter_args = @ARGV;
}
2015-09-24 13:51:15 +02:00
elsif ($command eq "config") {
2015-10-10 21:26:59 +02:00
my $subcommand = shift @ARGV // "";
2015-10-22 17:45:27 +02:00
@filter_args = @ARGV;
2015-10-10 21:26:59 +02:00
if(($subcommand eq "print") || ($subcommand eq "print-all")) {
$action_config_print = $subcommand;
2015-10-22 17:45:27 +02:00
}
elsif($subcommand eq "list") {
$action_list = "config";
2015-10-10 21:26:59 +02:00
}
else {
ERROR "Unknown subcommand for \"config\" command: $subcommand";
2015-09-24 13:51:15 +02:00
HELP_MESSAGE(0);
2015-09-30 14:00:39 +02:00
exit 2;
2015-09-24 13:51:15 +02:00
}
}
2014-12-13 15:15:58 +01:00
else {
ERROR "Unrecognized command: $command";
HELP_MESSAGE(0);
2015-09-30 14:00:39 +02:00
exit 2;
2014-12-13 13:52:43 +01:00
}
2015-03-01 14:28:26 +01:00
if(($args_expected_min > scalar(@ARGV)) || ($args_expected_max < scalar(@ARGV))) {
2015-02-28 13:49:36 +01:00
ERROR "Incorrect number of arguments";
HELP_MESSAGE(0);
2015-09-30 14:00:39 +02:00
exit 2;
2015-02-28 13:49:36 +01:00
}
# input validation
2015-09-02 11:04:22 +02:00
foreach (@filter_args) {
if($args_allow_group && /^($group_match)$/) { # matches group
$_ = $1; # untaint argument
2016-04-25 16:07:40 +02:00
next;
2015-05-25 16:28:50 +02:00
}
else {
2016-04-25 16:07:40 +02:00
my ($url_prefix, $path) = check_url($_);
if(defined($path)) {
$_ = $url_prefix . $path;
next;
}
2015-02-28 13:49:36 +01:00
}
2016-04-25 16:07:40 +02:00
ERROR "Bad argument: not a subvolume" . ($args_allow_group ? "/group" : "") . " declaration: $_";
HELP_MESSAGE(0);
exit 2;
2015-02-28 13:49:36 +01:00
}
2016-04-18 15:22:14 +02:00
foreach my $key (keys %config_override_cmdline) {
DEBUG "config_override: \"$key=$config_override_cmdline{$key}\"";
unless(append_config_option(\%config_override, $key, $config_override_cmdline{$key}, "root")) {
2015-10-23 14:43:36 +02:00
HELP_MESSAGE(0);
exit 2;
}
}
2016-06-07 16:17:02 +02:00
if(defined($lockfile_cmdline)) {
if($lockfile_cmdline =~ /^($file_match)$/) {
$lockfile = $1; # untaint argument
} else {
ERROR "Option \"--lockfile\" is not a valid file name: \"$lockfile_cmdline\"";
HELP_MESSAGE(0);
exit 2;
}
}
2015-02-28 13:49:36 +01:00
2014-12-13 13:52:43 +01:00
2016-04-15 01:22:19 +02:00
INFO "$VERSION_INFO (" . localtime($start_time) . ")";
2014-12-14 21:29:22 +01:00
if($action_diff)
{
2015-01-04 19:30:41 +01:00
#
# print snapshot diff
#
2015-09-02 11:04:22 +02:00
my $src_url = $filter_args[0] || die;
my $target_url = $filter_args[1] || die;
2016-03-15 16:54:54 +01:00
my $default_config = init_config();
# NOTE: ssh://{src,target} uses default config
2015-01-03 21:25:46 +01:00
2016-03-15 16:54:54 +01:00
my $src_vol = vinfo($src_url, $default_config);
2016-03-09 19:52:45 +01:00
unless(vinfo_init_root($src_vol)) { ERROR "Failed to fetch subvolume detail for '$src_vol->{PRINT}'" . ($err ? ": $err" : ""); exit 1; }
2016-03-14 16:39:13 +01:00
if($src_vol->{node}{is_root}) { ERROR "Subvolume is btrfs root: $src_vol->{PRINT}"; exit 1; }
2015-01-03 21:25:46 +01:00
2016-03-15 16:54:54 +01:00
my $target_vol = vinfo($target_url, $default_config);
2016-03-09 19:52:45 +01:00
unless(vinfo_init_root($target_vol)) { ERROR "Failed to fetch subvolume detail for '$target_vol->{PRINT}'" . ($err ? ": $err" : ""); exit 1; }
2016-03-14 16:39:13 +01:00
if($target_vol->{node}{is_root}) { ERROR "Subvolume is btrfs root: $target_vol->{PRINT}"; exit 1; }
2015-01-03 21:25:46 +01:00
2016-03-14 16:39:13 +01:00
unless(_is_child_of($src_vol->{node}->{TREE_ROOT}, $target_vol->{node}{uuid})) {
ERROR "Subvolumes are not on the same btrfs filesystem!";
2015-04-21 14:53:31 +02:00
exit 1;
}
2015-01-03 21:25:46 +01:00
# NOTE: in some cases "cgen" differs from "gen", even for read-only snapshots (observed: gen=cgen+1)
2016-05-03 13:19:42 +02:00
my $lastgen = $src_vol->{node}{gen} + 1;
2015-01-03 21:25:46 +01:00
2014-12-14 22:03:31 +01:00
# dump files, sorted and unique
2015-04-23 16:19:34 +02:00
my $ret = btrfs_subvolume_find_new($target_vol, $lastgen);
2015-01-03 21:25:46 +01:00
exit 1 unless(ref($ret));
2015-05-26 20:05:40 +02:00
print_header(title => "Subvolume Diff",
time => $start_time,
info => [
"Showing changed files for subvolume:",
2016-03-14 16:39:13 +01:00
" $target_vol->{PRINT} (gen=$target_vol->{node}{gen})",
2015-05-26 20:05:40 +02:00
"",
"Starting at creation generation of subvolume:",
2016-03-14 16:39:13 +01:00
" $src_vol->{PRINT} (cgen=$src_vol->{node}{cgen})",
2015-05-26 20:05:40 +02:00
"",
2016-03-14 16:39:13 +01:00
"This will show all files modified within generation range: [$lastgen..$target_vol->{node}{gen}]",
2015-05-26 20:05:40 +02:00
"Newest file generation (transid marker) was: $ret->{transid_marker}",
($ret->{parse_errors} ? "Parse errors: $ret->{parse_errors}" : undef),
],
legend => [
"+.. file accessed at offset 0 (at least once)",
".c. flags COMPRESS or COMPRESS|INLINE set (at least once)",
"..i flags INLINE or COMPRESS|INLINE set (at least once)",
"<count> file was modified in <count> generations",
"<size> file was modified for a total of <size> bytes",
]
);
2015-01-03 21:25:46 +01:00
my $files = $ret->{files};
# calculate the character offsets
2016-03-01 21:29:19 +01:00
my $total_len = 0;
2015-01-03 21:25:46 +01:00
my $len_charlen = 0;
my $gen_charlen = 0;
foreach (values %$files) {
my $len = length($_->{len});
2015-02-10 13:31:43 +01:00
my $gen = length(scalar(keys(%{$_->{gen}})));
2015-01-03 21:25:46 +01:00
$len_charlen = $len if($len > $len_charlen);
$gen_charlen = $gen if($gen > $gen_charlen);
2016-03-01 21:29:19 +01:00
$total_len += $_->{len};
2015-01-03 21:25:46 +01:00
}
# finally print the output
foreach my $name (sort keys %$files) {
print ($files->{$name}->{new} ? '+' : '.');
print ($files->{$name}->{flags}->{compress} ? 'c' : '.');
print ($files->{$name}->{flags}->{inline} ? 'i' : '.');
# make nice table
2015-02-10 13:31:43 +01:00
my $gens = scalar(keys(%{$files->{$name}->{gen}}));
2015-01-03 21:25:46 +01:00
my $len = $files->{$name}->{len};
print " " . (' ' x ($gen_charlen - length($gens))) . $gens;
print " " . (' ' x ($len_charlen - length($len))) . $len;
print " $name\n";
}
2016-03-01 21:29:19 +01:00
print "\nTotal size: $total_len bytes\n";
2014-12-14 21:29:22 +01:00
exit 0;
}
2014-12-13 13:52:43 +01:00
#
2015-01-20 19:18:38 +01:00
# parse config file
2014-12-13 13:52:43 +01:00
#
2015-01-17 13:14:47 +01:00
my $config = parse_config(@config_src);
2015-01-10 16:02:35 +01:00
unless($config) {
2014-12-13 15:15:58 +01:00
ERROR "Failed to parse configuration file";
2015-09-30 14:00:39 +02:00
exit 2;
2014-12-13 15:15:58 +01:00
}
2016-03-07 23:53:47 +01:00
unless(ref($config->{SUBSECTION}) eq "ARRAY") {
2016-03-07 19:20:15 +01:00
ERROR "No volumes defined in configuration file";
exit 2;
}
2015-01-20 19:18:38 +01:00
2016-06-07 16:17:02 +02:00
#
# try exclusive lock if set in config or command-line option
#
$lockfile //= config_key($config, "lockfile");
if(defined($lockfile) && (not $dryrun)) {
unless(open(LOCKFILE, ">>$lockfile")) {
# NOTE: the lockfile is never deleted by design
ERROR "Failed to open lock file '$lockfile': $!";
exit 3;
}
unless(flock(LOCKFILE, 6)) { # exclusive, non-blocking (LOCK_EX | LOCK_NB)
ERROR "Failed to take lock (another btrbk instance is running): $lockfile";
exit 3;
}
}
2015-01-20 19:18:38 +01:00
2016-04-16 01:09:17 +02:00
if($action_archive)
2016-04-07 15:33:32 +02:00
{
#
2016-04-16 01:09:17 +02:00
# archive (clone) tree
2016-04-07 15:33:32 +02:00
#
# NOTE: This is intended to work without a config file! The only
# thing used from the configuration is the SSH and transaction log
# stuff.
#
    #
    # Transaction logging to syslog: the "transaction_syslog" config
    # option takes a syslog facility name (e.g. "user" or "local5").
    # Only the messages generated inside sub action() are relevant for
    # syslog, so they are hijacked there and additionally sent to syslog
    # if needed. All output goes through print_formatted(), which
    # expects a file handle, so a file handle opened on a string is used
    # to keep the code changes minimal. Since syslog adds its own
    # timestamps, the "syslog" formatting pattern is similar to "tlog",
    # but omits the timestamp.
    #
2016-04-22 23:11:00 +02:00
init_transaction_log(config_key($config, "transaction_log"),
config_key($config, "transaction_syslog"));
2016-04-07 15:33:32 +02:00
my $src_url = $filter_args[0] || die;
2016-04-16 17:13:19 +02:00
my $archive_url = $filter_args[1] || die;
2016-04-07 15:33:32 +02:00
# FIXME: add command line options for preserve logic
$config->{SUBSECTION} = []; # clear configured subsections, we build them dynamically
2016-04-16 01:09:17 +02:00
my $src_root = vinfo($src_url, $config);
unless(vinfo_init_root($src_root, resolve_subdir => 1)) {
ERROR "Failed to fetch subvolume detail for '$src_root->{PRINT}'" . ($err ? ": $err" : "");
2016-04-07 15:33:32 +02:00
exit 1;
}
2016-04-16 17:13:19 +02:00
my $archive_root = vinfo($archive_url, $config);
unless(vinfo_init_root($archive_root, resolve_subdir => 1)) {
ERROR "Failed to fetch subvolume detail for '$archive_root->{PRINT}'" . ($err ? ": $err" : "");
2016-04-07 15:33:32 +02:00
exit 1;
}
my %name_uniq;
2016-04-16 01:09:17 +02:00
my @subvol_list = @{vinfo_subvol_list($src_root)};
2016-04-07 15:33:32 +02:00
my @sorted = sort { ($a->{subtree_depth} <=> $b->{subtree_depth}) || ($a->{SUBVOL_DIR} cmp $b->{SUBVOL_DIR}) } @subvol_list;
foreach my $vol (@sorted) {
next unless($vol->{node}{readonly});
2016-04-19 13:06:31 +02:00
my $snapshot_name = $vol->{node}{BTRBK_BASENAME};
2016-04-07 15:33:32 +02:00
unless(defined($snapshot_name)) {
WARN "Skipping subvolume (not a btrbk subvolume): $vol->{PRINT}";
next;
}
my $subvol_dir = $vol->{SUBVOL_DIR};
next if($name_uniq{"$subvol_dir/$snapshot_name"});
$name_uniq{"$subvol_dir/$snapshot_name"} = 1;
2016-04-16 17:13:19 +02:00
my $droot_url = $archive_url . ($subvol_dir eq "" ? "" : "/$subvol_dir");
2016-04-14 18:46:35 +02:00
my $sroot_url = $src_url . ($subvol_dir eq "" ? "" : "/$subvol_dir");
2016-04-16 01:09:17 +02:00
my $config_sroot = { CONTEXT => "archive_source",
PARENT => $config,
url => $sroot_url, # ABORTED() needs this
snapshot_name => $snapshot_name,
};
my $config_droot = { CONTEXT => "target",
PARENT => $config_sroot,
target_type => "send-receive", # macro_send_receive checks this
url => $droot_url, # ABORTED() needs this
};
$config_sroot->{SUBSECTION} = [ $config_droot ];
push(@{$config->{SUBSECTION}}, $config_sroot);
my $sroot = vinfo($sroot_url, $config_sroot);
2016-05-10 15:51:44 +02:00
vinfo_assign_config($sroot);
2016-04-07 15:33:32 +02:00
unless(vinfo_init_root($sroot, resolve_subdir => 1)) {
ABORTED($sroot, "Failed to fetch subvolume detail" . ($err ? ": $err" : ""));
2016-04-16 01:09:17 +02:00
WARN "Skipping archive source \"$sroot->{PRINT}\": $abrt";
2016-04-07 15:33:32 +02:00
next;
}
2016-04-16 01:09:17 +02:00
my $droot = vinfo($droot_url, $config_droot);
2016-05-10 15:51:44 +02:00
vinfo_assign_config($droot);
2016-04-07 15:33:32 +02:00
unless(vinfo_init_root($droot, resolve_subdir => 1)) {
2016-04-14 18:46:35 +02:00
DEBUG("Failed to fetch subvolume detail" . ($err ? ": $err" : ""));
unless(system_mkdir($droot)) {
ABORTED($droot, "Failed to create directory: $droot->{PRINT}/");
2016-04-16 01:09:17 +02:00
WARN "Skipping archive target \"$droot->{PRINT}\": $abrt";
2016-04-14 18:46:35 +02:00
next;
}
2016-04-16 17:13:19 +02:00
$droot->{SUBDIR_CREATED} = 1;
2016-04-14 18:46:35 +02:00
if($dryrun) {
# we need to fake this directory on dryrun
2016-04-16 17:13:19 +02:00
$droot->{node} = $archive_root->{node};
2016-04-14 18:46:35 +02:00
$droot->{NODE_SUBDIR} = $subvol_dir;
}
else {
# after directory is created, try to init again
unless(vinfo_init_root($droot, resolve_subdir => 1)) {
ABORTED($droot, "Failed to fetch subvolume detail" . ($err ? ": $err" : ""));
2016-04-16 01:09:17 +02:00
WARN "Skipping archive target \"$droot->{PRINT}\": $abrt";
2016-04-14 18:46:35 +02:00
next;
}
}
2016-04-07 15:33:32 +02:00
}
if(_is_child_of($droot->{node}->{TREE_ROOT}, $vol->{node}{uuid})) {
ERROR "Source and target subvolumes are on the same btrfs filesystem!";
exit 1;
}
}
2016-04-14 15:39:50 +02:00
my $schedule_results = [];
2016-04-19 18:53:44 +02:00
my $aborted;
2016-04-16 01:09:17 +02:00
foreach my $sroot (vinfo_subsection($config, 'archive_source')) {
2016-04-19 18:53:44 +02:00
if($aborted) {
# abort all subsequent sources on any abort (we don't want to go on hammering on "disk full" errors)
ABORTED($sroot, $aborted);
next;
}
2016-04-07 15:33:32 +02:00
foreach my $droot (vinfo_subsection($sroot, 'target')) {
my $snapshot_name = config_key($droot, "snapshot_name") // die;
INFO "Archiving subvolumes: $sroot->{PRINT}/${snapshot_name}.*";
2016-04-16 01:09:17 +02:00
macro_archive_target($sroot, $droot, $snapshot_name, { results => $schedule_results });
2016-04-16 00:45:16 +02:00
if(ABORTED($droot)) {
# also abort $sroot
2016-04-19 18:53:44 +02:00
$aborted = "At least one target aborted earlier";
ABORTED($sroot, $aborted);
2016-04-16 00:45:16 +02:00
WARN "Skipping archiving of \"$sroot->{PRINT}/\": $abrt";
last;
}
2016-04-07 15:33:32 +02:00
}
}
2016-04-19 18:53:44 +02:00
my $del_schedule_results = [];
if($preserve_backups || $resume_only) {
INFO "Preserving all archives (option \"-p\" or \"-r\" present)";
}
else
{
foreach my $sroot (vinfo_subsection($config, 'archive_source')) {
foreach my $droot (vinfo_subsection($sroot, 'target')) {
my $snapshot_name = config_key($droot, "snapshot_name") // die;
INFO "Cleaning archive: $droot->{PRINT}/${snapshot_name}.*";
macro_delete($droot, "", $snapshot_name, $droot,
{ preserve => config_preserve_hash($droot, "archive"),
results => $del_schedule_results,
result_hints => { topic => "archive", root_path => $droot->{PATH} },
},
commit => config_key($droot, "btrfs_commit_delete"),
type => "delete_archive",
);
}
}
}
2016-04-07 15:33:32 +02:00
my $exit_status = exit_status($config);
my $time_elapsed = time - $start_time;
INFO "Completed within: ${time_elapsed}s (" . localtime(time) . ")";
action("finished",
status => $exit_status ? "partial" : "success",
duration => $time_elapsed,
message => $exit_status ? "At least one backup task aborted" : undef,
);
close_transaction_log();
unless($quiet)
{
2016-04-14 15:39:50 +02:00
# print scheduling results
if($print_schedule) {
my @data = map { { %$_, vinfo_prefixed_keys("", $_->{value}) }; } @$schedule_results;
2016-04-16 01:09:17 +02:00
print_formatted("schedule", \@data, title => "ARCHIVE SCHEDULE");
2016-04-14 15:39:50 +02:00
print "\n";
}
2016-04-19 18:53:44 +02:00
if($print_schedule && not ($preserve_backups || $resume_only)) {
my @data = map { { %$_, vinfo_prefixed_keys("", $_->{value}) }; } @$del_schedule_results;
print_formatted("schedule", \@data, title => "DELETE SCHEDULE");
print "\n";
}
2016-04-07 15:33:32 +02:00
# print summary
$output_format ||= "custom";
if($output_format eq "custom")
{
my @unrecoverable;
my @out;
2016-04-19 18:53:44 +02:00
foreach my $sroot (vinfo_subsection($config, 'archive_source', 1)) {
2016-04-07 15:33:32 +02:00
foreach my $droot (vinfo_subsection($sroot, 'target', 1)) {
my @subvol_out;
2016-04-16 17:13:19 +02:00
if($droot->{SUBDIR_CREATED}) {
push @subvol_out, "++. $droot->{PRINT}/";
}
2016-04-07 15:33:32 +02:00
foreach(@{$droot->{SUBVOL_RECEIVED} // []}) {
my $create_mode = "***";
$create_mode = ">>>" if($_->{parent});
$create_mode = "!!!" if($_->{ERROR});
push @subvol_out, "$create_mode $_->{received_subvolume}->{PRINT}";
}
2016-04-19 18:53:44 +02:00
foreach(sort { $a->{PATH} cmp $b->{PATH} } @{$droot->{SUBVOL_DELETED} // []}) {
push @subvol_out, "--- $_->{PRINT}";
}
if((ABORTED($droot) && (ABORTED($droot) ne "USER_SKIP")) ||
(ABORTED($sroot) && (ABORTED($sroot) ne "USER_SKIP"))) {
push @subvol_out, "!!! Target \"$droot->{PRINT}\" aborted: " . (ABORTED($droot) || ABORTED($sroot));
2016-04-07 15:33:32 +02:00
}
if($droot->{CONFIG}->{UNRECOVERABLE}) {
push(@unrecoverable, $droot->{CONFIG}->{UNRECOVERABLE});
}
if(@subvol_out) {
push @out, "$sroot->{PRINT}/$sroot->{CONFIG}->{snapshot_name}.*", @subvol_out, "";
}
}
}
2016-04-16 01:09:17 +02:00
print_header(title => "Archive Summary",
2016-04-07 15:33:32 +02:00
time => $start_time,
legend => [
2016-04-16 17:13:19 +02:00
"++. created directory",
2016-04-19 18:53:44 +02:00
"--- deleted subvolume",
2016-04-07 15:33:32 +02:00
"*** received subvolume (non-incremental)",
">>> received subvolume (incremental)",
],
);
print join("\n", @out);
2016-04-16 19:25:46 +02:00
if($exit_status || scalar(@unrecoverable)) {
2016-04-07 15:33:32 +02:00
print "\nNOTE: Some errors occurred, which may result in missing backups!\n";
print "Please check warning and error messages above.\n";
print join("\n", @unrecoverable) . "\n" if(@unrecoverable);
}
if($dryrun) {
print "\nNOTE: Dryrun was active, none of the operations above were actually executed!\n";
}
}
else
{
# print action log (without transaction start messages)
my @data = grep { $_->{status} ne "starting" } @transaction_log;
print_formatted("transaction", \@data, title => "TRANSACTION LOG");
}
}
exit $exit_status;
}
2016-03-16 13:25:19 +01:00
#
# expand subvolume globs (wildcards)
#
foreach my $config_vol (@{$config->{SUBSECTION}}) {
die unless($config_vol->{CONTEXT} eq "volume");
# read-in subvolume list (and expand globs) only if needed
next unless(grep defined($_->{GLOB_CONTEXT}), @{$config_vol->{SUBSECTION}});
my $sroot = vinfo($config_vol->{url}, $config_vol);
unless(vinfo_init_root($sroot)) {
ABORTED($sroot, "Failed to fetch subvolume detail" . ($err ? ": $err" : ""));
WARN "Skipping volume \"$sroot->{PRINT}\": $abrt";
next;
}
my @vol_subsection_expanded;
foreach my $config_subvol (@{$config_vol->{SUBSECTION}}) {
die unless($config_subvol->{CONTEXT} eq "subvolume");
if($config_subvol->{GLOB_CONTEXT}) {
my $globs = $config_subvol->{rel_path};
INFO "Expanding wildcards: $sroot->{PRINT}/$globs";
# support "*some*file*", "*/*"
my $match = join('[^\/]*', map(quotemeta($_), split(/\*+/, $globs, -1)));
TRACE "translated globs \"$globs\" to regex \"$match\"";
my $expand_count = 0;
2016-04-03 20:46:29 +02:00
foreach my $vol (@{vinfo_subvol_list($sroot, sort => 'path')})
2016-03-16 13:25:19 +01:00
{
if($vol->{node}{readonly}) {
TRACE "skipping readonly subvolume: $vol->{PRINT}";
next;
}
unless($vol->{SUBVOL_PATH} =~ /^$match$/) {
TRACE "skipping non-matching subvolume: $vol->{PRINT}";
next;
}
INFO "Found source subvolume: $vol->{PRINT}";
my %conf = ( %$config_subvol,
rel_path_glob => $globs,
rel_path => $vol->{SUBVOL_PATH},
url => $vol->{URL},
snapshot_name => $vol->{NAME}, # snapshot_name defaults to subvolume name
);
# deep copy of target subsection
my @subsection_copy = map { { %$_, PARENT => \%conf }; } @{$config_subvol->{SUBSECTION}};
$conf{SUBSECTION} = \@subsection_copy;
push @vol_subsection_expanded, \%conf;
$expand_count += 1;
}
unless($expand_count) {
WARN "No subvolumes found matching: $sroot->{PRINT}/$globs";
}
}
else {
push @vol_subsection_expanded, $config_subvol;
}
}
$config_vol->{SUBSECTION} = \@vol_subsection_expanded;
}
2016-04-18 16:40:49 +02:00
TRACE(Data::Dumper->Dump([$config], ["config"])) if($do_dumper);
2016-03-16 13:25:19 +01:00
2016-03-07 17:35:17 +01:00
#
# create vinfo nodes (no readin yet)
#
2016-03-07 17:46:53 +01:00
foreach my $config_vol (@{$config->{SUBSECTION}}) {
die unless($config_vol->{CONTEXT} eq "volume");
2016-03-07 17:35:17 +01:00
my $sroot = vinfo($config_vol->{url}, $config_vol);
2016-05-10 15:51:44 +02:00
vinfo_assign_config($sroot);
2016-03-07 17:46:53 +01:00
foreach my $config_subvol (@{$config_vol->{SUBSECTION}}) {
die unless($config_subvol->{CONTEXT} eq "subvolume");
2016-05-10 15:51:44 +02:00
my $svol = vinfo_child($sroot, $config_subvol->{rel_path}, $config_subvol);
vinfo_assign_config($svol);
2016-03-07 17:46:53 +01:00
foreach my $config_target (@{$config_subvol->{SUBSECTION}}) {
die unless($config_target->{CONTEXT} eq "target");
2016-03-07 17:35:17 +01:00
my $droot = vinfo($config_target->{url}, $config_target);
vinfo_assign_config($droot);
}
}
}
#
# filter subvolumes matching command line arguments
#
if(($action_run || $action_clean || $action_resolve || $action_usage || $action_list || $action_config_print) && scalar(@filter_args))
{
my %match;
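# A filter argument can match a volume URL, a subvolume URL, a target URL
# (optionally suffixed with "/<snapshot_name>"), or a configured group name;
# %match records which filter arguments matched, and what they matched.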
foreach my $sroot (vinfo_subsection($config, 'volume', 1)) {
my $vol_url = $sroot->{URL};
my $found_vol = 0;
foreach my $filter (@filter_args) {
if(($vol_url eq $filter) || (map { ($filter eq $_) || () } @{$sroot->{CONFIG}->{group}})) {
TRACE "filter argument \"$filter\" matches volume: $vol_url";
$match{$filter} = ($vol_url eq $filter) ? "volume=$sroot->{PRINT}" : "group=$filter";
$found_vol = 1;
# last; # need to cycle through all filter_args for correct %match
}
}
next if($found_vol);
my @filter_subvol;
foreach my $svol (vinfo_subsection($sroot, 'subvolume', 1)) {
my $subvol_url = $svol->{URL};
my $found_subvol = 0;
foreach my $filter (@filter_args) {
if(($subvol_url eq $filter) || (map { ($filter eq $_) || () } @{$svol->{CONFIG}->{group}})) {
TRACE "filter argument \"$filter\" matches subvolume: $subvol_url";
$match{$filter} = ($subvol_url eq $filter) ? "subvolume=$svol->{PRINT}" : "group=$filter";
$found_subvol = 1;
$found_vol = 1;
# last; # need to cycle through all filter_args for correct %match
}
}
next if($found_subvol);
my $snapshot_name = config_key($svol, "snapshot_name") // die;
foreach my $droot (vinfo_subsection($svol, 'target', 1)) {
my $target_url = $droot->{URL};
my $found_target = 0;
foreach my $filter (@filter_args) {
if(($filter eq $target_url) ||
($filter eq "$target_url/$snapshot_name") ||
(map { ($filter eq $_) || () } @{$droot->{CONFIG}->{group}})) {
TRACE "filter argument \"$filter\" matches target: $target_url";
$match{$filter} = ($target_url eq $filter) ? "target=$droot->{PRINT}" : "group=$filter";
$found_target = 1;
$found_subvol = 1;
$found_vol = 1;
# last; # need to cycle through all filter_args for correct %match
}
}
unless($found_target) {
DEBUG "No match on filter command line argument, skipping target: $target_url";
ABORTED($droot, "USER_SKIP");
}
}
unless($found_subvol) {
DEBUG "No match on filter command line argument, skipping subvolume: $subvol_url";
ABORTED($svol, "USER_SKIP");
}
}
unless($found_vol) {
DEBUG "No match on filter command line argument, skipping volume: $vol_url";
ABORTED($sroot, "USER_SKIP");
}
}
# make sure all args have a match
my @nomatch = map { $match{$_} ? () : $_ } @filter_args;
if(@nomatch) {
foreach(@nomatch) {
ERROR "Command line argument does not match any volume, subvolume, target or group declaration: $_";
}
exit 2;
}
$config->{CMDLINE_FILTER_LIST} = [ values %match ];
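# Remember what the filter arguments resolved to; this list is reused later,
# e.g. for the "Total (...)" line of the stats output.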
}
if($action_usage)
{
#
# print filesystem information
#
my @data;
my %processed;
foreach my $sroot (vinfo_subsection($config, 'volume')) {
unless($processed{$sroot->{URL}}) {
my $usage = btrfs_filesystem_usage($sroot) // {};
push @data, { %$usage,
type => "source",
vinfo_prefixed_keys("", $sroot),
};
$processed{$sroot->{URL}} = 1;
}
}
foreach my $sroot (vinfo_subsection($config, 'volume')) {
foreach my $svol (vinfo_subsection($sroot, 'subvolume')) {
foreach my $droot (vinfo_subsection($svol, 'target')) {
unless($processed{$droot->{URL}}) {
my $usage = btrfs_filesystem_usage($droot) // {};
push @data, { %$usage,
type => "target",
vinfo_prefixed_keys("", $droot),
};
$processed{$droot->{URL}} = 1;
}
}
}
}
print_formatted("usage", \@data);
exit exit_status($config);
}
if($action_config_print)
{
my $resolve = ($action_config_print eq "print-all");
#
# print configuration lines, machine readable
#
my @out;
push @out, config_dump_keys($config, skip_defaults => 1);
foreach my $sroot (vinfo_subsection($config, 'volume')) {
push @out, "\nvolume $sroot->{URL}";
push @out, config_dump_keys($sroot, prefix => "\t", resolve => $resolve);
foreach my $svol (vinfo_subsection($sroot, 'subvolume')) {
push @out, ""; # newline
push @out, "\t# subvolume $svol->{CONFIG}->{rel_path_glob}" if(defined($svol->{CONFIG}->{rel_path_glob}));
push @out, "\tsubvolume $svol->{SUBVOL_PATH}";
push @out, config_dump_keys($svol, prefix => "\t\t", resolve => $resolve);
foreach my $droot (vinfo_subsection($svol, 'target')) {
push @out, "\n\t\ttarget $droot->{CONFIG}->{target_type} $droot->{URL}";
push @out, config_dump_keys($droot, prefix => "\t\t\t", resolve => $resolve);
}
}
}
print_header(title => "Configuration Dump",
config => $config,
time => $start_time,
);
print join("\n", @out) . "\n";
exit exit_status($config);
}
if($action_list)
{
my @vol_data;
my @subvol_data;
my @target_data;
my @mixed_data;
my %target_uniq;
#
# print configuration lines, machine readable
#
foreach my $sroot (vinfo_subsection($config, 'volume')) {
my $volh = { vinfo_prefixed_keys("volume", $sroot) };
push @vol_data, $volh;
foreach my $svol (vinfo_subsection($sroot, 'subvolume')) {
my $subvolh = { %$volh,
vinfo_prefixed_keys("source", $svol),
snapshot_path => $sroot->{PATH} . (config_key($svol, "snapshot_dir", prefix => '/') // ""),
snapshot_name => config_key($svol, "snapshot_name"),
snapshot_preserve => format_preserve_matrix(config_preserve_hash($svol, "snapshot")),
};
push @subvol_data, $subvolh;
my $found = 0;
foreach my $droot (vinfo_subsection($svol, 'target')) {
my $targeth = { %$subvolh,
vinfo_prefixed_keys("target", $droot),
target_preserve => format_preserve_matrix(config_preserve_hash($droot, "target")),
};
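# When listing targets, print each target URL only once, even if it is
# referenced by multiple subvolume sections.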
if($action_list eq "target") {
next if($target_uniq{$droot->{URL}});
$target_uniq{$droot->{URL}} = 1;
}
push @target_data, $targeth;
push @mixed_data, $targeth;
$found = 1;
}
# make sure the subvolume is always printed (even if it has no targets)
push @mixed_data, $subvolh unless($found);
}
}
if($action_list eq "volume") {
print_formatted("list_volume", \@vol_data);
}
elsif($action_list eq "source") {
print_formatted("list_source", \@subvol_data);
}
elsif($action_list eq "target") {
print_formatted("list_target", \@target_data);
}
else {
# default format
print_formatted("list", \@mixed_data);
}
exit exit_status($config);
}
#
# fill vinfo hash, basic checks on configuration
#
# read volume btrfs tree, and make sure the subvolumes exist
foreach my $sroot (vinfo_subsection($config, 'volume')) {
DEBUG "Initializing volume section: $sroot->{PRINT}";
unless(vinfo_init_root($sroot)) {
ABORTED($sroot, "Failed to fetch subvolume detail" . ($err ? ": $err" : ""));
WARN "Skipping volume \"$sroot->{PRINT}\": $abrt";
next;
}
foreach my $svol (vinfo_subsection($sroot, 'subvolume')) {
DEBUG "Initializing subvolume section: $svol->{PRINT}";
unless(vinfo_init_root($svol)) {
ABORTED($svol, "Failed to fetch subvolume detail" . ($err ? ": $err" : ""));
WARN "Skipping subvolume \"$svol->{PRINT}\": $abrt";
next;
}
if($svol->{node}{uuid} && _is_child_of($sroot->{node}, $svol->{node}{uuid})) {
DEBUG "Found \"$svol->{PRINT}\" (id=$svol->{node}{id}) in btrfs subtree of: $sroot->{PRINT}";
} else {
ABORTED($svol, "Not a child subvolume of: $sroot->{PRINT}");
WARN "Skipping subvolume \"$svol->{PRINT}\": $abrt";
next;
}
}
}
# read target btrfs tree
foreach my $sroot (vinfo_subsection($config, 'volume')) {
foreach my $svol (vinfo_subsection($sroot, 'subvolume')) {
foreach my $droot (vinfo_subsection($svol, 'target')) {
DEBUG "Initializing target section: $droot->{PRINT}";
my $target_type = $droot->{CONFIG}->{target_type} || die;
if($target_type eq "send-receive")
{
unless(vinfo_init_root($droot, resolve_subdir => 1)) {
ABORTED($droot, "Failed to fetch subvolume detail" . ($err ? ": $err" : ""));
WARN "Skipping target \"$droot->{PRINT}\": $abrt";
next;
}
}
elsif($target_type eq "raw")
{
DEBUG "Creating raw subvolume list: $droot->{PRINT}";
$droot->{SUBVOL_LIST} = [];
my $ret = run_cmd(
cmd => [ 'find', $droot->{PATH} . '/', '-maxdepth', '1', '-type', 'f' ],
rsh => vinfo_rsh($droot),
# note: use something like this to get the real (link resolved) path
# cmd => [ "find", $droot->{PATH} . '/', "-maxdepth", "1", "-name", "$snapshot_basename.\*.raw\*", '-printf', '%f\0', '-exec', 'realpath', '-z', '{}', ';' ],
non_destructive => 1,
);
unless(defined($ret)) {
ABORTED($droot, "Failed to list files from: $droot->{PATH}");
WARN "Skipping target \"$droot->{PRINT}\": $abrt";
next;
}
my $snapshot_basename = config_key($svol, "snapshot_name") // die;
my %child_uuid_list;
foreach (split("\n", $ret))
{
unless(/^($file_match)$/) {
DEBUG "Skipping non-parseable file: \"$_\"";
next;
}
my $file = $1; # untaint argument
unless($file =~ s/^\Q$droot->{PATH}\E\///) {
ABORTED($droot, "Unexpected result from 'find': file \"$file\" is not under \"$droot->{PATH}\"");
last;
}
# Set btrfs subvolume information (received_uuid, parent_uuid) from filename info.
#
# NOTE: remote_parent_uuid in BTRBK_RAW is the "parent of the source subvolume", NOT the
# "parent of the received subvolume".
my $subvol = vinfo_child($droot, $file);
unless(vinfo_inject_child($droot, $subvol, { TARGET_TYPE => 'raw' }))
{
DEBUG "Skipping file (filename scheme mismatch): \"$file\"";
next;
}
unless(defined($subvol->{node}{BTRBK_RAW}) &&
($snapshot_basename eq $subvol->{node}{BTRBK_BASENAME}))
{
# vinfo_inject_child() pushes all "valid" subvols to $droot->{SUBVOL_LIST},
# remove the non-matching ones again.
# If we don't remove them from the list, they will also
# be taken into account for incremental backups!
pop @{$droot->{SUBVOL_LIST}};
DEBUG "Skipping file (base name != \"$snapshot_basename\"): \"$file\"";
next;
}
# incomplete raw fakes get same semantics as real subvolumes (readonly=0, received_uuid='-')
$subvol->{node}{received_uuid} = ($subvol->{node}{BTRBK_RAW}->{incomplete} ? '-' : $subvol->{node}{BTRBK_RAW}->{received_uuid});
$subvol->{node}{parent_uuid} = undef; # correct value gets inserted below
$subvol->{node}{readonly} = ($subvol->{node}{BTRBK_RAW}->{incomplete} ? 0 : 1);
if($subvol->{node}{BTRBK_RAW}->{remote_parent_uuid} ne '-') {
$child_uuid_list{$subvol->{node}{BTRBK_RAW}->{remote_parent_uuid}} //= [];
push @{$child_uuid_list{$subvol->{node}{BTRBK_RAW}->{remote_parent_uuid}}}, $subvol;
}
}
if(ABORTED($droot)) {
WARN "Skipping target \"$droot->{PRINT}\": " . ABORTED($droot);
next;
}
my @subvol_list = @{vinfo_subvol_list($droot, sort => 'path')};
DEBUG "Found " . scalar(@subvol_list) . " raw subvolume backups of: $svol->{PRINT}";
# Make sure that incremental backup chains are never broken:
foreach my $subvol (@subvol_list)
{
# If restoring a backup from raw btrfs images (using "incremental yes|strict"):
# "btrfs send -p parent source > svol.btrfs", the backups
# on the target will get corrupted (unusable!) as soon as
# any file in the chain gets deleted.
#
# We need to make sure btrbk will NEVER delete those:
# - svol.<timestamp>--<received_uuid_0>.btrfs : root (full) image
# - svol.<timestamp>--<received_uuid-n>[@<received_uuid_n-1>].btrfs : incremental image
foreach my $child (@{$child_uuid_list{$subvol->{node}{received_uuid}}}) {
$child->{node}{parent_uuid} = $subvol->{node}{uuid};
DEBUG "Found parent/child partners, forcing preserve of: \"$subvol->{PRINT}\", \"$child->{PRINT}\"";
$subvol->{node}{FORCE_PRESERVE} = "preserve forced: parent of another raw target";
$child->{node}{FORCE_PRESERVE} ||= "preserve forced: child of another raw target";
}
# For now, always preserve all raw files.
# TODO: remove this line as soon as incremental rotation is implemented.
$subvol->{node}{FORCE_PRESERVE} = "preserve forced: raw target";
}
# TRACE(Data::Dumper->Dump([\@subvol_list], ["vinfo_raw_subvol_list{$droot}"]));
}
if($config_override{FAILSAFE_PRESERVE}) {
ABORTED($droot, $config_override{FAILSAFE_PRESERVE});
WARN "Skipping target \"$droot->{PRINT}\": $abrt";
}
}
}
}
# check for duplicate snapshot locations
my %snapshot_check;
my %backup_check;
foreach my $sroot (vinfo_subsection($config, 'volume')) {
foreach my $svol (vinfo_subsection($sroot, 'subvolume')) {
# check for duplicate snapshot locations
my $snapdir_ts = config_key($svol, "snapshot_dir", postfix => '/') // "";
my $snapshot_basename = config_key($svol, "snapshot_name") // die;
my $snapshot_target = $sroot->{URL_PREFIX} . ($realpath_cache{$sroot->{URL}} // $sroot->{PATH}) . '/' . $snapdir_ts . $snapshot_basename;
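# $snapshot_target is the fully resolved snapshot location, e.g. (assuming a
# hypothetical setup with snapshot_dir "btrbk_snapshots" and snapshot_name
# "home"): ssh://example.com/mnt/btr_pool/btrbk_snapshots/home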
if(my $prev = $snapshot_check{$snapshot_target}) {
ERROR "Subvolume \"$prev\" and \"$svol->{PRINT}\" will create same snapshot: $snapshot_target";
ERROR "Please fix \"snapshot_name\" configuration options!";
exit 1;
}
$snapshot_check{$snapshot_target} = $svol->{PRINT};
foreach my $droot (vinfo_subsection($svol, 'target')) {
# check for duplicate backup target locations
my $snapshot_backup_target = $droot->{URL_PREFIX} . ($realpath_cache{$droot->{URL}} // $droot->{PATH}) . '/' . $snapshot_basename;
if(my $prev = $backup_check{$snapshot_backup_target}) {
ERROR "Subvolume \"$prev\" and \"$svol->{PRINT}\" will create same backup target: $snapshot_backup_target";
ERROR "Please fix \"snapshot_name\" or \"target\" configuration options!";
exit 1;
}
$backup_check{$snapshot_backup_target} = $svol->{PRINT};
}
}
}
if($action_origin)
{
#
# print origin information
#
my $url = $filter_args[0] || die;
my $vol = vinfo($url, $config);
unless(vinfo_init_root($vol)) {
ERROR "Failed to fetch subvolume detail for: $url" . ($err ? ": $err" : "");
exit 1;
}
if($vol->{node}{is_root}) {
ERROR "Subvolume is btrfs root: $url\n";
exit 1;
}
my $lines = [];
_origin_tree("", $vol->{node}, $lines);
$output_format ||= "custom";
if($output_format eq "custom") {
print_header(title => "Origin Tree",
config => $config,
time => $start_time,
legend => [
"^-- : parent subvolume",
"newline : received-from relationship with subvolume (identical content)",
]
);
print join("\n", map { $_->{tree} } @$lines) . "\n";
}
else {
print_formatted('origin_tree', $lines );
}
exit 0;
}
if($action_resolve)
{
my @data;
my @stats_data;
my $stats_snapshots_total = 0;
my $stats_backups_total = 0;
my $stats_backups_total_incomplete = 0;
my $stats_backups_total_orphaned = 0;
if($action_resolve eq "snapshots")
{
#
# print all snapshots and their receive targets
#
foreach my $sroot (vinfo_subsection($config, 'volume')) {
foreach my $svol (vinfo_subsection($sroot, 'subvolume')) {
my $snapshot_name = config_key($svol, "snapshot_name") // die;
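# A snapshot is reported as "up-to-date" if its creation generation (cgen)
# equals the source subvolume's current generation, i.e. the source has not
# changed since the snapshot was taken.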
foreach my $snapshot (sort { $a->{node}{cgen} <=> $b->{node}{cgen} } get_snapshot_children($sroot, $svol)) {
my $snapshot_data = { type => "snapshot",
status => ($snapshot->{node}{cgen} == $svol->{node}{gen}) ? "up-to-date" : undef,
vinfo_prefixed_keys("source", $svol),
vinfo_prefixed_keys("snapshot", $snapshot),
snapshot_name => $snapshot_name,
};
my $found = 0;
foreach my $droot (vinfo_subsection($svol, 'target')) {
foreach (sort { $a->{SUBVOL_PATH} cmp $b->{SUBVOL_PATH} } get_receive_targets($droot, $snapshot)) {
push @data, { %$snapshot_data,
type => "received",
vinfo_prefixed_keys("target", $_),
};
$found = 1;
}
}
push @data, $snapshot_data unless($found);
}
}
}
}
elsif(($action_resolve eq "backups") || ($action_resolve eq "stats"))
{
#
# print all targets and their corresponding source snapshots
#
foreach my $sroot (vinfo_subsection($config, 'volume')) {
foreach my $svol (vinfo_subsection($sroot, 'subvolume')) {
my $snapshot_name = config_key($svol, "snapshot_name") // die;
my @snapshot_children = get_snapshot_children($sroot, $svol);
my $stats_snapshot_uptodate = "";
foreach my $snapshot (@snapshot_children) {
if($snapshot->{node}{cgen} == $svol->{node}{gen}) {
$stats_snapshot_uptodate = " (up-to-date)";
last;
}
}
push @stats_data, [ $svol->{PRINT}, sprintf("%4u snapshots$stats_snapshot_uptodate", scalar(@snapshot_children)) ];
$stats_snapshots_total += scalar(@snapshot_children); # NOTE: this adds ALL snapshot children under $sroot (not only the ones created by btrbk!)
foreach my $droot (vinfo_subsection($svol, 'target')) {
my $stats_received = 0;
my $stats_orphaned = 0;
my $stats_incomplete = 0;
foreach my $target_vol (@{vinfo_subvol_list($droot, sort => 'path')}) {
my $parent_snapshot;
my $incomplete_backup;
foreach (@snapshot_children) {
if($target_vol->{node}{received_uuid} eq '-') {
# incomplete received (garbled) subvolumes have no received_uuid (as of btrfs-progs v4.3.1).
# a subvolume in droot matching our naming is considered incomplete if received_uuid is not set!
$parent_snapshot = undef;
$incomplete_backup = 1;
last;
}
if($_->{node}{uuid} eq $target_vol->{node}{received_uuid}) {
$parent_snapshot = $_;
last;
}
}
if($parent_snapshot) {
$stats_received++;
push @data, { type => "received",
vinfo_prefixed_keys("target", $target_vol),
vinfo_prefixed_keys("snapshot", $parent_snapshot),
vinfo_prefixed_keys("source", $svol),
status => ($parent_snapshot->{node}{cgen} == $svol->{node}{gen}) ? "up-to-date" : undef,
};
}
else {
# don't display all subvolumes in $droot, only the ones matching snapshot_name
if($target_vol->{btrbk_direct_leaf} && ($target_vol->{node}{BTRBK_BASENAME} eq $snapshot_name)) {
if($incomplete_backup) { $stats_incomplete++; } else { $stats_orphaned++; }
push @data, { type => "received",
# suppress "orphaned" status here (snapshot column is empty anyways)
# status => ($incomplete_backup ? "incomplete" : "orphaned"),
status => ($incomplete_backup ? "incomplete" : undef),
vinfo_prefixed_keys("target", $target_vol),
vinfo_prefixed_keys("source", $svol),
};
}
else {
DEBUG "ignoring subvolume with non-matching snapshot_name";
}
}
}
my $stats_total = $stats_received + $stats_incomplete + $stats_orphaned;
$stats_backups_total += $stats_total;
$stats_backups_total_incomplete += $stats_incomplete;
$stats_backups_total_orphaned += $stats_orphaned;
my @stats_detail;
push @stats_detail, "$stats_orphaned orphaned" if($stats_orphaned);
push @stats_detail, "$stats_incomplete incomplete" if($stats_incomplete);
my $stats_detail_print = join(', ', @stats_detail);
$stats_detail_print = " ($stats_detail_print)" if($stats_detail_print);
push @stats_data, [ "^-- $droot->{PRINT}/$snapshot_name.*", sprintf("%4u backups$stats_detail_print", $stats_total) ];
}
}
}
}
elsif($action_resolve eq "latest")
{
#
# print latest common
#
foreach my $sroot (vinfo_subsection($config, 'volume')) {
foreach my $svol (vinfo_subsection($sroot, 'subvolume')) {
my $found = 0;
foreach my $droot (vinfo_subsection($svol, 'target')) {
my ($latest_common_src, $latest_common_target) = get_latest_common($sroot, $svol, $droot);
if ($latest_common_src && $latest_common_target) {
push @data, { type => "latest_common",
status => ($latest_common_src->{node}{cgen} == $svol->{node}{gen}) ? "up-to-date" : undef,
vinfo_prefixed_keys("source", $svol),
vinfo_prefixed_keys("snapshot", $latest_common_src),
vinfo_prefixed_keys("target", $latest_common_target),
};
$found = 1;
}
}
unless($found) {
my $latest_snapshot = get_latest_snapshot_child($sroot, $svol);
push @data, { type => "latest_snapshot",
status => ($latest_snapshot->{node}{cgen} == $svol->{node}{gen}) ? "up-to-date" : undef,
vinfo_prefixed_keys("source", $svol),
vinfo_prefixed_keys("snapshot", $latest_snapshot), # all unset if no $latest_snapshot
};
}
}
}
}
else {
die;
}
if($action_resolve eq "stats") {
print_header(title => "Statistics",
config => $config,
time => $start_time,
legend => [
"up-to-date: latest snapshot is up to date with parent subvolume",
"orphaned : parent snapshot was deleted (by snapshot_preserve policy)",
],
);
print_table(\@stats_data, " ");
print "\n";
my $stats_filter = $config->{CMDLINE_FILTER_LIST} ? join("; ", @{$config->{CMDLINE_FILTER_LIST}}) : "";
my @stats_total_detail;
push @stats_total_detail, "$stats_backups_total_orphaned orphaned" if($stats_backups_total_orphaned);
push @stats_total_detail, "$stats_backups_total_incomplete incomplete" if($stats_backups_total_incomplete);
my $stats_total_detail_print = join(', ', @stats_total_detail);
$stats_total_detail_print = " ($stats_total_detail_print)" if($stats_total_detail_print);
print "Total" . ($stats_filter ? " ($stats_filter)" : "") . ":\n";
my $maxlen = ($stats_snapshots_total > $stats_backups_total) ? length($stats_snapshots_total) : length($stats_backups_total);
printf("%" . $maxlen . "u snapshots\n", $stats_snapshots_total);
printf("%" . $maxlen . "u backups$stats_total_detail_print\n", $stats_backups_total);
}
else {
print_formatted("resolved", \@data);
}
exit exit_status($config);
}
if($action_clean)
{
#
# identify and delete incomplete backups
#
# Transaction logging to syslog: the "transaction_syslog" configuration
# option can be set to a syslog facility name (e.g. "user" or "local5");
# messages generated inside sub action() are then also sent to syslog.
# All output goes through print_formatted(), which expects a file handle,
# so a handle opened on a string is used to capture it with minimal code
# changes. The "syslog" formatting pattern is similar to "tlog", but omits
# the timestamp, since syslog adds its own.
init_transaction_log(config_key($config, "transaction_log"),
config_key($config, "transaction_syslog"));
my @out;
foreach my $sroot (vinfo_subsection($config, 'volume')) {
foreach my $svol (vinfo_subsection($sroot, 'subvolume')) {
my $snapshot_name = config_key($svol, "snapshot_name") // die;
foreach my $droot (vinfo_subsection($svol, 'target')) {
my $target_type = $droot->{CONFIG}->{target_type} || die;
INFO "Cleaning incomplete backups in: $droot->{PRINT}/$snapshot_name.*";
push @out, "$droot->{PRINT}/$snapshot_name.*";
my @delete;
foreach my $target_vol (@{vinfo_subvol_list($droot, sort => 'path')}) {
# incomplete received (garbled) subvolumes have no received_uuid (as of btrfs-progs v4.3.1).
# a subvolume in droot matching our naming is considered incomplete if received_uuid is not set!
next unless($target_vol->{btrbk_direct_leaf} && ($target_vol->{node}{BTRBK_BASENAME} eq $snapshot_name));
if($target_vol->{node}{received_uuid} eq '-') {
DEBUG "Found incomplete target subvolume: $target_vol->{PRINT}";
push(@delete, $target_vol);
push @out, "--- $target_vol->{PRINT}";
}
}
my $ret;
if($target_type eq "raw") {
if(scalar(@delete)) {
DEBUG "[raw] delete:";
DEBUG "[raw] file: $_->{PRINT}" foreach(@delete);
$ret = run_cmd({
cmd => ['rm', (map { $_->{PATH} } @delete) ],
rsh => vinfo_rsh($droot),
});
} else {
$ret = 0;
}
}
else {
$ret = btrfs_subvolume_delete(\@delete, commit => config_key($droot, "btrfs_commit_delete"), type => "delete_garbled");
}
if(defined($ret)) {
INFO "Deleted $ret incomplete backups in: $droot->{PRINT}/$snapshot_name.*";
$droot->{SUBVOL_DELETED} //= [];
push @{$droot->{SUBVOL_DELETED}}, @delete;
}
else {
ABORTED($droot, "Failed to delete incomplete target subvolume");
push @out, "!!! Target \"$droot->{PRINT}\" aborted: $abrt";
}
push(@out, "<no_action>") unless(scalar(@delete));
push(@out, "");
}
}
}
my $exit_status = exit_status($config);
my $time_elapsed = time - $start_time;
INFO "Completed within: ${time_elapsed}s (" . localtime(time) . ")";
action("finished",
status => $exit_status ? "partial" : "success",
duration => $time_elapsed,
message => $exit_status ? "At least one delete operation failed" : undef,
);
close_transaction_log();
#
# print summary
#
unless($quiet)
{
$output_format ||= "custom";
if($output_format eq "custom")
{
print_header(title => "Cleanup Summary",
config => $config,
time => $start_time,
legend => [
"--- deleted subvolume (incomplete backup)",
],
);
print join("\n", @out);
if($dryrun) {
print "\nNOTE: Dryrun was active, none of the operations above were actually executed!\n";
}
}
else
{
# print action log (without transaction start messages)
my @data = grep { $_->{status} ne "starting" } @transaction_log;
print_formatted("transaction", \@data, title => "TRANSACTION LOG");
}
}
exit $exit_status;
}
if($action_run)
{
init_transaction_log(config_key($config, "transaction_log"),
config_key($config, "transaction_syslog"));
if($resume_only) {
INFO "Skipping snapshot creation (option \"-r\" present)";
}
else
{
#
# create snapshots
#
foreach my $sroot (vinfo_subsection($config, 'volume')) {
foreach my $svol (vinfo_subsection($sroot, 'subvolume')) {
my $snapdir_ts = config_key($svol, "snapshot_dir", postfix => '/') // "";
my $snapshot_basename = config_key($svol, "snapshot_name") // die;
# check if we need to create a snapshot
my $snapshot_create = config_key($svol, "snapshot_create");
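# snapshot_create controls when a snapshot is taken: "no" (disabled),
# "always", "onchange" (only if the source changed since the latest
# snapshot), or "ondemand" (only if at least one target is configured).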
if(not $snapshot_create) {
DEBUG "Snapshot creation disabled (snapshot_create=no)";
next;
}
elsif($snapshot_create eq "always") {
DEBUG "Snapshot creation enabled (snapshot_create=always)";
}
elsif($snapshot_create eq "onchange") {
# check if latest snapshot is up-to-date with source subvolume (by generation)
my $latest = get_latest_snapshot_child($sroot, $svol);
if($latest) {
if($latest->{node}{cgen} == $svol->{node}{gen}) {
INFO "Snapshot creation skipped: snapshot_create=onchange, snapshot is up-to-date: $latest->{PRINT}";
$svol->{SNAPSHOT_UP_TO_DATE} = $latest;
next;
}
DEBUG "Snapshot creation enabled: snapshot_create=onchange, gen=$svol->{node}{gen} > snapshot_cgen=$latest->{node}{cgen}";
}
else {
DEBUG "Snapshot creation enabled: snapshot_create=onchange, no snapshots found";
}
}
elsif($snapshot_create eq "ondemand") {
# check if at least one target is present
if(scalar vinfo_subsection($svol, 'target')) {
DEBUG "Snapshot creation enabled (snapshot_create=ondemand): at least one target is present";
}
else {
INFO "Snapshot creation skipped: snapshot_create=ondemand, and no target is present for: $svol->{PRINT}";
next;
}
}
else {
die "illegal value for snapshot_create configuration option: $snapshot_create";
}
# find unique snapshot name
my $timestamp = timestamp(\@tm_now, config_key($svol, "timestamp_format"));
my @unconfirmed_target_name;
my @lookup = map { $_->{SUBVOL_PATH} } @{vinfo_subvol_list($sroot)};
@lookup = grep s/^\Q$snapdir_ts\E// , @lookup;
foreach my $droot (vinfo_subsection($svol, 'target', 1)) {
if(ABORTED($droot)) {
push(@unconfirmed_target_name, $droot);
next;
}
push(@lookup, map { $_->{SUBVOL_PATH} } @{vinfo_subvol_list($droot)});
}
@lookup = grep /^\Q$snapshot_basename.$timestamp\E(_[0-9]+)?$/ ,@lookup;
TRACE "Present snapshot names for \"$svol->{PRINT}\": " . join(', ', @lookup);
@lookup = map { /_([0-9]+)$/ ? $1 : 0 } @lookup;
@lookup = sort { $b <=> $a } @lookup;
my $postfix_counter = $lookup[0] // -1;
$postfix_counter++;
my $snapshot_name = $snapshot_basename . '.' . $timestamp . ($postfix_counter ? "_$postfix_counter" : "");
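# Example (assuming snapshot_name "home" and a day-granular timestamp_format):
# the first snapshot of the day becomes "home.20160510"; if that name already
# exists in the snapshot dir or in any target, a counter postfix is appended
# ("home.20160510_1", "home.20160510_2", ...).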
if(@unconfirmed_target_name) {
INFO "Assuming non-present subvolume \"$snapshot_name\" in skipped targets: " . join(", ", map { "\"$_->{PRINT}\"" } @unconfirmed_target_name);
}
# finally create the snapshot
INFO "Creating subvolume snapshot for: $svol->{PRINT}";
my $snapshot = vinfo_child($sroot, "$snapdir_ts$snapshot_name");
if(btrfs_subvolume_snapshot($svol, $snapshot))
{
vinfo_inject_child($sroot, $snapshot, {
parent_uuid => $svol->{node}{uuid},
received_uuid => '-',
readonly => 1,
FORCE_PRESERVE => 'preserve forced: created just now',
});
$svol->{SNAPSHOT_CREATED} = $snapshot;
}
else {
ABORTED($svol, "Failed to create snapshot: $svol->{PRINT} -> $sroot->{PRINT}/$snapdir_ts$snapshot_name");
WARN "Skipping subvolume section: $abrt";
}
}
}
}
#
# create backups
#
foreach my $sroot (vinfo_subsection($config, 'volume')) {
foreach my $svol (vinfo_subsection($sroot, 'subvolume')) {
my $snapdir = config_key($svol, "snapshot_dir") // "";
my $snapshot_basename = config_key($svol, "snapshot_name") // die;
my @snapshot_children = sort({ cmp_date($a->{node}{BTRBK_DATE}, $b->{node}{BTRBK_DATE}) }
get_snapshot_children($sroot, $svol, $snapdir, $snapshot_basename));
foreach my $droot (vinfo_subsection($svol, 'target')) {
INFO "Checking for missing backups of subvolume \"$svol->{PRINT}\" in \"$droot->{PRINT}/\"";
my @schedule;
my $resume_total = 0;
my $resume_success = 0;
my $droot_subvol_list = vinfo_subvol_list($droot); # cache subvol list for get_receive_targets()
foreach my $child (@snapshot_children)
{
my $warning_seen = [];
my @receive_targets = get_receive_targets($droot, $child, exact_match => 1, warn => 1, seen => $warning_seen, droot_subvol_list => $droot_subvol_list );
get_receive_targets_fsroot($droot, $child, exclude => $warning_seen, warn => 1); # warn on unexpected on fs
if(scalar(@receive_targets)){
DEBUG "Found receive target of: $child->{PRINT}";
next;
}
DEBUG "Adding backup candidate: $child->{PRINT}";
push(@schedule, { value => $child,
btrbk_date => $child->{node}{BTRBK_DATE},
# not enforcing resuming of latest snapshot anymore (since v0.23.0)
# preserve => $child->{node}{FORCE_PRESERVE},
});
}
if(scalar @schedule)
{
DEBUG "Checking schedule for backup candidates";
# add all present backups as informative_only: these are needed for correct results of schedule()
foreach my $vol (@$droot_subvol_list) {
unless($vol->{btrbk_direct_leaf} && ($vol->{node}{BTRBK_BASENAME} eq $snapshot_basename)) {
TRACE "Receive target does not match btrbk filename scheme, skipping: $vol->{PRINT}";
next;
}
push(@schedule, { informative_only => 1,
value => $vol,
btrbk_date => $vol->{node}{BTRBK_DATE},
});
}
my ($preserve, undef) = schedule(
schedule => \@schedule,
preserve => config_preserve_hash($droot, "target"),
);
my @resume = grep defined, @$preserve; # remove entries with no value from list (target subvolumes)
$resume_total = scalar @resume;
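# @resume holds the backup candidates selected by the target preserve policy
# (the present target subvolumes added above were informative only); they are
# transferred below, oldest first (by cgen).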
foreach my $child (sort { $a->{node}{cgen} <=> $b->{node}{cgen} } @resume)
{
# Continue gracefully (skip instead of abort) on existing (possibly garbled) target
my $err_vol = vinfo_subvol($droot, $child->{NAME});
if($err_vol) {
my $status_msg = "Please delete stray subvolume (\"btrbk clean\"): $err_vol->{PRINT}";
WARN "Target subvolume \"$err_vol->{PRINT}\" exists, but is not a receive target of \"$child->{PRINT}\"";
WARN $status_msg;
WARN "Skipping backup of: $child->{PRINT}";
$droot->{SUBVOL_RECEIVED} //= [];
push(@{$droot->{SUBVOL_RECEIVED}}, { ERROR => $status_msg, received_subvolume => $err_vol });
$droot->{CONFIG}->{UNRECOVERABLE} = $status_msg;
next;
}
INFO "Creating subvolume backup (send-receive) for: $child->{PRINT}";
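# Look up the latest snapshot/backup pair common to source and target; if
# none exists, parent stays undef and the subvolume is sent non-incrementally
# (full send).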
my ($latest_common_src, $latest_common_target) = get_latest_common($sroot, $child, $droot, $snapdir);
if(macro_send_receive(source => $child,
target => $droot,
parent => $latest_common_src, # this is <undef> if no common found
latest_common_target => $latest_common_target,
))
{
$resume_success++;
}
else {
# note: ABORTED flag is already set by macro_send_receive()
ERROR("Error while resuming backups, aborting");
last;
}
}
}
if($resume_total) {
INFO "Created $resume_success/$resume_total missing backups";
} else {
INFO "No missing backups found";
}
}
}
}
#
# remove backups following a preserve daily/weekly/monthly scheme
#
my $schedule_results = [];
if($preserve_backups || $resume_only) {
INFO "Preserving all snapshots and backups (option \"-p\" or \"-r\" present)";
}
else
{
foreach my $sroot (vinfo_subsection($config, 'volume')) {
foreach my $svol (vinfo_subsection($sroot, 'subvolume')) {
my $snapdir = config_key($svol, "snapshot_dir") // "";
my $snapdir_ts = config_key($svol, "snapshot_dir", postfix => '/') // "";
my $snapshot_basename = config_key($svol, "snapshot_name") // die;
my $target_aborted = 0;
my @snapshot_children = sort({ cmp_date($b->{node}{BTRBK_DATE}, $a->{node}{BTRBK_DATE}) } # sort descending
get_snapshot_children($sroot, $svol, $snapdir, $snapshot_basename));
foreach my $droot (vinfo_subsection($svol, 'target', 1)) {
if(ABORTED($droot)) {
if(ABORTED($droot) eq "USER_SKIP") {
$target_aborted ||= -1;
} else {
$target_aborted = 1;
}
next;
}
# always preserve latest common snapshot/backup pair
my $droot_subvol_list = vinfo_subvol_list($droot); # cache subvol list for get_receive_targets()
foreach my $child (@snapshot_children) {
my @receive_targets = get_receive_targets($droot, $child, droot_subvol_list => $droot_subvol_list);
if(scalar(@receive_targets)) {
DEBUG "Force preserve for latest common snapshot: $child->{PRINT}";
$child->{node}{FORCE_PRESERVE} = 'preserve forced: latest common snapshot';
foreach(@receive_targets) {
DEBUG "Force preserve for latest common target: $_->{PRINT}";
$_->{node}{FORCE_PRESERVE} = 'preserve forced: latest common target';
}
last;
}
}
#
# delete backups
#
INFO "Cleaning backups of subvolume \"$svol->{PRINT}\": $droot->{PRINT}/$snapshot_basename.*";
unless(macro_delete($droot, "", $snapshot_basename, $droot,
{ preserve => config_preserve_hash($droot, "target"),
results => $schedule_results,
result_hints => { topic => "backup", root_path => $droot->{PATH} },
},
commit => config_key($droot, "btrfs_commit_delete"),
type => "delete_target",
))
{
$target_aborted = -1;
}
}
#
# delete snapshots
#
if($target_aborted) {
if($target_aborted == -1) {
INFO "Skipping cleanup of snapshots for subvolume \"$svol->{PRINT}\", as at least one target is skipped by command line argument";
} else {
WARN "Skipping cleanup of snapshots for subvolume \"$svol->{PRINT}\", as at least one target aborted earlier";
}
next;
}
INFO "Cleaning snapshots: $sroot->{PRINT}/$snapdir_ts$snapshot_basename.*";
macro_delete($sroot, $snapdir, $snapshot_basename, $svol,
{ preserve => config_preserve_hash($svol, "snapshot"),
results => $schedule_results,
result_hints => { topic => "snapshot", root_path => $sroot->{PATH} },
},
commit => config_key($svol, "btrfs_commit_delete"),
type => "delete_snapshot",
);
}
}
}
my $exit_status = exit_status($config);
my $time_elapsed = time - $start_time;
INFO "Completed within: ${time_elapsed}s (" . localtime(time) . ")";
action("finished",
status => $exit_status ? "partial" : "success",
duration => $time_elapsed,
message => $exit_status ? "At least one backup task aborted" : undef,
);
close_transaction_log();
unless($quiet)
{
#
# print scheduling results
#
if($print_schedule) {
my @data = map { { %$_, vinfo_prefixed_keys("", $_->{value}) }; } @$schedule_results;
my @data_snapshot = grep { $_->{topic} eq "snapshot" } @data;
my @data_backup = grep { $_->{topic} eq "backup" } @data;
if(scalar(@data_snapshot)) {
print_formatted("schedule", \@data_snapshot, title => "SNAPSHOT SCHEDULE");
print "\n";
}
if(scalar(@data_backup)) {
print_formatted("schedule", \@data_backup, title => "BACKUP SCHEDULE");
print "\n";
}
}
#
# print summary
#
$output_format ||= "custom";
if($output_format eq "custom")
{
my @unrecoverable;
my @out;
foreach my $sroot (vinfo_subsection($config, 'volume', 1)) {
foreach my $svol (vinfo_subsection($sroot, 'subvolume', 1)) {
my @subvol_out;
if($svol->{SNAPSHOT_UP_TO_DATE}) {
push @subvol_out, "=== $svol->{SNAPSHOT_UP_TO_DATE}->{PRINT}";
}
if($svol->{SNAPSHOT_CREATED}) {
push @subvol_out, "+++ $svol->{SNAPSHOT_CREATED}->{PRINT}";
}
foreach(sort { $a->{PATH} cmp $b->{PATH} } @{$svol->{SUBVOL_DELETED} // []}) {
push @subvol_out, "--- $_->{PRINT}";
}
foreach my $droot (vinfo_subsection($svol, 'target', 1)) {
foreach(@{$droot->{SUBVOL_RECEIVED} // []}) {
my $create_mode = "***";
$create_mode = ">>>" if($_->{parent});
# substr($create_mode, 0, 1, '%') if($_->{resume});
$create_mode = "!!!" if($_->{ERROR});
push @subvol_out, "$create_mode $_->{received_subvolume}->{PRINT}";
}
foreach(sort { $a->{PATH} cmp $b->{PATH} } @{$droot->{SUBVOL_DELETED} // []}) {
push @subvol_out, "--- $_->{PRINT}";
}
if(ABORTED($droot) && (ABORTED($droot) ne "USER_SKIP")) {
push @subvol_out, "!!! Target \"$droot->{PRINT}\" aborted: " . ABORTED($droot);
}
if($droot->{CONFIG}->{UNRECOVERABLE}) {
push(@unrecoverable, $droot->{CONFIG}->{UNRECOVERABLE});
}
}
if(ABORTED($sroot) && (ABORTED($sroot) ne "USER_SKIP")) {
# repeat volume errors in subvolume context
push @subvol_out, "!!! Volume \"$sroot->{PRINT}\" aborted: " . ABORTED($sroot);
}
if(ABORTED($svol) && (ABORTED($svol) ne "USER_SKIP")) {
push @subvol_out, "!!! Aborted: " . ABORTED($svol);
}
if(@subvol_out) {
push @out, "$svol->{PRINT}", @subvol_out, "";
}
elsif(ABORTED($svol) && (ABORTED($svol) eq "USER_SKIP")) {
# don't print "<no_action>" on USER_SKIP
}
else {
push @out, "$svol->{PRINT}", "<no_action>", "";
}
}
}
print_header(title => "Backup Summary",
config => $config,
time => $start_time,
legend => [
"=== up-to-date subvolume (source snapshot)",
"+++ created subvolume (source snapshot)",
"--- deleted subvolume",
"*** received subvolume (non-incremental)",
">>> received subvolume (incremental)",
],
);
print join("\n", @out);
if($resume_only) {
print "\nNOTE: No snapshots created (option -r present)\n";
}
if($preserve_backups || $resume_only) {
print "\nNOTE: Preserved all snapshots and backups (option -p or -r present)\n";
}
if($exit_status || scalar(@unrecoverable)) {
print "\nNOTE: Some errors occurred, which may result in missing backups!\n";
print "Please check warning and error messages above.\n";
print join("\n", @unrecoverable) . "\n" if(@unrecoverable);
}
if($dryrun) {
print "\nNOTE: Dryrun was active, none of the operations above were actually executed!\n";
}
}
else
{
# print action log (without transaction start messages)
my @data = grep { $_->{status} ne "starting" } @transaction_log;
print_formatted("transaction", \@data, title => "TRANSACTION LOG");
}
}
exit $exit_status if($exit_status);
}
}
1;