#!/usr/bin/perl
#
# btrbk - Create snapshots and remote backups of btrfs subvolumes
#
# Copyright (C) 2014-2019 Axel Burri
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# ---------------------------------------------------------------------
# The official btrbk website is located at:
# https://digint.ch/btrbk/
#
# Author:
# Axel Burri <axel@tty0.ch>
# ---------------------------------------------------------------------
use strict;
use warnings FATAL => qw( all ), NONFATAL => qw( deprecated );

use Carp qw(confess);
use Getopt::Long qw(GetOptions);
use Time::Local qw( timelocal timegm timegm_nocheck );

# Package-level metadata (also used by Getopt::Long auto-version handling).
our $VERSION      = '0.29.0-dev';
our $AUTHOR       = 'Axel Burri <axel@tty0.ch>';
our $PROJECT_HOME = '<https://digint.ch/btrbk/>';

# Minimum supported btrfs-progs version on all hosts.
our $BTRFS_PROGS_MIN = '4.12'; # required since btrbk-v0.27.0

my $VERSION_INFO = "btrbk command line client, version $VERSION";
# Default configuration file locations, tried in this order.
my @config_src = ("/etc/btrbk.conf", "/etc/btrbk/btrbk.conf");

# Supported compression algorithms for stream / raw target compression.
# NOTE: also adapt "compress_list" in ssh_filter_btrbk.sh if you change this
my %compression = (
  gzip   => { name => 'gzip',   format => 'gz',  compress_cmd => [ 'gzip', '-c' ],   decompress_cmd => [ 'gzip', '-d', '-c' ],   level_min => 1, level_max => 9 },
  pigz   => { name => 'pigz',   format => 'gz',  compress_cmd => [ 'pigz', '-c' ],   decompress_cmd => [ 'pigz', '-d', '-c' ],   level_min => 1, level_max => 9, threads => '-p' },
  bzip2  => { name => 'bzip2',  format => 'bz2', compress_cmd => [ 'bzip2', '-c' ],  decompress_cmd => [ 'bzip2', '-d', '-c' ],  level_min => 1, level_max => 9 },
  pbzip2 => { name => 'pbzip2', format => 'bz2', compress_cmd => [ 'pbzip2', '-c' ], decompress_cmd => [ 'pbzip2', '-d', '-c' ], level_min => 1, level_max => 9, threads => '-p' },
  xz     => { name => 'xz',     format => 'xz',  compress_cmd => [ 'xz', '-c' ],     decompress_cmd => [ 'xz', '-d', '-c' ],     level_min => 0, level_max => 9, threads => '-T' },
  lzo    => { name => 'lzo',    format => 'lzo', compress_cmd => [ 'lzop', '-c' ],   decompress_cmd => [ 'lzop', '-d', '-c' ],   level_min => 1, level_max => 9 },
  lz4    => { name => 'lz4',    format => 'lz4', compress_cmd => [ 'lz4', '-c' ],    decompress_cmd => [ 'lz4', '-d', '-c' ],    level_min => 1, level_max => 9 },
);
# Regex alternation of all known compressed-file suffixes.
my $compress_format_alt = join '|', map { $_->{format} } values %compression; # note: this contains duplicate alternations

# Common regex fragments used by URL / file name parsing below.
my $ip_addr_match   = qr/(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])/;
my $host_name_match = qr/(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\-]*[A-Za-z0-9])/;
my $file_match      = qr/[0-9a-zA-Z_@\+\-\.\/]+/; # note: ubuntu uses '@' in the subvolume layout: <https://help.ubuntu.com/community/btrfs>
my $glob_match      = qr/[0-9a-zA-Z_@\+\-\.\/\*]+/; # file_match plus '*'
my $uuid_match      = qr/[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}/;

# Snapshot/backup name timestamp postfix; matches "YYYYMMDD[Thhmm[ss+0000]][_NN]"
my $btrbk_timestamp_match = qr/(?<YYYY>[0-9]{4})(?<MM>[0-9]{2})(?<DD>[0-9]{2})(T(?<hh>[0-9]{2})(?<mm>[0-9]{2})((?<ss>[0-9]{2})(?<zz>(Z|[+-][0-9]{4})))?)?(_(?<NN>[0-9]+))?/;

# Raw target file postfixes (legacy and current naming scheme).
my $raw_postfix_match_DEPRECATED = qr/--(?<received_uuid>$uuid_match)(\@(?<parent_uuid>$uuid_match))?\.btrfs?(\.(?<compress>($compress_format_alt)))?(\.(?<encrypt>gpg))?(\.(?<split>split))?(\.(?<incomplete>part))?/; # matches ".btrfs_<received_uuid>[@<parent_uuid>][.gz|bz2|xz][.gpg][.split][.part]"
my $raw_postfix_match = qr/\.btrfs(\.($compress_format_alt))?(\.(gpg|encrypted))?/; # matches ".btrfs[.gz|bz2|xz][.gpg|encrypted]"

my $group_match      = qr/[a-zA-Z0-9_:-]+/;
my $ssh_cipher_match = qr/[a-z0-9][a-z0-9@.-]+/;

# Map of "preserve_day_of_week" config values to localtime() wday numbers.
my %day_of_week_map = ( sunday => 0, monday => 1, tuesday => 2, wednesday => 3, thursday => 4, friday => 5, saturday => 6 );

# Accepted values for "transaction_syslog".
my @syslog_facilities = qw( user mail daemon auth lpr news cron authpriv local0 local1 local2 local3 local4 local5 local6 local7 );
# Configuration option table, driving the config parser/validator.
#
# Per-option keys (as used by the parser):
#   default                - default value (undef means unset / "no")
#   c_default              - default is computed elsewhere (hardcoded)
#   accept                 - list of accepted literal values
#   accept_regexp          - regexp that accepted values must match
#   accept_numeric         - plain numbers are accepted
#   accept_file            - file/path argument (with constraints)
#   accept_preserve_matrix - retention ("preserve") expressions are accepted
#   context                - config sections in which the option is allowed
#   allow_multiple / split - option may be given multiple times / as a list
#   require_bin            - external binary required when option is set
#   deprecated             - warning / replacement rules for legacy options
#
# NOTE: the parser always maps "no" to undef
# NOTE: keys "volume", "subvolume" and "target" are hardcoded
# NOTE: files "." and "no" map to <undef>
my %config_options = (
  timestamp_format            => { default => "short", accept => [ "short", "long", "long-iso" ], context => [ "root", "volume", "subvolume" ] },
  snapshot_dir                => { default => undef, accept_file => { relative => 1 }, context => [ "root", "volume", "subvolume" ] },
  snapshot_name               => { c_default => 1, accept_file => { name_only => 1 }, context => [ "subvolume" ], deny_glob_context => 1 }, # NOTE: defaults to the subvolume name (hardcoded)
  snapshot_create             => { default => "always", accept => [ "no", "always", "ondemand", "onchange" ], context => [ "root", "volume", "subvolume" ] },
  incremental                 => { default => "yes", accept => [ "yes", "no", "strict" ] },
  incremental_clones          => { default => 0, accept_numeric => 1 },
  incremental_resolve         => { default => "mountpoint", accept => [ "mountpoint", "directory", "_all_accessible" ] },
  preserve_day_of_week        => { default => "sunday", accept => [ (keys %day_of_week_map) ] },
  preserve_hour_of_day        => { default => 0, accept => [ (0..23) ] },
  snapshot_preserve           => { default => undef, accept => [ "no" ], accept_preserve_matrix => 1, context => [ "root", "volume", "subvolume" ], },
  snapshot_preserve_min       => { default => "all", accept => [ "all", "latest" ], accept_regexp => qr/^[1-9][0-9]*[hdwmy]$/, context => [ "root", "volume", "subvolume" ], },
  target_preserve             => { default => undef, accept => [ "no" ], accept_preserve_matrix => 1 },
  target_preserve_min         => { default => "all", accept => [ "all", "latest", "no" ], accept_regexp => qr/^[0-9]+[hdwmy]$/ },
  archive_preserve            => { default => undef, accept => [ "no" ], accept_preserve_matrix => 1, context => [ "root" ] },
  archive_preserve_min        => { default => "all", accept => [ "all", "latest", "no" ], accept_regexp => qr/^[0-9]+[hdwmy]$/, context => [ "root" ] },
  btrfs_commit_delete         => { default => undef, accept => [ "after", "each", "no" ] },
  ssh_identity                => { default => undef, accept_file => { absolute => 1 } },
  ssh_user                    => { default => "root", accept_regexp => qr/^[a-z_][a-z0-9_-]*$/ },
  ssh_compression             => { default => undef, accept => [ "yes", "no" ] },
  ssh_cipher_spec             => { default => "default", accept_regexp => qr/^$ssh_cipher_match(,$ssh_cipher_match)*$/ },
  transaction_log             => { default => undef, accept => [ "no" ], accept_file => { absolute => 1 }, context => [ "root" ] },
  transaction_syslog          => { default => undef, accept => [ "no", @syslog_facilities ], context => [ "root" ] },
  lockfile                    => { default => undef, accept => [ "no" ], accept_file => { absolute => 1 }, context => [ "root" ] },

  rate_limit                  => { default => undef, accept => [ "no" ], accept_regexp => qr/^[0-9]+[kmgtKMGT]?$/, require_bin => 'mbuffer' },
  rate_limit_remote           => { default => undef, accept => [ "no" ], accept_regexp => qr/^[0-9]+[kmgtKMGT]?$/ }, # NOTE: requires 'mbuffer' command on remote hosts
  stream_buffer               => { default => undef, accept => [ "no" ], accept_regexp => qr/^[0-9]+[kmgKMG%]?$/, require_bin => 'mbuffer' },
  stream_buffer_remote        => { default => undef, accept => [ "no" ], accept_regexp => qr/^[0-9]+[kmgKMG%]?$/ }, # NOTE: requires 'mbuffer' command on remote hosts
  stream_compress             => { default => undef, accept => [ "no", (keys %compression) ] },
  stream_compress_level       => { default => "default", accept => [ "default" ], accept_numeric => 1 },
  stream_compress_threads     => { default => "default", accept => [ "default" ], accept_numeric => 1 },
  raw_target_compress         => { default => undef, accept => [ "no", (keys %compression) ] },
  raw_target_compress_level   => { default => "default", accept => [ "default" ], accept_numeric => 1 },
  raw_target_compress_threads => { default => "default", accept => [ "default" ], accept_numeric => 1 },
  raw_target_encrypt          => { default => undef, accept => [ "no", "gpg", "openssl_enc" ] },
  raw_target_block_size       => { default => "128K", accept_regexp => qr/^[0-9]+[kmgKMG]?$/ },
  raw_target_split            => { default => undef, accept => [ "no" ], accept_regexp => qr/^[0-9]+([kmgtpezyKMGTPEZY][bB]?)?$/ },
  gpg_keyring                 => { default => undef, accept_file => { absolute => 1 } },
  gpg_recipient               => { default => undef, accept_regexp => qr/^[0-9a-zA-Z_@\+\-\.]+$/ },
  openssl_ciphername          => { default => "aes-256-cbc", accept_regexp => qr/^[0-9a-zA-Z\-]+$/ },
  openssl_iv_size             => { default => undef, accept => [ "no" ], accept_numeric => 1 },
  openssl_keyfile             => { default => undef, accept_file => { absolute => 1 } },

  kdf_backend                 => { default => undef, accept_file => { absolute => 1 } },
  kdf_keysize                 => { default => "32", accept_numeric => 1 },
  kdf_keygen                  => { default => "once", accept => [ "once", "each" ] },

  group                       => { default => undef, accept_regexp => qr/^$group_match(\s*[,\s]\s*$group_match)*$/, allow_multiple => 1, split => qr/\s*[,\s]\s*/ },
  noauto                      => { default => undef, accept => [ "yes", "no" ] },

  backend                     => { default => "btrfs-progs", accept => [ "btrfs-progs", "btrfs-progs-btrbk", "btrfs-progs-sudo" ] },
  backend_local               => { default => undef, accept => [ "no", "btrfs-progs", "btrfs-progs-btrbk", "btrfs-progs-sudo" ] },
  backend_remote              => { default => undef, accept => [ "no", "btrfs-progs", "btrfs-progs-btrbk", "btrfs-progs-sudo" ] },

  snapshot_qgroup_destroy     => { default => undef, accept => [ "yes", "no" ], context => [ "root", "volume", "subvolume" ] },
  target_qgroup_destroy       => { default => undef, accept => [ "yes", "no" ] },
  archive_qgroup_destroy      => { default => undef, accept => [ "yes", "no" ], context => [ "root" ] },
  archive_exclude             => { default => undef, accept_file => { wildcards => 1 }, allow_multiple => 1, context => [ "root" ] },
  archive_exclude_older       => { default => undef, accept => [ "yes", "no" ] },

  # deprecated options
  ssh_port                  => { default => "default", accept => [ "default" ], accept_numeric => 1,
                                 deprecated => { DEFAULT => { warn => 'Please use "ssh://hostname[:port]" notation in the "volume" and "target" configuration lines.' } } },
  # FIX: warn message must be double-quoted, so that $BTRFS_PROGS_MIN is
  # interpolated (the single-quoted form printed the literal variable name).
  btrfs_progs_compat        => { default => undef, accept => [ "yes", "no" ],
                                 deprecated => { DEFAULT => { ABORT => 1, warn => "This feature has been dropped in btrbk-v0.23.0. Please update to newest btrfs-progs, AT LEAST >= $BTRFS_PROGS_MIN" } } },
  snapshot_preserve_daily   => { default => 'all', accept => [ "all" ], accept_numeric => 1, context => [ "root", "volume", "subvolume" ],
                                 deprecated => { DEFAULT => { FAILSAFE_PRESERVE => 1, warn => 'Please use "snapshot_preserve" and/or "snapshot_preserve_min"' } } },
  snapshot_preserve_weekly  => { default => 0, accept => [ "all" ], accept_numeric => 1, context => [ "root", "volume", "subvolume" ],
                                 deprecated => { DEFAULT => { FAILSAFE_PRESERVE => 1, warn => 'Please use "snapshot_preserve" and/or "snapshot_preserve_min"' } } },
  snapshot_preserve_monthly => { default => 'all', accept => [ "all" ], accept_numeric => 1, context => [ "root", "volume", "subvolume" ],
                                 deprecated => { DEFAULT => { FAILSAFE_PRESERVE => 1, warn => 'Please use "snapshot_preserve" and/or "snapshot_preserve_min"' } } },
  target_preserve_daily     => { default => 'all', accept => [ "all" ], accept_numeric => 1,
                                 deprecated => { DEFAULT => { FAILSAFE_PRESERVE => 1, warn => 'Please use "target_preserve" and/or "target_preserve_min"' } } },
  target_preserve_weekly    => { default => 0, accept => [ "all" ], accept_numeric => 1,
                                 deprecated => { DEFAULT => { FAILSAFE_PRESERVE => 1, warn => 'Please use "target_preserve" and/or "target_preserve_min"' } } },
  target_preserve_monthly   => { default => 'all', accept => [ "all" ], accept_numeric => 1,
                                 deprecated => { DEFAULT => { FAILSAFE_PRESERVE => 1, warn => 'Please use "target_preserve" and/or "target_preserve_min"' } } },
  resume_missing            => { default => "yes", accept => [ "yes", "no" ],
                                 deprecated => { yes => { warn => 'ignoring (missing backups are always resumed since btrbk v0.23.0)' },
                                                 no  => { FAILSAFE_PRESERVE => 1, warn => 'Please use "target_preserve_min latest" and "target_preserve no" if you want to keep only the latest backup', } } },
  snapshot_create_always    => { default => undef, accept => [ "yes", "no" ],
                                 deprecated => { yes => { warn => "Please use \"snapshot_create always\"",
                                                          replace_key => "snapshot_create",
                                                          replace_value => "always",
                                                        },
                                                 no  => { warn => "Please use \"snapshot_create no\" or \"snapshot_create ondemand\"",
                                                          replace_key => "snapshot_create",
                                                          replace_value => "ondemand",
                                                        }
                                               },
                               },
  receive_log               => { default => undef, accept => [ "sidecar", "no" ], accept_file => { absolute => 1 },
                                 deprecated => { DEFAULT => { warn => "ignoring" } },
                               }
);
# Supported target types in "target" configuration lines.
my @config_target_types = qw(send-receive raw);

# Output column definitions per command/topic and output format.
# A leading "-" in "table" columns marks optional columns; "RALIGN" marks
# right-aligned columns (see print_formatted()).
my %table_formats = (
  list_volume => { table => [ qw( -volume_host -volume_port volume_path ) ],
                   long  => [ qw( volume_host volume_port volume_path ) ],
                   raw   => [ qw( volume_url volume_host volume_port volume_path volume_rsh ) ],
                 },
  list_source => { table => [ qw( -source_host -source_port source_subvol snapshot_path snapshot_name ) ],
                   long  => [ qw( source_host source_port source_subvol snapshot_path snapshot_name ) ],
                   raw   => [ qw( source_url source_host source_port source_path snapshot_path snapshot_name source_rsh ) ],
                 },
  list_target => { table => [ qw( -target_host -target_port target_path ) ],
                   long  => [ qw( target_host target_port target_path ) ],
                   raw   => [ qw( target_url target_host target_port target_path target_rsh ) ],
                 },
  list        => { table => [ qw( -source_host -source_port source_subvol snapshot_path snapshot_name -target_host -target_port target_path ) ],
                   long  => [ qw( source_host source_port source_subvol snapshot_path snapshot_name snapshot_preserve target_host target_port target_path target_preserve ) ],
                   raw   => [ qw( source_url source_host source_port source_subvol snapshot_path snapshot_name snapshot_preserve target_url target_host target_port target_path target_preserve source_rsh target_rsh ) ],
                 },
  resolved    => { table => [ qw( -source_host -source_port source_subvol snapshot_subvol status -target_host -target_port target_subvol ) ],
                   long  => [ qw( type source_host source_port source_subvol snapshot_subvol status target_host target_port target_subvol target_type ) ],
                   raw   => [ qw( type source_host source_port source_path snapshot_path snapshot_name status target_host target_port target_path target_type source_rsh target_rsh ) ],
                 },
  schedule    => { table => [ qw( action -host -port subvol scheme reason ) ],
                   long  => [ qw( action host port root_path subvol_path scheme reason ) ],
                   raw   => [ qw( topic action url host port path hod dow min h d w m y) ],
                 },
  usage       => { table => [ qw( -host -port path size used free ) ],
                   long  => [ qw( type host port path size used device_size device_allocated device_unallocated device_missing device_used free free_min data_ratio metadata_ratio global_reserve global_reserve_used ) ],
                   raw   => [ qw( type host port path size used device_size device_allocated device_unallocated device_missing device_used free free_min data_ratio metadata_ratio global_reserve global_reserve_used ) ],
                   RALIGN => { size=>1, used=>1, device_size=>1, device_allocated=>1, device_unallocated=>1, device_missing=>1, device_used=>1, free=>1, free_min=>1, data_ratio=>1, metadata_ratio=>1, global_reserve=>1, global_reserve_used=>1 },
                 },
  transaction => { table  => [ qw( type status -target_host -target_port target_subvol -source_host -source_port source_subvol parent_subvol ) ],
                   long   => [ qw( localtime type status duration target_host target_port target_subvol source_host source_port source_subvol parent_subvol message ) ],
                   tlog   => [ qw( localtime type status target_url source_url parent_url message ) ],
                   syslog => [ qw( type status target_url source_url parent_url message ) ],
                   raw    => [ qw( time localtime type status duration target_url source_url parent_url message ) ],
                 },
  origin_tree => { table => [ qw( tree uuid parent_uuid received_uuid ) ],
                   long  => [ qw( tree uuid parent_uuid received_uuid recursion ) ],
                   raw   => [ qw( tree uuid parent_uuid received_uuid recursion ) ],
                 },
  fs_list     => { table => [ qw( mount_point id flags path ) ],
                   rel   => [ qw( mount_source mount_point path subvolume_path subvolume_rel_path ) ],
                   long  => [ qw( mount_source mount_subvol mount_point id top_level cgen gen uuid parent_uuid received_uuid flags path ) ],
                   raw   => [ qw( mount_source mount_subvol mount_point mount_subvolid id top_level cgen gen uuid parent_uuid received_uuid readonly path subvolume_path ) ],
                 },
);
# Command translation per backend: maps canonical btrfs-progs invocations to
# the backend-specific command array (btrbk wrapper binaries, or sudo).
my %backend_cmd_map = (
  "btrfs-progs-btrbk" => {
    "btrfs subvolume list"     => [ "btrfs-subvolume-list" ],
    "btrfs subvolume show"     => [ "btrfs-subvolume-show" ],
    "btrfs subvolume snapshot" => [ "btrfs-subvolume-snapshot" ],
    "btrfs subvolume delete"   => [ "btrfs-subvolume-delete" ],
    "btrfs send"               => [ "btrfs-send" ],
    "btrfs receive"            => [ "btrfs-receive" ],
    "btrfs filesystem usage"   => [ "btrfs-filesystem-usage" ],
    "btrfs qgroup destroy"     => [ "btrfs-qgroup-destroy" ],
  },
  "btrfs-progs-sudo" => {
    "btrfs subvolume list"     => [ "sudo", "-n", "btrfs", "subvolume", "list" ],
    "btrfs subvolume show"     => [ "sudo", "-n", "btrfs", "subvolume", "show" ],
    "btrfs subvolume snapshot" => [ "sudo", "-n", "btrfs", "subvolume", "snapshot" ],
    "btrfs subvolume delete"   => [ "sudo", "-n", "btrfs", "subvolume", "delete" ],
    "btrfs send"               => [ "sudo", "-n", "btrfs", "send" ],
    "btrfs receive"            => [ "sudo", "-n", "btrfs", "receive" ],
    "btrfs filesystem usage"   => [ "sudo", "-n", "btrfs", "filesystem", "usage" ],
    "btrfs qgroup destroy"     => [ "sudo", "-n", "btrfs", "qgroup", "destroy" ],
    "readlink"                 => [ "sudo", "-n", "readlink" ],
  },
);
# Keys used in raw target sidecar files (.info), mapped to their output
# position (sort order) when the sidecar file is written.
my %raw_info_sort = (
  TYPE                 => 1,
  FILE                 => 2,
  RECEIVED_UUID        => 3,
  RECEIVED_PARENT_UUID => 4,
  INCOMPLETE           => 5,
  # disabled for now, as its not very useful and might leak information
  #source_url => 6,
  #parent_url => 7,
  #target_url => 8,
  compress             => 9,
  split                => 10,
  encrypt              => 11,
  cipher               => 12,
  iv                   => 13,
  # kdf_* (generated by kdf_backend)
);
#
# Caches for filesystem / subvolume tree resolution.
#
my %raw_url_cache;       # map URL to (fake) btr_tree node
my %mountinfo_cache;     # map MACHINE_ID to mount points (sorted descending by file length)
my %mount_source_cache;  # map URL_PREFIX:mount_source (aka device) to btr_tree node
my %uuid_cache;          # map UUID to btr_tree node
my %realpath_cache;      # map URL to realpath (symlink target)
my $tree_inject_id = 0;  # fake subvolume id for injected nodes (negative)
my $fake_uuid_prefix = 'XXXXXXXX-XXXX-XXXX-XXXX-'; # plus 0-padded inject_id: XXXXXXXX-XXXX-XXXX-XXXX-000000000000

#
# Global runtime state (mostly set from command line options).
#
my $dryrun;
my $loglevel = 1;        # 1=warn, raised by --verbose / --loglevel (see TRACE/DEBUG/INFO/WARN)
my $quiet;
my @exclude_vf;
my $do_dumper;
my $show_progress = 0;
my $err = "";
my $output_format;
my $lockfile;
my $tlog_fh;             # transaction log filehandle
my $syslog_enabled = 0;
my $current_transaction;
my @transaction_log;
my %config_override;
my @tm_now;              # current localtime ( sec, min, hour, mday, mon, year, wday, yday, isdst )
my %warn_once;           # messages already printed by WARN_ONCE
my %kdf_vars;
my $kdf_session_key;
# Global die handler: an uncaught die() is a bug, so print a bug-report hint
# along with a full stack trace (via Carp::confess).
$SIG{__DIE__} = sub {
  print STDERR "\nERROR: process died unexpectedly (btrbk v$VERSION)";
  print STDERR "\nPlease contact the author: $AUTHOR\n\n";
  print STDERR "Stack Trace:\n----------------------------------------\n";
  Carp::confess @_;
};
# SIGINT handler: dump the transaction log before exiting, so the user can
# see which operations had already been performed when interrupted.
# FIX: corrected typo in user-facing message ("Cought" -> "Caught").
$SIG{INT} = sub {
  print STDERR "\nERROR: Caught SIGINT, dumping transaction log:\n";
  action("signal", status => "SIGINT");
  print_formatted("transaction", \@transaction_log, output_format => "tlog", outfile => *STDERR);
  exit 1;
};
# Print the version banner to STDERR (hooked by Getopt::Long for --version).
sub VERSION_MESSAGE
{
  print STDERR $VERSION_INFO . "\n\n";
}
# Print the usage text to STDERR (hooked by Getopt::Long for --help).
# Suppressed in quiet mode (-q).
sub HELP_MESSAGE
{
  return if($quiet);
  #80-----------------------------------------------------------------------------
  print STDERR <<"END_HELP";
usage: btrbk [options] <command> [filter...]

options:
  -h, --help            display this help message
  --version             display version information
  -c, --config=FILE     specify configuration file
  -n, --dry-run         perform a trial run with no changes made
  --exclude=FILTER      exclude configured sections
  -p, --preserve        preserve all (do not delete anything)
  --preserve-snapshots  preserve snapshots (do not delete snapshots)
  --preserve-backups    preserve backups (do not delete backups)
  --wipe                delete all but latest snapshots
  -v, --verbose         be verbose (set loglevel=info)
  -q, --quiet           be quiet (do not print backup summary)
  -l, --loglevel=LEVEL  set logging level (warn, info, debug, trace)
  -t, --table           change output to table format
  -L, --long            change output to long format
  --format=FORMAT       change output format, FORMAT=table|long|raw
  -S, --print-schedule  print scheduler details (for the "run" command)
  --progress            show progress bar on send-receive operation
  --lockfile=FILE       create and check lockfile
  --override=KEY=VALUE  globally override a configuration option

commands:
  run                  run snapshot and backup operations
  dryrun               don't run btrfs commands; show what would be executed
  snapshot             run snapshot operations only
  resume               run backup operations, and delete snapshots
  prune                only delete snapshots and backups
  archive <src> <dst>  recursively copy all subvolumes
  clean                delete incomplete (garbled) backups
  stats                print snapshot/backup statistics
  list <subcommand>    available subcommands are:
    backups            all backups and corresponding snapshots
    snapshots          all snapshots and corresponding backups
    latest             most recent snapshots and backups
    config             configured source/snapshot/target relations
    source             configured source/snapshot relations
    volume             configured volume sections
    target             configured targets
  usage                print filesystem usage
  origin <subvol>      print origin information for subvolume
  diff <from> <to>     shows new files between related subvolumes
  ls <path>            list all btrfs subvolumes below path

For additional information, see $PROJECT_HOME
END_HELP
  #80-----------------------------------------------------------------------------
}
2015-10-22 17:45:27 +02:00
2015-04-29 00:34:11 +02:00
# Leveled logging helpers: all messages go to STDERR, gated by the
# global $loglevel (4=trace, 3=debug, 2=info, 1=warn; errors always print).
sub TRACE { print STDERR "... $_[0]\n" if($loglevel >= 4); }
sub DEBUG { print STDERR "$_[0]\n" if($loglevel >= 3); }
sub INFO  { print STDERR "$_[0]\n" if($loglevel >= 2); }
sub WARN  { print STDERR "WARNING: $_[0]\n" if($loglevel >= 1); }
sub ERROR { print STDERR "ERROR: $_[0]\n"; }
2014-12-11 18:03:10 +01:00
2016-05-10 15:50:33 +02:00
# Emit a warning only once per distinct message text; subsequent
# identical messages are demoted to trace level (tracked in %warn_once).
sub WARN_ONCE {
  my $msg = shift;
  if($warn_once{$msg}) {
    TRACE("WARNING(again): $msg");
  }
  else {
    $warn_once{$msg} = 1;
    WARN($msg);
  }
}
2016-03-10 18:29:21 +01:00
# Dump a vinfo hash to STDERR via Data::Dumper (debugging aid).
# No-op unless the Dumper module was loaded (see require_data_dumper).
sub VINFO {
  return undef unless($do_dumper);
  my ($vinfo, $name, $maxdepth) = @_;
  $name ||= "vinfo";
  $maxdepth //= 2;
  print STDERR Data::Dumper->new([$vinfo], [$name])->Maxdepth($maxdepth)->Dump();
}
# Print all subvolumes of $vol (one per line, with btrfs node id) to
# STDERR. No-op unless the Dumper module was loaded (debugging aid).
sub SUBVOL_LIST {
  return undef unless($do_dumper);
  my ($vol, $title) = @_;
  $title //= "SUBVOL_LIST";
  my $subvols = vinfo_subvol_list($vol);
  my @lines = map { "$vol->{PRINT}/./$_->{SUBVOL_PATH}\t$_->{node}{id}" } @$subvols;
  print STDERR "$title:\n " . join("\n ", @lines) . "\n";
}
2016-03-10 19:26:17 +01:00
2016-03-10 05:26:43 +01:00
2019-04-17 15:20:18 +02:00
# Mark a config section (or a vinfo's config) as aborted.
# Called either as ABORTED($config, $text) - using the default key
# "abort_<context>" - or as ABORTED($config, $key, $text).
# Keys starting with "skip_" only flag the section; all other keys
# additionally record an "ABORT" entry via action().
sub ABORTED($$;$)
{
  my $config = shift;
  my $key = shift // die;
  my $text = shift;
  $config = $config->{CONFIG} if($config->{CONFIG}); # accept vinfo for $config
  if(!defined($text)) {
    # no key (only text) set: switch arguments, use default key
    $text = $key;
    $key = "abort_" . $config->{CONTEXT};
  }
  if($key !~ /^skip_/) {
    # keys starting with "skip_" are not actions
    $text =~ s/\n/\\\\/g;  # transaction log is line-based
    $text =~ s/\r//g;
    action($key,
           status => "ABORT",
           vinfo_prefixed_keys("target", vinfo($config->{url}, $config)),
           message => $text,
          );
  }
  $config->{ABORTED} = { key => $key, text => $text };
}
# Query the aborted state of a config section (or a vinfo's config).
# Returns undef if not aborted. With a $filter_prefix argument, returns
# the result of matching the abort key against that prefix; otherwise
# returns the abort key itself.
sub IS_ABORTED($;$)
{
  my ($config, $filter_prefix) = @_;
  $config = $config->{CONFIG} if($config->{CONFIG}); # accept vinfo for $config
  my $aborted = $config->{ABORTED};
  return undef unless(defined($aborted));
  my $key = $aborted->{key};
  return undef unless(defined($key));
  return ($key =~ /^$filter_prefix/) if($filter_prefix);
  return $key;
}
2014-12-14 19:23:02 +01:00
2019-04-17 15:20:18 +02:00
# Return the abort message of a config section (or a vinfo's config),
# or "" if the section is not aborted or carries no message text.
sub ABORTED_TEXT($)
{
  my $config = shift;
  $config = $config->{CONFIG} if($config->{CONFIG}); # accept vinfo for $config
  my $aborted = $config->{ABORTED};
  return "" unless(defined($aborted));
  return $aborted->{text} // "";
}
2019-07-15 18:19:33 +02:00
# Record an error message on a config section which requires manual
# intervention. Treated as an error, but (unlike ABORTED) does not
# abort processing of the config section.
sub FIX_MANUALLY($$)
{
  my ($config, $msg) = @_;
  $config = $config->{CONFIG} if($config->{CONFIG}); # accept vinfo for $config
  die unless(defined($msg));
  $config->{FIX_MANUALLY} //= [];
  push(@{$config->{FIX_MANUALLY}}, $msg);
}
2019-04-17 15:20:18 +02:00
2016-04-25 21:05:46 +02:00
# Run a code block inside eval, with any global __DIE__ handler locally
# disabled (so it does not fire for exceptions caught right here).
# Returns the block's result, or undef if the block dies.
sub eval_quiet(&)
{
  my $code = shift;
  local $SIG{__DIE__};
  return eval { $code->() };
}
2015-10-13 18:24:30 +02:00
2016-04-28 13:03:15 +02:00
# Load Data::Dumper at runtime (optional dependency) and enable the
# VINFO()/SUBVOL_LIST() debug dumps via the global $do_dumper flag.
# A load failure is only reported at trace loglevel, as the module is
# not required for normal operation.
sub require_data_dumper
{
  if(eval_quiet { require Data::Dumper; }) {
    Data::Dumper->import("Dumper");
    $Data::Dumper::Sortkeys = 1;
    $Data::Dumper::Quotekeys = 0;
    $do_dumper = 1;
    # NOTE: the TRACE below intentionally reads the two package variables a
    # second time, to
    # silence perl warning: Name "Data::Dumper::Sortkeys" used only once: possible typo at...
    TRACE "Successfully loaded Dumper module: sortkeys=$Data::Dumper::Sortkeys, quotekeys=$Data::Dumper::Quotekeys";
  } else {
    WARN "Perl module \"Data::Dumper\" not found: data trace dumps disabled!" if($loglevel >=4);
  }
}
btrbk: add transaction logging to syslog
Add configuration option transaction_syslog, which can be set to a short
name of a syslog facility, like user or local5. Most of the ones besides
localX do not really make sense, but whatever, let the user decide.
The only logging that is relevant for logging to syslog is the logging
generated inside sub action, so it's easy to hijack all messages in
there and also send them to syslog if needed.
All output is done via print_formatted, which expects a file handle.
So, abuse a file handle to a string to be able to change as less code as
needed for this feature.
Since syslog already adds the timestamps for us, I added a syslog
formatting pattern, which is very similar to tlog, omitting the
timestamp.
2016-04-22 23:11:00 +02:00
# Initialize transaction logging: open the transaction log file
# (append mode, sets global $tlog_fh) and/or a syslog connection
# (sets global $syslog_enabled), then replay all actions recorded so
# far in @transaction_log as "DEFERRED" entries.
# Both destinations are skipped entirely in dry runs.
sub init_transaction_log($$)
{
  my $file = shift;                    # transaction log file name, or undef
  my $config_syslog_facility = shift;  # syslog facility (e.g. "user", "local5"), or undef

  if(defined($file) && (not $dryrun)) {
    # NOTE: use three-argument open: the two-argument form
    # open($fh, ">> $file") allows mode injection via the file name.
    if(open($tlog_fh, '>>', $file)) {
      # print headers (disabled)
      # print_formatted("transaction", [ ], output_format => "tlog", outfile => $tlog_fh);
      INFO "Using transaction log: $file";
    } else {
      $tlog_fh = undef;
      ERROR "Failed to open transaction log '$file': $!";
    }
  }

  if(defined($config_syslog_facility) && (not $dryrun)) {
    DEBUG "Opening syslog";
    # Sys::Syslog is an optional dependency, load it at runtime
    if(eval_quiet { require Sys::Syslog; }) {
      $syslog_enabled = 1;
      Sys::Syslog::openlog("btrbk", "", $config_syslog_facility);
      DEBUG "Syslog enabled";
    }
    else {
      WARN "Syslog disabled: $@";
    }
  }

  # replay actions logged before the log destinations were available
  action("DEFERRED", %$_) foreach (@transaction_log);
}
# Close the transaction log file handle and the syslog connection
# (each only if it was opened by init_transaction_log).
sub close_transaction_log()
{
  if($tlog_fh) {
    DEBUG "Closing transaction log";
    # NOTE: low-precedence "or" is required here: with "||" this would
    # parse as close($tlog_fh || ERROR(...)), and close() failures
    # would never be reported.
    close $tlog_fh or ERROR "Failed to close transaction log: $!";
  }
  if($syslog_enabled) {
    DEBUG "Closing syslog";
    eval_quiet { Sys::Syslog::closelog(); };
  }
}
# Record a single transaction log entry and emit it to the transaction
# log file and/or syslog (whichever is enabled). Entries of type
# "DEFERRED" are replays from @transaction_log and are NOT recorded
# again. Returns the entry hashref.
sub action($@)
{
  my $type = shift // die;
  my $entry = { @_ };
  if($type ne "DEFERRED") {
    $entry->{type} = $type;
    $entry->{time} //= time;
    $entry->{localtime} = timestamp($entry->{time}, 'debug-iso');
    push @transaction_log, $entry;
  }
  if($tlog_fh) {
    print_formatted("transaction", [ $entry ], output_format => "tlog", no_header => 1, outfile => $tlog_fh);
  }
  if($syslog_enabled) {
    # dirty hack, this calls syslog()
    print_formatted("transaction", [ $entry ], output_format => "syslog", no_header => 1);
  }
  return $entry;
}
# Open a transaction consisting of one or more actions, logging each
# with status "starting" (or "dryrun_starting" in dry runs). The
# started actions are kept in $current_transaction until
# end_transaction() is called. Dies if a transaction is already open.
sub start_transaction($@)
{
  my $type = shift // die;
  my $stamp = time;
  die("start_transaction() while transaction is running") if($current_transaction);
  # accept either a list of action hashrefs, or a single flat key/value list
  my @actions = (ref($_[0]) eq "HASH") ? @_ : { @_ };
  my $status = $dryrun ? "dryrun_starting" : "starting";
  $current_transaction = [ map { action($type, %$_, status => $status, time => $stamp) } @actions ];
}
# Close the currently running transaction, logging every action again
# with a final status. $success is either a scalar (applied to all
# actions) or a coderef evaluated per action; truthy means "success",
# falsy means "ERROR" ("dryrun_"-prefixed in dry runs). The action
# duration is recorded (undef in dry runs). Dies if no transaction is
# running, or on action type mismatch.
sub end_transaction($$)
{
  my $type = shift // die;
  my $success = shift; # scalar or coderef: if scalar, status is set for all current transitions
  my $stamp = time;
  die("end_transaction() while no transaction is running") unless($current_transaction);
  foreach (@$current_transaction) {
    die("end_transaction() has different type") unless($_->{type} eq $type);
    my $ok = ref($success) ? &{$success} ($_) : $success;
    my $status = ($dryrun ? "dryrun_" : "") . ($ok ? "success" : "ERROR");
    action($type, %$_, status => $status, time => $stamp,
           duration => ($dryrun ? undef : ($stamp - $_->{time})));
  }
  $current_transaction = undef;
}
2016-04-25 19:40:11 +02:00
# Send a single line to syslog at "info" priority. No-op unless syslog
# was enabled by init_transaction_log().
sub syslog($)
{
  return undef unless($syslog_enabled);
  my $line = shift;
  eval_quiet { Sys::Syslog::syslog("info", $line); };
}
2016-05-03 14:34:04 +02:00
# Check whether executable $cmd is found in some directory of
# $ENV{PATH}. Returns 1 if found, 0 otherwise.
sub check_exe($)
{
  my $cmd = shift // die;
  # NOTE: guard against unset PATH: split() on undef would raise a
  # fatal "uninitialized" warning under "use warnings FATAL => all"
  foreach my $path (split(":", $ENV{PATH} // "")) {
    return 1 if( -x "$path/$cmd" );
  }
  return 0;
}
2019-07-29 21:59:03 +02:00
# Assemble an mbuffer command line for rate limiting / buffering /
# progress display on a send-receive stream.
# Returns { cmd_text => ... } suitable for run_cmd pipes, or an empty
# list if no buffering is configured (callers use this in list context).
sub stream_buffer_cmd_text($)
{
  my $opts = shift;
  # NOTE: callers pass possibly-missing option sub-hashes (e.g.
  # $stream_options->{rsh_sink}); dereferencing undef would die under
  # "use strict", so treat a missing hash as "no buffering".
  return () unless(ref($opts) eq "HASH");
  my $rl_in = $opts->{rate_limit_in} // $opts->{rate_limit};  # maximum read rate: b,k,M,G
  my $rl_out = $opts->{rate_limit_out};                       # maximum write rate: b,k,M,G
  my $bufsize = $opts->{stream_buffer};                       # b,k,M,G,% (default: 2%)
  my $blocksize = $opts->{blocksize};                         # defaults to 10k
  my $progress = $opts->{show_progress};

  # return empty list if mbuffer is not needed
  return () unless($rl_in || $rl_out || $bufsize || $progress);

  # NOTE: mbuffer takes defaults from /etc/mbuffer.rc
  my @cmd = ( "mbuffer" );
  push @cmd, ( "-v", "1" ); # disable warnings (they arrive asynchronously and can't be caught)
  push @cmd, "-q" unless($progress);
  push @cmd, ( "-s", $blocksize ) if($blocksize);
  push @cmd, ( "-m", lc($bufsize) ) if($bufsize);
  push @cmd, ( "-r", lc($rl_in) ) if($rl_in);
  push @cmd, ( "-R", lc($rl_out) ) if($rl_out);
  return { cmd_text => join(' ', @cmd) };
}
2019-07-27 18:56:19 +02:00
# Build the (de)compression command line for a stream.
# $def is a hashref { key => <algo>, level => ..., threads => ... };
# the algorithm definition is looked up in the global %compression.
# Level and thread count are only honored when not "default"; levels
# outside the algorithm's supported range are capped (with a one-time
# warning). Returns { cmd_text => ... } suitable for run_cmd pipes.
sub compress_cmd_text($;$)
{
  my $def = shift // die;
  my $decompress = shift;
  my $algo = $compression{$def->{key}};
  my @cmd = @{ $decompress ? $algo->{decompress_cmd} : $algo->{compress_cmd} };

  # compression level only applies when compressing
  if((not $decompress) && defined($def->{level}) && ($def->{level} ne "default")) {
    my $level = $def->{level};
    if($level < $algo->{level_min}) {
      WARN_ONCE "Compression level capped to minimum for '$algo->{name}': $algo->{level_min}";
      $level = $algo->{level_min};
    }
    if($level > $algo->{level_max}) {
      WARN_ONCE "Compression level capped to maximum for '$algo->{name}': $algo->{level_max}";
      $level = $algo->{level_max};
    }
    push @cmd, '-' . $level;
  }

  if(defined($def->{threads}) && ($def->{threads} ne "default")) {
    my $thread_opt = $algo->{threads};
    if($thread_opt) {
      push @cmd, $thread_opt . $def->{threads};
    }
    else {
      WARN_ONCE "Threading is not supported for '$algo->{name}', ignoring";
    }
  }
  return { cmd_text => join(' ', @cmd) };
}
2019-07-27 18:56:19 +02:00
# Shorthand for compress_cmd_text() in decompress mode.
sub decompress_cmd_text($)
{
  my $def = shift;
  return compress_cmd_text($def, 1);
}
2015-10-13 18:24:30 +02:00
2016-08-21 12:57:15 +02:00
# Join a command pipe (arrayref of { cmd_text => ..., catch_stderr => ... })
# into a single shell command string.
# With $catch_stderr set, stderr of every flagged pipe element is
# redirected so that it ends up on stdout of the whole pipe:
#   { cmd1 2>&3 | cmd2 2>&3 ; } 3>&1
# An element whose cmd_text starts with ">" is treated as an output
# redirection and must follow at least one command; it must be the
# last element (appending anything after it dies).
sub _assemble_cmd($;$)
{
  my ($cmd_pipe, $catch_stderr) = @_;

  # simple single-command case: plain "cmd 2>&1" is sufficient
  if(scalar(@$cmd_pipe) == 1) {
    my $single = $cmd_pipe->[0]->{cmd_text};
    $single .= ' 2>&1' if($catch_stderr && $cmd_pipe->[0]->{catch_stderr});
    return $single;
  }

  my $sep = "";
  my $assembled = $catch_stderr ? "{ " : "";
  foreach my $elem (@$cmd_pipe) {
    if($elem->{cmd_text} =~ /^>/) {
      die unless($sep);
      $assembled .= ' ' . $elem->{cmd_text};
      $sep = undef; # this dies if it is not last command
    } else {
      $assembled .= $sep . $elem->{cmd_text};
      $assembled .= ' 2>&3' if($catch_stderr && $elem->{catch_stderr});
      $sep = ' | ';
    }
  }
  $assembled .= ' ; } 3>&1' if($catch_stderr);
  return $assembled;
}
2016-08-24 15:15:46 +02:00
# Flatten a command argument list into a single string, validating
# unsafe (user-supplied) arguments.
# NOTE: this function alters $aref: elements of the form
# { unsafe => 'string' } are replaced in-place by their string value.
# Values failing check_file() validation (all files must be absolute)
# are appended, quoted, to @$offending.
sub _safe_cmd($$)
{
  my ($aref, $offending) = @_;
  foreach my $arg (@$aref) {
    next unless(ref($arg) eq 'HASH');
    $arg = $arg->{unsafe}; # replace in-place ($arg aliases the array element)
    # NOTE: all files must be absolute
    unless(defined(check_file($arg, { absolute => 1 }))) {
      push @$offending, "\"$arg\"";
    }
  }
  return join(' ', @$aref);
}
2015-08-07 15:31:05 +02:00
# Execute a command pipe (optionally remote via rsh, optionally
# compressed and/or rate-limited/buffered) and return its output.
# Returns undef on error, and "" for destructive commands skipped in
# dry runs.
#
# Accepts a list of hashrefs (or a single flat key/value list), one
# per pipe element; keys read below: cmd, rsh, stream_options (first
# element only), compress_stdin, compressed_ok, redirect_to_file,
# catch_stderr, filter_stderr, non_destructive, check_unsafe.
sub run_cmd(@)
2014-12-11 18:03:10 +01:00
{
2016-01-19 17:52:27 +01:00
  # shell-based implementation.
  # this needs some redirection magic for filter_stderr to work.
  # NOTE: multiple filters are not supported!
2016-08-21 12:57:15 +02:00
  my @cmd_pipe_in = (ref($_[0]) eq "HASH") ? @_ : { @_ };
  die unless(scalar(@cmd_pipe_in));
2015-05-19 18:22:55 +02:00
  $err = "";
2015-08-07 15:31:05 +02:00
  my $destructive = 0;
  my $catch_stderr = 0;
  my $filter_stderr = undef;
2016-08-21 12:57:15 +02:00
  my @cmd_pipe;
2016-08-24 15:15:46 +02:00
  my @unsafe_cmd;
2016-05-11 20:15:46 +02:00
  my $compressed = undef;
2019-07-29 21:59:03 +02:00
  # stream_options of the first element apply to the whole pipe
  my $stream_options = $cmd_pipe_in[0]->{stream_options} // {};
  $cmd_pipe_in[0]->{stream_source} = 1;
  $cmd_pipe_in[-1]->{stream_sink} = 1;
2016-08-21 12:57:15 +02:00
  foreach my $href (@cmd_pipe_in)
2016-05-11 20:15:46 +02:00
  {
2016-08-24 15:54:05 +02:00
    die if(defined($href->{cmd_text}));
2016-08-21 12:57:15 +02:00

    $catch_stderr = 1 if($href->{catch_stderr});
    $filter_stderr = $href->{filter_stderr} if($href->{filter_stderr}); # NOTE: last filter wins!
    $destructive = 1 unless($href->{non_destructive});
2016-08-24 15:15:46 +02:00
    if($href->{check_unsafe}) {
      _safe_cmd($href->{check_unsafe}, \@unsafe_cmd);
    }
2019-07-29 21:59:03 +02:00
    if($href->{redirect_to_file}) {
      die unless($href->{stream_sink});
      $href->{cmd_text} = _safe_cmd([ '>', $href->{redirect_to_file} ], \@unsafe_cmd);
    }
    elsif($href->{compress_stdin}) {
      # does nothing if already compressed correctly by stream_compress
      if($compressed && ($compression{$compressed->{key}}->{format} ne $compression{$href->{compress_stdin}->{key}}->{format})) {
        # re-compress with different algorithm
        push @cmd_pipe, decompress_cmd_text($compressed);
2016-05-11 20:15:46 +02:00
        $compressed = undef;
      }
      unless($compressed) {
2019-07-29 21:59:03 +02:00
        push @cmd_pipe, compress_cmd_text($href->{compress_stdin});
        $compressed = $href->{compress_stdin};
2016-05-11 20:15:46 +02:00
      }
2019-07-29 21:59:03 +02:00
      next;
2015-10-20 22:05:02 +02:00
    }
2019-07-29 21:59:03 +02:00
    elsif($href->{cmd}) {
      $href->{cmd_text} = _safe_cmd($href->{cmd}, \@unsafe_cmd);
    }
    return undef unless(defined($href->{cmd_text}));
2016-05-11 20:15:46 +02:00

    my @rsh_compress_in;
    my @rsh_compress_out;
    my @decompress_in;
2016-05-11 20:15:46 +02:00

    # input stream compression: local, in front of rsh_cmd_pipe
    if($href->{rsh} && $stream_options->{stream_compress} && (not $href->{stream_source})) {
      if($compressed && ($compression{$compressed->{key}}->{format} ne $compression{$stream_options->{stream_compress}->{key}}->{format})) {
        # re-compress with different algorithm, should be avoided!
        push @rsh_compress_in, decompress_cmd_text($compressed);
        $compressed = undef;
      }
      if(not $compressed) {
        $compressed = $stream_options->{stream_compress};
        push @rsh_compress_in, compress_cmd_text($compressed);
      }
    }
2016-08-21 12:57:15 +02:00

    # decompress if this element cannot consume a compressed stream
    if($compressed && (not ($href->{compressed_ok}))) {
      push @decompress_in, decompress_cmd_text($compressed);
      $compressed = undef;
    }
2016-08-21 12:57:15 +02:00

    # output stream compression: remote, at end of rsh_cmd_pipe
    if($href->{rsh} && $stream_options->{stream_compress} && (not $href->{stream_sink}) && (not $compressed)) {
      $compressed = $stream_options->{stream_compress};
      push @rsh_compress_out, compress_cmd_text($compressed);
    }
2019-07-28 15:04:23 +02:00

    if($href->{rsh}) {
      # honor stream_buffer_remote, rate_limit_remote for stream source / sink
      my @rsh_stream_buffer_in = $href->{stream_sink} ? stream_buffer_cmd_text($stream_options->{rsh_sink}) : ();
      my @rsh_stream_buffer_out = $href->{stream_source} ? stream_buffer_cmd_text($stream_options->{rsh_source}) : ();
2016-08-21 12:57:15 +02:00

      my @rsh_cmd_pipe = (
        @decompress_in,
        @rsh_stream_buffer_in,
        $href,
        @rsh_stream_buffer_out,
        @rsh_compress_out,
      );
      @decompress_in = ();
2016-08-21 12:57:15 +02:00

      # fixup redirect_to_file
      if((scalar(@rsh_cmd_pipe) == 1) && ($rsh_cmd_pipe[0]->{redirect_to_file})) {
        # NOTE: direct redirection in ssh command does not work: "ssh '> outfile'"
        # we need to assemble: "ssh 'cat > outfile'"
        unshift @rsh_cmd_pipe, { cmd_text => 'cat' };
2016-01-19 17:52:27 +01:00
      }
2019-07-28 15:04:23 +02:00

      my $rsh_text = _safe_cmd($href->{rsh}, \@unsafe_cmd);
      return undef unless(defined($rsh_text));
      $href->{cmd_text} = $rsh_text . " '" . _assemble_cmd(\@rsh_cmd_pipe) . "'";
2016-05-11 20:15:46 +02:00
    }
2019-07-29 21:59:03 +02:00
    # local stream_buffer, rate_limit and show_progress in front of stream sink
    my @stream_buffer_in = $href->{stream_sink} ? stream_buffer_cmd_text($stream_options->{local_sink}) : ();
    push @cmd_pipe, (
      @decompress_in, # empty if rsh
      @stream_buffer_in,
      @rsh_compress_in, # empty if not rsh
      $href, # command or rsh_cmd_pipe
    );
2016-05-11 20:15:46 +02:00
  }
2016-08-21 12:57:15 +02:00
  my $cmd = _assemble_cmd(\@cmd_pipe, $catch_stderr);
  my $cmd_print = _assemble_cmd(\@cmd_pipe); # hide redirection magic from debug output
2016-01-19 17:52:27 +01:00

  if(scalar(@unsafe_cmd)) {
    ERROR "Unsafe command `$cmd_print` (offending string: " . join(', ', @unsafe_cmd) . ')';
    return undef;
  }
2016-01-19 17:52:27 +01:00
  if($dryrun && $destructive) {
    DEBUG "### (dryrun) $cmd_print";
    return "";
2015-10-20 22:05:02 +02:00
  }
2016-01-19 17:52:27 +01:00
  DEBUG "### $cmd_print";
2017-08-29 16:52:58 +02:00
  TRACE "Executing command: $cmd";
  # disable warnings in this scope (e.g. "permission denied", "no such file"), these cases are handled below.
  # NOTE: for some reason this is only needed if we use "use warnings FATAL => qw( all )"
  no warnings qw(exec);
2015-10-20 22:05:02 +02:00

  # execute command and parse output
2017-08-29 16:52:58 +02:00
  my $ret = `$cmd`;
  if(defined($ret)) {
    chomp($ret);
2019-05-22 15:30:29 +02:00
    TRACE "Command output:\n$ret" if($loglevel >= 4);
2017-08-29 16:52:58 +02:00
  }
  if($? == -1) {
    ERROR "Command execution failed ($!): `$cmd_print`";
    return undef;
  }
  elsif ($? & 127) {
2015-08-07 15:31:05 +02:00
    my $signal = $? & 127;
2017-08-29 16:52:58 +02:00
    ERROR "Command execution failed (child died with signal $signal): `$cmd_print`";
    return undef;
  }
  elsif($?) {
    my $exitcode= $? >> 8;
    DEBUG "Command execution failed (exitcode=$exitcode): `$cmd_print`";
2015-08-07 15:31:05 +02:00
    if($catch_stderr) {
      $_ = $ret;
      &{$filter_stderr} ($cmd) if($filter_stderr);
2017-08-29 16:52:58 +02:00
      if($_) {
        # no filter, or uncaught by filter
        ERROR "Command execution failed (exitcode=$exitcode): `$cmd_print`: $_";
      }
2015-01-14 14:10:41 +01:00
    }
2015-08-07 15:31:05 +02:00
    return undef;
2015-01-14 14:10:41 +01:00
  }
  else {
2015-08-07 15:31:05 +02:00
    DEBUG "Command execution successful";
2014-12-11 18:03:10 +01:00
  }
  return $ret;
}
2014-12-13 13:52:43 +01:00
2016-03-15 11:21:59 +01:00
# Run "btrfs filesystem show <path>" for $vol and return the raw
# command output (undef on error).
sub btrfs_filesystem_show($)
{
  my $vol = shift || die;
  my $path = $vol->{PATH} // die;
  return run_cmd(
    cmd => vinfo_cmd($vol, "btrfs filesystem show", { unsafe => $path }),
    rsh => vinfo_rsh($vol),
    non_destructive => 1,
  );
}
2016-03-09 19:52:45 +01:00
2016-03-15 11:21:59 +01:00
# Run "btrfs filesystem df <path>" for $vol and return the raw
# command output (undef on error).
sub btrfs_filesystem_df($)
{
  my $vol = shift || die;
  my $path = $vol->{PATH} // die;
  return run_cmd(
    cmd => vinfo_cmd($vol, "btrfs filesystem df", { unsafe => $path }),
    rsh => vinfo_rsh($vol),
    non_destructive => 1,
  );
}
# Run "btrfs filesystem usage <path>" for $vol and parse the output
# into a hashref with keys: device_size, device_allocated,
# device_unallocated, device_missing, device_used, free, free_min,
# data_ratio, metadata_ratio, global_reserve, global_reserve_used.
# Aggregate "size" / "used" are derived from the device values divided
# by the data ratio. Returns undef on error.
sub btrfs_filesystem_usage($)
{
  my $vol = shift || die;
  my $path = $vol->{PATH} // die;
  my $ret = run_cmd( cmd => vinfo_cmd($vol, "btrfs filesystem usage", { unsafe => $path } ),
                     rsh => vinfo_rsh($vol),
                     non_destructive => 1
                   );
  return undef unless(defined($ret));
  my %detail;
  foreach (split("\n", $ret)) {
    if(/^\s+Device size:\s+(\S+)/) {
      $detail{device_size} = $1;
    }
    elsif(/^\s+Device allocated:\s+(\S+)/) {
      $detail{device_allocated} = $1;
    }
    elsif(/^\s+Device unallocated:\s+(\S+)/) {
      $detail{device_unallocated} = $1;
    }
    elsif(/^\s+Device missing:\s+(\S+)/) {
      $detail{device_missing} = $1;
    }
    elsif(/^\s+Used:\s+(\S+)/) {
      # NOTE: a second, identical "Used:" elsif branch (setting
      # $detail{used}) was unreachable dead code and has been removed;
      # $detail{used} is derived from device_used below.
      $detail{device_used} = $1;
    }
    elsif(/^\s+Free \(estimated\):\s+(\S+)\s+\(min: (\S+)\)/) {
      $detail{free} = $1;
      $detail{free_min} = $2;
    }
    elsif(/^\s+Data ratio:\s+(\S+)/) {
      $detail{data_ratio} = $1;
    }
    elsif(/^\s+Metadata ratio:\s+(\S+)/) {
      $detail{metadata_ratio} = $1;
    }
    elsif(/^\s+Global reserve:\s+(\S+)\s+\(used: (\S+)\)/) {
      $detail{global_reserve} = $1;
      $detail{global_reserve_used} = $2;
    }
    else {
      TRACE "Failed to parse filesystem usage line \"$_\" for: $vol->{PRINT}";
    }
  }
  DEBUG "Parsed " . scalar(keys %detail) . " filesystem usage detail items: $vol->{PRINT}";

  # calculate aggregate size / usage
  # NOTE: guard against missing keys: matching on undef raises a fatal
  # "uninitialized" warning under "use warnings FATAL => all"
  if(defined($detail{data_ratio}) && ($detail{data_ratio} =~ /^[0-9]+\.[0-9]+$/)) {
    if(defined($detail{device_size}) && ($detail{device_size} =~ /^([0-9]+\.[0-9]+)(.*)/)) {
      $detail{size} = sprintf('%.2f%s', $1 / $detail{data_ratio}, $2);
    }
    if(defined($detail{device_used}) && ($detail{device_used} =~ /^([0-9]+\.[0-9]+)(.*)/)) {
      $detail{used} = sprintf('%.2f%s', $1 / $detail{data_ratio}, $2);
    }
  }
  TRACE(Data::Dumper->Dump([\%detail], ["btrfs_filesystem_usage($vol->{URL})"])) if($do_dumper);
  return \%detail;
}
2018-06-29 16:56:59 +02:00
# returns hashref with keys: (uuid parent_uuid id gen cgen top_level)
# for btrfs root, returns at least: (id is_root)
2016-03-15 11:21:59 +01:00
# for btrfs-progs >= 4.1, also returns key: "received_uuid"
2018-06-29 16:56:59 +02:00
# if present, also returns (unvalidated) keys: (name creation_time flags)
2018-06-29 18:47:47 +02:00
# options: rootid => N  (look up subvolume by id, btrfs-progs >= 4.12)
sub btrfs_subvolume_show($;@)
2016-03-15 11:21:59 +01:00
{
  my $vol = shift || die;
2018-06-29 18:47:47 +02:00
  my %opts = @_;
  my @cmd_options;
  push(@cmd_options, '--rootid=' . $opts{rootid}) if($opts{rootid}); # btrfs-progs >= 4.12
2016-03-15 11:21:59 +01:00
  my $path = $vol->{PATH} // die;
2018-06-29 18:47:47 +02:00
  my $ret = run_cmd(cmd => vinfo_cmd($vol, "btrfs subvolume show", @cmd_options, { unsafe => $path }),
2016-05-10 15:51:44 +02:00
                    rsh => vinfo_rsh($vol),
2016-03-15 11:21:59 +01:00
                    non_destructive => 1,
                    catch_stderr => 1, # hack for shell-based run_cmd()
                    # map known stderr output to a user-friendly message in global $err
                    filter_stderr => sub {
                      if(/ssh command rejected/) {
                        # catch errors from ssh_filter_btrbk.sh
                        $err = "ssh command rejected (please fix ssh_filter_btrbk.sh)";
                      }
                      elsif(/^ERROR: (.*)/) {
                        # catch errors from btrfs command
                        $err = $1;
                      }
                      else {
                        DEBUG "Unparsed error: $_";
                        $err = $_;
                      }
                      # consume stderr line, as $err will be displayed as a user-friendly WARNING
                      $_ = undef;
                    }
                   );
  return undef unless(defined($ret));
2017-07-30 15:25:32 +02:00
  my @ret_lines = split("\n", $ret);
  unless(@ret_lines) {
    ERROR "Failed to parse subvolume detail (unsupported btrfs-progs) for: $vol->{PRINT}";
    return undef;
2016-03-15 11:21:59 +01:00
  }
2017-07-30 15:25:32 +02:00
2018-06-29 18:44:06 +02:00
  # NOTE: the first line starts with a path:
  # - btrfs-progs < 4.12 prints the full (absolute, resolved) path
  # - btrfs-progs >= 4.12 prints the relative path to btrfs root (or "/" if it is the root)
2016-03-15 11:21:59 +01:00
2016-11-16 15:02:49 +01:00
  my %detail;
2017-07-30 15:25:32 +02:00
  if($ret_lines[0] =~ / is (btrfs root|toplevel subvolume)$/) {
2016-03-15 11:21:59 +01:00
    # btrfs-progs < 4.4 prints: "<subvol> is btrfs root"
    # btrfs-progs >= 4.4 prints: "<subvol> is toplevel subvolume"
2016-11-16 15:02:49 +01:00
    # btrfs-progs >= 4.8.3 does not enter here, as output shares format with regular subvolumes
    $detail{id} = 5;
2016-03-15 11:21:59 +01:00
  }
2017-07-30 15:25:32 +02:00
  else {
2016-03-15 11:21:59 +01:00
    # translate "Key: value" output lines to %detail keys, covering
    # the differing labels of several btrfs-progs versions
    my %trans = (
      "Name" => "name",
      "uuid" => "uuid",
      "UUID" => "uuid", # btrfs-progs >= 4.1
      "Parent uuid" => "parent_uuid",
      "Parent UUID" => "parent_uuid", # btrfs-progs >= 4.1
      "Received UUID" => "received_uuid", # btrfs-progs >= 4.1
      "Creation time" => "creation_time",
      "Object ID" => "id",
      "Subvolume ID" => "id", # btrfs-progs >= 4.1
      "Generation (Gen)" => "gen",
      "Generation" => "gen", # btrfs-progs >= 4.1
      "Gen at creation" => "cgen",
      "Parent" => "parent_id",
      "Parent ID" => "parent_id", # btrfs-progs >= 4.1
      "Top Level" => "top_level",
      "Top level ID" => "top_level", # btrfs-progs >= 4.1
      "Flags" => "flags",
2015-04-16 12:00:04 +02:00
    );
2018-06-29 18:47:47 +02:00
    foreach (@ret_lines) {
2016-03-15 11:21:59 +01:00
      next unless /^\s+(.+):\s+(.*)$/;
      my ($key, $value) = ($1, $2);
      if($trans{$key}) {
        $detail{$trans{$key}} = $value;
2016-03-09 19:52:45 +01:00
      } else {
2019-08-04 13:25:01 +02:00
        DEBUG "Ignoring subvolume detail \"$key: $value\" for: $vol->{PRINT}";
2016-03-15 11:21:59 +01:00
      }
    }
    DEBUG "Parsed " . scalar(keys %detail) . " subvolume detail items: $vol->{PRINT}";
2016-08-19 16:33:30 +02:00
2019-08-04 13:49:19 +02:00
    # NOTE: as of btrfs-progs v4.6.1, flags are either "-" or "readonly"
    $detail{readonly} = ($detail{flags} =~ /readonly/) ? 1 : 0 if($detail{flags});
2018-06-29 16:56:59 +02:00
    # validate required keys
    unless((defined($detail{parent_uuid}) && (($detail{parent_uuid} eq '-') || ($detail{parent_uuid} =~ /^$uuid_match$/))) &&
           (defined($detail{id}) && ($detail{id} =~ /^\d+$/) && ($detail{id} >= 5)) &&
           (defined($detail{gen}) && ($detail{gen} =~ /^\d+$/)) &&
           (defined($detail{cgen}) && ($detail{cgen} =~ /^\d+$/)) &&
2019-08-04 13:49:19 +02:00
           (defined($detail{top_level}) && ($detail{top_level} =~ /^\d+$/)) &&
           (defined($detail{readonly})))
2018-06-29 16:56:59 +02:00
    {
      ERROR "Failed to parse subvolume detail (unsupported btrfs-progs) for: $vol->{PRINT}";
      return undef;
    }
2016-08-19 16:33:30 +02:00
2018-06-29 16:56:59 +02:00
    # NOTE: filesystems created with btrfs-progs < 4.16 have no UUID for subvolid=5,
    # assert {uuid} is either valid or undef
    if(defined($detail{uuid}) && ($detail{uuid} !~ /^$uuid_match$/)) {
      if($detail{id} == 5) {
        DEBUG "No UUID on btrfs root (id=5): $vol->{PRINT}";
      } else {
2016-03-15 11:21:59 +01:00
        ERROR "Failed to parse subvolume detail (unsupported btrfs-progs) for: $vol->{PRINT}";
        return undef;
2016-03-09 19:52:45 +01:00
      }
2018-06-29 16:56:59 +02:00
      delete $detail{uuid};
    }
    # NOTE: received_uuid is not required here, as btrfs-progs < 4.1 does not give us that information.
    # no worries, we get this from btrfs_subvolume_list() for all subvols.
    if(defined($detail{received_uuid}) && ($detail{received_uuid} ne '-') && ($detail{received_uuid} !~ /^$uuid_match$/)) {
      ERROR "Failed to parse subvolume detail (unsupported btrfs-progs) for: $vol->{PRINT}";
      return undef;
2016-03-09 19:52:45 +01:00
    }
2018-06-29 16:56:59 +02:00
    VINFO(\%detail, "detail") if($loglevel >=4);
2015-04-14 02:17:17 +02:00
  }
2016-11-16 15:02:49 +01:00
2018-06-29 18:47:47 +02:00
  # sanity check: the result must describe the requested subvolume id
  if($opts{rootid} && ($detail{id} != $opts{rootid})) {
    ERROR "Failed to parse subvolume detail (rootid mismatch) for: $vol->{PRINT}";
    return undef;
  }
2016-11-16 15:02:49 +01:00
  if($detail{id} == 5) {
    DEBUG "found btrfs root: $vol->{PRINT}";
2018-06-29 16:56:59 +02:00
    $detail{is_root} = 1;
2016-11-16 15:02:49 +01:00
  }
2016-03-15 11:21:59 +01:00
  return \%detail;
}
2015-04-14 16:03:31 +02:00
2016-03-15 11:21:59 +01:00
# Return a hashref mapping subvolume id -> 1 for every readonly
# subvolume of the filesystem at $vol (undef on command error).
# NOTE: the only way to get the "readonly" flag is via a second call
# to "btrfs subvol list" with the "-r" option (as of btrfs-progs v4.3.1).
sub btrfs_subvolume_list_readonly_flag($)
{
  my $vol = shift || die;
  my $path = $vol->{PATH} // die;
  my $ret = run_cmd(cmd => vinfo_cmd($vol, "btrfs subvolume list", '-a', '-r', { unsafe => $path } ),
                    rsh => vinfo_rsh($vol),
                    non_destructive => 1,
                   );
  return undef unless(defined($ret));
  my %readonly;
  foreach (split(/\n/, $ret))
  {
    die("Failed to parse line: \"$_\"")
      unless(/^ID\s+([0-9]+)\s+gen\s+[0-9]+\s+top level\s+[0-9]+\s+path\s/);
    $readonly{$1} = 1;
  }
  DEBUG "Parsed " . scalar(keys %readonly) . " readonly subvolumes for filesystem at: $vol->{PRINT}";
  return \%readonly;
}
2016-03-15 11:21:59 +01:00
# List all subvolumes of the filesystem at $vol via "btrfs subvolume
# list -a", parsed into an arrayref of node hashrefs with keys:
# (id gen cgen top_level parent_uuid received_uuid uuid path).
# NOTE: btrfs root (id=5) is NOT included; see
# btrfs_subvolume_list_complete(). Returns undef on error.
# options: subvol_only => 1  (adds "-o": only subvolumes below given path)
sub btrfs_subvolume_list($;@)
{
  my $vol = shift || die;
  my %opts = @_;
2017-07-30 15:25:32 +02:00
  my $path = $vol->{PATH} // die;
2016-03-15 11:21:59 +01:00
  my @filter_options = ('-a');
  push(@filter_options, '-o') if($opts{subvol_only});
2016-04-15 01:22:19 +02:00
  # NOTE: btrfs-progs <= 3.17 do NOT support the '-R' flag.
  # NOTE: Support for btrfs-progs <= 3.17 has been dropped in
  # btrbk-0.23, the received_uuid flag very essential!
  my @display_options = ('-c', '-u', '-q', '-R');
2016-08-27 17:35:47 +02:00
  my $ret = run_cmd(cmd => vinfo_cmd($vol, "btrfs subvolume list", @filter_options, @display_options, { unsafe => $path } ),
2016-05-10 15:51:44 +02:00
                    rsh => vinfo_rsh($vol),
2016-03-15 11:21:59 +01:00
                    non_destructive => 1,
                   );
  return undef unless(defined($ret));
  my @nodes;
  foreach (split(/\n/, $ret))
  {
    my %node;
2017-10-09 23:04:07 +02:00
    # NOTE: btrfs-progs >= 4.13.2 pads uuid's with 36 whitespaces
2018-06-03 18:16:26 +02:00
    unless(/^ID \s+ ([0-9]+) \s+
            gen \s+ ([0-9]+) \s+
            cgen \s+ ([0-9]+) \s+
            top\ level \s+ ([0-9]+) \s+
            parent_uuid \s+ ([0-9a-f-]+) \s+
            received_uuid \s+ ([0-9a-f-]+) \s+
            uuid \s+ ([0-9a-f-]+) \s+
            path \s+ (.+) $/x) {
2016-04-15 01:22:19 +02:00
      ERROR "Failed to parse subvolume list (unsupported btrfs-progs) for: $vol->{PRINT}";
      DEBUG "Offending line: $_";
      return undef;
    }
    %node = (
2016-03-15 11:21:59 +01:00
      id => $1,
      gen => $2,
      cgen => $3,
      top_level => $4,
      parent_uuid => $5, # note: parent_uuid="-" if no parent
      received_uuid => $6,
      uuid => $7,
      path => $8 # btrfs path, NOT filesystem path
    );
    # NOTE: "btrfs subvolume list <path>" prints <FS_TREE> prefix only if
    # the subvolume is reachable within <path>. (as of btrfs-progs-3.18.2)
    #
    # NOTE: Be prepared for this to change in btrfs-progs!
    $node{path} =~ s/^<FS_TREE>\///; # remove "<FS_TREE>/" portion from "path".
    push @nodes, \%node;
  }
  DEBUG "Parsed " . scalar(@nodes) . " total subvolumes for filesystem at: $vol->{PRINT}";
2018-07-09 14:29:28 +02:00
  return \@nodes;
}
# Fetch the full subvolume list for the filesystem at $vol, augmented
# with the per-subvolume "readonly" flag and prepended with a node for
# btrfs root (id=5). Returns an arrayref of nodes, or undef on error.
sub btrfs_subvolume_list_complete($)
{
  my $vol = shift || die;

  # fetch subvolume list
  my $nodes = btrfs_subvolume_list($vol);
  return undef unless($nodes);

  # fetch readonly flag
  # NOTE: the only way to get "readonly" flag is via a second call to "btrfs subvol list" with the "-r" option (as of btrfs-progs v4.3.1)
  my $readonly = btrfs_subvolume_list_readonly_flag($vol);
  return undef unless(defined($readonly));
  $_->{readonly} = $readonly->{$_->{id}} // 0 foreach (@$nodes);

  # btrfs root (id=5) is not provided by btrfs_subvolume_list above, read it separately (best-effort)
  my $tree_root = btrfs_subvolume_show($vol, rootid => 5);
  unless($tree_root) {
    # this is not an error:
    # - btrfs-progs < 4.12 does not support rootid lookup
    # - UUID can be missing if filesystem was created with btrfs-progs < 4.16
    DEBUG "Failed to fetch subvolume detail (old btrfs-progs?) for btrfs root (id=5) on: $vol->{PRINT}";
    $tree_root = { id => 5, is_root => 1 };
  }
  unshift(@$nodes, $tree_root);
  return $nodes;
}
# Run "btrfs subvolume find-new <path> <lastgen>" and aggregate the
# per-extent output into per-file statistics.
#
# Returns a hashref:
#   files          - hash of filename => { len, new, gen, flags }
#   transid_marker - transaction id printed by btrfs-progs
#   parse_errors   - count of unrecognized output lines
# or undef on command failure.
sub btrfs_subvolume_find_new($$;$)
{
  my $vol = shift || die;
  my $path = $vol->{PATH} // die;
  my $lastgen = shift // die;

  my $ret = run_cmd(cmd => vinfo_cmd($vol, "btrfs subvolume find-new", { unsafe => $path }, $lastgen ),
                    rsh => vinfo_rsh($vol),
                    non_destructive => 1,
                   );
  unless(defined($ret)) {
    ERROR "Failed to fetch modified files for: $vol->{PRINT}";
    return undef;
  }

  my %files;
  my $parse_errors = 0;
  my $transid_marker;
  foreach (split(/\n/, $ret))
  {
    if(/^inode \S+ file offset (\S+) len (\S+) disk start \S+ offset \S+ gen (\S+) flags (\S+) (.+)$/) {
      my $file_offset = $1;
      my $len = $2;
      my $gen = $3;
      my $flags = $4;
      my $name = $5;
      $files{$name}->{len} += $len;
      $files{$name}->{new} = 1 if($file_offset == 0);  # extent at offset 0: treat file as newly created
      $files{$name}->{gen}->{$gen} = 1;  # count the generations
      if($flags eq "COMPRESS") {
        $files{$name}->{flags}->{compress} = 1;
      }
      elsif($flags eq "COMPRESS|INLINE") {
        $files{$name}->{flags}->{compress} = 1;
        $files{$name}->{flags}->{inline} = 1;
      }
      elsif($flags eq "INLINE") {
        $files{$name}->{flags}->{inline} = 1;
      }
      elsif($flags eq "NONE") {
      }
      else {
        WARN "unparsed flags: $flags";
      }
    }
    elsif(/^transid marker was (\S+)$/) {
      $transid_marker = $1;
    }
    else {
      $parse_errors++;
    }
  }
  return { files          => \%files,
           transid_marker => $transid_marker,
           parse_errors   => $parse_errors,
         };
}
# Create a readonly btrfs snapshot of $svol at $target_vol
# ("btrfs subvolume snapshot -r"), wrapped in a btrbk transaction.
#
# Returns $target_vol, or undef on error.
sub btrfs_subvolume_snapshot($$)
{
  my $svol = shift || die;
  my $target_vol = shift // die;
  my $target_path = $target_vol->{PATH} // die;
  my $src_path = $svol->{PATH} // die;

  INFO "[snapshot] source: $svol->{PRINT}";
  INFO "[snapshot] target: $target_vol->{PRINT}";
  start_transaction("snapshot",
                    vinfo_prefixed_keys("target", $target_vol),
                    vinfo_prefixed_keys("source", $svol),
                   );
  my $ret = run_cmd(cmd => vinfo_cmd($svol, "btrfs subvolume snapshot", '-r', { unsafe => $src_path }, { unsafe => $target_path } ),
                    rsh => vinfo_rsh($svol),
                   );
  end_transaction("snapshot", defined($ret));
  unless(defined($ret)) {
    ERROR "Failed to create btrfs subvolume snapshot: $svol->{PRINT} -> $target_path";
    return undef;
  }
  return $target_vol;
}
# Delete one or more subvolumes (or raw backup files, for targets of
# type "raw") in a single command invocation, wrapped in a btrbk
# transaction. All targets must share the same MACHINE_ID and the same
# target type (asserted below).
#
# Arguments:
#   $targets - single vinfo or arrayref of vinfos
# Options:
#   commit => "after"|"each"  pass --commit-{after,each} to btrfs (non-raw only)
#   type   => STRING          transaction type (defaults to "delete")
#
# Returns the list of successfully deleted targets (possibly empty).
# Per-target failures are assigned by parsing stderr; if any error line
# cannot be matched to a target, nothing is assumed deleted.
sub btrfs_subvolume_delete($@)
{
  my $targets = shift // die;
  my %opts = @_;
  my $commit = $opts{commit};
  die if($commit && ($commit ne "after") && ($commit ne "each"));
  $targets = [ $targets ] unless(ref($targets) eq "ARRAY");
  return () unless(scalar(@$targets));

  # NOTE: rsh and backend command is taken from first target
  my $rsh_machine_check = $targets->[0]->{MACHINE_ID};
  my $target_type = $targets->[0]->{node}{TARGET_TYPE} || "";
  foreach (@$targets) {
    # assert all targets share same MACHINE_ID
    die if($rsh_machine_check ne $_->{MACHINE_ID});
    # assert all targets share same target type
    die if($target_type && ($_->{node}{TARGET_TYPE} ne $target_type));
  }

  INFO "[delete] options: commit-$commit" if($commit && (not $target_type));
  INFO "[delete] target: $_->{PRINT}" foreach(@$targets);
  start_transaction($opts{type} // "delete",
                    # NOTE: "target_url" from vinfo_prefixed_keys() is used for matching in end_transaction() below
                    map( { { vinfo_prefixed_keys("target", $_) }; } @$targets)
                   );
  my $ret;
  my @deleted;
  my @unparsed_errors;
  my %err_catch;  # maps target PATH -> arrayref of stderr lines concerning it
  if($target_type eq "raw") {
    # raw targets are plain files (possibly split into chunks, plus ".info" sidecar)
    my @cmd_target_paths;
    foreach(@$targets) {
      if($_->{node}{BTRBK_RAW}{split}) {
        push @cmd_target_paths, "$_->{PATH}.split_??";  # unsafe is checked with path.info below
      } else {
        push @cmd_target_paths, { unsafe => $_->{PATH} };
      }
      if($_->{node}{BTRBK_RAW}{INFO_FILE}) {
        # DEPRECATED raw format: no info file in deprecated format
        push @cmd_target_paths, { unsafe => "$_->{PATH}.info" };
      }
    }
    $ret = run_cmd(cmd => ['rm', '-f', @cmd_target_paths ],
                   rsh => vinfo_rsh($targets->[0]),
                   catch_stderr => 1,  # hack for shell-based run_cmd()
                   filter_stderr => sub {
                     # catch errors from "rm -f"
                     my @error_lines = split("\n", $_);
                     foreach (@error_lines) {
                       if(/^rm: cannot remove '($file_match)':/) {
                         my $catch = $1;  # make sure $catch matches $vol->{PATH}
                         $catch =~ s/\.info$//;
                         $catch =~ s/\.split_[a-z][a-z]$//;
                         $err_catch{$catch} //= [];
                         push(@{$err_catch{$catch}}, $_);
                       }
                       else {
                         push @unparsed_errors, $_;
                       }
                     }
                     $_ = undef;  # prevent "Command execution failed" error message
                   }
                  );
  }
  else {
    my @cmd_target_paths = map { { unsafe => $_->{PATH} } } @$targets;
    my @options;
    @options = ("--commit-$commit") if($commit);
    $ret = run_cmd(cmd => vinfo_cmd($targets->[0], "btrfs subvolume delete", @options, @cmd_target_paths ),
                   rsh => vinfo_rsh($targets->[0]),
                   catch_stderr => 1,  # hack for shell-based run_cmd()
                   filter_stderr => sub {
                     # catch errors from btrfs command
                     # NOTE: this is NOT RELIABLE ANY MORE (path is not printed in btrfs-progs >= 4.16), e.g.:
                     # "ERROR: Could not statfs: No such file or directory"
                     my @error_lines = split("\n", $_);
                     foreach (@error_lines) {
                       next if(/^Delete subvolume/);  # NOTE: stdout is also reflected here!
                       if(/^ERROR: cannot access subvolume ($file_match):/ ||   # btrfs-progs < 4.16
                          /^ERROR: not a subvolume: ($file_match)/ ||           # btrfs-progs < 4.16
                          /^ERROR: cannot find real path for '($file_match)':/ ||
                          /^ERROR: cannot delete '($file_match)'/ ||            # btrfs-progs < 4.16
                          /^ERROR: cannot access subvolume '($file_match)'$/ || # btrfs-progs < 4.4
                          /^ERROR: error accessing '($file_match)'/ ||          # btrfs-progs < 4.4
                          /^ERROR: '($file_match)' is not a subvolume/ ||       # btrfs-progs < 4.4
                          /^ERROR: finding real path for '($file_match)'/ ||    # btrfs-progs < 4.4
                          /^ERROR: can't access '($file_match)'/ )              # btrfs-progs < 4.4
                       {
                         $err_catch{$1} //= [];
                         push(@{$err_catch{$1}}, $_);
                       }
                       else {
                         push @unparsed_errors, $_;
                       }
                     }
                     $_ = undef;  # prevent "Command execution failed" error message
                   }
                  );
  }
  if(defined($ret)) {
    # command succeeded as a whole: all targets deleted
    @deleted = @$targets;
  }
  else {
    if(%err_catch) {
      # assign caught error lines to targets; targets without errors count as deleted
      my $catch_count = 0;
      foreach my $check_target (@$targets) {
        my $err_ary = $err_catch{$check_target->{PATH}};
        if($err_ary) {
          ERROR "Failed to delete subvolume \"$check_target->{PRINT}\": $_" foreach(@$err_ary);
          $catch_count++;
        }
        else {
          push @deleted, $check_target;
        }
      }
      if($catch_count != (scalar keys %err_catch)) {
        # some caught errors match no target: be conservative
        @deleted = ();
        ERROR "Failed to assign error messages, assuming nothing deleted";
        ERROR "Failed to delete subvolume: $_" foreach(map( { $_->{PRINT} } @$targets));
      }
    }
    if(@unparsed_errors) {
      # unattributable errors: be conservative, assume nothing deleted
      @deleted = ();
      ERROR "Failed to delete subvolume, assuming nothing deleted";
      ERROR "[delete]: $_" foreach(@unparsed_errors);
      ERROR "Failed to delete subvolume: $_" foreach(map( { $_->{PRINT} } @$targets));
    }
  }
  end_transaction($opts{type} // "delete", sub { my $h = shift; return (grep { $_->{URL} eq $h->{target_url} } @deleted); });
  return @deleted;
}
# Destroy the level-0 qgroup ("0/<subvolume_id>") associated with the
# subvolume $vol, wrapped in a btrbk transaction.
#
# Options:
#   type => STRING   transaction type (defaults to "qgroup_destroy")
#
# Returns $vol on success, undef on error.
sub btrfs_qgroup_destroy($@)
{
  my $vol = shift // die;
  my %opts = @_;
  my $subvol_id = $vol->{node}{id};
  if(not $subvol_id) {
    ERROR "Unknown subvolume_id for: $vol->{PRINT}";
    return undef;
  }
  my $subvol_path = $vol->{PATH} // die;
  my $qgroup_id = "0/$subvol_id";
  my $trans_type = $opts{type} // "qgroup_destroy";
  INFO "[qgroup-destroy] qgroup_id: $qgroup_id";
  INFO "[qgroup-destroy] subvolume: $vol->{PRINT}";
  start_transaction($trans_type, vinfo_prefixed_keys("target", $vol));
  my $cmd_ret = run_cmd(cmd => vinfo_cmd($vol, "btrfs qgroup destroy", $qgroup_id, { unsafe => $subvol_path }),
                        rsh => vinfo_rsh($vol),
                       );
  end_transaction($trans_type, defined($cmd_ret));
  return $vol if(defined($cmd_ret));
  ERROR "Failed to destroy qgroup \"$qgroup_id\" for subvolume: $vol->{PRINT}";
  return undef;
}
# Transfer $snapshot to $target via "btrfs send | btrfs receive",
# optionally incremental (-p $parent) and with clone sources (-c),
# wrapped in a btrbk transaction. After the transfer, the target
# metadata is double-checked with "btrfs subvolume show", and partially
# received (garbled) subvolumes are deleted.
#
# Arguments:
#   $snapshot         - source vinfo (readonly snapshot)
#   $target           - target directory vinfo
#   $parent           - optional parent vinfo for incremental send
#   $clone_src        - optional arrayref of clone source vinfos
#   $ret_vol_received - optional scalar-ref, set to the received vinfo
#
# Returns 1 on success, undef on error.
sub btrfs_send_receive($$;$$$)
{
  my $snapshot = shift || die;
  my $target = shift || die;
  my $parent = shift;
  my $clone_src = shift // [];
  my $ret_vol_received = shift;
  my $snapshot_path = $snapshot->{PATH} // die;
  my $target_path = $target->{PATH} // die;
  my $parent_path = $parent ? $parent->{PATH} : undef;
  my $vol_received = vinfo_child($target, $snapshot->{NAME});
  $$ret_vol_received = $vol_received if(ref $ret_vol_received);
  print STDOUT "Creating backup: $vol_received->{PRINT}\n" if($show_progress && (not $dryrun));

  INFO "[send/receive] target: $vol_received->{PRINT}";
  INFO "[send/receive] source: $snapshot->{PRINT}";
  INFO "[send/receive] parent: $parent->{PRINT}" if($parent);
  INFO "[send/receive] clone-src: $_->{PRINT}" foreach(@$clone_src);

  my $stream_options = config_stream_hash($snapshot, $target);
  my @send_options;
  my @receive_options;
  push(@send_options, '-p', { unsafe => $parent_path} ) if($parent_path);
  push(@send_options, '-c', { unsafe => $_ } ) foreach(map { $_->{PATH} } @$clone_src);
  # push(@send_options, '-v') if($loglevel >= 3);
  # push(@receive_options, '-v') if($loglevel >= 3);
  my @cmd_pipe;
  push @cmd_pipe, {
    cmd => vinfo_cmd($snapshot, "btrfs send", @send_options, { unsafe => $snapshot_path } ),
    rsh => vinfo_rsh($snapshot, disable_compression => $stream_options->{stream_compress}),
    name => "btrfs send",
    stream_options => $stream_options,
    catch_stderr => 1,  # hack for shell-based run_cmd()
  };
  push @cmd_pipe, {
    cmd => vinfo_cmd($target, "btrfs receive", @receive_options, { unsafe => $target_path . '/' } ),
    rsh => vinfo_rsh($target, disable_compression => $stream_options->{stream_compress}),
    name => "btrfs receive",
    catch_stderr => 1,  # hack for shell-based run_cmd()
    filter_stderr => sub { $err = $_; $_ = undef }
  };
  my $send_receive_error = 0;
  start_transaction("send-receive",
                    vinfo_prefixed_keys("target", $vol_received),
                    vinfo_prefixed_keys("source", $snapshot),
                    vinfo_prefixed_keys("parent", $parent),
                   );
  my $ret = run_cmd(@cmd_pipe);
  unless(defined($ret)) {
    $send_receive_error = 1;
    $ret = $err;  # print the errors below
  }
  if(defined($ret)) {
    # NOTE: btrfs-progs < 4.11: if "btrfs send" fails, "btrfs receive" returns 0! so we need to parse the output for "ERROR:" lines:
    my @ret_lines = split("\n", $ret);
    $send_receive_error = 1 if(grep /^ERROR: /, @ret_lines);
    foreach(@ret_lines) {
      if(/^WARNING: / || $send_receive_error) {
        WARN "[send/receive] (send=$snapshot_path, receive=$target_path) $_";
      }
    }
  }

  # Read in target subvolume metadata (btrfs subvolume show):
  # Double checking the output increases robustness against exotic
  # revisions of external commands (btrfs-progs, pv, xz, lz4, ...).
  #
  # NOTE: we cannot rely on the underlying shell to have
  # "pipefail" functionality.
  #
  # NOTE: btrfs-progs < 4.11:
  # "cat /dev/null | btrfs receive" returns with exitcode=0 and no
  # error message, having the effect that silently no subvolume is
  # created if any command in @cmd_pipe fail.
  my $is_garbled;
  if($dryrun) {
    INFO "[send/receive] (dryrun, skip) checking target metadata: $vol_received->{PRINT}";
  }
  else {
    INFO "[send/receive] checking target metadata: $vol_received->{PRINT}";
    my $detail = btrfs_subvolume_show($vol_received);
    if(defined($detail)) {
      unless($send_receive_error) {
        # plausibility checks on target detail
        unless($detail->{readonly}) {
          ERROR "[send/receive] target is not readonly: $vol_received->{PRINT}";
          $send_receive_error = 1;
        }
        if($detail->{received_uuid} && ($detail->{received_uuid} eq '-')) {
          # NOTE: received_uuid is not in @required_keys (needs btrfs-progs >= 4.1 (BTRFS_PROGS_MIN))
          # so we only check it if it's really present
          ERROR "[send/receive] received_uuid is not set on target: $vol_received->{PRINT}";
          $send_receive_error = 1;
        }
        if($parent && ($detail->{parent_uuid} eq '-')) {
          ERROR "[send/receive] parent_uuid is not set on target: $vol_received->{PRINT}";
          $send_receive_error = 1;
        }
        if((not $parent) && ($detail->{parent_uuid} ne '-')) {
          ERROR "[send/receive] parent_uuid is set on target: $vol_received->{PRINT}";
          $send_receive_error = 1;
        }
      }
      # incomplete received (garbled) subvolumes are not readonly and have no received_uuid
      $is_garbled = ((not $detail->{readonly}) && defined($detail->{received_uuid}) && ($detail->{received_uuid} eq '-'));
    }
    else {
      $send_receive_error = 1;
    }
  }

  end_transaction("send-receive", not $send_receive_error);

  if($send_receive_error) {
    ERROR "Failed to send/receive btrfs subvolume: $snapshot->{PRINT} " . ($parent_path ? "[$parent_path]" : "") . " -> $target->{PRINT}";
  }

  if($is_garbled) {
    # NOTE: btrfs-progs does not delete incomplete received (garbled) subvolumes,
    # we need to do this by hand.
    # TODO: remove this as soon as btrfs-progs handle receive errors correctly.
    my @deleted = btrfs_subvolume_delete($vol_received, commit => "after", type => "delete_garbled");
    if(scalar(@deleted)) {
      WARN "Deleted partially received (garbled) subvolume: $vol_received->{PRINT}";
    }
    else {
      WARN "Deletion of partially received (garbled) subvolume failed, assuming clean environment: $vol_received->{PRINT}";
    }
  }
  return $send_receive_error ? undef : 1;
}
# Dump $source to a raw backup file below $target via "btrfs send",
# optionally piped through compression, encryption (gpg or
# "openssl enc") and "split", wrapped in a btrbk transaction. A sidecar
# ".info" file describing the raw backup is written before and after
# the transfer (first with, then without the INCOMPLETE flag).
#
# Arguments:
#   $source           - source vinfo (readonly snapshot)
#   $target           - target directory vinfo
#   $parent           - optional parent vinfo for incremental send
#   $ret_vol_received - optional scalar-ref, set to the target file vinfo
#   $ret_raw_info     - optional scalar-ref, set to the raw_info hashref
#
# Returns 1 on success, undef on error.
sub btrfs_send_to_file($$$;$$)
{
  my $source = shift || die;
  my $target = shift || die;
  my $parent = shift;
  my $ret_vol_received = shift;
  my $ret_raw_info = shift;
  my $source_path = $source->{PATH} // die;
  my $target_path = $target->{PATH} // die;
  my $parent_path = $parent ? $parent->{PATH} : undef;
  my $parent_uuid = $parent ? $parent->{node}{uuid} : undef ;
  my $received_uuid = $source->{node}{uuid};
  die unless($received_uuid);
  die if($parent && !$parent_uuid);

  # prepare raw_info (for vinfo_inject_child)
  my %raw_info = (
    TYPE => 'raw',
    RECEIVED_UUID => $received_uuid,
    INCOMPLETE => 1,
    # source_url => $source->{URL},
  );
  my $target_filename = $source->{NAME} || die;
  # $target_filename .= "--$received_uuid";
  # $target_filename .= '@' . $parent_uuid if($parent_uuid);
  $target_filename .= ".btrfs";
  my $compress = config_compress_hash($target, "raw_target_compress");
  my $encrypt = config_encrypt_hash($target, "raw_target_encrypt");
  my $split = config_key($target, "raw_target_split");
  my $stream_options = config_stream_hash($source, $target);
  # make sure we dont re-compress, override "stream_compress" with "raw_target_compress"
  $stream_options->{stream_compress} = $compress if($compress);

  my @send_options;
  push(@send_options, '-p', $parent_path) if($parent_path);
  #push(@send_options, '-v') if($loglevel >= 3);
  my @cmd_pipe;
  push @cmd_pipe, {
    cmd => vinfo_cmd($source, "btrfs send", @send_options, { unsafe => $source_path } ),
    rsh => vinfo_rsh($source, disable_compression => $stream_options->{stream_compress}),
    name => "btrfs send",
    stream_options => $stream_options,
  };
  if($compress) {
    $raw_info{compress} = $compression{$compress->{key}}->{format};
    $target_filename .= '.' . $compression{$compress->{key}}->{format};
    push @cmd_pipe, { compress_stdin => $compress };  # does nothing if already compressed by stream_compress
  }
  if($encrypt) {
    $target_filename .= ($encrypt->{type} eq "gpg") ? '.gpg' : '.encrypted';
  }
  # NOTE: $ret_vol_received must always be set when function returns!
  my $vol_received = vinfo_child($target, $target_filename);
  $$ret_vol_received = $vol_received if(ref $ret_vol_received);
  if($encrypt) {
    $raw_info{encrypt} = $encrypt->{type};
    if($encrypt->{type} eq "gpg") {
      # NOTE: We set "--no-random-seed-file" since one of the btrbk
      # design principles is to never create any files unasked. Enabling
      # "--no-random-seed-file" creates ~/.gnupg/random_seed, and as
      # such depends on $HOME to be set correctly (which e.g. is set to
      # "/" by some cron daemons). From gpg2(1) man page:
      #   --no-random-seed-file  GnuPG uses a file to store its
      #   internal random pool over invocations This makes random
      #   generation faster; however sometimes write operations are not
      #   desired. This option can be used to achieve that with the cost
      #   of slower random generation.
      my @gpg_options = ( '--batch', '--no-tty', '--no-random-seed-file', '--trust-model', 'always' );
      push @gpg_options, ( '--compress-algo', 'none' ) if($compress);  # NOTE: if --compress-algo is not set, gpg might still compress according to OpenPGP standard.
      push(@gpg_options, ( '--no-default-keyring', '--keyring', $encrypt->{keyring} )) if($encrypt->{keyring});
      push(@gpg_options, ( '--default-recipient', $encrypt->{recipient} )) if($encrypt->{recipient});
      push @cmd_pipe, {
        cmd => [ 'gpg', @gpg_options, '--encrypt' ],
        name => 'gpg',
        compressed_ok => ($compress ? 1 : 0),
      };
    }
    elsif($encrypt->{type} eq "openssl_enc") {
      # encrypt using "openssl enc"
      $raw_info{cipher} = $encrypt->{ciphername};
      # NOTE: iv is always generated locally!
      my $iv_size = $encrypt->{iv_size};
      my $iv;
      if($iv_size) {
        INFO "Generating iv for openssl encryption (cipher=$encrypt->{ciphername})";
        $iv = system_urandom($iv_size, 'hex');
        unless($iv) {
          ERROR "Failed to generate IV for openssl_enc: $source->{PRINT}";
          return undef;
        }
        $raw_info{iv} = $iv;
      }
      my $encrypt_key;
      if($encrypt->{keyfile}) {
        if($encrypt->{kdf_backend}) {
          WARN "Both openssl_keyfile and kdf_backend are configured, ignoring kdf_backend!";
        }
        # key is read by the remote/local shell at execution time
        $encrypt_key = '$(cat ' . $encrypt->{keyfile} . ')';
      }
      elsif($encrypt->{kdf_backend}) {
        if($encrypt->{kdf_keygen_each}) {
          # force a fresh session key for every raw backup
          $kdf_session_key = undef;
          %kdf_vars = ();
        }
        if($kdf_session_key) {
          INFO "Reusing session key for: $vol_received->{PRINT}";
        }
        else {
          # run kdf backend, set session key and vars
          DEBUG "Generating session key for: $vol_received->{PRINT}";
          my $kdf_backend_name = $encrypt->{kdf_backend};
          $kdf_backend_name =~ s/^.*\///;
          print STDOUT "\nGenerate session key for " . ($encrypt->{kdf_keygen_each} ? "\"$vol_received->{PRINT}\"" : "all raw backups") . ":\n";
          my $kdf_values = run_cmd(cmd => [ $encrypt->{kdf_backend}, $encrypt->{kdf_keysize} ],
                                   non_destructive => 1,
                                   name => $kdf_backend_name
                                  );
          return undef unless(defined($kdf_values));
          foreach(split("\n", $kdf_values)) {
            chomp;
            next if /^\s*$/;  # ignore empty lines
            # fixed char class: was [0-9a-fA-f], where the A-f range
            # wrongly matched A-Z [ \ ] ^ _ ` (key must be hex)
            if(/^KEY=([0-9a-fA-F]+)/) {
              $kdf_session_key = $1;
            }
            elsif(/^([a-z_]+)=(.*)/) {
              my $info_key = 'kdf_' . $1;
              my $info_val = $2;
              DEBUG "Adding raw_info from kdf_backend: $info_key=$info_val";
              $kdf_vars{$info_key} = $info_val;
            }
            else {
              ERROR "Ambiguous line from kdf_backend: $encrypt->{kdf_backend}";
              return undef;
            }
          }
          # key must be exactly kdf_keysize bytes (2 hex chars per byte)
          unless($kdf_session_key && (length($kdf_session_key) == ($encrypt->{kdf_keysize} * 2))) {
            ERROR "Ambiguous key value from kdf_backend: $encrypt->{kdf_backend}";
            return undef;
          }
          INFO "Generated session key for: $vol_received->{PRINT}";
        }
        $encrypt_key = $kdf_session_key;
        %raw_info = ( %kdf_vars, %raw_info );
      }
      my @openssl_options = (
        '-' . $encrypt->{ciphername},
        '-K', $encrypt_key,
      );
      push @openssl_options, ('-iv', $iv) if($iv);
      push @cmd_pipe, {
        cmd => [ 'openssl', 'enc', '-e', @openssl_options ],
        name => 'openssl_enc',
        compressed_ok => ($compress ? 1 : 0),
      };
    }
    else {
      die "Unsupported encryption type (raw_target_encrypt)";
    }
  }
  if($split) {
    # NOTE: we do not append a ".split" suffix on $target_filename here, as this propagates to ".info" file
    $raw_info{split} = $split;
    push @cmd_pipe, {
      cmd => [ 'split', '-b', uc($split), '-', "${target_path}/${target_filename}.split_" ],
      check_unsafe => [ { unsafe => "${target_path}/${target_filename}.split_" } ],
      rsh => vinfo_rsh($target, disable_compression => $stream_options->{stream_compress}),
      compressed_ok => ($compress ? 1 : 0),
    }
  }
  else {
    push @cmd_pipe, {
      # NOTE: We use "dd" instead of shell redirections here, as it is
      # common to have special filesystems (like NFS, SMB, FUSE) mounted
      # on $target_path. By using "dd" we make sure to write in
      # reasonably large blocks (default=128K), which is not always the
      # case when using redirections (e.g. "gpg > outfile" writes in 8K
      # blocks).
      # Another approach would be to always pipe through "cat", which
      # uses st_blksize from fstat(2) (with a minimum of 128K) to
      # determine the block size.
      cmd => [ 'dd', 'status=none', 'bs=' . config_key($target, "raw_target_block_size"), "of=${target_path}/${target_filename}" ],
      check_unsafe => [ { unsafe => "${target_path}/${target_filename}" } ],
      #redirect_to_file => { unsafe => "${target_path}/${target_filename}" },  # alternative (use shell redirection), less overhead on local filesystems (barely measurable):
      rsh => vinfo_rsh($target, disable_compression => $stream_options->{stream_compress}),
      compressed_ok => ($compress ? 1 : 0),
    };
  }

  $raw_info{FILE} = $target_filename;
  $raw_info{RECEIVED_PARENT_UUID} = $parent_uuid if($parent_uuid);
  # disabled for now, as its not very useful and might leak information:
  # $raw_info{parent_url} = $parent->{URL} if($parent);
  # $raw_info{target_url} = $vol_received->{URL};
  $$ret_raw_info = \%raw_info if($ret_raw_info);
  print STDOUT "Creating raw backup: $vol_received->{PRINT}\n" if($show_progress && (not $dryrun));

  INFO "[send-to-raw] source: $source->{PRINT}";
  INFO "[send-to-raw] parent: $parent->{PRINT}" if($parent);
  INFO "[send-to-raw] target: $vol_received->{PRINT}";
  start_transaction("send-to-raw",
                    vinfo_prefixed_keys("target", $vol_received),
                    vinfo_prefixed_keys("source", $source),
                    vinfo_prefixed_keys("parent", $parent),
                   );
  my $ret;
  $ret = system_write_raw_info($vol_received, \%raw_info);
  if(defined($ret)) {
    $ret = run_cmd(@cmd_pipe);
  }
  if(defined($ret)) {
    # Test target file for "exists and size > 0" after writing, as we
    # can not rely on the exit status of the command pipe, and a shell
    # redirection as well as "dd" always creates the target file.
    # Note that "split" does not create empty files.
    my $test_postfix = ($split ? ".split_aa" : "");
    DEBUG "Testing target data file (non-zero size)";
    $ret = run_cmd({
      cmd => ['test', '-s', { unsafe => "${target_path}/${target_filename}${test_postfix}" } ],
      rsh => vinfo_rsh($target),
      name => "test",
    });
    if(defined($ret)) {
      # Write raw info file again, this time without incomplete flag
      delete $raw_info{INCOMPLETE};
      $ret = system_write_raw_info($vol_received, \%raw_info);
    }
  }
  end_transaction("send-to-raw", defined($ret));
  unless(defined($ret)) {
    ERROR "Failed to send btrfs subvolume to raw file: $source->{PRINT} " . ($parent_path ? "[$parent_path]" : "") . " -> $vol_received->{PRINT}";
    return undef;
  }
  return 1;
}
# Read and parse /proc/self/mountinfo on the machine hosting $vol.
#
# Returns an arrayref of hashes, one per mount, with the named captures
# from the mountinfo format (mount_id, parent_id, st_dev, fs_root,
# mount_point, mount_options, fs_type, mount_source, super_options)
# plus MNTOPS: a hash of the parsed per-superblock options.
# Returns undef on error.
sub system_list_mountinfo($)
{
  my $vol = shift // die;
  my $file = '/proc/self/mountinfo';  # NOTE: /proc/self/mounts is deprecated
  my $ret = run_cmd(cmd => [ qw(cat), $file ],
                    rsh => vinfo_rsh($vol),
                    non_destructive => 1,
                    catch_stderr => 1,  # hack for shell-based run_cmd()
                   );
  return undef unless(defined($ret));
  my @mountinfo;
  foreach (split(/\n/, $ret))
  {
    # https://www.kernel.org/doc/Documentation/filesystems/proc.txt
    unless(/^(?<mount_id>[0-9]+)        # mount ID: unique identifier of the mount (may be reused after umount)
           \s(?<parent_id>[0-9]+)       # parent ID: ID of parent (or of self for the top of the mount tree)
           \s(?<st_dev>[0-9]+:[0-9]+)   # major:minor: value of st_dev for files on filesystem
           \s(?<fs_root>\S+)            # root: root of the mount within the filesystem
           \s(?<mount_point>\S+)        # mount point: mount point relative to the process's root
           \s(?<mount_options>\S+)      # mount options: per mount options
           (\s\S+)*                     # optional fields: zero or more fields of the form "tag[:value]"
           \s-                          # separator: marks the end of the optional fields
           \s(?<fs_type>\S+)            # filesystem type: name of filesystem of the form "type[.subtype]"
           \s(?<mount_source>\S+)       # mount source: filesystem specific information or "none"
           \s(?<super_options>\S+)$     # super options: per super block options
           /x)
    {
      ERROR "Failed to parse \"$vol->{URL_PREFIX}$file\"";
      DEBUG "Offending line: $_";
      return undef;
    }
    my %line = %+;
    # split super_options ("a=b,c,...") into the MNTOPS hash
    foreach (split(',', $line{super_options})) {
      if(/^(.+?)=(.+)$/) {
        $line{MNTOPS}->{$1} = $2;
      } else {
        $line{MNTOPS}->{$_} = 1;
      }
    }
    push @mountinfo, \%line;
  }
  # TRACE(Data::Dumper->Dump([\@mountinfo], ["mountinfo"])) if($do_dumper);
  return \@mountinfo;
}
# Resolve the canonical (symlink-free) path of $vol by running
# "readlink -e" on the machine hosting it.
#
# Returns the resolved (and untainted) path, or undef on error.
sub system_realpath($)
{
  my $vol = shift // die;
  my $path = $vol->{PATH} // die;
  my @quiet = ($loglevel < 3) ? ('-q') : ();
  my $ret = run_cmd(cmd => vinfo_cmd($vol, "readlink", '-e', @quiet, { unsafe => $path } ),
                    rsh => vinfo_rsh($vol),
                    non_destructive => 1,
                   );
  return undef unless(defined($ret));
  unless($ret =~ /^($file_match)$/) {
    ERROR "Failed to parse output of `realpath` for \"$vol->{PRINT}\": \"$ret\"";
    return undef;
  }
  my $realpath = $1;  # untaint argument
  DEBUG "Real path for \"$vol->{PRINT}\" is: $realpath";
  return $realpath;
}
# Create the directory at $vol (including parents, "mkdir -p"),
# wrapped in a btrbk transaction.
#
# Returns 1 on success, undef on error.
sub system_mkdir($)
{
  my $vol = shift // die;
  my $path = $vol->{PATH} // die;
  INFO "Creating directory: $vol->{PRINT}/";
  start_transaction("mkdir", vinfo_prefixed_keys("target", $vol));
  my $ret = run_cmd(cmd => [ qw(mkdir), '-p', { unsafe => $path } ],
                    rsh => vinfo_rsh($vol),
                   );
  end_transaction("mkdir", defined($ret));
  return undef unless(defined($ret));
  return 1;
}
# Resolve the btrfs mount point for a vinfo $vol.
#
# Returns a list: (mount_point, realpath, subvolid, mount_source,
# \@same_source_mounts), or undef on error. @same_source_mounts lists
# all parseable btrfs mount points of the same device, as
# { file, subvolid } hashes.
#
# Results of system_realpath() are cached per URL, results of
# system_list_mountinfo() per MACHINE_ID. If the matched mount point
# is of fs_type "autofs", the automount is triggered and resolution is
# retried exactly once ($autofs_retry guards the recursion).
sub btrfs_mountpoint
{
  my $vol = shift // die;
  my $autofs_retry = shift;  # internal: set on recursive retry after automount trigger
  DEBUG "Resolving btrfs mount point for: $vol->{PRINT}";
  # get real path
  my $realpath = $realpath_cache{$vol->{URL}};
  unless($realpath) {
    $realpath = system_realpath($vol);
    $realpath_cache{$vol->{URL}} = $realpath;
  }
  return undef unless($realpath);

  # get all mountpoints
  my $mountinfo = $mountinfo_cache{$vol->{MACHINE_ID}};
  TRACE "mountinfo_cache " . ($mountinfo ? "HIT" : "MISS") . ": $vol->{MACHINE_ID}";
  unless($mountinfo) {
    $mountinfo = system_list_mountinfo($vol);
    return undef unless($mountinfo);
    $mountinfo_cache{$vol->{MACHINE_ID}} = $mountinfo;
  }

  # find mount point (last mountinfo entry matching realpath)
  $realpath .= '/' unless($realpath =~ /\/$/); # correctly handle root path="/"
  my $mountpoint;
  foreach(reverse @$mountinfo) {
    my $mnt_path = $_->{mount_point};
    $mnt_path .= '/' unless($mnt_path =~ /\/$/); # correctly handle root path="/"
    if($realpath =~ /^\Q$mnt_path\E/) {
      $mountpoint = $_;
      last;
    }
  }
  unless($mountpoint) {
    # should never happen, as "/" should always be present in mounts
    ERROR "No mount point found for: $vol->{PRINT} (realpath=\"$realpath\")";
    return undef;
  }
  TRACE "resolved mount point (mount_source=$mountpoint->{mount_source}, subvolid=" . ($mountpoint->{MNTOPS}->{subvolid} // '<undef>') . "): $mountpoint->{mount_point}";

  # handle autofs
  if($mountpoint->{fs_type} eq 'autofs') {
    if($autofs_retry) {
      # automount was already triggered once; give up
      DEBUG "non-btrfs autofs mount point for: $vol->{PRINT}";
      return undef;
    }
    DEBUG "autofs mount point found, triggering automount on $mountpoint->{mount_point} for: $vol->{PRINT}";
    # called only for the side effect of triggering the automount; result is ignored
    btrfs_subvolume_show(vinfo($vol->{URL_PREFIX} . $mountpoint->{mount_point}, $vol->{CONFIG}));
    # invalidate the mountinfo cache: the mount table has changed now
    $mountinfo_cache{$vol->{MACHINE_ID}} = undef;
    return btrfs_mountpoint($vol, 1);
  }
  elsif($mountpoint->{fs_type} ne 'btrfs') {
    DEBUG "No btrfs mount point found for: $vol->{PRINT}";
    return undef;
  }

  # list all mountpoints of same device
  my @same_source_mounts;
  my $mount_source_match = $mountpoint->{mount_source};
  foreach my $mnt (@$mountinfo) {
    if($mnt->{mount_source} eq $mount_source_match) {
      unless($mnt->{fs_type} eq 'btrfs') {
        # should never happen, same device should always have fs_type=btrfs
        DEBUG "Ignoring non-btrfs mount point: $mnt->{mount_source} $mnt->{mount_point} $mnt->{fs_type}";
        next;
      }
      unless($mnt->{mount_point} =~ /^$file_match$/) {
        INFO "Ignoring non-parseable btrfs mountpoint on $vol->{MACHINE_ID}: \"$mnt->{mount_point}\"";
        next;
      }
      unless($mnt->{MNTOPS}->{subvolid}) {
        # kernel <= 4.2 does not have subvolid=NN in /proc/self/mounts, read it with btrfs-progs
        DEBUG "No subvolid provided in mounts for: $mnt->{mount_point}";
        my $detail = btrfs_subvolume_show(vinfo($vol->{URL_PREFIX} . $mnt->{mount_point}, $vol->{CONFIG}));
        return undef unless($detail);
        $mnt->{MNTOPS}->{subvolid} = $detail->{id} || die; # also affects %mountinfo_cache
      }
      TRACE "using btrfs mount point (mount_source=$mnt->{mount_source}, subvolid=$mnt->{MNTOPS}->{subvolid}): $mnt->{mount_point}";
      push(@same_source_mounts, { file => $mnt->{mount_point}, subvolid => $mnt->{MNTOPS}->{subvolid} } );
    }
  }

  DEBUG "Btrfs mount point for \"$vol->{PRINT}\": $mountpoint->{mount_point} (mount_source=$mountpoint->{mount_source}, subvolid=$mountpoint->{MNTOPS}->{subvolid})";
  return ($mountpoint->{mount_point}, $realpath, $mountpoint->{MNTOPS}->{subvolid}, $mountpoint->{mount_source}, \@same_source_mounts);
}
2017-06-16 17:43:17 +02:00
# Read and parse all raw backup sidecar files ("*.btrfs.*.info") in
# directory $droot, plus backups in DEPRECATED raw format (btrbk <
# v0.26.0, where the metadata is encoded in the file name).
#
# Returns an arrayref of validated raw-info hashes (keys: INFO_FILE,
# FILE, TYPE, RECEIVED_UUID, RECEIVED_PARENT_UUID, ...), or undef on
# error. Deprecated-format entries carry no INFO_FILE key.
sub system_read_raw_info_dir($)
{
  my $droot = shift // die;
  my $ret = run_cmd(
    # NOTE: we cannot simply "cat" all files here, as it will fail if no files found
    cmd => [ 'find', { unsafe => $droot->{PATH} },
             '-maxdepth', '1',
             '-type', 'f',
             '-name', '\*.btrfs.\*info', # match ".btrfs[.gz|bz2|xz][.gpg].info"
             '-exec', 'echo INFO_FILE=\{\} \;',
             '-exec', 'cat \{\} \;'
           ],
    rsh => vinfo_rsh($droot),
    non_destructive => 1,
  );
  unless(defined($ret)) {
    ERROR("Failed to read *.btrfs.*.info files in: $droot->{PATH}");
    return undef;
  }
  # split the concatenated output into one hash per "INFO_FILE=" marker line
  my @raw_targets;
  my $cur_target;
  foreach (split("\n", $ret))
  {
    if(/^INFO_FILE=/) {
      # marker echoed by find: start of the next file; flush previous entry
      push @raw_targets, $cur_target if($cur_target);
      $cur_target = {};
    }
    next if /^#/; # ignore comments
    next if /^\s*$/; # ignore empty lines
    if(/^([a-zA-Z_]+)=(.*)/) {
      # KEY=value line (also matches the INFO_FILE marker itself)
      my ($key, $value) = ($1, $2);
      if($cur_target) {
        $cur_target->{$key} = $value;
      }
    }
  }
  push @raw_targets, $cur_target if($cur_target);
  # input validation (we need to abort here, or the backups will be resumed)
  foreach my $raw_info (@raw_targets) {
    unless($raw_info->{INFO_FILE}) {
      ERROR("Error while parsing command output for: $droot->{PATH}");
      return undef;
    }
    unless($raw_info->{FILE}) {
      ERROR("Missing \"FILE=\" in raw info file: " . $raw_info->{INFO_FILE});
      return undef;
    }
    unless(check_file($raw_info->{FILE}, { name_only => 1 })) {
      ERROR("Ambiguous \"FILE=\" in raw info file: " . $raw_info->{INFO_FILE});
      return undef;
    }
    unless($raw_info->{TYPE} && ($raw_info->{TYPE} eq 'raw')) {
      ERROR("Unsupported \"type\" in raw info file: " . $raw_info->{INFO_FILE});
      return undef;
    }
    unless($raw_info->{RECEIVED_UUID} && ($raw_info->{RECEIVED_UUID} =~ /^$uuid_match$/)) {
      ERROR("Missing/Illegal \"received_uuid\" in raw info file: " . $raw_info->{INFO_FILE});
      return undef;
    }
    if(defined $raw_info->{RECEIVED_PARENT_UUID}) {
      unless(($raw_info->{RECEIVED_PARENT_UUID} eq '-') || ($raw_info->{RECEIVED_PARENT_UUID} =~ /^$uuid_match$/)) {
        ERROR("Illegal \"RECEIVED_PARENT_UUID\" in raw info file: " . $raw_info->{INFO_FILE});
        return undef;
      }
    }
    else {
      # normalize: missing parent uuid means "no parent"
      $raw_info->{RECEIVED_PARENT_UUID} = '-';
    }
  }
  DEBUG("Parsed " . @raw_targets . " raw info files in path: $droot->{PATH}");
  TRACE(Data::Dumper->Dump([\@raw_targets], ["system_read_raw_info_dir($droot->{URL})"])) if($do_dumper);

  #
  # read DEPRECATED raw format (btrbk < v0.26.0)
  #
  $ret = run_cmd(
    cmd => [ 'find', { unsafe => $droot->{PATH} . '/' }, '-maxdepth', '1', '-type', 'f' ],
    rsh => vinfo_rsh($droot),
    non_destructive => 1,
  );
  unless(defined($ret)) {
    ERROR("Failed to list files from: $droot->{PATH}");
    return undef;
  }
  my $deprecated_found = 0;
  foreach (split("\n", $ret))
  {
    unless(/^($file_match)$/) {
      DEBUG "Skipping non-parseable file: \"$_\"";
      next;
    }
    my $file = $1; # untaint argument
    unless($file =~ s/^\Q$droot->{PATH}\E\///) {
      ERROR("Unexpected result from 'find': file \"$file\" is not under \"$droot->{PATH}\"");
      return undef;
    }
    if($file =~ /^(?<name>$file_match)\.$btrbk_timestamp_match$raw_postfix_match_DEPRECATED$/) {
      # metadata is encoded in the file name (named captures of the regex above)
      push @raw_targets, {
        # NOTE: if INFO_FILE is not present, this raw target is treated as deprecated format
        TYPE => 'raw',
        FILE => $file,
        RECEIVED_UUID => $+{received_uuid} // die,
        RECEIVED_PARENT_UUID => $+{parent_uuid} // '-',
        INCOMPLETE => $+{incomplete} ? 1 : 0,
        encrypt => $+{encrypt} // "",
        compress => $+{compress} // "",
      };
      $deprecated_found++;
    }
  }
  DEBUG("Parsed $deprecated_found deprecated raw backup files in path: $droot->{PATH}");
  if($deprecated_found) {
    WARN("Found $deprecated_found raw backup files with deprecated file format in: $droot->{PRINT}");
    WARN("Please convert the raw backup files using the `raw_suffix2sidecar` utility.");
  }
  return \@raw_targets;
}
# Write the raw backup sidecar file "<PATH>.info" for target $vol,
# containing one KEY=value line per entry of %$raw_info (sorted by
# %raw_info_sort priority, then alphabetically).
# Returns the info file path, or undef on error.
sub system_write_raw_info($$)
{
  my $vol = shift // die;
  my $raw_info = shift // die;
  my $info_file = $vol->{PATH} . '.info';
  my @line;
  push @line, "#btrbk-v$VERSION";
  push @line, "# Do not edit this file";
  # sort by %raw_info_sort, then by key
  foreach(sort { (($raw_info_sort{$a} || 99) <=> ($raw_info_sort{$b} || 99)) || ($a cmp $b) } keys %$raw_info) {
    push @line, ($_ . '=' . $raw_info->{$_});
  }
  DEBUG "Creating raw info file " . ($raw_info->{INCOMPLETE} ? "(incomplete)" : "(complete)") . ": $info_file";
  # join with a literal backslash-n: expanded to newlines by "echo -e" below
  # (fix: $echo_text was previously computed but never used, and the join
  # was duplicated inline in the command assembly)
  my $echo_text = join('\n', @line);
  TRACE "DUMP INFO_FILE=$info_file\n" . join("\n", @line);
  my $ret = run_cmd(
    { cmd => [ 'echo', '-e', '-n', '"' . $echo_text . '\n"' ] },
    { redirect_to_file => { unsafe => $info_file },
      rsh => vinfo_rsh($vol),
    });
  return undef unless(defined($ret));
  return $info_file;
}
2017-06-16 17:04:18 +02:00
# Read $size bytes (1..256) of random data from /dev/urandom.
# Returns the data hex-encoded ($format='hex', default) or as raw
# bytes ($format='bin'); returns undef on I/O error, dies on bad
# arguments.
sub system_urandom($;$) {
  my $size = shift;
  my $format = shift || 'hex';
  die unless(($size > 0) && ($size <= 256)); # sanity check
  # use a lexical filehandle instead of the bareword URANDOM
  my $urandom_fh;
  unless(open($urandom_fh, '<', '/dev/urandom')) {
    ERROR "Failed to open /dev/urandom: $!";
    return undef;
  }
  binmode $urandom_fh;
  my $rand;
  my $rlen = read($urandom_fh, $rand, $size);
  # bugfix: this was "close(FILE)", closing a nonexistent handle and
  # leaking the urandom filehandle until program exit
  close($urandom_fh);
  unless(defined($rand) && ($rlen == $size)) {
    ERROR "Failed to read from /dev/urandom: $!";
    return undef;
  }
  if($format eq 'hex') {
    my $hex = unpack('H*', $rand);
    die unless(length($hex) == ($size * 2)); # paranoia check
    return $hex;
  }
  elsif($format eq 'bin') {
    return $rand;
  }
  die "unsupported format";
}
2018-02-05 18:03:20 +01:00
# Build (or fetch from cache) the subvolume tree of a btrfs filesystem.
#
# Arguments:
#   $vol          - vinfo used to list the subvolumes
#   $vol_root_id  - subvolid of the node to return (>= 5)
#   $mount_source - device of the filesystem (aka mount source)
#   $mountpoints  - all known mountpoints: arrayref of { file, subvolid }
#
# Returns the tree node for $vol_root_id, or undef on error. Populates
# %mount_source_cache and %uuid_cache as side effects.
sub btr_tree($$$$)
{
  my $vol = shift;
  my $vol_root_id = shift || die;
  my $mount_source = shift || die; # aka device
  my $mountpoints = shift || die; # all known mountpoints for this filesystem: arrayref of { file, subvolid }
  die unless($vol_root_id >= 5);

  # return parsed tree from %mount_source_cache if present
  my $host_mount_source = $vol->{URL_PREFIX} . $mount_source; # printed in _fs_path()
  my $cached_tree = $mount_source_cache{$host_mount_source};
  TRACE "mount_source_cache " . ($cached_tree ? "HIT" : "MISS") . ": $host_mount_source";
  if($cached_tree) {
    TRACE "btr_tree: returning cached tree at id=$vol_root_id";
    my $node = $cached_tree->{ID_HASH}{$vol_root_id};
    ERROR "Unknown subvolid=$vol_root_id in btrfs tree of $host_mount_source" unless($node);
    return $node;
  }

  my $node_list = btrfs_subvolume_list_complete($vol);
  return undef unless(ref($node_list) eq "ARRAY");
  my $vol_root;

  TRACE "btr_tree: processing subvolume list of: $vol->{PRINT}";

  # return a reference to the cached root if we already know the tree,
  # making sure every tree is only stored once, which is essential
  # e.g. when injecting nodes. die if duplicate UUID exist on
  # different file systems (no matter if local or remote).
  #
  # note: this relies on subvolume UUID's to be "universally unique"
  # (which is why cloning btrfs filesystems using "dd" is a bad idea)
  #
  # note: a better way would be to always compare the UUID of
  # subvolid=5. unfortunately this is not possible for filesystems
  # created with btrfs-progs < 4.16 (no UUID for subvolid=5).
  foreach(@$node_list) {
    my $node_uuid = $_->{uuid};
    next unless($node_uuid);
    if($uuid_cache{$node_uuid}) {
      # at least one uuid of $node_list is already known
      TRACE "uuid_cache HIT: $node_uuid";
      $vol_root = $uuid_cache{$node_uuid}->{TREE_ROOT}->{ID_HASH}->{$vol_root_id};
      die "Duplicate UUID on different file systems" unless($vol_root);
      INFO "Assuming same filesystem: \"$vol_root->{TREE_ROOT}->{host_mount_source}\", \"$host_mount_source\"";
      TRACE "btr_tree: returning already parsed tree at id=$vol_root->{id}";
      # alias this mount source to the already parsed tree
      $mount_source_cache{$host_mount_source} = $vol_root->{TREE_ROOT};
      return $vol_root;
    }
    last; # check only first UUID (for performance)
  }

  # fill our hashes and uuid_cache
  my %id;                  # subvolid -> node
  my %uuid_hash;           # uuid -> node
  my %received_uuid_hash;  # received_uuid -> [ nodes ]
  my %parent_uuid_hash;    # parent_uuid -> [ nodes ]
  my $gen_max = 0;
  foreach my $node (@$node_list) {
    my $node_id = $node->{id};
    my $node_uuid = $node->{uuid};
    die unless($node_id >= 5);
    die "duplicate node id" if(exists($id{$node_id}));
    $id{$node_id} = $node;
    if($node_uuid) {
      # NOTE: uuid on btrfs root (id=5) is not always present
      $uuid_hash{$node_uuid} = $node;
      $uuid_cache{$node_uuid} = $node;
      # hacky: if root node has no "uuid", it also has no "received_uuid" and no "gen"
      push(@{$received_uuid_hash{$node->{received_uuid}}}, $node) if($node->{received_uuid} ne '-');
      push(@{$parent_uuid_hash{$node->{parent_uuid}}}, $node) if($node->{parent_uuid} ne '-');
      $gen_max = $node->{gen} if($node->{gen} > $gen_max);
    }
    elsif(not $node->{is_root}) {
      die "missing uuid on subvolume";
    }
    $node->{SUBTREE} = [];
  }
  my $tree_root = $id{5} // die "missing btrfs root";
  $tree_root->{MOUNTPOINTS} = $mountpoints; # { file, mount_source, node }
  $tree_root->{ID_HASH} = \%id;
  $tree_root->{UUID_HASH} = \%uuid_hash;
  $tree_root->{RECEIVED_UUID_HASH} = \%received_uuid_hash;
  $tree_root->{PARENT_UUID_HASH} = \%parent_uuid_hash;
  $tree_root->{GEN_MAX} = $gen_max;
  # NOTE: host_mount_source is NOT dependent on MACHINE_ID:
  # if we return already present tree (see above), the value of
  # host_mount_source will still point to the mount_source of the
  # first machine.
  $tree_root->{host_mount_source} = $host_mount_source; # unique identifier, e.g. "LOCAL:/dev/sda1" or "ssh://hostname[:port]/dev/sda1"
  $vol_root = $id{$vol_root_id};
  unless($vol_root) {
    ERROR "Failed to resolve tree root for subvolid=$vol_root_id: " . ($vol->{PRINT} // $vol->{id});
    return undef;
  }

  # set REL_PATH and tree references (TREE_ROOT, SUBTREE, TOP_LEVEL)
  foreach my $node (@$node_list) {
    unless($node->{is_root}) {
      # note: it is possible that id < top_level, e.g. after restoring
      my $top_level = $id{$node->{top_level}};
      die "missing top_level reference" unless(defined($top_level));

      push(@{$top_level->{SUBTREE}}, $node);
      $node->{TOP_LEVEL} = $top_level;

      # "path" always starts with set REL_PATH
      my $rel_path = $node->{path};
      unless($top_level->{is_root}) {
        # strip the top-level path prefix; must always succeed
        die unless($rel_path =~ s/^\Q$top_level->{path}\E\///);
      }
      $node->{REL_PATH} = $rel_path; # relative to {TOP_LEVEL}->{path}
    }
    $node->{TREE_ROOT} = $tree_root;
    add_btrbk_filename_info($node);
  }

  # add known mountpoints to nodes
  foreach(@$mountpoints) {
    my $node = $id{$_->{subvolid}};
    unless($node) {
      WARN "Unknown subvolid=$_->{subvolid} (in btrfs tree of $host_mount_source) for mountpoint: $vol->{URL_PREFIX}$_->{file}";
      next;
    }
    $node->{MOUNTPOINT_URL} = $vol->{URL_PREFIX} . $_->{file};
  }

  TRACE "btr_tree: returning tree at id=$vol_root->{id}";
  VINFO($vol_root, "node") if($loglevel >=4);

  $mount_source_cache{$host_mount_source} = $tree_root;
  return $vol_root;
}
2018-10-18 17:52:01 +02:00
# Inject a fake subvolume node below $top_node, as if it existed on the
# filesystem. The node gets a fresh negative id ($tree_inject_id) and a
# synthetic uuid, and is registered in the tree hashes and %uuid_cache.
# $detail must provide parent_uuid, received_uuid and readonly.
# Returns the newly created node.
sub btr_tree_inject_node($$$)
{
  my ($top_node, $detail, $rel_path) = @_;
  my $children = $top_node->{SUBTREE} // die;
  my $root = $top_node->{TREE_ROOT};
  die unless($detail->{parent_uuid} && $detail->{received_uuid} && exists($detail->{readonly}));

  # allocate a fresh (negative) id, and bump the tree generation
  $tree_inject_id -= 1;
  $root->{GEN_MAX} += 1;
  my $injected_id = $tree_inject_id;
  my $fake_uuid = sprintf("${fake_uuid_prefix}%012u", -$injected_id);

  my $injected = {
    %$detail, # shallow copy of the provided subvolume details
    TREE_ROOT => $root,
    SUBTREE => [],
    TOP_LEVEL => $top_node,
    REL_PATH => $rel_path,
    INJECTED => 1,
    id => $injected_id,
    uuid => $fake_uuid,
    gen => $root->{GEN_MAX},
    cgen => $root->{GEN_MAX},
  };

  # register the new node in the tree and in the caches
  push(@$children, $injected);
  $uuid_cache{$fake_uuid} = $injected;
  $root->{ID_HASH}->{$injected_id} = $injected;
  $root->{UUID_HASH}->{$fake_uuid} = $injected;
  push(@{$root->{RECEIVED_UUID_HASH}->{$injected->{received_uuid}}}, $injected) if($injected->{received_uuid} ne '-');
  push(@{$root->{PARENT_UUID_HASH}->{$injected->{parent_uuid}}}, $injected) if($injected->{parent_uuid} ne '-');
  return $injected;
}
2016-04-07 14:34:51 +02:00
# Printable filesystem path of a tree node: walk up the TOP_LEVEL
# links, collecting REL_PATH components, until a node with a known
# mountpoint (MOUNTPOINT_URL) or the tree root is reached.
sub _fs_path
{
  my $node = shift // die;
  my @components;
  while(1) {
    return join('/', $node->{MOUNTPOINT_URL}, @components) if($node->{MOUNTPOINT_URL});
    return join('/', "<$node->{host_mount_source}>", @components) if($node->{is_root});
    unshift @components, $node->{REL_PATH};
    $node = $node->{TOP_LEVEL};
  }
}
2018-02-07 16:23:46 +01:00
# True if two nodes are "correlated": one is a received copy of the
# other (uuid equals the other's received_uuid), or both were received
# from the same source (equal, valid received_uuid). Only meaningful
# for readonly, non-root subvolumes; false otherwise.
sub _is_correlated($$)
{
  my ($x, $y) = @_;
  return 0 if($x->{is_root} || $y->{is_root});
  return 0 unless($x->{readonly} && $y->{readonly});
  return 1 if($x->{uuid} eq $y->{received_uuid});
  return 1 if($y->{uuid} eq $x->{received_uuid});
  return (($x->{received_uuid} ne '-') && ($x->{received_uuid} eq $y->{received_uuid}));
}
2018-02-15 00:17:01 +01:00
# True if both nodes belong to the same filesystem, i.e. the
# host_mount_source of their tree roots is identical.
sub _is_same_fs_tree($$)
{
  my ($x, $y) = @_;
  my $src_x = $x->{TREE_ROOT}{host_mount_source};
  my $src_y = $y->{TREE_ROOT}{host_mount_source};
  return ($src_x eq $src_y);
}
2016-03-15 11:21:59 +01:00
# True if any node in the subtree below $node (excluding $node itself)
# has the given uuid.
sub _is_child_of
{
  my ($node, $uuid) = @_;
  my @queue = @{$node->{SUBTREE}};
  while(@queue) {
    my $cur = shift @queue;
    return 1 if($cur->{uuid} eq $uuid);
    push @queue, @{$cur->{SUBTREE}};
  }
  return 0;
}
2015-10-20 19:07:08 +02:00
2016-03-30 21:55:02 +02:00
# Descend into the subvolume tree and return the deepest node whose
# path is a prefix of $check_path.
# Returns { node => ..., path => ... } (path with trailing slash), or
# undef if $path is not a prefix of $check_path at all.
sub _get_longest_match
{
  my ($node, $path, $check_path) = @_; # $check_path MUST have a trailing slash
  $path .= '/' if($path !~ /\/$/); # correctly handle root path="/"
  return undef if($check_path !~ /^\Q$path\E/);
  # prefer the first matching descendant over this node
  foreach my $child (@{$node->{SUBTREE}}) {
    my $deeper = _get_longest_match($child, $path . $child->{REL_PATH}, $check_path);
    return $deeper if($deeper);
  }
  return { node => $node,
           path => $path };
}
2019-08-07 21:30:59 +02:00
# Create a vinfo hash from an url ("/path" or "ssh://host[:port]/path").
# Dies on invalid url. Resulting keys:
#   HOST, PORT - remote host / port, or undef if local
#   NAME       - last path component ("/" for the root directory)
#   PATH       - filesystem path
#   PRINT      - pretty-print form ("host[port]:/path" if remote)
#   URL, URL_PREFIX, MACHINE_ID, CONFIG
sub vinfo($$)
{
  my $url = shift // die;
  my $config = shift;

  my ($url_prefix, $path) = check_url($url);
  die "invalid url: $url" unless(defined($path));

  # NAME: basename of the path; the root directory is named "/"
  (my $name = $path) =~ s/^.*\///;
  $name = '/' if($name eq "");

  my ($host, $port);
  my $print = $path;
  if($url_prefix) {
    # split "ssh://host[:port]" into host and optional port
    ($host = $url_prefix) =~ s/^ssh:\/\/// or die;
    if($host =~ s/:([1-9][0-9]*)$//) {
      $port = $1;
      $print = "$host\[$port\]:$path"; # hostname[port]:/path
    }
    else {
      $print = "$host:$path"; # hostname:/path
    }
  }

  return {
    HOST => $host, # hostname|<undef>
    PORT => $port, # port|<undef>
    NAME => $name,
    PATH => $path,
    PRINT => $print,
    URL => $url_prefix . $path, # ssh://hostname[:port]/path
    URL_PREFIX => $url_prefix, # ssh://hostname[:port] (or "" if local)
    MACHINE_ID => $url_prefix || "LOCAL:", # unique: "LOCAL:" or hostname and port
    CONFIG => $config,
  };
}
2016-05-10 15:51:44 +02:00
# Derive a child vinfo from $parent for a relative path $rel_path.
# SUBVOL_PATH is $rel_path, split into SUBVOL_DIR and NAME (basename).
# An optional $config overrides the parent config.
sub vinfo_child($$;$)
{
  my $parent = shift || die;
  my $rel_path = shift // die;
  my $config = shift; # override parent config

  # split rel_path into directory part and basename
  my ($subvol_dir, $name) = ("", $rel_path);
  if($name =~ s/^(.*)\///) {
    $subvol_dir = $1;
  }

  # Note that PATH and URL intentionally contain "//" if $parent->{PATH} = "/".
  # For consistency reasons (not required), we dont sanitize PRINT either.
  return {
    HOST => $parent->{HOST},
    PORT => $parent->{PORT},
    NAME => $name,
    PATH => "$parent->{PATH}/$rel_path",
    PRINT => "$parent->{PRINT}/$rel_path",
    URL => "$parent->{URL}/$rel_path",
    URL_PREFIX => $parent->{URL_PREFIX},
    MACHINE_ID => $parent->{MACHINE_ID},
    SUBVOL_PATH => $rel_path,
    SUBVOL_DIR => $subvol_dir, # SUBVOL_PATH=SUBVOL_DIR/NAME
    CONFIG => $config // $parent->{CONFIG},
    VINFO_MOUNTPOINT => $parent->{VINFO_MOUNTPOINT},
  };
}
2016-05-11 20:15:46 +02:00
# Build the remote shell command (arrayref, "ssh ...") for a vinfo, or
# undef if the vinfo is local (no HOST). Option disable_compression=>1
# forces ssh compression off even if ssh_compression is configured.
sub vinfo_rsh($;@)
{
  my $vinfo = shift || die;
  my %opts = @_;
  my $host = $vinfo->{HOST};
  return undef unless(defined($host));
  my $config = $vinfo->{CONFIG} || die;

  # as of btrbk-0.28.0, ssh port is a property of a "vinfo", set with
  # "ssh://hostname[:port]" in 'volume' and 'target' sections. Note
  # that the port number is also used for the MACHINE_ID to
  # distinguish virtual machines on same host with different ports.
  my $ssh_port = $vinfo->{PORT};
  unless($ssh_port) {
    # PORT defaults to ssh_port (DEPRECATED)
    $ssh_port = config_key($config, "ssh_port") // "default";
    $ssh_port = undef if($ssh_port eq "default");
  }

  my $ssh_user = config_key($config, "ssh_user");
  my $ssh_identity = config_key($config, "ssh_identity");
  my $ssh_compression = config_key($config, "ssh_compression");
  my $ssh_cipher_spec = config_key($config, "ssh_cipher_spec") // "default";

  # assemble ssh options: quiet mode, port, cipher, identity, compression
  my @options = ('-q');
  push(@options, '-p', $ssh_port) if($ssh_port);
  push(@options, '-c', $ssh_cipher_spec) if($ssh_cipher_spec ne "default");
  if($ssh_identity) {
    push(@options, '-i', $ssh_identity);
  }
  else {
    WARN_ONCE "No SSH identity provided (option ssh_identity is not set) for: " . ($vinfo->{CONFIG}->{url} // $vinfo->{PRINT});
  }
  if($opts{disable_compression}) {
    push(@options, '-o', 'compression=no'); # force ssh compression=no (in case it is defined in ssh_config)
  }
  elsif($ssh_compression) {
    push(@options, '-C');
  }
  return ['ssh', @options, $ssh_user . '@' . $host ];
}
2016-08-27 17:35:47 +02:00
# Map a generic command name to the configured backend command
# (backend_remote / backend_local / backend), appending @cmd_args.
# If no mapping exists in %backend_cmd_map, $cmd is treated as a
# whitespace-separated btrfs-progs command. Returns an arrayref.
sub vinfo_cmd($$@)
{
  my $vinfo = shift || die;
  my $cmd = shift || die;
  my @cmd_args = @_;
  # remote hosts may use a different backend than the local machine
  my $backend = $vinfo->{HOST} ? config_key($vinfo, "backend_remote") : config_key($vinfo, "backend_local");
  $backend //= config_key($vinfo, "backend") // die;
  my $mapped = $backend_cmd_map{$backend}{$cmd};
  if(defined($mapped)) {
    TRACE "vinfo_cmd: found mapping for backend=$backend cmd=\"$cmd\": " . join(' ', @$mapped);
    return [ @$mapped, @cmd_args ];
  }
  # no mapping found: split $cmd into words and use it as-is
  my @words = split(/\s/, $cmd);
  TRACE "vinfo_cmd: no mapping found for backend=$backend cmd=\"$cmd\", assuming btrfs-progs: " . join(' ', @words);
  return [ @words, @cmd_args ];
}
2019-08-05 14:59:41 +02:00
# Convert btrbk timestamp capture buffers (named captures of
# $btrbk_timestamp_match, passed as a flattened hash) to
# [ <unix_time>, <NN>, <has_exact_time> ], or undef on invalid date.
#
# If a time offset ("zz") is present (long-iso format), the timestamp
# is interpreted as UTC and shifted by the offset; otherwise it is
# interpreted in the local timezone.
sub _get_btrbk_date(@)
{
  my %bts = @_; # named capture buffers (%+) from $btrbk_timestamp_match

  # Fix: read the timestamp fields from the passed-in %bts instead of
  # the global match variables (%+). Reading %+ here only worked as
  # long as the caller's regex match happened to be the most recent
  # successful match, and left %bts as dead code.
  my @tm = ( ($bts{ss} // 0), ($bts{mm} // 0), ($bts{hh} // 0), $bts{DD}, ($bts{MM} - 1), ($bts{YYYY} - 1900) );
  my $NN = $bts{NN} // 0;
  my $zz = $bts{zz};
  my $has_exact_time = defined($bts{hh}); # false if timestamp_format=short
  my $time;
  if(defined($zz)) {
    eval_quiet { $time = timegm(@tm); };
  } else {
    eval_quiet { $time = timelocal(@tm); };
  }
  unless(defined($time)) {
    # WARN "$@"; # sadly Time::Local croaks, which also prints the line number from here.
    return undef;
  }
  # handle ISO 8601 time offset
  if(defined($zz)) {
    my $offset;
    if($zz eq 'Z') {
      $offset = 0; # Zulu time == UTC
    }
    elsif($zz =~ /^([+-])([0-9][0-9])([0-9][0-9])$/) {
      # "+hhmm" / "-hhmm" offset, in seconds
      $offset = ( $3 * 60 ) + ( $2 * 60 * 60 );
      $offset *= -1 if($1 eq '-');
    }
    else {
      return undef;
    }
    $time -= $offset;
  }
  return [ $time, $NN, $has_exact_time ];
}
# Parse a btrbk-generated name ("<basename>.<timestamp>[...]") from a
# tree node's REL_PATH, and on success set BTRBK_BASENAME, BTRBK_DATE
# (and BTRBK_RAW if $raw_info is given) on the node.
# Returns the node, or undef if the name does not match the btrbk
# naming scheme (or the timestamp is invalid).
sub add_btrbk_filename_info($;$)
{
  my $node = shift;
  my $raw_info = shift; # if set, match raw backup file names instead of subvolume names
  my $name = $node->{REL_PATH};
  return undef unless(defined($name));
  # NOTE: unless long-iso file format is encountered, the timestamp is interpreted in local timezone.
  $name =~ s/^(.*)\///;
  # match against the current raw, deprecated raw, or plain subvolume scheme
  if($raw_info && ($name =~ /^(?<name>$file_match)\.$btrbk_timestamp_match$raw_postfix_match$/)) { ; }
  elsif($raw_info && $name =~ /^(?<name>$file_match)\.$btrbk_timestamp_match$raw_postfix_match_DEPRECATED$/) { ; } # DEPRECATED raw format
  elsif((not $raw_info) && ($name =~ /^(?<name>$file_match)\.$btrbk_timestamp_match$/)) { ; }
  else {
    return undef;
  }
  $name = $+{name} // die;
  my $btrbk_date = _get_btrbk_date(%+); # use named capture buffers of previous match
  unless($btrbk_date) {
    WARN "Illegal timestamp on subvolume \"$node->{REL_PATH}\", ignoring";
    return undef;
  }
  $node->{BTRBK_BASENAME} = $name;
  $node->{BTRBK_DATE} = $btrbk_date;
  $node->{BTRBK_RAW} = $raw_info if($raw_info);
  return $node;
}
2019-04-24 23:46:44 +02:00
# Initialize the btrfs tree node for vinfo $vol: resolve its mount
# point, parse the subvolume tree (btr_tree), and set $vol->{node},
# $vol->{NODE_SUBDIR} and $vol->{VINFO_MOUNTPOINT}.
# Returns the resolved tree node, or undef on error.
sub vinfo_init_root($)
{
  my $vol = shift || die;

  # resolve btrfs tree from mount point
  my ($mnt_path, $real_path, $subvolid, $mount_source, $mountpoints) = btrfs_mountpoint($vol);
  return undef unless($mnt_path && $real_path && $subvolid);

  # read btrfs tree for the mount point
  my $mnt_vol = vinfo($vol->{URL_PREFIX} . $mnt_path, $vol->{CONFIG});
  my $mnt_tree_root = btr_tree($mnt_vol, $subvolid, $mount_source, $mountpoints);
  return undef unless($mnt_tree_root);

  # find longest match in btrfs tree
  my $ret = _get_longest_match($mnt_tree_root, $mnt_path, $real_path) // die;
  my $tree_root = $ret->{node};
  return undef unless($tree_root);

  # set NODE_SUBDIR if $vol->{PATH} points to a regular (non-subvolume) directory.
  # in other words, "PATH=<path_to_subvolume>/NODE_SUBDIR"
  my $node_subdir = $real_path;
  die unless($node_subdir =~ s/^\Q$ret->{path}\E//); # NOTE: $ret->{path} has trailing slash!
  $node_subdir =~ s/\/+$//;
  $vol->{NODE_SUBDIR} = $node_subdir if($node_subdir ne '');
  $vol->{node} = $tree_root;
  # remember the mount point vinfo (with its own tree node)
  $vol->{VINFO_MOUNTPOINT} = vinfo($vol->{URL_PREFIX} . $mnt_path, $vol->{CONFIG});
  $vol->{VINFO_MOUNTPOINT}{node} = $mnt_tree_root;

  return $tree_root;
}
2018-02-07 20:17:23 +01:00
# Initialize a fake btrfs tree for a raw backup target directory
# $droot: list the *.info sidecar files, inject one fake subvolume
# node per raw backup, and wire up parent/child backup chains (forcing
# preserve on chain members). Sets $droot->{node} and
# $droot->{VINFO_MOUNTPOINT}.
# Returns the (per-URL cached) fake tree root, or undef on error.
sub vinfo_init_raw_root($;@)
{
  my $droot = shift || die;
  my $tree_root = $raw_url_cache{$droot->{URL}};
  TRACE "raw_url_cache " . ($tree_root ? "HIT" : "MISS") . ": URL=$droot->{URL}";
  unless($tree_root) {
    # also try the resolved realpath of $droot as cache key
    if(my $real_path = $realpath_cache{$droot->{URL}}) {
      my $real_url = $droot->{URL_PREFIX} . $real_path;
      $tree_root = $raw_url_cache{$real_url};
      TRACE "raw_url_cache " . ($tree_root ? "HIT" : "MISS") . ": REAL_URL=$real_url";
    }
  }
  unless($tree_root) {
    DEBUG "Creating raw subvolume list: $droot->{PRINT}";
    # create fake btr_tree
    $tree_root = { id => 5,
                   is_root => 1,
                   host_mount_source => $droot->{URL} . '@raw_tree', # for completeness (this is never used)
                   GEN_MAX => 1,
                   SUBTREE => [],
                   UUID_HASH => {},
                   RECEIVED_UUID_HASH => {},
                   MOUNTPOINT_URL => $droot->{URL},
                 };
    $tree_root->{TREE_ROOT} = $tree_root;
    # list and parse *.info
    my $raw_info_ary = system_read_raw_info_dir($droot);
    return undef unless($raw_info_ary);
    # inject nodes to fake btr_tree
    $droot->{node} = $tree_root;
    my %child_uuid_list; # RECEIVED_PARENT_UUID -> [ child vinfos ]
    foreach my $raw_info (@$raw_info_ary)
    {
      # Set btrfs subvolume information (received_uuid, parent_uuid) from filename info.
      #
      # NOTE: received_parent_uuid in BTRBK_RAW is the "parent of the source subvolume", NOT the
      # "parent of the received subvolume".
      my $subvol = vinfo_child($droot, $raw_info->{FILE});
      unless(vinfo_inject_child($droot, $subvol, {
          TARGET_TYPE => $raw_info->{TYPE},
          parent_uuid => '-', # NOTE: correct value gets inserted below
          # Incomplete raw fakes get same semantics as real subvolumes (readonly=0, received_uuid='-')
          received_uuid => ($raw_info->{INCOMPLETE} ? '-' : $raw_info->{RECEIVED_UUID}),
          readonly => ($raw_info->{INCOMPLETE} ? 0 : 1),
        }, $raw_info))
      {
        if($raw_info->{INFO_FILE}) {
          ERROR("Ambiguous \"FILE=\" in raw info file: \"$raw_info->{INFO_FILE}\"");
        } else {
          # DEPRECATED raw format
          ERROR("Ambiguous file: \"$raw_info->{FILE}\"");
        }
        return undef;
      }
      if($raw_info->{RECEIVED_PARENT_UUID} ne '-') {
        $child_uuid_list{$raw_info->{RECEIVED_PARENT_UUID}} //= [];
        push @{$child_uuid_list{$raw_info->{RECEIVED_PARENT_UUID}}}, $subvol;
      }
    }
    my @subvol_list = @{vinfo_subvol_list($droot, sort => 'path')};
    DEBUG "Found " . scalar(@subvol_list) . " raw subvolume backups in: $droot->{PRINT}";
    foreach my $subvol (@subvol_list)
    {
      # If restoring a backup from raw btrfs images (using "incremental yes|strict"):
      # "btrfs send -p parent source > svol.btrfs", the backups
      # on the target will get corrupted (unusable!) as soon as
      # an any files in the chain gets deleted.
      #
      # We need to make sure btrbk will NEVER delete those:
      # - svol.<timestamp>--<received_uuid_0>.btrfs : root (full) image
      # - svol.<timestamp>--<received_uuid-n>[@<received_uuid_n-1>].btrfs : incremental image
      foreach my $child (@{$child_uuid_list{$subvol->{node}{received_uuid}}}) {
        # Insert correct (i.e. fake) parent UUID
        $child->{node}{parent_uuid} = $subvol->{node}{uuid};
        # Make sure that incremental backup chains are never broken:
        DEBUG "Found parent/child partners, forcing preserve of: \"$subvol->{PRINT}\", \"$child->{PRINT}\"";
        $subvol->{node}{FORCE_PRESERVE} = "preserve forced: parent of another raw target";
        $child->{node}{FORCE_PRESERVE} ||= "preserve forced: child of another raw target";
      }
    }
    # TRACE(Data::Dumper->Dump([\@subvol_list], ["vinfo_raw_subvol_list{$droot}"]));
  }
  $droot->{node} = $tree_root;
  $droot->{VINFO_MOUNTPOINT} = $droot; # fake mountpoint
  $raw_url_cache{$droot->{URL}} = $tree_root;
  return $tree_root;
}
2016-03-15 11:21:59 +01:00
# Recursively collect vinfo children for all nodes below $tree.
#
# Positional arguments:
#   $tree, $vinfo_parent
#   $filter_readonly          - if set, return only read-only subvolumes
#   $filter_btrbk_direct_leaf - if set, return only read-only direct
#                               leafs matching this btrbk basename
#   $list, $path_prefix, $depth - internal recursion state
#
# Returns an arrayref of vinfo hashes, each with {node} and
# {subtree_depth} set.
sub _vinfo_subtree_list
{
  my $tree = shift;
  my $vinfo_parent = shift;
  my $filter_readonly = shift; # if set, return only read-only
  my $filter_btrbk_direct_leaf = shift; # if set, return only read-only direct leafs matching btrbk_basename
  my $list = shift // [];
  my $path_prefix = shift // "";
  my $depth = shift // 0;

  # if $vinfo_parent->{NODE_SUBDIR} is set, vinfo_parent->{PATH} does
  # not point to a subvolume directly, but to "<path_to_subvolume>/NODE_SUBDIR".
  # skip nodes wich are not in NODE_SUBDIR, or strip NODE_SUBDIR from from rel_path.
  my $node_subdir_filter = ($depth == 0) ? $vinfo_parent->{NODE_SUBDIR} : undef;
  foreach my $node (@{$tree->{SUBTREE}}) {
    my $rel_path = $node->{REL_PATH};
    if(defined($node_subdir_filter)) {
      # strip the subdir prefix; skip nodes outside of NODE_SUBDIR
      next unless($rel_path =~ s/^\Q$node_subdir_filter\E\///);
    }
    my $path = $path_prefix . $rel_path; # always points to a subvolume

    # filter direct leafs (SUBVOL_DIR="") matching btrbk_basename
    next unless(!defined($filter_btrbk_direct_leaf) ||
                (exists($node->{BTRBK_BASENAME}) && ($node->{BTRBK_BASENAME} eq $filter_btrbk_direct_leaf) &&
                 ($rel_path !~ /\//))); # note: depth is always 0 if $filter_btrbk_direct_leaf
    # filter readonly, push vinfo_child
    if(!$filter_readonly || $node->{readonly}) {
      my $vinfo = vinfo_child($vinfo_parent, $path);
      $vinfo->{node} = $node;
      # add some additional information to vinfo
      $vinfo->{subtree_depth} = $depth;
      push(@$list, $vinfo);
    }

    # recurse into the subtree (skipped in direct-leaf mode)
    unless(defined($filter_btrbk_direct_leaf)) {
      _vinfo_subtree_list($node, $vinfo_parent, $filter_readonly, undef, $list, $path . '/', $depth + 1);
    }
  }
  return $list;
}
2016-04-03 20:46:29 +02:00
# Return arrayref of vinfo for all subvolumes below $vol.
#
# Options:
#   readonly          - only read-only subvolumes
#   btrbk_direct_leaf - only direct leafs matching this btrbk_basename
#   sort => 'path'    - sort result by SUBVOL_PATH (only supported sort key)
sub vinfo_subvol_list($;@)
{
  my $vol = shift || die;
  my %opts = @_;
  TRACE "Creating subvolume list for: $vol->{PRINT}";
  # recurse into tree from $vol->{node}, returns arrayref of vinfo
  my $subvol_list = _vinfo_subtree_list($vol->{node}, $vol, $opts{readonly}, $opts{btrbk_direct_leaf});
  if($opts{sort}) {
    if($opts{sort} eq 'path') {
      my @sorted = sort { $a->{SUBVOL_PATH} cmp $b->{SUBVOL_PATH} } @$subvol_list;
      return \@sorted;
    }
    else { die; }  # assert: unknown sort key is a programming error
  }
  return $subvol_list;
}
2015-03-13 12:12:37 +01:00
2016-01-13 14:29:44 +01:00
2019-05-22 23:02:36 +02:00
# returns vinfo_child if $node is in tree below $vol (or equal if allow_equal), or undef
2019-04-11 14:38:41 +02:00
# Resolve a btr_tree $node to a vinfo relative to $vol.
# Returns vinfo_child if $node is in the tree below $vol (or $vol itself
# if opts{allow_equal}), or undef if $node is not resolvable below $vol.
#
# Options:
#   allow_equal       - return $vol if $node is $vol's own node
#   btrbk_direct_leaf - only resolve direct leafs matching this btrbk_basename
sub vinfo_resolved($$;@)
{
  my $node = shift || die;
  my $vol = shift || die; # root vinfo node
  my %opts = @_;
  my $top_id = $vol->{node}{id};
  my @path;
  my $nn = $node;
  # climb up towards tree root, collecting relative path components,
  # until we reach $vol's node (or the btrfs root)
  while(($nn->{id} != $top_id) && (!$nn->{is_root})) {
    unshift(@path, $nn->{REL_PATH});
    $nn = $nn->{TOP_LEVEL};
  }
  if(scalar(@path) == 0) {
    # $node is $vol's own node
    return $vol if($opts{allow_equal} && not defined($vol->{NODE_SUBDIR}));
    return undef;
  }
  # reached btrfs root without passing $vol: $node is not below $vol
  return undef if($nn->{is_root} && (!$vol->{node}{is_root}));
  my $jpath = join('/', @path);
  if(defined($vol->{NODE_SUBDIR})) {
    # $vol points into a subdirectory of its node: strip it, or fail
    return undef unless($jpath =~ s/^\Q$vol->{NODE_SUBDIR}\E\///);
  }
  if(defined($opts{btrbk_direct_leaf})) {
    return undef if($jpath =~ /\//);  # not a direct leaf
    return undef unless(exists($node->{BTRBK_BASENAME}) && ($node->{BTRBK_BASENAME} eq $opts{btrbk_direct_leaf}));
  }
  my $vinfo = vinfo_child($vol, $jpath);
  $vinfo->{node} = $node;
  return $vinfo;
}
2018-10-18 17:54:46 +02:00
# returns vinfo if $node is below any mountpoint of $vol
# Resolve $node against every mountpoint of the filesystem of $vol.
# Returns vinfo if $node is below (or equal to) any mountpoint, or undef.
sub vinfo_resolved_all_mountpoints($$)
{
  my $node = shift || die;
  my $vol = shift || die;
  my $tree_root = $vol->{node}{TREE_ROOT};
  foreach (@{$tree_root->{MOUNTPOINTS}}) {
    my $mnt_path = $_->{file};
    my $mnt_node = $tree_root->{ID_HASH}{$_->{subvolid}};
    next unless($mnt_node);  # mountpoint subvolid not present in tree
    # construct a fake vinfo rooted at this mountpoint, then try to resolve
    my $mnt_vol = vinfo($vol->{URL_PREFIX} . $mnt_path, $vol->{CONFIG});
    $mnt_vol->{node} = $mnt_node;
    TRACE "vinfo_resolved_all_mountpoints: trying mountpoint: $mnt_vol->{PRINT}";
    my $vinfo = vinfo_resolved($node, $mnt_vol, allow_equal => 1);
    return $vinfo if($vinfo);
  }
  return undef;
}
2016-03-15 11:21:59 +01:00
# Return vinfo of the subvolume at $subvol_path below $vol, or undef
# if no subvolume matches exactly.
sub vinfo_subvol($$)
{
  my $vol = shift || die;
  my $subvol_path = shift // die;
  foreach (@{vinfo_subvol_list($vol)}) {
    return $_ if($_->{SUBVOL_PATH} eq $subvol_path);
  }
  return undef;
}
2015-01-14 14:10:41 +01:00
2018-02-15 16:53:29 +01:00
# Return true if $vol is a read-only btrbk snapshot with matching
# btrbk basename, located in $subvol_dir (defaults to "").
sub vinfo_is_btrbk_snapshot($$;$)
{
  my $vol = shift || die;
  my $btrbk_basename = shift || die;
  my $subvol_dir = shift // "";
  return ($vol->{node}{readonly} && defined($vol->{node}{BTRBK_BASENAME}) &&
          ($vol->{SUBVOL_DIR} eq $subvol_dir) &&
          ($vol->{node}{BTRBK_BASENAME} eq $btrbk_basename));
}
2017-06-16 17:43:17 +02:00
# Inject $vinfo_child as a child node of $vinfo into the btr_tree, using
# subvolume details from $detail. Optional $raw_info is passed on to
# add_btrbk_filename_info(). Returns $vinfo_child with {node} assigned,
# or undef if the btrbk filename info could not be added.
sub vinfo_inject_child($$$;$)
{
  my $vinfo = shift;
  my $vinfo_child = shift;
  my $detail = shift;
  my $raw_info = shift;
  my $node;
  # node rel_path is relative to the subvolume: re-prepend NODE_SUBDIR if present
  my $node_subdir = defined($vinfo->{NODE_SUBDIR}) ? $vinfo->{NODE_SUBDIR} . '/' : "";
  my $rel_path = $node_subdir . $vinfo_child->{SUBVOL_PATH};
  $node = btr_tree_inject_node($vinfo->{node}, $detail, $rel_path);
  return undef unless(add_btrbk_filename_info($node, $raw_info));
  $vinfo_child->{node} = $node;
  TRACE "vinfo_inject_child: injected child id=$node->{id} to $vinfo->{PRINT}";
  return $vinfo_child;
}
2016-03-15 11:21:59 +01:00
# returns hash: ( $prefix_{url,path,host,name,subvol_path,rsh} => value, ... )
# returns hash: ( $prefix_{url,path,host,name,subvol_path,rsh} => value, ... )
# If $prefix is non-empty, also sets $ret{$prefix} = PRINT representation.
# Returns empty list if $vinfo is not set.
sub vinfo_prefixed_keys($$)
{
  my $prefix = shift // die;
  my $vinfo = shift;
  return () unless($vinfo);
  my %ret;
  if($prefix) {
    $ret{$prefix} = $vinfo->{PRINT};
    $prefix .= '_';
  }
  foreach (qw( URL PATH HOST PORT NAME SUBVOL_PATH )) {
    $ret{$prefix . lc($_)} = $vinfo->{$_};
  }
  $ret{$prefix . "subvol"} = $vinfo->{PATH};
  my $rsh = vinfo_rsh($vinfo);
  # fixed: statement was terminated with a stray "," (comma operator)
  # instead of a semicolon
  $ret{$prefix . "rsh"} = ($rsh ? join(" ", @$rsh) : undef);
  return %ret;
}
2014-12-14 19:23:02 +01:00
2018-02-15 00:17:01 +01:00
# Link $vinfo with its config section (back reference in CONFIG->{VINFO}),
# and remember the snapshot root vinfo in CONFIG->{VINFO_SNAPROOT}.
# Dies if the config section already has a vinfo assigned.
sub vinfo_assign_config($;$)
{
  my $vinfo = shift || die;
  my $vinfo_snapshot_root = shift;
  my $config = $vinfo->{CONFIG} || die;
  die if($config->{VINFO});  # assert: assign only once per config section
  $config->{VINFO} = $vinfo;
  $config->{VINFO_SNAPROOT} = $vinfo_snapshot_root;
}
# Return the snapshot root vinfo assigned to $vinfo's config section
# (set by vinfo_assign_config), or undef.
sub vinfo_snapshot_root($)
{
  my $vinfo = shift;
  return $vinfo->{CONFIG}{VINFO_SNAPROOT};
}
2016-03-15 11:21:59 +01:00
# Return list of vinfo of all subsections of $config_or_vinfo matching
# $context, skipping ABORTED subsections unless $include_aborted is set.
# Dies on context mismatch or broken vinfo<->config back references.
sub vinfo_subsection($$;$)
{
  # if config: must have SUBSECTION key
  # if vinfo: must have CONFIG key
  my $config_or_vinfo = shift || die;
  my $context = shift || die;
  my $include_aborted = shift;
  my $config_list;
  my $vinfo_check;
  if(exists($config_or_vinfo->{SUBSECTION})) {
    # config
    $config_list = $config_or_vinfo->{SUBSECTION};
  }
  else {
    # vinfo
    $config_list = $config_or_vinfo->{CONFIG}->{SUBSECTION};
    die unless($config_or_vinfo->{CONFIG}->{VINFO} == $config_or_vinfo); # check back reference
  }
  # for now be paranoid and check all contexts
  my @ret;
  foreach (@$config_list) {
    die unless($_->{CONTEXT} eq $context);
    next if((not $include_aborted) && $_->{ABORTED});
    die unless($_->{VINFO});
    die unless($_->{VINFO}->{CONFIG});
    die unless($_->{VINFO} == $_->{VINFO}->{CONFIG}->{VINFO}); # check all back references
    push @ret, $_->{VINFO};
  }
  return @ret;
  # much simpler implementation, without checks
  #return map { $_->{ABORTED} ? () : $_->{VINFO} } @$config_list;
}
2019-04-17 15:56:35 +02:00
# allow (absolute) path / url with wildcards
# allow group (exact match)
# allow host[:port] (exact match)
sub vinfo_filter_statement($) {
  # Parse a single filter statement into a hashref with any of:
  #   url_regex    - compiled regex for path/url filters (wildcards expanded)
  #   group_eq     - literal group name for exact group match
  #   host_port_eq - literal "host" or "host:port" for exact host match
  # Returns undef if the statement parses to none of the above.
  my $raw = shift;
  my %parsed = ( unparsed => $raw );

  # first interpretation: (absolute) url/path with wildcards
  my ($url_prefix, $path) = check_url($raw, accept_wildcards => 1);
  unless($path) {
    # second interpretation: relative path with wildcards
    $url_prefix = "";
    $path = check_file($raw, { relative => 1, wildcards => 1 }, sanitize => 1);
  }
  if($path) {
    # support "*some*file*", "*/*": each run of '*' matches within one path element
    my $regex = join('[^\/]*', map(quotemeta($_), split(/\*+/, $url_prefix . $path, -1)));
    $parsed{url_regex} = ($path =~ /^\//) ? qr/^$regex$/   # absolute path, match full string
                                          : qr/\/$regex$/; # match end of string
  }
  # third/fourth interpretation: exact group name / host[:port]
  $parsed{group_eq} = $raw if($raw =~ /^$group_match$/);
  $parsed{host_port_eq} = $raw if($raw =~ /^($ip_addr_match|$host_name_match)(:[1-9][0-9]*)?$/);
  TRACE 'vinfo_filter_statement: filter="' . $raw . '" url_regex="' . ($parsed{url_regex} // "<undef>") . '" group_eq="' . ($parsed{group_eq} // "<undef>") . '" host_port_eq="' . ($parsed{host_port_eq} // "<undef>") . '"';
  return \%parsed if(exists($parsed{url_regex}) || exists($parsed{group_eq}) || exists($parsed{host_port_eq}));
  return undef;
}
# Match $vinfo against an arrayref of filter statements (hashrefs as
# returned by vinfo_filter_statement).
# Without opts{flag_matched}: returns the first matching filter statement
# (hashref, true), or the match count (0, false) if none matched.
# With opts{flag_matched} => KEY: checks ALL filters, sets $ff->{KEY} = 1
# on every matching one, and returns the total match count.
sub vinfo_match($$;@)
{
  my $filter = shift;
  my $vinfo = shift;
  my %opts = @_;
  my $flag_matched = $opts{flag_matched};
  my $url = join("", check_url($vinfo->{URL})); # sanitize URL (can contain "//", see vinfo_child)
  my $count = 0;
  foreach my $ff (@$filter) {
    # group filter: exact match against any of the configured group names
    if(defined($ff->{group_eq}) && (grep { $ff->{group_eq} eq $_ } @{$vinfo->{CONFIG}{group}})) {
      TRACE "filter \"$ff->{unparsed}\" equals $vinfo->{CONFIG}{CONTEXT} group: $vinfo->{PRINT}";
      return $ff unless($flag_matched);
      #push @{$ff->{$flag_matched}}, 'group=' . $ff->{group_eq};
      $ff->{$flag_matched} = 1;
      $count++;
    }
    # url filter: regex match on sanitized url
    if(defined($ff->{url_regex}) && ($url =~ /$ff->{url_regex}/)) {
      TRACE "filter \"$ff->{unparsed}\" matches $vinfo->{CONFIG}{CONTEXT} url: $vinfo->{PRINT}";
      return $ff unless($flag_matched);
      #push @{$ff->{$flag_matched}}, $vinfo->{CONFIG}{CONTEXT} . '=' . $vinfo->{PRINT};
      $ff->{$flag_matched} = 1;
      $count++;
    }
    # host filter: exact match on "host", or "host:port" if filter has a port
    if(defined($ff->{host_port_eq})) {
      if(my $host = $vinfo->{HOST}) {
        if($ff->{host_port_eq} =~ /:/) {
          # filter specifies a port: compare against "host:port"
          $host .= ":" . ($vinfo->{PORT} // "");
        }
        if($host eq $ff->{host_port_eq}) {
          TRACE "filter \"$ff->{unparsed}\" matches $vinfo->{CONFIG}{CONTEXT} host: $vinfo->{PRINT}";
          return $ff unless($flag_matched);
          #push @{$ff->{$flag_matched}}, $vinfo->{CONFIG}{CONTEXT} . '=' . $vinfo->{PRINT};
          $ff->{$flag_matched} = 1;
          $count++;
        }
      }
    }
  }
  return $count;
}
2019-04-11 15:56:37 +02:00
# Return all snapshots related to $svol (by parent_uuid relationship,
# see get_related_nodes) which are resolvable below $snaproot.
# Far relations (e.g. orphaned siblings whose common ancestor was
# deleted) are included.
# If $btrbk_basename is set, only direct leafs matching it are returned.
sub get_related_snapshots($$;$)
{
  my $snaproot = shift || die;
  my $svol = shift // die;
  my $btrbk_basename = shift; # if set, also filter by direct_leaf
  my @ret = map( { vinfo_resolved($_, $snaproot, btrbk_direct_leaf => $btrbk_basename) // () }
                 @{get_related_nodes($svol, readonly => 1, omit_self => 1)} );
  if($loglevel >= 4) { TRACE "get_related_snapshots: found: $_->{PRINT}" foreach(@ret); }
  DEBUG "Found " . scalar(@ret) . " related snapshots of \"$svol->{PRINT}\" in: $snaproot->{PRINT}" . (defined($btrbk_basename) ? "/$btrbk_basename.*" : "");
  return @ret;
}
2015-04-23 16:19:34 +02:00
2019-04-04 15:55:17 +02:00
# Return all read-only nodes in $droot's filesystem tree which are
# correlated to $src_vol by uuid / received_uuid (i.e. candidates for
# send/receive targets). Returns empty list if $src_vol is the btrfs
# root or not read-only.
sub _correlated_nodes($$)
{
  my $droot = shift || die;
  my $src_vol = shift || die;
  my @ret;
  if($src_vol->{node}{is_root}) {
    DEBUG "Skip search for correlated targets: source subvolume is btrfs root: $src_vol->{PRINT}";
    return @ret;
  }
  unless($src_vol->{node}{readonly}) {
    DEBUG "Skip search for correlated targets: source subvolume is not read-only: $src_vol->{PRINT}";
    return @ret;
  }
  # find matches by comparing uuid / received_uuid
  my $uuid = $src_vol->{node}{uuid};
  my $received_uuid = $src_vol->{node}{received_uuid};
  $received_uuid = undef if($received_uuid eq '-'); # '-' denotes "not received"
  my $received_uuid_hash = $droot->{node}{TREE_ROOT}{RECEIVED_UUID_HASH};
  my $uuid_hash = $droot->{node}{TREE_ROOT}{UUID_HASH};
  # match uuid/received_uuid combinations
  my @match;
  push(@match, @{ $received_uuid_hash->{$uuid} // [] });            # match src.uuid == target.received_uuid
  if($received_uuid) {
    # NOTE: may push undef if no node matches; filtered out by grep below
    push(@match, $uuid_hash->{$received_uuid} );                    # match src.received_uuid == target.uuid
    push(@match, @{ $received_uuid_hash->{$received_uuid} // [] }); # match src.received_uuid == target.received_uuid
  }
  @ret = grep($_->{readonly}, @match);
  TRACE "correlated_nodes: droot=\"$droot->{PRINT}/\", src_vol=\"$src_vol->{PRINT}\": [" . join(", ", map _fs_path($_),@ret) . "]" if($loglevel >= 4);
  return @ret;
}
2018-02-15 17:42:41 +01:00
# returns array of vinfo of receive targets matching btrbk name
# Return array of vinfo of receive targets of $src_vol below $droot.
#
# Options:
#   exact          - only accept direct leafs matching $src_vol's NAME
#   warn           - emit WARN for correlated targets at unexpected locations
#   ret_unexpected - scalar ref, set to 1 if an unexpected match was skipped
sub get_receive_targets($$;@)
{
  my $droot = shift || die;
  my $src_vol = shift || die;
  my %opts = @_;
  my @ret;
  my @correlated = _correlated_nodes($droot, $src_vol);
  foreach (@correlated) {
    my $vinfo = vinfo_resolved($_, $droot); # returns undef if not below $droot
    if(exists($_->{BTRBK_RAW})) {
      # NOTE(review): $vinfo may be undef here (raw target not below $droot),
      # and is pushed as-is below — confirm callers handle undef entries
      TRACE "get_receive_targets: found raw receive target: " . _fs_path($_);
    }
    elsif($vinfo && ($vinfo->{SUBVOL_PATH} eq $src_vol->{NAME})) { # direct leaf, (SUBVOL_DIR = "", matching NAME)
      TRACE "get_receive_targets: found receive target (exact-match): $vinfo->{PRINT}";
    }
    elsif($vinfo && (not $opts{exact})) {
      TRACE "get_receive_targets: found receive target (non-exact-match): $vinfo->{PRINT}";
    }
    else {
      # correlated subvolume exists, but at an unexpected location
      TRACE "get_receive_targets: skip unexpected match: " . _fs_path($_);
      ${$opts{ret_unexpected}} = 1 if($opts{ret_unexpected});
      if($opts{warn}) {
        WARN "Receive target of \"$src_vol->{PRINT}\" exists at unexpected location: " . ($vinfo ? $vinfo->{PRINT} : _fs_path($_));
      }
      next;
    }
    push(@ret, $vinfo);
  }
  return @ret;
}
2015-10-20 22:05:02 +02:00
2018-10-18 17:54:46 +02:00
# returns best correlated receive target within droot (independent of btrbk name)
2019-04-09 22:09:12 +02:00
# Return best correlated receive target of $src_vol within $droot
# (independent of btrbk name), as pair [ $src_vol, $target_vinfo ],
# or undef if no correlated subvolume is accessible.
#
# Options:
#   fallback_all_mountpoints - also try resolving via all mountpoints of
#                              the filesystem at $droot
#   push_inaccessible_nodes  - arrayref: on failure, push all correlated
#                              (but inaccessible) nodes here
sub get_best_correlated($$;@)
{
  my $droot = shift || die;
  my $src_vol = shift || die;
  my %opts = @_;
  my $inaccessible_nodes = $opts{push_inaccessible_nodes};
  my @correlated = _correlated_nodes($droot, $src_vol); # all matching src_vol, from droot->TREE_ROOT
  foreach (@correlated) {
    my $vinfo = vinfo_resolved($_, $droot); # $vinfo is within $droot
    return [ $src_vol, $vinfo ] if($vinfo);
  }
  if($opts{fallback_all_mountpoints}) {
    foreach (@correlated) {
      my $vinfo = vinfo_resolved_all_mountpoints($_, $droot); # $vinfo is within any mountpoint of filesystem at $droot
      return [ $src_vol, $vinfo ] if($vinfo);
    }
  }
  # correlated nodes exist but none is accessible: report them to caller
  push @$inaccessible_nodes, @correlated if($inaccessible_nodes);
  return undef;
}
2019-05-02 18:41:15 +02:00
# returns all related readonly nodes (by parent_uuid relationship), unsorted.
2019-05-22 15:33:48 +02:00
# Return all nodes related to $vol by parent_uuid relationship, unsorted:
# first climb up the parent chain, then collect all descendants of the
# topmost ancestor (which includes siblings, even if the common ancestor
# node itself was deleted).
#
# Options:
#   readonly  - only collect read-only nodes
#   omit_self - exclude $vol's own node from the result
#   nowarn    - suppress WARN_ONCE on aborted (too deep) searches
#
# Returns: arrayref of nodes.
sub get_related_nodes($;@)
{
  my $vol = shift // die;
  my %opts = @_;
  TRACE "related_nodes: resolving related subvolumes of: $vol->{PATH}";
  # iterate parent chain
  my @related_nodes;
  my $uuid_hash = $vol->{node}{TREE_ROOT}{UUID_HASH};
  my $parent_uuid_hash = $vol->{node}{TREE_ROOT}{PARENT_UUID_HASH};
  my $node = $vol->{node};
  my $uuid = $node->{uuid};
  my $abort_distance = 4096;  # hard limit on relation depth (both directions)
  # climb up parent chain
  my $distance = 0; # parent distance
  while(($distance < $abort_distance) && defined($node) && ($node->{parent_uuid} ne "-")) {
    $uuid = $node->{parent_uuid};
    $node = $uuid_hash->{$uuid};  # undef if the parent subvolume was deleted
    TRACE "related_nodes: d=$distance uuid=$uuid : parent: " . ($node ? _fs_path($node) : "<deleted>") if($loglevel >= 4);
    $distance++;
  }
  if($distance >= $abort_distance) {
    my $logmsg = "Parent UUID chain exceeds depth=$abort_distance, ignoring related parents of uuid=$uuid for: $vol->{PATH}";
    DEBUG $logmsg;
    WARN_ONCE $logmsg unless($opts{nowarn});
  }
  TRACE "related_nodes: d=$distance uuid=$uuid : top of parent chain";
  # push related children (even if parent node is missing -> siblings)
  my @nn;  # worklist: nodes, interleaved with {MARK_UUID,MARK_DISTANCE} markers
  $distance = $distance * (-1); # child distance (from top parent)
  # (removed a no-op self-assignment of $abort_distance found here)
  while($uuid) {
    push @related_nodes, $node if($node && (!$opts{readonly} || $node->{readonly}));
    my $children = $parent_uuid_hash->{$uuid};
    if($children) {
      if($distance >= $abort_distance) {
        my $logmsg = "Parent/child relations exceed depth=$abort_distance, ignoring related children of uuid=$uuid for: $vol->{PATH}";
        DEBUG $logmsg;
        WARN_ONCE $logmsg unless($opts{nowarn});
      } else {
        # marker restores $distance when this node's children are processed
        push @nn, { MARK_UUID => $uuid, MARK_DISTANCE => ($distance + 1) }, @$children;
      }
    }
    if($loglevel >= 4) {
      if($node) {
        if($node->{readonly}) {
          TRACE "related_nodes: d=$distance uuid=$uuid : push related readonly: " . _fs_path($node);
        } else {
          TRACE "related_nodes: d=$distance uuid=$uuid : " . ($opts{readonly} ? "" : "push ") . "related not readonly: " . _fs_path($node);
        }
      } else {
        TRACE "related_nodes: d=$distance uuid=$uuid : related missing: <deleted>";
      }
      if($children && ($distance < $abort_distance)) {
        TRACE "related_nodes: d=$distance uuid=$uuid : postpone " . scalar(@$children) . " children";
      }
    }
    $node = shift @nn;
    if(exists($node->{MARK_DISTANCE})) {
      # marker reached, restore distance
      $distance = $node->{MARK_DISTANCE};
      TRACE "related_nodes: d=$distance uuid=$node->{MARK_UUID} : processing children" if($loglevel >= 4);
      $node = shift @nn;
    }
    $uuid = $node->{uuid};  # undef (loop exit) when worklist is exhausted
  }
  if($opts{omit_self}) {
    my $vol_node_id = $vol->{node}{id};
    my @filtered = grep { $_->{id} != $vol_node_id } @related_nodes;
    TRACE "related_nodes: found total=" . scalar(@filtered) . " related readonly subvolumes";
    return \@filtered;
  }
  TRACE "related_nodes: found total=" . scalar(@related_nodes) . " related readonly subvolumes (including self)";
  return \@related_nodes;
}
2019-04-09 22:09:12 +02:00
# returns parent, along with clone sources
2018-10-18 17:54:46 +02:00
sub get_best_parent($$$;@)
2018-02-15 17:42:41 +01:00
{
my $svol = shift // die;
2018-10-18 17:54:46 +02:00
my $snaproot = shift // die;
2018-02-15 17:42:41 +01:00
my $droot = shift || die;
2018-10-18 17:52:01 +02:00
my %opts = @_;
2019-04-09 22:09:12 +02:00
my $ret_clone_src = $opts{clone_src};
my $ret_clone_src_extra = $opts{clone_src_extra};
my $ret_target_parent_node = $opts{target_parent_node};
2019-04-09 22:15:18 +02:00
my $strict_related = $opts{strict_related};
2019-04-09 22:09:12 +02:00
TRACE "get_best_parent: resolving best common parent for subvolume: $svol->{PRINT} (droot=$droot->{PRINT})";
2018-10-18 17:54:46 +02:00
# honor incremental_resolve option
my $source_incremental_resolve = config_key($svol, "incremental_resolve");
my $target_incremental_resolve = config_key($droot, "incremental_resolve");
my $resolve_sroot = ($source_incremental_resolve eq "mountpoint") ? $snaproot->{VINFO_MOUNTPOINT} : $snaproot;
my $resolve_droot = ($source_incremental_resolve eq "mountpoint") ? $droot->{VINFO_MOUNTPOINT} : $droot;
# NOTE: Using parents from different mount points does NOT work, see
# <https://github.com/kdave/btrfs-progs/issues/96>.
# btrfs-progs-4.20.2 fails if the parent subvolume is not on same
# mountpoint as the source subvolume:
# - btrfs send -p: "ERROR: not on mount point: /path/to/mountpoint"
# - btrfs receive: "ERROR: parent subvol is not reachable from inside the root subvol"
my $source_fallback_all_mountpoints = ($source_incremental_resolve eq "_all_accessible");
my $target_fallback_all_mountpoints = ($target_incremental_resolve eq "_all_accessible");
2018-07-18 15:35:56 +02:00
2019-04-09 22:09:12 +02:00
my @inaccessible_nodes;
my %gbc_opts = ( push_inaccessible_nodes => \@inaccessible_nodes,
fallback_all_mountpoints => $target_fallback_all_mountpoints,
);
2018-10-18 17:52:01 +02:00
2019-05-02 18:41:15 +02:00
# resolve correlated subvolumes by parent_uuid relationship.
# no warnings on aborted search (due to deep relations), note that
# we could limit the search depth here for some performance
# improvements, as this only affects extra clones.
2019-04-09 22:09:12 +02:00
my %c_rel_id; # map id to c_related
my @c_related; # candidates for parent (correlated + related), unsorted
2019-05-22 15:33:48 +02:00
foreach (@{get_related_nodes($svol, readonly => 1, omit_self => 1, nowarn => 1)}) {
2018-10-18 17:54:46 +02:00
my $vinfo = vinfo_resolved($_, $resolve_sroot);
if((not $vinfo) && $source_fallback_all_mountpoints) { # related node is not under $resolve_sroot
$vinfo = vinfo_resolved_all_mountpoints($_, $svol);
}
if($vinfo) {
2019-05-19 19:01:50 +02:00
my $correlated = get_best_correlated($resolve_droot, $vinfo, %gbc_opts);
2019-04-09 22:09:12 +02:00
push @c_related, $correlated if($correlated);
$c_rel_id{$_->{id}} = $correlated;
2018-10-18 17:54:46 +02:00
} else {
DEBUG "Related subvolume is not accessible within $source_incremental_resolve \"$resolve_sroot->{PRINT}\": " . _fs_path($_);
}
2018-10-18 17:52:01 +02:00
}
2019-04-09 22:09:12 +02:00
# sort by cgen
my $cgen_ref = $svol->{node}{readonly} ? $svol->{node}{cgen} : $svol->{node}{gen};
my @c_related_older = sort { ($cgen_ref - $a->[0]{node}{cgen}) <=> ($cgen_ref - $b->[0]{node}{cgen}) }
grep { $_->[0]{node}{cgen} <= $cgen_ref } @c_related;
my @c_related_newer = sort { ($a->[0]{node}{cgen} - $cgen_ref) <=> ($b->[0]{node}{cgen} - $cgen_ref) }
grep { $_->[0]{node}{cgen} > $cgen_ref } @c_related;
2018-10-18 17:52:01 +02:00
2019-05-22 15:33:48 +02:00
# NOTE: While get_related_nodes() returns deep parent_uuid
2019-04-09 22:09:12 +02:00
# relations, there is always a chance that these relations get
# broken.
2018-10-18 17:54:46 +02:00
#
# Consider parent_uuid chain ($svol readonly)
# B->A, C->B, delete B: C has no relation to A.
# This is especially true for backups and archives (btrfs receive)
#
# For snapshots (here: S=$svol readwrite) the scenario is different:
# A->S, B->S, C->S, delete B: A still has a relation to C.
#
2019-04-09 22:09:12 +02:00
# resolve correlated subvolumes in same directory matching btrbk file name scheme
my (@c_snapdir_older, @c_snapdir_newer);
if(exists($svol->{node}{BTRBK_BASENAME})) {
2018-10-18 17:52:01 +02:00
my $snaproot_btrbk_direct_leaf = vinfo_subvol_list($snaproot, readonly => 1, btrbk_direct_leaf => $svol->{node}{BTRBK_BASENAME});
2019-04-09 22:09:12 +02:00
my @sbdl_older = sort { cmp_date($b->{node}{BTRBK_DATE}, $a->{node}{BTRBK_DATE}) }
grep { cmp_date($_->{node}{BTRBK_DATE}, $svol->{node}{BTRBK_DATE}) < 0 } @$snaproot_btrbk_direct_leaf;
my @sbdl_newer = sort { cmp_date($a->{node}{BTRBK_DATE}, $b->{node}{BTRBK_DATE}) }
grep { cmp_date($_->{node}{BTRBK_DATE}, $svol->{node}{BTRBK_DATE}) > 0 } @$snaproot_btrbk_direct_leaf;
2019-05-19 19:01:50 +02:00
@c_snapdir_older = map { $c_rel_id{$_->{node}{id}} // get_best_correlated($resolve_droot, $_, %gbc_opts) // () } @sbdl_older;
@c_snapdir_newer = map { $c_rel_id{$_->{node}{id}} // get_best_correlated($resolve_droot, $_, %gbc_opts) // () } @sbdl_newer;
2019-04-09 22:09:12 +02:00
}
if($loglevel >= 4) {
TRACE "get_best_parent: related reference cgen=$svol->{node}{cgen}";
TRACE "get_best_parent: related older: $_->[0]{PRINT} (cgen=$_->[0]{node}{cgen})" foreach(@c_related_older);
TRACE "get_best_parent: related newer: $_->[0]{PRINT} (cgen=$_->[0]{node}{cgen})" foreach(@c_related_newer);
TRACE "get_best_parent: snapdir older: $_->[0]{PRINT}" foreach(@c_snapdir_older);
TRACE "get_best_parent: snapdir newer: $_->[0]{PRINT}" foreach(@c_snapdir_newer);
}
if(scalar @inaccessible_nodes) { # populated by get_best_correlated()
WARN "Best common parent for \"$svol->{PRINT}\" is not accessible within target $target_incremental_resolve \"$resolve_droot->{PRINT}\", ignoring: " . join(", ", map('"' . _fs_path($_) . '"',@inaccessible_nodes));
}
# preferences for parent (and required clone sources):
# 1. closest older in snapdir (by btrbk timestamp), related
# 2. closest older related (by cgen)
# 3. closest newer related (by cgen)
# 4. closest older in snapdir (by btrbk timestamp)
# 5. closest newer in snapdir (by btrbk timestamp)
#
my @parent;
if(my $cc = shift @c_related_older) {
push @parent, $cc; # 2. closest older related (by cgen)
DEBUG "Resolved best common parent (closest older parent_uuid relationship): $cc->[0]{PRINT}";
}
if(my $cc = shift @c_related_newer) {
DEBUG ((scalar @parent ? "Adding clone source" : "Resolved best common parent") . " (closest newer parent_uuid relationship): $cc->[0]{PRINT}");
push @parent, $cc; # 3. closest newer related (by cgen)
}
if(my $cc = shift @c_snapdir_older) {
unless(grep { $_->[0]{node}{id} == $cc->[0]{node}{id} } @parent) {
if($c_rel_id{$cc->[0]{node}{id}}) {
DEBUG "Resolved best common parent (closest older btrbk timestamp, with parent_uuid relationship): $cc->[0]{PRINT}";
unshift @parent, $cc; # 1. closest older in snapdir (by btrbk timestamp), related
}
else {
DEBUG ((scalar @parent ? "Adding clone source" : "Resolved best common parent") . " (closest older btrbk timestamp): $cc->[0]{PRINT}");
push @parent, $cc; # 4. closest older in snapdir (by btrbk timestamp)
}
}
}
if(my $cc = shift @c_snapdir_newer) {
unless(grep { $_->[0]{node}{id} == $cc->[0]{node}{id} } @parent) {
DEBUG ((scalar @parent ? "Adding clone source" : "Resolved best common parent") . " (closest newer btrbk timestamp): $cc->[0]{PRINT}");
push @parent, $cc; # 5. closest newer in snapdir (by btrbk timestamp)
}
}
# assemble results
unless(scalar @parent) {
2018-10-18 17:54:46 +02:00
DEBUG("No common parents of \"$svol->{PRINT}\" found in src=\"$resolve_sroot->{PRINT}/\", target=\"$resolve_droot->{PRINT}/\"");
return undef;
}
2019-04-09 22:15:18 +02:00
if($strict_related && (not scalar(@c_related))) {
# all parents come from c_snapdir (btrbk_direct_leaf), no relations by parent_uuid found
WARN "No related common parent found (by parent_uuid relationship) for: $svol->{PRINT}";
WARN "Hint: setting option \"incremental\" to \"yes\" (instead of \"strict\") will use parent: " . join(", ", map { $_->[0]{PRINT} } @parent);
return undef;
}
2019-04-09 22:09:12 +02:00
my @extra_clones;
foreach my $cc (@c_related_older, @c_related_newer, grep { not exists($c_rel_id{$_->[0]{node}{id}}) } (@c_snapdir_older, @c_snapdir_newer)) {
push @extra_clones, $cc->[0] unless(grep { $_->[0]{node}{id} == $cc->[0]{node}{id} } @parent);
}
DEBUG "Resolved " . (scalar @extra_clones) . " extra clone sources";
if($loglevel >= 4) {
TRACE "get_best_parent: parent,clones: $_->[0]{PRINT}" foreach(@parent);
TRACE "get_best_parent: extra clone : $_->{PRINT}" foreach(@extra_clones);
}
my $ret_parent = shift @parent;
my @clone_src = map { $_->[0] } @parent;
$$ret_clone_src = \@clone_src if($ret_clone_src);
$$ret_clone_src_extra = \@extra_clones if($ret_clone_src_extra);
$$ret_target_parent_node = $ret_parent->[1]{node} if($ret_target_parent_node);
return $ret_parent->[0];
2015-06-02 22:16:33 +02:00
}
2019-04-11 15:56:37 +02:00
# Return the related snapshot of $svol (within $sroot) with the highest
# creation generation (cgen), or undef if no related snapshot exists.
# If $btrbk_basename is given, it is passed through to
# get_related_snapshots() to restrict the candidate set.
sub get_latest_related_snapshot($$;$)
{
  my $sroot = shift || die;
  my $svol = shift // die;
  my $btrbk_basename = shift;

  # simple max-by-cgen scan over all related snapshots
  my $best;
  my $best_cgen = -1;
  for my $snap (get_related_snapshots($sroot, $svol, $btrbk_basename)) {
    my $cgen = $snap->{node}{cgen};
    next unless($cgen > $best_cgen);
    $best = $snap;
    $best_cgen = $cgen;
  }
  if($best) {
    DEBUG "Latest snapshot child for \"$svol->{PRINT}#$svol->{node}{gen}\" is: $best->{PRINT}#$best->{node}{cgen}";
  } else {
    DEBUG "No latest snapshots found for: $svol->{PRINT}";
  }
  return $best;
}
2015-06-02 22:16:33 +02:00
2016-08-29 14:43:29 +02:00
# Validate (and optionally sanitize) a file path against an "accept"
# specification. Used for all file/path values read from configuration
# input ("be very strict about file options, for security sake").
#
# $file:   path to be checked
# $accept: hashref of type flags:
#            absolute  => path must start with "/"
#            relative  => path must NOT start with "/"
#            name_only => path must not contain "/"
#            wildcards => allow glob characters (uses $glob_match whitelist)
# %opts:
#   sanitize        => collapse "//" and "/./", strip trailing "/." and "/"
#   error_statement => text appended to error messages; if not defined,
#                      no error messages are printed
#
# Returns the (sanitized) path, or undef on validation failure.
sub check_file($$;@)
{
  my $file = shift // die;
  my $accept = shift || die;
  my %opts = @_;
  my $sanitize = $opts{sanitize};
  my $error_statement = $opts{error_statement}; # if not defined, no error messages are printed

  # character whitelist ($file_match / $glob_match defined elsewhere in this file)
  my $match = $file_match;
  $match = $glob_match if($accept->{wildcards});

  if($file =~ /^($match)$/) {
    # keep only the matched (whitelisted) part
    $file = $1;
    if($accept->{absolute}) {
      unless($file =~ /^\//) {
        ERROR "Only absolute files allowed $error_statement" if(defined($error_statement));
        return undef;
      }
    }
    elsif($accept->{relative}) {
      if($file =~ /^\//) {
        ERROR "Only relative files allowed $error_statement" if(defined($error_statement));
        return undef;
      }
    }
    elsif($accept->{name_only}) {
      if($file =~ /\//) {
        ERROR "Invalid file name ${error_statement}: $file" if(defined($error_statement));
        return undef;
      }
    }
    elsif(not $accept->{wildcards}) {
      # programming error: caller must specify an accept type
      die("accept_type must contain either 'relative' or 'absolute'");
    }
  }
  else {
    ERROR "Ambiguous file ${error_statement}: $file" if(defined($error_statement));
    return undef;
  }
  # check directory traversal: reject "..", "../...", ".../../..." and ".../.."
  # (checked before sanitizing; sanitizing cannot produce new ".." segments)
  if(($file =~ /^\.\.$/) || ($file =~ /^\.\.\//) || ($file =~ /\/\.\.\//) || ($file =~ /\/\.\.$/)) {
    ERROR "Illegal directory traversal ${error_statement}: $file" if(defined($error_statement));
    return undef;
  }
  if($sanitize) {
    $file =~ s/\/+/\//g; # sanitize multiple slash
    $file =~ s/\/\.\//\//g; # sanitize "/./" -> "/"
    $file =~ s/\/\.$/\//; # sanitize trailing "/." -> "/"
    $file =~ s/\/$// unless($file eq '/'); # remove trailing slash
  }
  return $file;
}
2018-02-13 17:21:44 +01:00
# Split an URL into its host prefix and path, validating the path with
# check_file(). Accepts "ssh://host[:port]/path", "host:/path" (converted
# to an ssh url) and plain local paths.
#
# Returns a two-element list ($url_prefix, $path); $path is undef on
# validation failure (errors reported via check_file()).
sub check_url($;@)
{
  my $url = shift // die;
  my %opts = @_;
  my $url_prefix = "";

  if($url =~ /^ssh:\/\//) {
    # strip "ssh://host[:port]" prefix, keeping the leading slash of the path
    $url_prefix = $1 if($url =~ s/^(ssh:\/\/($ip_addr_match|$host_name_match)(:[1-9][0-9]*)?)\//\//);
    # if no match, treat it as file and let check_file() print errors
  }
  elsif($url =~ s/^($ip_addr_match|$host_name_match)://) {
    # convert "my.host.com:/my/path" to ssh url
    $url_prefix = "ssh://" . $1;
  }

  my $path = check_file($url, { absolute => 1, wildcards => $opts{accept_wildcards} }, sanitize => 1, %opts);
  return ( $url_prefix, $path );
}
2015-09-29 19:43:11 +02:00
2015-06-02 22:16:33 +02:00
2016-03-15 11:21:59 +01:00
# Resolve configuration option $key for a config section (or vinfo),
# walking up the PARENT chain until the key is found. Command-line
# overrides (%config_override) take precedence over everything.
#
# %opts:
#   prefix  => string prepended to a defined result
#   postfix => string appended to a defined result
#
# Returns the resolved value, or undef if the key is nowhere defined.
sub config_key($$;@)
{
  my $config = shift || die;
  my $key = shift || die;
  my %opts = @_;
  $config = $config->{CONFIG} if($config->{CONFIG}); # accept vinfo for $config

  # command-line overrides always win
  if(exists($config_override{$key})) {
    TRACE "config_key: OVERRIDE key=$key to value=" . ($config_override{$key} // "<undef>");
    return $config_override{$key};
  }

  # walk up the section tree until the key is found
  until(exists($config->{$key})) {
    # note: while all config keys exist in "meta" context (at least with default
    # values), we also allow fake configs (CONTEXT="cmdline") which have no PARENT.
    return undef unless($config->{PARENT});
    $config = $config->{PARENT};
  }
  my $val = $config->{$key};
  $val = $opts{prefix} . $val if(defined($opts{prefix}) && defined($val));
  $val .= $opts{postfix} if(defined($opts{postfix}) && defined($val));
  return $val;
}
2016-03-11 14:55:22 +01:00
2015-03-13 17:54:08 +01:00
2017-09-28 14:02:06 +02:00
# Build the preserve matrix hash for retention scheduling, merging the
# "<prefix>_preserve" matrix with hour-of-day / day-of-week anchors and
# the parsed "<prefix>_preserve_min" setting.
#
# %opts:
#   wipe => return a matrix preserving only the latest entity
#
# Result keys: per-unit counts from the preserve matrix, plus
#   hod   => preserve_hour_of_day
#   dow   => preserve_day_of_week
#   min   => raw "<prefix>_preserve_min" value (for raw schedule output)
#   min_n => count  (only for "<N><hdwmy>" form)
#   min_q => 'all' | 'latest' | one of qw(h d w m y)
sub config_preserve_hash($$;@)
{
  my $config = shift || die;
  my $prefix = shift || die;
  my %opts = @_;

  # "wipe" mode ignores configuration and keeps only the latest
  return { hod => 0, dow => 'sunday', min => 'latest', min_q => 'latest' } if($opts{wipe});

  my $preserve = config_key($config, $prefix . "_preserve") // {};
  my %matrix = ( %$preserve, # make a copy (don't pollute config)
                 hod => config_key($config, "preserve_hour_of_day"),
                 dow => config_key($config, "preserve_day_of_week"),
               );
  my $min = config_key($config, $prefix . "_preserve_min");
  if(defined($min)) {
    $matrix{min} = $min; # used for raw schedule output
    if(($min eq 'all') || ($min eq 'latest')) {
      $matrix{min_q} = $min;
    }
    elsif($min =~ /^(?<n>[0-9]+)(?<q>[hdwmy])$/) {
      @matrix{qw(min_n min_q)} = @+{qw(n q)};
    }
    else { die; } # value was validated in append_config_option()
  }
  return \%matrix;
}
2015-03-13 17:54:08 +01:00
2016-03-15 11:21:59 +01:00
2016-05-11 20:15:46 +02:00
# Assemble the compression settings hash for option $option (e.g.
# "stream_compress"), or undef if compression is disabled.
sub config_compress_hash($$)
{
  my $config = shift || die;
  my $option = shift || die;

  # compression algorithm key; a false value disables compression
  my $algo = config_key($config, $option) or return undef;
  return {
    key     => $algo,
    level   => config_key($config, $option . "_level"),
    threads => config_key($config, $option . "_threads"),
  };
}
2019-07-29 21:59:03 +02:00
# Assemble the stream pipeline settings (compression, buffering and rate
# limiting) for a send/receive from $source to $target.
sub config_stream_hash($$)
{
  my $source = shift || die;
  my $target = shift || die;

  # local side of the pipe:
  # - for remote source, limits read rate of ssh stream output after decompress
  # - for remote target, limits read rate of "btrfs send"
  # - for both local, limits read rate of "btrfs send"
  # - for raw targets, limits read rate of "btrfs send | xz" (raw_target_compress)
  my %local_sink = (
    stream_buffer => config_key($target, "stream_buffer"),
    rate_limit    => config_key($target, "rate_limit"),
    show_progress => $show_progress,
  );
  # remote source side: limit read rate after "btrfs send", before compression
  my %rsh_source = (
    stream_buffer => config_key($source, "stream_buffer_remote"),
    rate_limit    => config_key($source, "rate_limit_remote"),
    #rate_limit_out => config_key($source, "rate_limit_remote"), # limit write rate
  );
  # remote target side: limits read rate of ssh stream output
  my %rsh_sink = (
    stream_buffer => config_key($target, "stream_buffer_remote"),
    rate_limit    => config_key($target, "rate_limit_remote"),
    #rate_limit_in => config_key($target, "rate_limit_remote"),
  );
  return {
    stream_compress => config_compress_hash($target, "stream_compress"),
    local_sink      => \%local_sink,
    rsh_source      => \%rsh_source,
    rsh_sink        => \%rsh_sink,
  };
}
2017-03-18 15:06:48 +01:00
# Assemble the encryption settings hash for option $option (e.g.
# "raw_target_encrypt"), or undef if encryption is disabled. Keys not
# relevant for the selected backend simply resolve to undef.
sub config_encrypt_hash($$)
{
  my $config = shift || die;
  my $option = shift || die;

  my $encrypt_type = config_key($config, $option) or return undef;
  my %ret = ( type => $encrypt_type );
  # gpg backend
  $ret{keyring}   = config_key($config, "gpg_keyring");
  $ret{recipient} = config_key($config, "gpg_recipient");
  # openssl backend
  $ret{iv_size}    = config_key($config, "openssl_iv_size");
  $ret{ciphername} = config_key($config, "openssl_ciphername");
  $ret{keyfile}    = config_key($config, "openssl_keyfile");
  # key derivation
  $ret{kdf_keygen_each} = (config_key($config, "kdf_keygen") eq "each");
  $ret{kdf_backend}     = config_key($config, "kdf_backend");
  $ret{kdf_keysize}     = config_key($config, "kdf_keysize");
  return \%ret;
}
2016-03-15 11:21:59 +01:00
# Format the configuration options of a section (or vinfo) as aligned
# "key value" table lines, one line per value (allow_multiple options
# expand to several lines).
#
# %opts:
#   resolve => resolve keys through parent sections (config_key());
#              otherwise only keys set directly in this section are shown
#   prefix  => string prepended to each output line
sub config_dump_keys($;@)
{
  my $config = shift || die;
  my %opts = @_;
  $config = $config->{CONFIG} if($config->{CONFIG}); # accept vinfo for $config

  my @rows;
  my $keylen_max = 0;
  foreach my $key (sort keys %config_options) {
    my $opt = $config_options{$key};
    next if($opt->{deprecated}); # hide deprecated options
    my $val;
    if($opts{resolve}) {
      $val = config_key($config, $key);
    }
    else {
      next unless exists($config->{$key});
      $val = $config->{$key};
    }
    # allow_multiple options carry an array ref; copy before formatting
    # so the config itself is never modified
    my @values = (ref($val) eq "ARRAY") ? @$val : ( $val );
    foreach my $v (@values) {
      $v = format_preserve_matrix($v, format => "config") if(defined($v) && $opt->{accept_preserve_matrix});
      $v //= exists($config->{$key}) ? "no" : "<unset>";
      my $keylen = length($key);
      $keylen_max = $keylen if($keylen > $keylen_max);
      push @rows, [ $key, $v, $keylen ];
    }
  }
  # render as table, values aligned one column after the longest key
  return map { ($opts{prefix} // "") . $_->[0] . (' ' x (1 + $keylen_max - $_->[2])) . ' ' . $_->[1] } @rows;
}
2014-12-14 15:34:55 +01:00
2015-03-13 11:20:47 +01:00
2018-02-13 17:21:44 +01:00
# Validate option "$key = $value" and store it into config section
# $config. $context is the section context name ("root", "volume",
# "subvolume", "target", or "OVERRIDE" for command-line overrides).
#
# Validation steps (in order): key known, allowed in context, not in a
# wildcard section, then value checked against the option's accept
# list / accept_numeric / accept_file / accept_regexp /
# accept_preserve_matrix declaration in %config_options. Deprecated
# options are warned about and possibly remapped or aborted.
#
# %opts:
#   error_statement => text appended to error messages
#
# Returns $config on success, undef on error.
sub append_config_option($$$$;@)
{
  my $config = shift;
  my $key = shift;
  my $value = shift;
  my $context = shift;
  my %opts = @_;
  my $error_statement = $opts{error_statement} // "";

  my $opt = $config_options{$key};
  # accept only keys listed in %config_options
  unless($opt) {
    ERROR "Unknown option \"$key\" $error_statement";
    return undef;
  }
  # context restriction (command-line overrides bypass it)
  if($opt->{context} && !grep(/^$context$/, @{$opt->{context}}) && ($context ne "OVERRIDE")) {
    ERROR "Option \"$key\" is only allowed in " . join(" or ", map("\"$_\"", @{$opt->{context}})) . " context $error_statement";
    return undef;
  }
  if($opt->{deny_glob_context} && $config->{GLOB_CONTEXT}) {
    ERROR "Option \"$key\" is not allowed on section with wildcards $error_statement";
    return undef;
  }
  # value validation: exactly one of the accept_* mechanisms applies
  if($value eq "") {
    # bare option keyword: maps to "yes" if that is an accepted value
    if(grep(/^yes$/, @{$opt->{accept}})) {
      $value = "yes";
      TRACE "option \"$key\" has no value, accepted map to \"yes\"";
    }
    else {
      ERROR "Unsupported empty value for option \"$key\" $error_statement";
      return undef;
    }
  }
  elsif(grep(/^\Q$value\E$/, @{$opt->{accept}})) {
    TRACE "option \"$key=$value\" found in accept list";
  }
  elsif($opt->{accept_numeric} && ($value =~ /^[0-9]+$/)) {
    TRACE "option \"$key=$value\" is numeric, accepted";
  }
  elsif($opt->{accept_file})
  {
    # be very strict about file options, for security sake
    $value = check_file($value, $opt->{accept_file}, sanitize => 1, error_statement => ($error_statement ? "for option \"$key\" $error_statement" : undef));
    return undef unless(defined($value));
    TRACE "option \"$key=$value\" is a valid file, accepted";
    $value = "no" if($value eq "."); # maps to undef later
  }
  elsif($opt->{accept_regexp}) {
    my $match = $opt->{accept_regexp};
    if($value =~ m/$match/) {
      TRACE "option \"$key=$value\" matched regexp, accepted";
    }
    else {
      ERROR "Value \"$value\" failed input validation for option \"$key\" $error_statement";
      return undef;
    }
  }
  elsif($opt->{accept_preserve_matrix}) {
    # parse preserve matrix, e.g. "14d 4w *m": one "<N|*><hdwmy>" token
    # per time unit, at most one definition per unit
    my %preserve;
    my $s = ' ' . $value;
    while($s =~ s/\s+(\*|[0-9]+)([hdwmyHDWMY])//) {
      my $n = $1;
      my $q = lc($2); # qw( h d w m y )
      $n = 'all' if($n eq '*');
      if(exists($preserve{$q})) {
        ERROR "Value \"$value\" failed input validation for option \"$key\": multiple definitions of '$q' $error_statement";
        return undef;
      }
      $preserve{$q} = $n;
    }
    # anything left over is an unparsable token
    unless($s eq "") {
      ERROR "Value \"$value\" failed input validation for option \"$key\" $error_statement";
      return undef;
    }
    TRACE "adding preserve matrix $context context:" . Data::Dumper->new([\%preserve], [ $key ])->Indent(0)->Pad(' ')->Quotekeys(0)->Pair('=>')->Dump() if($do_dumper);
    $config->{$key} = \%preserve;
    return $config;
  }
  else
  {
    ERROR "Unsupported value \"$value\" for option \"$key\" $error_statement";
    return undef;
  }
  # option depends on an external executable: degrade to "no" if missing
  if($opt->{require_bin} && (not check_exe($opt->{require_bin}))) {
    WARN "Found option \"$key\", but required executable \"$opt->{require_bin}\" does not exist on your system. Please install \"$opt->{require_bin}\".";
    WARN "Ignoring option \"$key\" $error_statement";
    $value = "no";
  }
  # deprecation handling: warn, abort, enter failsafe mode, or remap to
  # a replacement key/value
  if($opt->{deprecated}) {
    if(my $warn_msg = ($opt->{deprecated}->{$value}->{warn} || $opt->{deprecated}->{DEFAULT}->{warn})) {
      WARN "Found deprecated option \"$key $value\" $error_statement: $warn_msg";
    }
    if($opt->{deprecated}->{$value}->{ABORT} || $opt->{deprecated}->{DEFAULT}->{ABORT}) {
      ERROR "Deprecated (incompatible) option \"$key\" found $error_statement, refusing to continue";
      return undef;
    }
    if($opt->{deprecated}->{$value}->{FAILSAFE_PRESERVE} || $opt->{deprecated}->{DEFAULT}->{FAILSAFE_PRESERVE}) {
      unless($config_override{FAILSAFE_PRESERVE}) { # warn only once
        WARN "Entering failsafe mode:";
        WARN " - preserving ALL snapshots for ALL subvolumes";
        WARN " - ignoring ALL targets (skipping backup creation)";
        WARN " - please read \"doc/upgrade_to_v0.23.0.md\"";
        $config_override{FAILSAFE_PRESERVE} = "Failsafe mode active (deprecated configuration)";
      }
      $config_override{snapshot_preserve_min} = 'all';
      return $config;
    }
    my $replace_key = $opt->{deprecated}->{$value}->{replace_key};
    my $replace_value = $opt->{deprecated}->{$value}->{replace_value};
    if(defined($replace_key)) {
      $key = $replace_key;
      $value = $replace_value;
      WARN "Using \"$key $value\"";
    }
  }

  # allow_multiple options accumulate into an array ref (optionally split
  # on $opt->{split}); all others overwrite, with a warning on redefine
  if($opt->{allow_multiple}) {
    my $aref = $config->{$key} // [];
    if($opt->{split}) {
      push(@$aref, split($opt->{split}, $value));
    }
    else {
      push(@$aref, $value);
    }
    TRACE "pushing option \"$key=$value\" to $aref=[" . join(',', @$aref) . "]";
    $value = $aref;
  }
  elsif(exists($config->{$key})) {
    unless($opt->{c_default}) { # note: computed defaults are already present
      WARN "Option \"$key\" redefined $error_statement";
    }
  }
  TRACE "adding option \"$key=$value\" to $context context";
  # note: an array ref never stringifies to "no", so allow_multiple is safe here
  $value = undef if($value eq "no"); # we don't want to check for "no" all the time
  $config->{$key} = $value;
  return $config;
}
2016-03-15 11:21:59 +01:00
# Process a single "key value" configuration line, either opening a new
# volume/subvolume/target section or appending a plain option to the
# current section.
#
# $file:  config file name (for error messages; $. is the line number)
# $root:  root config section
# $cur:   current section context
# $key:   lowercased option keyword
# $value: option value (may be "")
#
# Returns the new current section context, or undef on error.
sub parse_config_line($$$$$)
{
  my ($file, $root, $cur, $key, $value) = @_;

  if($key eq "volume")
  {
    # "volume" always opens a new section directly under root
    $cur = $root;
    TRACE "config: context forced to: $cur->{CONTEXT}";

    # be very strict about file options, for security sake
    my ($url_prefix, $path) = check_url($value, error_statement => "for option \"$key\" in \"$file\" line $.");
    return undef unless(defined($path));
    TRACE "config: adding volume \"$url_prefix$path\" to root context";
    die unless($cur->{CONTEXT} eq "root");
    my $volume = { CONTEXT => "volume",
                   PARENT => $cur,
                   SUBSECTION => [],
                   url => $url_prefix . $path,
                 };
    push(@{$cur->{SUBSECTION}}, $volume);
    $cur = $volume;
  }
  elsif($key eq "subvolume")
  {
    # pop contexts until we are back in the enclosing volume section
    while($cur->{CONTEXT} ne "volume") {
      if(($cur->{CONTEXT} eq "root") || (not $cur->{PARENT})) {
        ERROR "Subvolume keyword outside volume context, in \"$file\" line $.";
        return undef;
      }
      $cur = $cur->{PARENT} || die;
      TRACE "config: context changed to: $cur->{CONTEXT}";
    }
    # be very strict about file options, for security sake
    my $rel_path = check_file($value, { relative => 1, wildcards => 1 }, sanitize => 1, error_statement => "for option \"$key\" in \"$file\" line $.");
    return undef unless(defined($rel_path));

    # snapshot_name defaults to subvolume name (or volume name if subvolume=".")
    my $default_snapshot_name = ($rel_path eq '.') ? $cur->{url} : $rel_path;
    $default_snapshot_name =~ s/^.*\///;
    $default_snapshot_name = 'ROOT' if($default_snapshot_name eq ""); # if volume="/"
    TRACE "config: adding subvolume \"$rel_path\" to volume context: $cur->{url}";
    my $subvolume = { CONTEXT => "subvolume",
                      PARENT => $cur,
                      # SUBSECTION => [], # handled by target propagation
                      rel_path => $rel_path,
                      url => $cur->{url} . '/' . $rel_path,
                      snapshot_name => $default_snapshot_name, # computed default (c_default)
                    };
    $subvolume->{GLOB_CONTEXT} = 1 if($value =~ /\*/);
    push(@{$cur->{SUBSECTION}}, $subvolume);
    $cur = $subvolume;
  }
  elsif($key eq "target")
  {
    # target sections do not nest: close a preceding target section first
    if($cur->{CONTEXT} eq "target") {
      $cur = $cur->{PARENT} || die;
      TRACE "config: context changed to: $cur->{CONTEXT}";
    }
    if($value =~ /^((?<target_type>\S+)\s+)?(?<url>\S+)$/)
    {
      # as of btrbk-0.28.0, target_type is optional and defaults to "send-receive"
      my $target_type = $+{target_type} // "send-receive";
      my $url = $+{url};
      unless(grep(/^\Q$target_type\E$/, @config_target_types)) {
        ERROR "Unknown target type \"$target_type\" in \"$file\" line $.";
        return undef;
      }
      # be very strict about file options, for security sake
      my ($url_prefix, $path) = check_url($url, error_statement => "for option \"$key\" in \"$file\" line $.");
      return undef unless(defined($path));

      TRACE "config: adding target \"$url_prefix$path\" (type=$target_type) to $cur->{CONTEXT} context" . ($cur->{url} ? ": $cur->{url}" : "");
      my $target = { CONTEXT => "target",
                     PARENT => $cur,
                     target_type => $target_type,
                     url => $url_prefix . $path,
                   };
      # NOTE: target sections are propagated to the apropriate SUBSECTION in _config_propagate_target()
      $cur->{TARGET} //= [];
      push(@{$cur->{TARGET}}, $target);
      $cur = $target;
    }
    else
    {
      ERROR "Ambiguous target configuration, in \"$file\" line $.";
      return undef;
    }
  }
  else
  {
    # plain option: validate and append to the current section context
    return append_config_option($cur, $key, $value, $cur->{CONTEXT}, error_statement => "in \"$file\" line $.");
  }

  return $cur;
}
2016-03-17 14:02:22 +01:00
# Recursively push "target" sections defined in root/volume context down
# into all subsections, so that every subvolume section ends up with a
# flat TARGET list (which then becomes its SUBSECTION list).
#
# A target is not propagated if the subsection already defines a target
# with the same target_type and url. Propagated targets are prepended so
# config order is preserved. The temporary TARGET list of $cur is
# deleted when done. Returns $cur.
sub _config_propagate_target
{
  my $cur = shift;
  foreach my $subsection (@{$cur->{SUBSECTION}}) {
    my @propagate_target;
    foreach my $target (@{$cur->{TARGET}}) {
      TRACE "propagating target \"$target->{url}\" from $cur->{CONTEXT} context to: $subsection->{CONTEXT} $subsection->{url}";
      die if($target->{SUBSECTION});
      # don't propagate if a target of same target_type and url already exists in subsection
      if($subsection->{TARGET} &&
         grep({ ($_->{url} eq $target->{url}) && ($_->{target_type} eq $target->{target_type}) } @{$subsection->{TARGET}}))
      {
        DEBUG "Skip propagation of \"target $target->{target_type} $target->{url}\" from $cur->{CONTEXT} context to \"$subsection->{CONTEXT} $subsection->{url}\": same target already exists";
        next;
      }
      # shallow copy of the target, reparented to the subsection
      my %copy = ( %$target, PARENT => $subsection );
      push @propagate_target, \%copy;
    }
    $subsection->{TARGET} //= [];
    unshift @{$subsection->{TARGET}}, @propagate_target; # maintain config order: propagated targets go in front of already defined targets
    if($subsection->{CONTEXT} eq "subvolume") {
      # finally create missing SUBSECTION in subvolume context
      die if($subsection->{SUBSECTION});
      $subsection->{SUBSECTION} = $subsection->{TARGET};
    }
    else {
      # recurse into SUBSECTION
      _config_propagate_target($subsection);
    }
  }
  delete $cur->{TARGET};
  return $cur;
}
2019-07-15 18:19:33 +02:00
# Recursively collect all values of (allow_multiple) option $key from
# $config and all of its subsections, depth-first in config order.
# Returns the flattened list of values.
sub _config_collect_values
{
  my ($config, $key) = @_;
  my @collected;
  push(@collected, @{$config->{$key}}) if(ref($config->{$key}) eq "ARRAY");
  push(@collected, _config_collect_values($_, $key)) foreach (@{$config->{SUBSECTION}});
  return @collected;
}
2016-03-15 16:54:54 +01:00
# Create a fresh root config section. The root's PARENT is a "meta"
# section carrying the default value of every non-deprecated option
# (so config_key() always resolves). Extra key=>value pairs passed in
# @_ (e.g. SRC_FILE) are stored in the meta section.
sub init_config(@)
{
  my %defaults = ( CONTEXT => "meta", @_ );
  foreach my $key (keys %config_options) {
    # don't pollute hash with deprecated options
    next if($config_options{$key}->{deprecated});
    $defaults{$key} = $config_options{$key}->{default};
  }
  return { CONTEXT => "root", SUBSECTION => [], PARENT => \%defaults };
}
2019-08-05 14:31:48 +02:00
# Return the first readable file from the candidate list, or undef if
# none of them is readable.
sub _config_file(@) {
  foreach my $candidate (@_) {
    TRACE "config: checking for file: $candidate";
    return $candidate if(-r $candidate);
  }
  return undef;
}
# Parse configuration file $file.
#
# Comments ("#...") and trailing whitespace are stripped, empty lines
# ignored. Each remaining line must match "key [value]" and is handed to
# parse_config_line(), which tracks the section context. After a clean
# parse, targets are propagated into subvolume sections.
#
# Returns the root config section, or undef on parse errors (errors are
# reported via ERROR).
sub parse_config($)
{
  my $file = shift;
  return undef unless($file);

  my $root = init_config(SRC_FILE => $file);
  my $cur = $root;

  TRACE "config: open configuration file: $file";
  open(my $fh, '<', $file) or die $!;
  while (<$fh>) {
    chomp;
    s/#.*//;    # remove comments
    s/\s*$//;   # remove trailing whitespace
    next if /^\s*$/; # ignore empty lines
    TRACE "config: parsing line $. with context=$cur->{CONTEXT}: \"$_\"";
    if(/^(\s*)([a-zA-Z_]+)(\s+(.*))?$/)
    {
      # NOTE: we do not perform checks on indentation!
      my ($indent, $key, $value) = (length($1), lc($2), $4 // "");
      $cur = parse_config_line($file, $root, $cur, $key, $value);
      unless(defined($cur)) {
        # error, bail out
        $root = undef;
        last;
      }
      TRACE "line processed: new context=$cur->{CONTEXT}";
    }
    else
    {
      ERROR "Parse error in \"$file\" line $.";
      $root = undef;
      last;
    }
  }
  close($fh) or ERROR "Failed to close configuration file: $!";

  # bugfix: on parse errors $root is undef; propagating targets on an
  # undefined config would die ("Can't use an undefined value as an
  # ARRAY reference") instead of returning undef to the caller
  return undef unless($root);

  _config_propagate_target($root);
  return $root;
}
2016-03-08 15:25:35 +01:00
# sets $target->{CONFIG}->{ABORTED} on failure
2016-03-07 21:45:12 +01:00
# sets $target->{SUBVOL_RECEIVED}
2016-03-07 20:47:24 +01:00
sub macro_send_receive(@)
2015-03-31 19:07:33 +02:00
{
my %info = @_;
2016-03-01 21:49:59 +01:00
my $source = $info{source} || die;
2015-04-16 12:00:04 +02:00
my $target = $info{target} || die;
my $parent = $info{parent};
2019-04-09 22:09:12 +02:00
my @clone_src = @{ $info{clone_src} // [] }; # copy array
my $clone_src_extra = $info{clone_src_extra} // [];
2016-03-07 20:47:24 +01:00
my $config_target = $target->{CONFIG};
2015-06-02 22:16:33 +02:00
my $target_type = $config_target->{target_type} || die;
2015-04-19 11:36:40 +02:00
my $incremental = config_key($config_target, "incremental");
2015-03-31 19:07:33 +02:00
2015-05-15 16:06:36 +02:00
# check for existing target subvolume
2016-03-01 21:49:59 +01:00
if(my $err_vol = vinfo_subvol($target, $source->{NAME})) {
2019-07-15 18:19:33 +02:00
my $err_msg = "Please delete stray subvolume: \"btrfs subvolume delete $err_vol->{PRINT}\"";
2015-10-12 22:56:52 +02:00
ABORTED($config_target, "Target subvolume \"$err_vol->{PRINT}\" already exists");
2019-07-15 18:19:33 +02:00
FIX_MANUALLY($config_target, $err_msg);
2019-04-17 15:20:18 +02:00
ERROR ABORTED_TEXT($config_target) . ", aborting send/receive of: $source->{PRINT}";
2019-07-15 18:19:33 +02:00
ERROR $err_msg;
2015-05-15 16:06:36 +02:00
return undef;
}
2015-03-31 19:07:33 +02:00
if($incremental)
{
# create backup from latest common
2015-04-16 12:00:04 +02:00
if($parent) {
2016-04-19 19:36:58 +02:00
INFO "Creating incremental backup...";
2015-03-31 19:07:33 +02:00
}
elsif($incremental ne "strict") {
2016-04-19 19:36:58 +02:00
INFO "No common parent subvolume present, creating full backup...";
2015-03-31 19:07:33 +02:00
}
else {
2016-04-19 19:36:58 +02:00
WARN "Backup to $target->{PRINT} failed: no common parent subvolume found for \"$source->{PRINT}\", and option \"incremental\" is set to \"strict\"";
2015-10-12 22:56:52 +02:00
ABORTED($config_target, "No common parent subvolume found, and option \"incremental\" is set to \"strict\"");
2015-03-31 19:07:33 +02:00
return undef;
}
2019-04-09 22:09:12 +02:00
# add extra clone_src if "incremental_clones" is set
my $ic = config_key($target, "incremental_clones");
push @clone_src, map { --$ic < 0 ? () : $_ } @$clone_src_extra if($ic);
2015-03-31 19:07:33 +02:00
}
else {
2016-04-19 19:36:58 +02:00
INFO "Creating full backup...";
2015-09-26 19:51:38 +02:00
$parent = undef;
2019-04-09 22:09:12 +02:00
@clone_src = ();
2015-03-31 19:07:33 +02:00
delete $info{parent};
}
2015-06-02 22:16:33 +02:00
my $ret;
my $vol_received;
2017-06-16 17:43:17 +02:00
my $raw_info;
2015-06-02 22:16:33 +02:00
if($target_type eq "send-receive")
{
2019-04-09 22:09:12 +02:00
$ret = btrfs_send_receive($source, $target, $parent, \@clone_src, \$vol_received);
2015-10-12 22:56:52 +02:00
ABORTED($config_target, "Failed to send/receive subvolume") unless($ret);
2015-06-02 22:16:33 +02:00
}
elsif($target_type eq "raw")
{
unless($dryrun) {
2016-03-01 21:49:59 +01:00
# make sure we know the source uuid
2016-04-13 22:04:53 +02:00
if($source->{node}{uuid} =~ /^$fake_uuid_prefix/) {
2016-03-01 21:49:59 +01:00
DEBUG "Fetching uuid of new subvolume: $source->{PRINT}";
2016-03-14 15:55:57 +01:00
my $detail = btrfs_subvolume_show($source);
2015-06-02 22:16:33 +02:00
die unless($detail->{uuid});
2016-03-15 14:46:25 +01:00
$source->{node}{uuid} = $detail->{uuid};
2016-04-15 02:38:41 +02:00
$uuid_cache{$detail->{uuid}} = $source->{node};
2015-06-02 22:16:33 +02:00
}
2015-05-09 16:00:41 +02:00
}
2017-06-16 17:43:17 +02:00
$ret = btrfs_send_to_file($source, $target, $parent, \$vol_received, \$raw_info);
2015-10-12 22:56:52 +02:00
ABORTED($config_target, "Failed to send subvolume to raw file") unless($ret);
2015-06-02 22:16:33 +02:00
}
else
{
die "Illegal target type \"$target_type\"";
}
2016-04-12 17:50:12 +02:00
# inject fake vinfo
2016-08-19 16:33:30 +02:00
# NOTE: it's not possible to add (and compare) correct target $detail
# from btrfs_send_receive(), as source detail also has fake uuid.
2016-04-22 20:25:30 +02:00
if($ret) {
vinfo_inject_child($target, $vol_received, {
# NOTE: this is not necessarily the correct parent_uuid (on
# receive, btrfs-progs picks the uuid of the first (lowest id)
# matching possible parent), whereas the target_parent is the
2019-04-04 15:55:17 +02:00
# first from _correlated_nodes().
2016-04-22 20:25:30 +02:00
#
# NOTE: the parent_uuid of an injected receive target is not used
# anywhere in btrbk at the time of writing
2018-02-15 17:42:41 +01:00
parent_uuid => $parent ? $info{target_parent_node}->{uuid} : '-',
2016-04-22 20:25:30 +02:00
received_uuid => $source->{node}{received_uuid} eq '-' ? $source->{node}{uuid} : $source->{node}{received_uuid},
readonly => 1,
TARGET_TYPE => $target_type,
FORCE_PRESERVE => 'preserve forced: created just now',
2017-06-16 17:43:17 +02:00
}, $raw_info);
2016-04-22 20:25:30 +02:00
}
2016-04-12 17:50:12 +02:00
2015-06-02 22:16:33 +02:00
# add info to $config->{SUBVOL_RECEIVED}
$info{received_type} = $target_type || die;
$info{received_subvolume} = $vol_received || die;
2016-03-07 21:45:12 +01:00
$target->{SUBVOL_RECEIVED} //= [];
push(@{$target->{SUBVOL_RECEIVED}}, \%info);
2015-06-02 22:16:33 +02:00
unless($ret) {
$info{ERROR} = 1;
2015-03-31 20:36:10 +02:00
return undef;
2015-03-31 19:07:33 +02:00
}
2015-06-02 22:16:33 +02:00
return 1;
2015-03-31 19:07:33 +02:00
}
2016-03-08 15:25:35 +01:00
# Delete all subvolumes below $root_subvol which match the btrbk
# filename scheme "<subvol_dir>/<subvol_basename>.<timestamp>" and are
# scheduled for deletion by the preserve matrix in $schedule_options.
#
# $root_subvol:      vinfo of the tree to scan (vinfo_subvol_list)
# $subvol_dir:       directory relative to $root_subvol (trailing
#                    slashes are stripped) candidates must live in
# $subvol_basename:  basename candidates must carry
# $result_vinfo:     vinfo collecting SUBVOL_DELETED / ABORTED state
# $schedule_options: hashref passed through to schedule()
# @_ (%delete_options): options for btrfs_subvolume_delete(); the
#                    "qgroup" key additionally controls qgroup destroy
#
# sets $result_vinfo->{CONFIG}->{ABORTED} on failure
# sets $result_vinfo->{SUBVOL_DELETED}
# Returns 1 if all scheduled subvolumes were deleted, undef otherwise.
sub macro_delete($$$$$;@)
{
  my $root_subvol = shift || die;
  my $subvol_dir = shift // die;
  my $subvol_basename = shift || die;
  my $result_vinfo = shift || die;
  my $schedule_options = shift || die;
  my %delete_options = @_;
  $subvol_dir =~ s/\/+$//;  # normalize: strip trailing slashes

  # collect deletion candidates matching the btrbk naming scheme
  my @schedule;
  foreach my $vol (@{vinfo_subvol_list($root_subvol)}) {
    unless($vol->{node}{BTRBK_DATE} &&
           ($vol->{SUBVOL_DIR} eq $subvol_dir) &&
           ($vol->{node}{BTRBK_BASENAME} eq $subvol_basename)) {
      TRACE "Target subvolume does not match btrbk filename scheme, skipping: $vol->{PRINT}";
      next;
    }
    push(@schedule, { value => $vol,
                      # name => $vol->{PRINT}, # only for logging
                      btrbk_date => $vol->{node}{BTRBK_DATE},
                      preserve => $vol->{node}{FORCE_PRESERVE},
                    });
  }
  # let the scheduler decide what to delete; future-dated subvolumes
  # are always preserved (preserve_date_in_future)
  my (undef, $delete) = schedule(
    %$schedule_options,
    schedule => \@schedule,
    preserve_date_in_future => 1,
  );

  if($delete_options{qgroup}->{destroy}) {
    # NOTE: we do not abort on qgroup destroy errors
    btrfs_qgroup_destroy($_, %{$delete_options{qgroup}}) foreach(@$delete);
  }
  my @delete_success = btrfs_subvolume_delete($delete, %delete_options);
  $subvol_dir .= '/' if($subvol_dir ne "");
  INFO "Deleted " . scalar(@delete_success) . " subvolumes in: $root_subvol->{PRINT}/$subvol_dir$subvol_basename.*";
  $result_vinfo->{SUBVOL_DELETED} //= [];
  push @{$result_vinfo->{SUBVOL_DELETED}}, @delete_success;
  # success only if every scheduled subvolume was actually deleted
  if(scalar(@delete_success) == scalar(@$delete)) {
    return 1;
  }
  else {
    ABORTED($result_vinfo, "Failed to delete subvolume");
    return undef;
  }
}
2016-04-16 01:09:17 +02:00
# Archive (send/receive) all snapshots "$snapshot_name.*" from $sroot
# to $droot which are selected by the "archive" preserve matrix and do
# not already have a receive target in $droot.
#
# $sroot:            vinfo of the source (snapshot) root
# $droot:            vinfo of the archive destination root
# $snapshot_name:    snapshot basename to match (btrbk_direct_leaf)
# $schedule_options: optional hashref merged into the schedule() call
#
# Returns the number of successfully archived subvolumes, or undef if
# archiving was aborted (targets found at unexpected locations).
sub macro_archive_target($$$;$)
{
  my $sroot = shift || die;
  my $droot = shift || die;
  my $snapshot_name = shift // die;
  my $schedule_options = shift // {};
  my @schedule;
  # NOTE: this is pretty much the same as "resume missing"
  my $has_unexpected_location = 0;
  foreach my $svol (@{vinfo_subvol_list($sroot, readonly => 1, btrbk_direct_leaf => $snapshot_name, sort => 'path')})
  {
    # already archived: a matching receive target exists in $droot
    next if(get_receive_targets($droot, $svol, exact => 1, warn => 1, ret_unexpected => \$has_unexpected_location));
    # honor command line --exclude patterns (global @exclude_vf)
    if(my $ff = vinfo_match(\@exclude_vf, $svol)) {
      INFO "Skipping archive candidate \"$svol->{PRINT}\": Match on exclude pattern \"$ff->{unparsed}\"";
      next;
    }
    DEBUG "Adding archive candidate: $svol->{PRINT}";
    push @schedule, { value => $svol,
                      btrbk_date => $svol->{node}{BTRBK_DATE},
                      preserve => $svol->{node}{FORCE_PRESERVE},
                    };
  }

  # refuse to archive if receive targets exist outside the expected
  # location (would lead to duplicate archives)
  if($has_unexpected_location) {
    ABORTED($droot, "Receive targets of archive candidates exist at unexpected location");
    WARN "Skipping archiving of \"$sroot->{PRINT}/${snapshot_name}.*\": " . ABORTED_TEXT($droot);
    return undef;
  }

  # add all present archives as informative_only: these are needed for correct results of schedule()
  my $last_dvol_date;
  foreach my $dvol (@{vinfo_subvol_list($droot, readonly => 1, btrbk_direct_leaf => $snapshot_name)})
  {
    my $btrbk_date = $dvol->{node}{BTRBK_DATE};
    push @schedule, { informative_only => 1,
                      value => $dvol,
                      btrbk_date => $btrbk_date,
                    };
    # find last present archive (by btrbk_date, needed for archive_exclude_older below)
    $last_dvol_date = $btrbk_date if((not defined($last_dvol_date)) || (cmp_date($btrbk_date, $last_dvol_date) > 0));
  }
  my ($preserve, undef) = schedule(
    schedule => \@schedule,
    preserve => config_preserve_hash($droot, "archive"),
    preserve_threshold_date => (config_key($droot, "archive_exclude_older") ? $last_dvol_date : undef),
    result_preserve_action_text => 'archive',
    result_delete_action_text => '',
    %$schedule_options
  );
  my @archive = grep defined, @$preserve; # remove entries with no value from list (archive subvolumes)
  my $archive_total = scalar @archive;
  my $archive_success = 0;
  foreach my $svol (@archive)
  {
    # pick best incremental parent (and clone sources) for send/receive
    my ($clone_src, $clone_src_extra, $target_parent_node);
    my $parent = get_best_parent($svol, $sroot, $droot,
                                 strict_related => 0,
                                 clone_src => \$clone_src,
                                 clone_src_extra => \$clone_src_extra,
                                 target_parent_node => \$target_parent_node);
    if(macro_send_receive(source => $svol,
                          target => $droot,
                          parent => $parent, # this is <undef> if no suitable parent found
                          clone_src => $clone_src,
                          clone_src_extra => $clone_src_extra,
                          target_parent_node => $target_parent_node,
                         ))
    {
      $archive_success++;
    }
    else {
      # abort on first failure: subsequent sends would lack parents
      ERROR("Error while archiving subvolumes, aborting");
      last;
    }
  }
  if($archive_total) {
    INFO "Archived $archive_success/$archive_total subvolumes";
  } else {
    INFO "No missing archives found";
  }
  return $archive_success;
}
2016-04-05 22:01:17 +02:00
# Numeric comparator for btrbk date tuples [ unixtime, NN, ... ]:
# primary key is the unix timestamp, ties are broken by the sequence
# number NN. Returns <0, 0 or >0 (suitable for sort()).
sub cmp_date($$)
{
  my ($da, $db) = @_;
  my $order = $da->[0] <=> $db->[0];            # unix time
  $order = $da->[1] <=> $db->[1] unless($order); # NN
  return $order;
}
2015-04-02 15:53:53 +02:00
# Partition $args{schedule} into (preserve, delete) lists according to
# a btrbk preserve matrix (hourly / daily / weekly / monthly / yearly).
#
# %args:
#   schedule  => arrayref of { value, btrbk_date, preserve,
#                informative_only } entries (required); btrbk_date is
#                [ unixtime, NN, has_time ]
#   preserve  => preserve matrix hashref with keys dow, hod, min_n,
#                min_q, h, d, w, m, y (required)
#   preserve_date_in_future => force-preserve entries dated in the future
#   preserve_threshold_date => btrbk_date; entries marked preserve but
#                older than (or equal to) this date are dropped again
#                ("archive_exclude_older")
#   results   => optional arrayref collecting one result row per entry
#   result_hints, result_preserve_action_text, result_delete_action_text
#             => informational fields/texts for the result rows
#
# Returns (\@preserve, \@delete) holding the "value" of each
# non-informative entry. Entries flagged informative_only take part in
# the first-of-hour/day/week/... calculations but never appear in the
# returned lists. Entries may come in pre-marked via their "preserve"
# key (e.g. FORCE_PRESERVE).
sub schedule(@)
{
  my %args = @_;
  my $schedule = $args{schedule} || die;
  my $preserve = $args{preserve} || die;
  my $preserve_date_in_future = $args{preserve_date_in_future};
  my $preserve_threshold_date = $args{preserve_threshold_date};
  my $results_list = $args{results};
  my $result_hints = $args{result_hints} // {};
  my $result_preserve_action_text = $args{result_preserve_action_text};
  my $result_delete_action_text = $args{result_delete_action_text} // 'delete';
  my $preserve_day_of_week = $preserve->{dow} || die;
  my $preserve_hour_of_day = $preserve->{hod} // die;
  my $preserve_min_n = $preserve->{min_n};
  my $preserve_min_q = $preserve->{min_q};
  my $preserve_hourly = $preserve->{h};
  my $preserve_daily = $preserve->{d};
  my $preserve_weekly = $preserve->{w};
  my $preserve_monthly = $preserve->{m};
  my $preserve_yearly = $preserve->{y};
  DEBUG "Schedule: " . format_preserve_matrix($preserve, format => "debug_text");

  #  0    1    2     3     4    5     6     7     8
  # sec, min, hour, mday, mon, year, wday, yday, isdst
  # sort the schedule, ascending by date
  # regular entries come in front of informative_only
  my @sorted_schedule = sort { cmp_date($a->{btrbk_date}, $b->{btrbk_date} ) ||
                               (($a->{informative_only} ? ($b->{informative_only} ? 0 : 1) : ($b->{informative_only} ? -1 : 0)))
                             } @$schedule;

  # NOTE(review): @tm_now is a file-global localtime() array used as
  # the scheduler's reference "now" — confirm it is set at startup.
  DEBUG "Scheduler reference time: " . timestamp(\@tm_now, 'debug-iso');

  # first, do our calendar calculations
  # - days start on $preserve_hour_of_day (or 00:00 if timestamp_format=short)
  # - weeks start on $preserve_day_of_week
  # - months start on first $preserve_day_of_week of month
  # - years start on first $preserve_day_of_week of year
  # NOTE: leap hours are NOT taken into account for $delta_hours
  my $now_h = timegm_nocheck( 0, 0, $tm_now[2], $tm_now[3], $tm_now[4], $tm_now[5] ); # use timelocal() here (and below) if you want to honor leap hours

  foreach my $href (@sorted_schedule)
  {
    my @tm = localtime($href->{btrbk_date}->[0]);
    # has_exact_time is false for date-only timestamps (timestamp_format=short)
    my $has_exact_time = $href->{btrbk_date}->[2];
    my $delta_hours_from_hod = $tm[2] - ($has_exact_time ? $preserve_hour_of_day : 0);
    my $delta_days_from_eow = $tm[6] - $day_of_week_map{$preserve_day_of_week};
    # normalize negative offsets into the previous day / week
    if($delta_hours_from_hod < 0) {
      $delta_hours_from_hod += 24;
      $delta_days_from_eow -= 1;
    }
    if($delta_days_from_eow < 0) {
      $delta_days_from_eow += 7;
    }
    my $month_corr = $tm[4]; # [0..11]
    my $year_corr = $tm[5];
    if($tm[3] <= $delta_days_from_eow) {
      # our month/year start on first $preserve_day_of_week, corrected value
      $month_corr -= 1;
      if($month_corr < 0) {
        $month_corr = 11;
        $year_corr -= 1;
      }
    }

    # check timegm: ignores leap hours
    my $delta_hours = int(($now_h - timegm_nocheck( 0, 0, $tm[2], $tm[3], $tm[4], $tm[5] ) ) / (60 * 60));
    my $delta_days = int(($delta_hours + $delta_hours_from_hod) / 24); # days from beginning of day
    my $delta_weeks = int(($delta_days + $delta_days_from_eow) / 7); # weeks from beginning of week
    my $delta_years = ($tm_now[5] - $year_corr);
    my $delta_months = $delta_years * 12 + ($tm_now[4] - $month_corr);
    $href->{delta_hours} = $delta_hours;
    $href->{delta_days} = $delta_days;
    $href->{delta_weeks} = $delta_weeks;
    $href->{delta_months} = $delta_months;
    $href->{delta_years} = $delta_years;
    # these are only needed for text output (format_preserve_delta)
    $href->{year} = $year_corr + 1900;
    $href->{month} = $month_corr + 1;
    $href->{delta_hours_from_hod} = $delta_hours_from_hod;
    $href->{delta_days_from_eow} = $delta_days_from_eow;
    $href->{real_hod} = $preserve_hour_of_day if($has_exact_time);

    if($preserve_date_in_future && ($delta_hours < 0)) {
      $href->{preserve} = "preserve forced: " . -($delta_hours) . " hours in the future";
    }
  }
  # "first entry seen in bucket X" maps; each filter stage below feeds
  # its survivors into the next coarser granularity
  my %first_in_delta_hours;
  my %first_in_delta_days;
  my %first_in_delta_weeks;
  my %first_weekly_in_delta_months;
  my %first_monthly_in_delta_years;
  # filter "preserve all within N days/weeks/..."
  foreach my $href (@sorted_schedule) {
    if($preserve_min_q) {
      if($preserve_min_q eq 'all') {
        $href->{preserve} = "preserve min: all";
      } elsif($preserve_min_q eq 'h') {
        $href->{preserve} = "preserve min: $href->{delta_hours} hours ago" if($href->{delta_hours} <= $preserve_min_n);
      } elsif($preserve_min_q eq 'd') {
        $href->{preserve} = "preserve min: $href->{delta_days} days ago" if($href->{delta_days} <= $preserve_min_n);
      } elsif($preserve_min_q eq 'w') {
        $href->{preserve} = "preserve min: $href->{delta_weeks} weeks ago" if($href->{delta_weeks} <= $preserve_min_n);
      } elsif($preserve_min_q eq 'm') {
        $href->{preserve} = "preserve min: $href->{delta_months} months ago" if($href->{delta_months} <= $preserve_min_n);
      } elsif($preserve_min_q eq 'y') {
        $href->{preserve} = "preserve min: $href->{delta_years} years ago" if($href->{delta_years} <= $preserve_min_n);
      }
    }
    $first_in_delta_hours{$href->{delta_hours}} //= $href;
  }
  if($preserve_min_q && ($preserve_min_q eq 'latest') && (scalar @sorted_schedule)) {
    # @sorted_schedule is ascending by date: last element is the latest
    my $href = $sorted_schedule[-1];
    $href->{preserve} = 'preserve min: latest';
  }
  # filter hourly, daily, weekly, monthly, yearly
  foreach (sort {$b <=> $a} keys %first_in_delta_hours) {
    my $href = $first_in_delta_hours{$_} || die;
    if($preserve_hourly && (($preserve_hourly eq 'all') || ($href->{delta_hours} <= $preserve_hourly))) {
      $href->{preserve} = "preserve hourly: first of hour, $href->{delta_hours} hours ago";
    }
    $first_in_delta_days{$href->{delta_days}} //= $href;
  }
  foreach (sort {$b <=> $a} keys %first_in_delta_days) {
    my $href = $first_in_delta_days{$_} || die;
    if($preserve_daily && (($preserve_daily eq 'all') || ($href->{delta_days} <= $preserve_daily))) {
      $href->{preserve} = "preserve daily: first of day" . ($href->{real_hod} ? sprintf(" (starting at %02u:00)", $href->{real_hod}) : "") . ", $href->{delta_days} days ago"
                          . (defined($href->{real_hod}) ? ($href->{delta_hours_from_hod} ? ", $href->{delta_hours_from_hod}h after " : ", at ") . sprintf("%02u:00", $href->{real_hod}) : "");
    }
    $first_in_delta_weeks{$href->{delta_weeks}} //= $href;
  }
  foreach (sort {$b <=> $a} keys %first_in_delta_weeks) {
    my $href = $first_in_delta_weeks{$_} || die;
    if($preserve_weekly && (($preserve_weekly eq 'all') || ($href->{delta_weeks} <= $preserve_weekly))) {
      $href->{preserve} = "preserve weekly: $href->{delta_weeks} weeks ago," . _format_preserve_delta($href, $preserve_day_of_week);
    }
    $first_weekly_in_delta_months{$href->{delta_months}} //= $href;
  }
  foreach (sort {$b <=> $a} keys %first_weekly_in_delta_months) {
    my $href = $first_weekly_in_delta_months{$_} || die;
    if($preserve_monthly && (($preserve_monthly eq 'all') || ($href->{delta_months} <= $preserve_monthly))) {
      $href->{preserve} = "preserve monthly: first weekly of month $href->{year}-" . sprintf("%02u", $href->{month}) . " ($href->{delta_months} months ago," . _format_preserve_delta($href, $preserve_day_of_week) . ")";
    }
    $first_monthly_in_delta_years{$href->{delta_years}} //= $href;
  }
  foreach (sort {$b <=> $a} keys %first_monthly_in_delta_years) {
    my $href = $first_monthly_in_delta_years{$_} || die;
    if($preserve_yearly && (($preserve_yearly eq 'all') || ($href->{delta_years} <= $preserve_yearly))) {
      $href->{preserve} = "preserve yearly: first weekly of year $href->{year} ($href->{delta_years} years ago," . _format_preserve_delta($href, $preserve_day_of_week) . ")";
    }
  }
  # assemble results
  my @delete;
  my @preserve;
  my %result_base = ( %$preserve,
                      scheme => format_preserve_matrix($preserve),
                      %$result_hints,
                    );
  my $count_defined = 0;
  foreach my $href (@sorted_schedule)
  {
    my $result_reason_text = $href->{preserve};
    my $result_action_text;
    unless($href->{informative_only}) {
      if($href->{preserve}) {
        if($preserve_threshold_date && (cmp_date($href->{btrbk_date}, $preserve_threshold_date) <= 0)) {
          # older than threshold, do not add to preserve list
          $result_reason_text = "$result_reason_text, ignored (archive_exclude_older) older than existing archive";
        }
        else {
          push(@preserve, $href->{value});
          $result_action_text = $result_preserve_action_text;
        }
      }
      else {
        push(@delete, $href->{value});
        $result_action_text = $result_delete_action_text;
      }
      $count_defined++;
    }
    TRACE join(" ", "schedule: $href->{value}{PRINT}", ($href->{informative_only} ? "(informative_only)" : uc($result_action_text || "-")), ($result_reason_text // "-")) if(($loglevel >= 4) && $href->{value} && $href->{value}{PRINT});
    push @$results_list, { %result_base,
                           action => $result_action_text,
                           reason => $result_reason_text,
                           value => $href->{value},
                         } if($results_list);
  }
  DEBUG "Preserving " . @preserve . "/" . $count_defined . " items";
  return (\@preserve, \@delete);
}
2018-01-14 21:47:25 +01:00
# Format the offset of a scheduled entry relative to the start of its
# week as human readable text, e.g. " 2d 5h after monday 09:00", or
# " at monday" if the entry falls exactly on the week start.
#
# $href: schedule entry hashref; reads delta_days_from_eow,
#        delta_hours_from_hod, and real_hod (real_hod is only set for
#        entries carrying an exact time, see schedule())
# $preserve_day_of_week: week start day name (e.g. "monday")
#
# FIX: prototype was ($$$), requiring three arguments, but the sub
# consumes only two and all call sites in schedule() pass exactly two.
# This went unnoticed only because the calls are compiled before this
# definition is seen, so the prototype was never enforced.
sub _format_preserve_delta($$)
{
  my $href = shift;
  my $preserve_day_of_week = shift;
  my $s = "";
  $s .= " $href->{delta_days_from_eow}d" if($href->{delta_days_from_eow});
  $s .= " $href->{delta_hours_from_hod}h" if($href->{delta_hours_from_hod});
  return ($s ? "$s after " : " at ") . $preserve_day_of_week . (defined($href->{real_hod}) ? sprintf(" %02u:00", $href->{real_hod}) : "");
}
2016-04-12 11:47:28 +02:00
# Render a preserve matrix (hashref with keys min_n/min_q, h, d, w, m,
# y, dow, hod) as text.
#
# Formats (opts: format => ...):
#   "short" (default): compact notation, e.g. "14d 4w (sunday, 00:00)"
#   "config":          like "short", without the trailing "(dow, hod)"
#   "debug_text":      verbose description for debug logging
sub format_preserve_matrix($@)
{
  my $matrix = shift || die;
  my %opts = @_;
  my $format = $opts{format} // "short";
  my $min_all = $matrix->{min_q} && ($matrix->{min_q} eq 'all');

  if($format eq "debug_text") {
    my %quantifier_name = ( h => 'hours', d => 'days', w => 'weeks', m => 'months', y => 'years' );
    my @text;
    if($min_all) {
      push @text, "all forever";
    }
    else {
      push @text, "latest" if($matrix->{min_q} && ($matrix->{min_q} eq 'latest'));
      push @text, "all within $matrix->{min_n} $quantifier_name{$matrix->{min_q}}" if($matrix->{min_n} && $matrix->{min_q});
      push @text, "first of day (starting at " . sprintf("%02u:00", $matrix->{hod}) . ") for $matrix->{d} days" if($matrix->{d});
      # each coarser granularity is omitted once a finer one already
      # preserves everything ('all')
      unless($matrix->{d} && ($matrix->{d} eq 'all')) {
        push @text, "first daily in week (starting on $matrix->{dow}) for $matrix->{w} weeks" if($matrix->{w});
        unless($matrix->{w} && ($matrix->{w} eq 'all')) {
          push @text, "first weekly of month for $matrix->{m} months" if($matrix->{m});
          unless($matrix->{m} && ($matrix->{m} eq 'all')) {
            push @text, "first weekly of year for $matrix->{y} years" if($matrix->{y});
          }
        }
      }
    }
    return 'preserving ' . join('; ', @text);
  }

  # "short" / "config" formats
  return '*d+' if($min_all);
  my $out = "";
  $out .= $matrix->{min_n} . $matrix->{min_q} . '+' if($matrix->{min_n} && $matrix->{min_q});
  foreach my $q (qw(h d w m y)) {
    my $nval = $matrix->{$q} // 0;
    next unless($nval);
    $nval = '*' if($nval eq 'all');
    $out .= ($out ? ' ' : '') . $nval . $q;
  }
  if(($format ne "config") && ($matrix->{d} || $matrix->{w} || $matrix->{m} || $matrix->{y})) {
    $out .= " ($matrix->{dow}, " . sprintf("%02u:00", $matrix->{hod}) . ")";
  }
  return $out;
}
2016-04-21 13:27:54 +02:00
# Format a timestamp.
#
# $time:      unix time (seconds), or an arrayref as returned by
#             localtime()/gmtime()
# $format:    "short"     -> YYYYMMDD
#             "long"      -> YYYYMMDDThhmm
#             "long-iso"  -> YYYYMMDDThhmmss+hhmm (with UTC offset)
#             "debug-iso" -> YYYY-MM-DDThh:mm:ss+hhmm
# $tm_is_utc: if true, append offset "+0000" instead of deriving the
#             local timezone offset of @tm
#
# Dies on unknown $format.
# FIX: removed unreachable "return undef;" after the final "return $ts;".
sub timestamp($$;$)
{
  my $time = shift // die; # unixtime, or arrayref from localtime()
  my $format = shift;
  my $tm_is_utc = shift;
  my @tm = ref($time) ? @$time : localtime($time);
  my $ts;
  # NOTE: can't use POSIX::strftime(), as "%z" always prints offset of local timezone!
  if($format eq "short") {
    return sprintf('%04u%02u%02u', $tm[5] + 1900, $tm[4] + 1, $tm[3]);
  }
  elsif($format eq "long") {
    return sprintf('%04u%02u%02uT%02u%02u', $tm[5] + 1900, $tm[4] + 1, $tm[3], $tm[2], $tm[1]);
  }
  elsif($format eq "long-iso") {
    $ts = sprintf('%04u%02u%02uT%02u%02u%02u', $tm[5] + 1900, $tm[4] + 1, $tm[3], $tm[2], $tm[1], $tm[0]);
  }
  elsif($format eq "debug-iso") {
    $ts = sprintf('%04u-%02u-%02uT%02u:%02u:%02u', $tm[5] + 1900, $tm[4] + 1, $tm[3], $tm[2], $tm[1], $tm[0]);
  }
  else { die; }
  if($tm_is_utc) {
    $ts .= '+0000'; # or 'Z'
  } else {
    # derive the UTC offset of @tm from the local timezone rules
    my $offset = timegm(@tm) - timelocal(@tm);
    if($offset < 0) { $ts .= '-'; $offset = -$offset; } else { $ts .= '+'; }
    my $h = int($offset / (60 * 60));
    die if($h > 24); # sanity check, something went really wrong
    $ts .= sprintf('%02u%02u', $h, int($offset / 60) % 60);
  }
  return $ts;
}
2015-09-20 14:25:20 +02:00
# Print a report header to STDOUT: title line with version info,
# followed by optional Date / Config / Dryrun / Filter fields and
# optional info, options and legend paragraphs, framed by separators.
# %args: title, config, time, info, options, legend
sub print_header(@)
{
  my %args = @_;
  my $config = $args{config};
  my $separator = ('-' x 80) . "\n";
  print $separator;
  print "$args{title} ($VERSION_INFO)\n\n";
  print "    Date:   " . localtime($args{time}) . "\n" if($args{time});
  print "    Config: " . config_key($config, "SRC_FILE") . "\n" if($config);
  print "    Dryrun: YES\n" if($dryrun);
  if($config && $config->{CMDLINE_FILTER_LIST}) {
    # continuation lines aligned with the "Filter:" value column
    print "    Filter: " . join("\n            ", @{$config->{CMDLINE_FILTER_LIST}}) . "\n";
  }
  print "\n" . join("\n", grep(defined, @{$args{info}})) . "\n" if($args{info});
  if($args{options} && (scalar @{$args{options}})) {
    print "\nOptions:\n  " . join("\n  ", @{$args{options}}) . "\n";
  }
  if($args{legend}) {
    print "\nLegend:\n  " . join("\n  ", @{$args{legend}}) . "\n";
  }
  print $separator;
}
2019-07-15 18:19:33 +02:00
# Print closing notes after a run: on non-zero $exit_status, a warning
# plus the deduplicated FIX_MANUALLY hints collected from $config; on
# dryrun, a reminder that nothing was executed.
sub print_footer($$)
{
  my ($config, $exit_status) = @_;
  if($exit_status) {
    print "\nNOTE: Some errors occurred, which may result in missing backups!\n";
    print "Please check warning and error messages above.\n";
    my @fix_manually_text = _config_collect_values($config, "FIX_MANUALLY");
    if(scalar(@fix_manually_text)) {
      # print each hint only once, preserving first-seen order
      my %seen;
      my @unique = grep { !$seen{$_}++ } @fix_manually_text;
      print join("\n", @unique) . "\n";
    }
  }
  print "\nNOTE: Dryrun was active, none of the operations above were actually executed!\n" if($dryrun);
}
2016-01-15 02:06:03 +01:00
# Print two-column rows (arrayrefs [ key, value ]) to STDOUT, padding
# the first column to the width of its longest entry, separated by
# $spacing (defaults to two spaces).
sub print_table($;$)
{
  my $data = shift;
  my $spacing = shift // "  ";
  # first pass: width of the key column
  my $maxlen = 0;
  foreach my $row (@$data) {
    my $klen = length($row->[0]);
    $maxlen = $klen if($klen > $maxlen);
  }
  # second pass: print padded rows
  foreach my $row (@$data) {
    my $pad = ' ' x ($maxlen - length($row->[0]));
    print $row->[0] . $pad . $spacing . $row->[1] . "\n";
  }
}
2015-10-11 19:01:59 +02:00
# Print rows ($data: arrayref of hashrefs) in a selectable output
# format: "table" (default), "raw" (key="value" pairs), "tlog" or
# "syslog", or any other column layout defined in the global
# %table_formats for $format_key. Column keys prefixed with "-" are
# only printed if at least one row has a defined value for them.
#
# %args: title, output_format, outfile (filehandle, default STDOUT),
#        no_header, paragraph
# NOTE: table output sanitizes values destructively on $data.
sub print_formatted(@)
{
  my $format_key = shift || die;
  my $data = shift || die;
  my $default_format = "table";
  my %args = @_;
  my $title = $args{title};
  # per-call format overrides global --format, falls back to "table"
  my $format = $args{output_format} || $output_format || $default_format;
  my $key_defs = $table_formats{$format_key}->{$format};
  my $ralign = $table_formats{$format_key}->{RALIGN} // {};
  my $fh = $args{outfile} // *STDOUT;
  my $table_spacing = 2;

  unless($key_defs) {
    WARN "Unsupported output format \"$format\", defaulting to \"$default_format\" format.";
    $key_defs = $table_formats{$format_key}->{$default_format} || die;
    $format = $default_format;
  }
  # split key definitions into column order (@keys) and visibility
  # (%print_row): a leading "-" marks a column as hidden-by-default
  my @keys;
  my %print_row;
  foreach (@$key_defs) {
    my $kd = $_;
    $print_row{$kd} = 1 unless($kd =~ s/^-//); # strip leading "-"
    push @keys, $kd;
  }

  if($format eq "raw")
  {
    # output: key0="value0" key1="value1" ...
    foreach my $row (@$data) {
      print $fh "format=\"$format_key\" ";
      print $fh join(' ', map { "$_=\"" . ($row->{$_} // "") . "\""; } @keys) . "\n";
    }
  }
  elsif(($format eq "tlog") || ($format eq "syslog"))
  {
    # output: value0 value1, ...
    unless($args{no_header}) {
      print $fh join(' ', @keys) . "\n";
    }
    foreach my $row (@$data) {
      # "message" values are prefixed with "# " (comment-style)
      my $line = join(' ', map { ((defined($row->{$_}) && ($_ eq "message")) ? '# ' : '') . ($row->{$_} // "-") } @keys);
      if($format eq "syslog") { # dirty hack, ignore outfile on syslog format
        syslog($line);
      } else {
        print $fh ($line . "\n");
      }
    }
  }
  else
  {
    # sanitize and calculate maxlen for each column
    # NOTE: this is destructive on data!
    my %maxlen;
    my @sane_data; # NOTE(review): unused leftover, kept as-is
    foreach my $key (@keys) {
      $maxlen{$key} = length($key); # initialize with size of key
    }
    foreach my $row (@$data) {
      foreach my $key (@keys) {
        my $val = $row->{$key};
        if(ref $val eq "ARRAY") {
          # flatten array values to comma-separated text
          $val = join(',', @{$val});
        }
        # hidden ("-"-prefixed) columns become visible on first defined value
        $print_row{$key} = 1 if(defined($val));
        $val //= "-";
        $val = "-" if($val eq "");
        $row->{$key} = $val; # write back the sanitized value
        $maxlen{$key} = length($val) if($maxlen{$key} < length($val));
      }
    }
    # print title
    if($title) {
      print $fh "$title\n";
    }
    # print keys (headings)
    unless($args{no_header}) {
      # $fill defers padding until the next visible column is printed,
      # so no trailing whitespace is emitted on the line
      my $fill = 0;
      foreach (@keys) {
        next unless($print_row{$_});
        print $fh ' ' x $fill;
        $fill = $maxlen{$_} - length($_);
        if($ralign->{$_}) {
          print $fh ' ' x $fill;
          $fill = 0;
        }
        print $fh $_;
        $fill += $table_spacing;
      }
      print $fh "\n";
      $fill = 0;
      foreach (@keys) {
        next unless($print_row{$_});
        print $fh ' ' x $fill;
        print $fh '-' x $maxlen{$_};
        $fill = $table_spacing;
      }
      # alternatively (all above in one line ;)
      #print $fh join(' ' x $table_spacing, map { '-' x ($maxlen{$_}) } @keys) . "\n";
      print $fh "\n";
    }
    # print values
    foreach my $row (@$data) {
      my $fill = 0;
      foreach (@keys) {
        next unless($print_row{$_});
        my $val = $row->{$_};
        print $fh ' ' x $fill;
        $fill = $maxlen{$_} - length($val);
        if($ralign->{$_}) {
          print $fh ' ' x $fill;
          $fill = 0;
        }
        print $fh $val;
        $fill += $table_spacing;
      }
      print $fh "\n";
    }
    # print additional newline for paragraphs
    if($args{paragraph}) {
      print $fh "\n";
    }
  }
}
2016-03-15 11:21:59 +01:00
# Recursively build a printable origin tree for a subvolume node.
#
# Walks the parent_uuid chain (snapshot origin) and, for received
# subvolumes, resolves the send/receive source ("receive parents") and
# other subvolumes received from the same source ("receive twins").
#
# Arguments (positional):
#   $prefix     - tree-drawing prefix for this node (e.g. "^-- "), "" for root of tree
#   $node       - subvolume node hashref (must have {uuid}), required
#   $lines      - arrayref; output rows ({tree, uuid, parent_uuid, received_uuid}) are appended
#   $nodelist   - arrayref of all known nodes; computed from %uuid_cache if undef
#   $depth      - current indentation depth (default 0)
#   $seen       - arrayref of uuids already visited (cycle / deep-recursion guard)
#   $norecurse  - if true, print a "..." stub for the parent and stop descending
#
# Returns 0 (callers ignore the return value; output goes to @$lines).
sub _origin_tree
{
  my $prefix = shift;
  my $node = shift // die;
  my $lines = shift;
  my $nodelist = shift;
  my $depth = shift // 0;
  my $seen = shift // [];
  my $norecurse = shift;
  my $uuid = $node->{uuid} || die;
  # cache a bit, this might be large
  # note: root subvolumes dont have REL_PATH
  $nodelist //= [ (sort { ($a->{REL_PATH} // "") cmp ($b->{REL_PATH} // "") } values %uuid_cache) ];

  my $out_path = _fs_path($node);
  my $prefix_spaces = ' ' x (($depth * 4) - ($prefix ? 4 : 0));
  push(@$lines, { tree => "${prefix_spaces}${prefix}$out_path",
                  uuid => $node->{uuid},
                  parent_uuid => $node->{parent_uuid},
                  received_uuid => $node->{received_uuid},
                });
  # handle deep recursion
  # NOTE: exact string compare instead of interpolating $uuid into a
  # regex match: avoids any regex metacharacter surprises.
  return 0 if(grep { $_ eq $uuid } @$seen);

  if($node->{parent_uuid} ne '-') {
    my $parent_node = $uuid_cache{$node->{parent_uuid}};
    if($parent_node) {
      if($norecurse) {
        # caller asked us not to descend: print stub marking truncation
        push(@$lines,{ tree => "${prefix_spaces} ^-- ...",
                       uuid => $parent_node->{uuid},
                       parent_uuid => $parent_node->{parent_uuid},
                       received_uuid => $parent_node->{received_uuid},
                       recursion => 'stop_recursion',
                     });
        return 0;
      }
      if($parent_node->{readonly}) {
        _origin_tree("^-- ", $parent_node, $lines, $nodelist, $depth + 1, undef, 1); # end recursion
      }
      else {
        _origin_tree("^-- ", $parent_node, $lines, $nodelist, $depth + 1);
      }
    }
    else {
      # parent uuid not present in %uuid_cache (e.g. deleted subvolume)
      push(@$lines,{ tree => "${prefix_spaces} ^-- <unknown uuid=$node->{parent_uuid}>" });
    }
  }
  return 0 if($norecurse);
  push(@$seen, $uuid);
  if($node->{received_uuid} ne '-') {
    my $received_uuid = $node->{received_uuid};
    my @receive_parents; # there should be only one!
    my @receive_twins;
    foreach (@$nodelist) {
      next if($_->{uuid} eq $uuid);
      if($received_uuid eq $_->{uuid} && $_->{readonly}) {
        # this node was received from $_: recurse into the send source
        _origin_tree("", $_, \@receive_parents, $nodelist, $depth, $seen);
      }
      elsif(($_->{received_uuid} ne '-') && ($received_uuid eq $_->{received_uuid}) && $_->{readonly}) {
        # sibling received from the same source subvolume
        _origin_tree("", $_, \@receive_twins, $nodelist, $depth, $seen, 1); # end recursion
      }
    }
    push @$lines, @receive_twins;
    push @$lines, @receive_parents;
  }
  return 0;
}
2016-03-07 19:20:15 +01:00
2016-03-15 11:21:59 +01:00
# Derive the process exit status from the (possibly nested) config tree.
#
# Returns 10 as soon as any subsection was aborted, is flagged as
# needing manual intervention, or contains such a subsection deeper
# down; returns 0 if the whole tree completed cleanly.
sub exit_status
{
  my $config = shift;
  foreach my $section (@{$config->{SUBSECTION}}) {
    # aborted, fix-manually (treated as error), or failure further down
    return 10 if(IS_ABORTED($section, "abort_")
                 || defined($section->{FIX_MANUALLY})
                 || exit_status($section));
  }
  return 0;
}
2014-12-11 18:03:10 +01:00
MAIN:
{
2017-09-25 16:05:42 +02:00
# NOTE: Since v0.26.0, btrbk does not enable taint mode (perl -T) by
# default, and does not hardcode $PATH anymore.
#
# btrbk still does all taint checks, and can be run in taint mode.
# In order to enable taint mode, run `perl -T btrbk`.
#
# see: perlrun(1), perlsec(1)
#
my $taint_mode_enabled = eval '${^TAINT}';
if($taint_mode_enabled) {
# we are running in tainted mode (perl -T), sanitize %ENV
delete @ENV{qw(IFS CDPATH ENV BASH_ENV)};
# in taint mode, perl needs an untainted $PATH.
$ENV{PATH} = '/sbin:/bin:/usr/sbin:/usr/bin';
}
2015-05-18 21:18:57 +02:00
2015-08-15 17:51:00 +02:00
Getopt::Long::Configure qw(gnu_getopt);
2015-01-17 14:55:46 +01:00
my $start_time = time;
2016-04-21 13:27:54 +02:00
@tm_now = localtime($start_time);
2014-12-11 18:03:10 +01:00
2018-02-13 19:26:54 +01:00
my @config_override_cmdline;
2019-04-17 16:10:15 +02:00
my @exclude_cmdline;
2019-08-04 22:48:50 +02:00
my ($config_cmdline, $lockfile_cmdline, $print_schedule,
$preserve_snapshots, $preserve_backups, $wipe_snapshots, $skip_snapshots, $skip_backups,
$archive_raw,
$resume_only_DEPRECATED, # as of btrbk-v0.26.0
);
2015-08-15 17:51:00 +02:00
unless(GetOptions(
2017-08-21 13:23:20 +02:00
'help|h' => sub { VERSION_MESSAGE(); HELP_MESSAGE(0); exit 0; },
'version' => sub { VERSION_MESSAGE(); exit 0; },
'config|c=s' => \$config_cmdline,
'dry-run|n' => \$dryrun,
2019-04-17 16:10:15 +02:00
'exclude=s' => \@exclude_cmdline,
2018-05-10 18:58:17 +02:00
'preserve|p' => sub { $preserve_snapshots = "preserve", $preserve_backups = "preserve" },
'preserve-snapshots' => sub { $preserve_snapshots = "preserve-snapshots" },
'preserve-backups' => sub { $preserve_backups = "preserve-backups" },
2017-09-28 14:02:06 +02:00
'wipe' => \$wipe_snapshots,
2017-08-21 13:23:20 +02:00
'resume-only|r' => \$resume_only_DEPRECATED,
'quiet|q' => \$quiet,
2019-04-05 19:29:37 +02:00
'verbose|v' => sub { $loglevel = ($loglevel =~ /^[0-9]+$/) ? $loglevel+1 : 2; },
2017-08-21 13:23:20 +02:00
'loglevel|l=s' => \$loglevel,
'progress' => \$show_progress,
'table|t' => sub { $output_format = "table" },
2019-08-02 22:39:35 +02:00
'long|L' => sub { $output_format = "long" },
2017-08-21 13:23:20 +02:00
'format=s' => \$output_format,
2018-10-15 16:25:07 +02:00
'print-schedule|S' => \$print_schedule,
2017-08-21 13:23:20 +02:00
'lockfile=s' => \$lockfile_cmdline,
2018-02-13 19:26:54 +01:00
'override=s' => \@config_override_cmdline, # e.g. --override=incremental=no
2019-08-04 22:48:50 +02:00
'raw' => \$archive_raw,
2015-08-15 17:51:00 +02:00
))
{
2015-01-10 16:02:35 +01:00
VERSION_MESSAGE();
HELP_MESSAGE(0);
2015-09-30 14:00:39 +02:00
exit 2;
2015-01-10 16:02:35 +01:00
}
2014-12-13 15:15:58 +01:00
my $command = shift @ARGV;
2015-08-15 17:51:00 +02:00
unless($command) {
VERSION_MESSAGE();
HELP_MESSAGE(0);
2015-09-30 14:00:39 +02:00
exit 2;
2015-08-15 17:51:00 +02:00
}
2014-12-12 12:32:04 +01:00
# assign command line options
2016-04-18 16:40:49 +02:00
@config_src = ( $config_cmdline ) if($config_cmdline);
2014-12-13 19:34:03 +01:00
if (lc($loglevel) eq "warn") { $loglevel = 1; }
elsif(lc($loglevel) eq "info") { $loglevel = 2; }
elsif(lc($loglevel) eq "debug") { $loglevel = 3; }
elsif(lc($loglevel) eq "trace") { $loglevel = 4; }
2015-08-15 17:51:00 +02:00
elsif($loglevel =~ /^[0-9]+$/) { ; }
else { $loglevel = 1; }
2016-04-28 13:03:15 +02:00
require_data_dumper() if(($loglevel >= 4) || ($VERSION =~ /-dev$/));
2014-12-11 18:03:10 +01:00
2017-08-21 13:23:20 +02:00
# DEPRECATED options
if($resume_only_DEPRECATED) {
WARN "Found deprecated command line option \"-r, --resume-only\": Use \"btrbk resume --preserve\"";
2018-05-10 18:58:17 +02:00
$skip_snapshots = "resume-only";
$preserve_backups = "resume-only";
$preserve_snapshots = "resume-only";
2017-08-21 13:23:20 +02:00
}
2014-12-12 12:32:04 +01:00
# check command line options
2019-07-28 15:04:23 +02:00
if($show_progress && (not check_exe('mbuffer'))) {
WARN 'Found option "--progress", but required executable "mbuffer" does not exist on your system. Please install "mbuffer".';
2015-08-15 18:23:48 +02:00
$show_progress = 0;
}
2019-07-28 18:54:37 +02:00
my ($action_run, $action_usage, $action_resolve, $action_diff, $action_origin, $action_config_print, $action_list, $action_clean, $action_archive, $action_ls);
2015-09-02 11:04:22 +02:00
my @filter_args;
2019-04-17 15:11:49 +02:00
my @subvol_args;
2019-07-28 18:54:28 +02:00
my @dir_args;
2015-10-22 17:45:27 +02:00
my $args_expected_min = 0;
my $args_expected_max = 9999;
2019-07-28 18:54:34 +02:00
my $fallback_default_config;
2015-02-08 13:47:31 +01:00
if(($command eq "run") || ($command eq "dryrun")) {
$action_run = 1;
2014-12-13 15:15:58 +01:00
$dryrun = 1 if($command eq "dryrun");
2015-09-02 11:04:22 +02:00
@filter_args = @ARGV;
2014-12-13 15:15:58 +01:00
}
2017-08-21 13:23:20 +02:00
elsif($command eq "snapshot") {
$action_run = 1;
2018-05-10 18:58:17 +02:00
$skip_backups = "snapshot";
$preserve_backups = "snapshot";
2017-08-21 13:23:20 +02:00
@filter_args = @ARGV;
}
elsif($command eq "resume") {
$action_run = 1;
2018-05-10 18:58:17 +02:00
$skip_snapshots = "resume";
2017-08-21 13:23:20 +02:00
@filter_args = @ARGV;
}
2017-09-28 13:18:40 +02:00
elsif($command eq "prune") {
$action_run = 1;
2018-05-10 18:58:17 +02:00
$skip_snapshots = "prune";
$skip_backups = "prune";
2017-09-28 13:18:40 +02:00
@filter_args = @ARGV;
}
2016-01-14 15:52:33 +01:00
elsif ($command eq "clean") {
$action_clean = 1;
@filter_args = @ARGV;
}
2016-04-16 01:09:17 +02:00
elsif ($command eq "archive") {
$action_archive = 1;
2019-08-05 14:31:48 +02:00
$fallback_default_config = 1;
2016-04-07 15:33:32 +02:00
$args_expected_min = $args_expected_max = 2;
2019-04-17 15:11:49 +02:00
@subvol_args = @ARGV;
2016-04-07 15:33:32 +02:00
}
2015-10-19 22:10:08 +02:00
elsif ($command eq "usage") {
$action_usage = 1;
2015-09-02 11:04:22 +02:00
@filter_args = @ARGV;
2015-01-20 19:18:38 +01:00
}
2019-07-28 18:54:37 +02:00
elsif ($command eq "ls") {
$action_ls = 1;
$fallback_default_config = 1;
$args_expected_min = $args_expected_max = 1;
@dir_args = @ARGV;
}
2015-01-04 19:30:41 +01:00
elsif ($command eq "diff") {
2015-01-03 21:25:46 +01:00
$action_diff = 1;
2019-07-28 18:54:34 +02:00
$fallback_default_config = 1;
2015-03-01 14:28:26 +01:00
$args_expected_min = $args_expected_max = 2;
2019-04-17 15:11:49 +02:00
@subvol_args = @ARGV;
2014-12-14 21:29:22 +01:00
}
2015-01-26 17:31:18 +01:00
elsif ($command eq "origin") {
$action_origin = 1;
2015-03-01 14:28:26 +01:00
$args_expected_min = $args_expected_max = 1;
2019-04-17 15:11:49 +02:00
@subvol_args = @ARGV;
2015-01-26 17:31:18 +01:00
}
2015-10-11 02:02:45 +02:00
elsif($command eq "list") {
2015-10-22 17:45:27 +02:00
my $subcommand = shift @ARGV // "";
if(($subcommand eq "config") ||
($subcommand eq "volume") ||
($subcommand eq "source") ||
($subcommand eq "target"))
{
$action_list = $subcommand;
}
elsif(($subcommand eq "snapshots") ||
($subcommand eq "backups") ||
($subcommand eq "latest"))
{
$action_resolve = $subcommand;
}
else {
$action_list = "config";
unshift @ARGV, $subcommand if($subcommand ne "");
2015-10-12 14:59:02 +02:00
}
2015-10-11 02:02:45 +02:00
@filter_args = @ARGV;
}
2016-01-15 02:06:03 +01:00
elsif($command eq "stats") {
$action_resolve = "stats";
@filter_args = @ARGV;
}
2015-09-24 13:51:15 +02:00
elsif ($command eq "config") {
2015-10-10 21:26:59 +02:00
my $subcommand = shift @ARGV // "";
2015-10-22 17:45:27 +02:00
@filter_args = @ARGV;
2015-10-10 21:26:59 +02:00
if(($subcommand eq "print") || ($subcommand eq "print-all")) {
$action_config_print = $subcommand;
2015-10-22 17:45:27 +02:00
}
elsif($subcommand eq "list") {
$action_list = "config";
2015-10-10 21:26:59 +02:00
}
else {
ERROR "Unknown subcommand for \"config\" command: $subcommand";
2015-09-24 13:51:15 +02:00
HELP_MESSAGE(0);
2015-09-30 14:00:39 +02:00
exit 2;
2015-09-24 13:51:15 +02:00
}
}
2014-12-13 15:15:58 +01:00
else {
ERROR "Unrecognized command: $command";
HELP_MESSAGE(0);
2015-09-30 14:00:39 +02:00
exit 2;
2014-12-13 13:52:43 +01:00
}
2015-03-01 14:28:26 +01:00
if(($args_expected_min > scalar(@ARGV)) || ($args_expected_max < scalar(@ARGV))) {
2015-02-28 13:49:36 +01:00
ERROR "Incorrect number of arguments";
HELP_MESSAGE(0);
2015-09-30 14:00:39 +02:00
exit 2;
2015-02-28 13:49:36 +01:00
}
# input validation
2019-04-17 15:11:49 +02:00
foreach (@subvol_args) {
my ($url_prefix, $path) = check_url($_);
2019-04-17 15:56:35 +02:00
unless(defined($path)) {
ERROR "Bad argument: not a subvolume declaration: $_";
HELP_MESSAGE(0);
exit 2;
2019-04-17 15:11:49 +02:00
}
2019-04-17 15:56:35 +02:00
$_ = $url_prefix . $path;
}
2019-07-28 18:54:28 +02:00
foreach (@dir_args) {
# map relative path to absolute
my $path = $_;
if(-d $path) {
$path = `readlink -e -q '$path'`;
}
$path = check_file($path, { absolute => 1 });
unless($path) {
ERROR "Bad argument: not a directory: $_";
HELP_MESSAGE(0);
exit 2;
}
$_ = $path;
}
2019-04-17 15:56:35 +02:00
my @filter_vf;
foreach (@filter_args) {
my $vf = vinfo_filter_statement($_);
unless($vf) {
ERROR "Bad argument: invalid filter statement: $_";
HELP_MESSAGE(0);
exit 2;
}
push @filter_vf, $vf;
2015-02-28 13:49:36 +01:00
}
2019-04-17 16:10:15 +02:00
foreach (@exclude_cmdline) {
my $vf = vinfo_filter_statement($_);
unless($vf) {
ERROR "Bad argument: invalid filter statement: --exclude='$_'";
HELP_MESSAGE(0);
exit 2;
}
push @exclude_vf, $vf;
}
2018-02-13 19:26:54 +01:00
foreach(@config_override_cmdline) {
if(/(.*?)=(.*)/) {
my $key = $1;
my $value = $2;
DEBUG "config_override: \"$key=$value\"";
2019-04-30 13:38:47 +02:00
unless(append_config_option(\%config_override, $key, $value, "OVERRIDE", error_statement => "in option \"--override\"")) {
2018-02-13 19:26:54 +01:00
HELP_MESSAGE(0);
exit 2;
}
}
else {
ERROR "Option \"override\" requires \"<config_option>=<value>\" format";
2015-10-23 14:43:36 +02:00
HELP_MESSAGE(0);
exit 2;
}
}
2016-06-07 16:17:02 +02:00
if(defined($lockfile_cmdline)) {
if($lockfile_cmdline =~ /^($file_match)$/) {
$lockfile = $1; # untaint argument
} else {
ERROR "Option \"--lockfile\" is not a valid file name: \"$lockfile_cmdline\"";
HELP_MESSAGE(0);
exit 2;
}
}
2015-02-28 13:49:36 +01:00
2014-12-13 13:52:43 +01:00
2016-04-15 01:22:19 +02:00
INFO "$VERSION_INFO (" . localtime($start_time) . ")";
2019-04-18 16:28:53 +02:00
action("startup", status => "v$VERSION", message => $VERSION_INFO, time => $start_time);
2014-12-14 21:29:22 +01:00
2019-07-28 18:54:34 +02:00
#
# parse config file
#
2019-08-05 14:31:48 +02:00
my $config;
if(my $config_file = _config_file(@config_src)) {
INFO "Using configuration: $config_file";
$config = parse_config($config_file);
exit 2 unless($config);
}
elsif($fallback_default_config) {
INFO "Configuration file not found, falling back to defaults";
$config = init_config();
2019-07-28 18:54:34 +02:00
}
2019-08-05 14:31:48 +02:00
else {
ERROR "Configuration file not found: " . join(', ', @config_src);
exit 2;
}
2019-07-28 18:54:34 +02:00
unless(ref($config->{SUBSECTION}) eq "ARRAY") {
ERROR "No volumes defined in configuration file";
exit 2;
}
2014-12-14 21:29:22 +01:00
if($action_diff)
{
2015-01-04 19:30:41 +01:00
#
# print snapshot diff
#
2019-04-17 15:11:49 +02:00
my $src_url = $subvol_args[0] || die;
my $target_url = $subvol_args[1] || die;
2016-03-15 16:54:54 +01:00
# NOTE: ssh://{src,target} uses default config
2015-01-03 21:25:46 +01:00
2019-07-28 18:54:34 +02:00
my $src_vol = vinfo($src_url, $config);
2016-03-09 19:52:45 +01:00
unless(vinfo_init_root($src_vol)) { ERROR "Failed to fetch subvolume detail for '$src_vol->{PRINT}'" . ($err ? ": $err" : ""); exit 1; }
2016-03-14 16:39:13 +01:00
if($src_vol->{node}{is_root}) { ERROR "Subvolume is btrfs root: $src_vol->{PRINT}"; exit 1; }
2015-01-03 21:25:46 +01:00
2019-07-28 18:54:34 +02:00
my $target_vol = vinfo($target_url, $config);
2016-03-09 19:52:45 +01:00
unless(vinfo_init_root($target_vol)) { ERROR "Failed to fetch subvolume detail for '$target_vol->{PRINT}'" . ($err ? ": $err" : ""); exit 1; }
2016-03-14 16:39:13 +01:00
if($target_vol->{node}{is_root}) { ERROR "Subvolume is btrfs root: $target_vol->{PRINT}"; exit 1; }
2015-01-03 21:25:46 +01:00
2018-02-15 02:57:07 +01:00
unless(_is_same_fs_tree($src_vol->{node}, $target_vol->{node})) {
2016-03-14 16:39:13 +01:00
ERROR "Subvolumes are not on the same btrfs filesystem!";
2015-04-21 14:53:31 +02:00
exit 1;
}
2015-01-03 21:25:46 +01:00
# NOTE: in some cases "cgen" differs from "gen", even for read-only snapshots (observed: gen=cgen+1)
2016-05-03 13:19:42 +02:00
my $lastgen = $src_vol->{node}{gen} + 1;
2015-01-03 21:25:46 +01:00
2014-12-14 22:03:31 +01:00
# dump files, sorted and unique
2015-04-23 16:19:34 +02:00
my $ret = btrfs_subvolume_find_new($target_vol, $lastgen);
2015-01-03 21:25:46 +01:00
exit 1 unless(ref($ret));
2015-05-26 20:05:40 +02:00
print_header(title => "Subvolume Diff",
time => $start_time,
info => [
"Showing changed files for subvolume:",
2016-03-14 16:39:13 +01:00
" $target_vol->{PRINT} (gen=$target_vol->{node}{gen})",
2015-05-26 20:05:40 +02:00
"",
2019-08-07 21:30:59 +02:00
"Starting at generation after subvolume:",
" $src_vol->{PRINT} (gen=$src_vol->{node}{gen})",
2015-05-26 20:05:40 +02:00
"",
2016-03-14 16:39:13 +01:00
"This will show all files modified within generation range: [$lastgen..$target_vol->{node}{gen}]",
2015-05-26 20:05:40 +02:00
"Newest file generation (transid marker) was: $ret->{transid_marker}",
($ret->{parse_errors} ? "Parse errors: $ret->{parse_errors}" : undef),
],
legend => [
"+.. file accessed at offset 0 (at least once)",
".c. flags COMPRESS or COMPRESS|INLINE set (at least once)",
"..i flags INLINE or COMPRESS|INLINE set (at least once)",
"<count> file was modified in <count> generations",
"<size> file was modified for a total of <size> bytes",
]
);
2015-01-03 21:25:46 +01:00
my $files = $ret->{files};
# calculate the character offsets
2016-03-01 21:29:19 +01:00
my $total_len = 0;
2015-01-03 21:25:46 +01:00
my $len_charlen = 0;
my $gen_charlen = 0;
foreach (values %$files) {
my $len = length($_->{len});
2015-02-10 13:31:43 +01:00
my $gen = length(scalar(keys(%{$_->{gen}})));
2015-01-03 21:25:46 +01:00
$len_charlen = $len if($len > $len_charlen);
$gen_charlen = $gen if($gen > $gen_charlen);
2016-03-01 21:29:19 +01:00
$total_len += $_->{len};
2015-01-03 21:25:46 +01:00
}
# finally print the output
foreach my $name (sort keys %$files) {
print ($files->{$name}->{new} ? '+' : '.');
print ($files->{$name}->{flags}->{compress} ? 'c' : '.');
print ($files->{$name}->{flags}->{inline} ? 'i' : '.');
# make nice table
2015-02-10 13:31:43 +01:00
my $gens = scalar(keys(%{$files->{$name}->{gen}}));
2015-01-03 21:25:46 +01:00
my $len = $files->{$name}->{len};
print " " . (' ' x ($gen_charlen - length($gens))) . $gens;
print " " . (' ' x ($len_charlen - length($len))) . $len;
print " $name\n";
}
2016-03-01 21:29:19 +01:00
print "\nTotal size: $total_len bytes\n";
2014-12-14 21:29:22 +01:00
exit 0;
}
2019-07-28 18:54:37 +02:00
if($action_ls)
{
#
# print accessible subvolumes for local path
#
my $path = $dir_args[0] || die;
my $root_vol = vinfo($path, $config);
# map url to real path (we need to match against mount points below)
my $root_path = system_realpath($root_vol);
unless($root_path) {
ERROR "Cannot find real path for: $root_vol->{PATH}";
exit 1;
}
$root_vol = vinfo($root_path, $config);
$root_path .= '/' unless($root_path =~ /\/$/); # append trailing slash
my $mountinfo = system_list_mountinfo($root_vol) || die;
$mountinfo_cache{$root_vol->{MACHINE_ID}} = $mountinfo;
my @data;
my @path_hidden;
foreach my $mnt (reverse @$mountinfo) {
my $mnt_path = $mnt->{mount_point};
$mnt_path .= '/' unless($mnt_path =~ /\/$/); # append trailing slash
if(($mnt->{fs_type} eq "btrfs") &&
(($root_path =~ /^\Q$mnt_path\E/) || ($mnt_path =~ /^\Q$root_path\E/)))
{
$realpath_cache{$mnt->{mount_point}} = $mnt->{mount_point}; # we know those are real paths, prevents calling readlink in btrfs_mountpoint
my $vol = vinfo($mnt->{mount_point}, $config);
unless(vinfo_init_root($vol)) {
ERROR "Failed to fetch subvolume detail for: $vol->{PRINT}" . ($err ? ": $err" : "");
exit 1;
}
my $subvol_list = vinfo_subvol_list($vol);
foreach my $svol ($vol, @$subvol_list) {
my $svol_path = $svol->{PATH};
$svol_path .= '/' unless($svol_path =~ /\/$/); # append trailing slash
next unless($svol_path =~ /^\Q$root_path\E/);
if(grep { $svol_path =~ /^\Q$_\E/ } @path_hidden) {
DEBUG "subvolume is hidden by another mount point: $svol->{PRINT}";
next;
}
push @data, {
%{$svol->{node}}, # copy node
mount_point => $svol->{VINFO_MOUNTPOINT}{PATH},
mount_source => $svol->{node}{TREE_ROOT}{host_mount_source},
mount_subvolid => $mnt->{MNTOPS}{subvolid},
2019-08-02 16:47:56 +02:00
mount_subvol => $mnt->{MNTOPS}{subvol},
2019-07-28 18:54:37 +02:00
subvolume_path => $svol->{node}{path},
subvolume_rel_path => $svol->{node}{REL_PATH},
path => $svol->{PATH},
flags => ($svol->{node}{readonly} ? "readonly" : undef),
};
}
}
last if($root_path =~ /^\Q$mnt_path\E/);
push @path_hidden, ($mnt->{mount_point} . '/');
}
my @sorted = sort { $a->{path} cmp $b->{path} } @data;
if($output_format) {
print_formatted("fs_list", \@sorted);
} else {
print join("\n", map { $_->{path} } @sorted) . "\n";
}
exit 0;
}
2015-01-20 19:18:38 +01:00
2016-06-07 16:17:02 +02:00
#
# try exclusive lock if set in config or command-line option
#
$lockfile //= config_key($config, "lockfile");
if(defined($lockfile) && (not $dryrun)) {
unless(open(LOCKFILE, ">>$lockfile")) {
# NOTE: the lockfile is never deleted by design
ERROR "Failed to open lock file '$lockfile': $!";
exit 3;
}
unless(flock(LOCKFILE, 6)) { # exclusive, non-blocking (LOCK_EX | LOCK_NB)
ERROR "Failed to take lock (another btrbk instance is running): $lockfile";
exit 3;
}
}
2015-01-20 19:18:38 +01:00
2016-04-16 01:09:17 +02:00
if($action_archive)
2016-04-07 15:33:32 +02:00
{
#
2016-04-16 01:09:17 +02:00
# archive (clone) tree
2016-04-07 15:33:32 +02:00
#
# NOTE: This is intended to work without a config file! The only
# thing used from the configuration is the SSH and transaction log
# stuff.
#
btrbk: add transaction logging to syslog
Add configuration option transaction_syslog, which can be set to a short
name of a syslog facility, like user or local5. Most of the ones besides
localX do not really make sense, but whatever, let the user decide.
The only logging that is relevant for logging to syslog is the logging
generated inside sub action, so it's easy to hijack all messages in
there and also send them to syslog if needed.
All output is done via print_formatted, which expects a file handle.
So, abuse a file handle to a string to be able to change as less code as
needed for this feature.
Since syslog already adds the timestamps for us, I added a syslog
formatting pattern, which is very similar to tlog, omitting the
timestap.
2016-04-22 23:11:00 +02:00
init_transaction_log(config_key($config, "transaction_log"),
config_key($config, "transaction_syslog"));
2016-04-07 15:33:32 +02:00
2019-04-17 15:11:49 +02:00
my $src_url = $subvol_args[0] || die;
my $archive_url = $subvol_args[1] || die;
2016-04-07 15:33:32 +02:00
# FIXME: add command line options for preserve logic
$config->{SUBSECTION} = []; # clear configured subsections, we build them dynamically
2016-04-16 01:09:17 +02:00
my $src_root = vinfo($src_url, $config);
2019-04-24 23:46:44 +02:00
unless(vinfo_init_root($src_root)) {
2016-04-16 01:09:17 +02:00
ERROR "Failed to fetch subvolume detail for '$src_root->{PRINT}'" . ($err ? ": $err" : "");
2016-04-07 15:33:32 +02:00
exit 1;
}
2016-04-16 17:13:19 +02:00
my $archive_root = vinfo($archive_url, $config);
2019-08-04 22:48:50 +02:00
unless($archive_raw ? vinfo_init_raw_root($archive_root) : vinfo_init_root($archive_root)) {
ERROR "Failed to fetch " . ($archive_raw ? "raw target metadata" : "subvolume detail") . " for '$archive_root->{PRINT}'" . ($err ? ": $err" : "");
2016-04-07 15:33:32 +02:00
exit 1;
}
my %name_uniq;
2016-04-16 01:09:17 +02:00
my @subvol_list = @{vinfo_subvol_list($src_root)};
2016-04-07 15:33:32 +02:00
my @sorted = sort { ($a->{subtree_depth} <=> $b->{subtree_depth}) || ($a->{SUBVOL_DIR} cmp $b->{SUBVOL_DIR}) } @subvol_list;
foreach my $vol (@sorted) {
next unless($vol->{node}{readonly});
2016-04-19 13:06:31 +02:00
my $snapshot_name = $vol->{node}{BTRBK_BASENAME};
2016-04-07 15:33:32 +02:00
unless(defined($snapshot_name)) {
WARN "Skipping subvolume (not a btrbk subvolume): $vol->{PRINT}";
next;
}
my $subvol_dir = $vol->{SUBVOL_DIR};
next if($name_uniq{"$subvol_dir/$snapshot_name"});
$name_uniq{"$subvol_dir/$snapshot_name"} = 1;
2016-04-16 17:13:19 +02:00
my $droot_url = $archive_url . ($subvol_dir eq "" ? "" : "/$subvol_dir");
2016-04-14 18:46:35 +02:00
my $sroot_url = $src_url . ($subvol_dir eq "" ? "" : "/$subvol_dir");
2016-04-16 01:09:17 +02:00
my $config_sroot = { CONTEXT => "archive_source",
PARENT => $config,
url => $sroot_url, # ABORTED() needs this
snapshot_name => $snapshot_name,
};
2019-04-18 16:40:25 +02:00
my $config_droot = { CONTEXT => "archive_target",
2016-04-16 01:09:17 +02:00
PARENT => $config_sroot,
2019-08-04 22:48:50 +02:00
target_type => ($archive_raw ? "raw" : "send-receive"), # macro_send_receive checks this
2016-04-16 01:09:17 +02:00
url => $droot_url, # ABORTED() needs this
};
$config_sroot->{SUBSECTION} = [ $config_droot ];
push(@{$config->{SUBSECTION}}, $config_sroot);
my $sroot = vinfo($sroot_url, $config_sroot);
2016-05-10 15:51:44 +02:00
vinfo_assign_config($sroot);
2019-04-24 23:46:44 +02:00
unless(vinfo_init_root($sroot)) {
2016-04-07 15:33:32 +02:00
ABORTED($sroot, "Failed to fetch subvolume detail" . ($err ? ": $err" : ""));
2019-04-17 15:20:18 +02:00
WARN "Skipping archive source \"$sroot->{PRINT}\": " . ABORTED_TEXT($sroot);
2016-04-07 15:33:32 +02:00
next;
}
2016-04-16 01:09:17 +02:00
my $droot = vinfo($droot_url, $config_droot);
2016-05-10 15:51:44 +02:00
vinfo_assign_config($droot);
2019-08-04 22:48:50 +02:00
unless($archive_raw ? vinfo_init_raw_root($droot) : vinfo_init_root($droot)) {
DEBUG "Failed to fetch " . ($archive_raw ? "raw target metadata" : "subvolume detail") . " for '$droot->{PRINT}'" . ($err ? ": $err" : "");
2016-04-14 18:46:35 +02:00
unless(system_mkdir($droot)) {
ABORTED($droot, "Failed to create directory: $droot->{PRINT}/");
2019-04-17 15:20:18 +02:00
WARN "Skipping archive target \"$droot->{PRINT}\": " . ABORTED_TEXT($droot);
2016-04-14 18:46:35 +02:00
next;
}
2016-04-16 17:13:19 +02:00
$droot->{SUBDIR_CREATED} = 1;
2016-04-14 18:46:35 +02:00
if($dryrun) {
# we need to fake this directory on dryrun
2016-04-16 17:13:19 +02:00
$droot->{node} = $archive_root->{node};
2016-04-14 18:46:35 +02:00
$droot->{NODE_SUBDIR} = $subvol_dir;
2019-04-18 17:24:38 +02:00
$droot->{VINFO_MOUNTPOINT} = $archive_root->{VINFO_MOUNTPOINT};
2016-04-14 18:46:35 +02:00
}
else {
# after directory is created, try to init again
2019-08-04 22:48:50 +02:00
unless($archive_raw ? vinfo_init_raw_root($droot) : vinfo_init_root($droot)) {
2016-04-14 18:46:35 +02:00
ABORTED($droot, "Failed to fetch subvolume detail" . ($err ? ": $err" : ""));
2019-04-17 15:20:18 +02:00
WARN "Skipping archive target \"$droot->{PRINT}\": " . ABORTED_TEXT($droot);
2016-04-14 18:46:35 +02:00
next;
}
}
2016-04-07 15:33:32 +02:00
}
2018-02-15 02:57:07 +01:00
if(_is_same_fs_tree($droot->{node}, $vol->{node})) {
2016-04-07 15:33:32 +02:00
ERROR "Source and target subvolumes are on the same btrfs filesystem!";
exit 1;
}
}
2019-04-17 16:10:15 +02:00
# translate archive_exclude globs, add to exclude args
my $archive_exclude = config_key($config, 'archive_exclude') // [];
push @exclude_vf, map(vinfo_filter_statement($_), (@$archive_exclude));
2018-02-13 21:36:21 +01:00
# create archives
2016-04-14 15:39:50 +02:00
my $schedule_results = [];
2016-04-19 18:53:44 +02:00
my $aborted;
2016-04-16 01:09:17 +02:00
foreach my $sroot (vinfo_subsection($config, 'archive_source')) {
2016-04-19 18:53:44 +02:00
if($aborted) {
# abort all subsequent sources on any abort (we don't want to go on hammering on "disk full" errors)
ABORTED($sroot, $aborted);
next;
}
2018-02-13 21:36:21 +01:00
my $snapshot_name = config_key($sroot, "snapshot_name") // die;
2019-04-17 16:10:15 +02:00
# skip on archive_exclude and --exclude option
if(vinfo_match(\@exclude_vf, $sroot) ||
vinfo_match(\@exclude_vf, vinfo_child($sroot, $snapshot_name)))
{
2019-08-04 21:37:17 +02:00
ABORTED($sroot, "skip_archive_exclude", "Match on exclude pattern");
2019-04-17 16:10:15 +02:00
INFO "Skipping archive subvolumes \"$sroot->{PRINT}/${snapshot_name}.*\": " . ABORTED_TEXT($sroot);
next;
2018-02-13 21:36:21 +01:00
}
2019-04-18 16:40:25 +02:00
foreach my $droot (vinfo_subsection($sroot, 'archive_target')) {
2016-04-07 15:33:32 +02:00
INFO "Archiving subvolumes: $sroot->{PRINT}/${snapshot_name}.*";
2016-04-16 01:09:17 +02:00
macro_archive_target($sroot, $droot, $snapshot_name, { results => $schedule_results });
2019-04-17 15:20:18 +02:00
if(IS_ABORTED($droot)) {
2016-04-16 00:45:16 +02:00
# also abort $sroot
2016-04-19 18:53:44 +02:00
$aborted = "At least one target aborted earlier";
ABORTED($sroot, $aborted);
2019-04-17 15:20:18 +02:00
WARN "Skipping archiving of \"$sroot->{PRINT}/\": " . ABORTED_TEXT($sroot);
2016-04-16 00:45:16 +02:00
last;
}
2016-04-07 15:33:32 +02:00
}
}
2018-02-13 21:36:21 +01:00
# delete archives
2017-08-21 13:23:20 +02:00
# Scheduling results of the deletion runs below; stays undef when deletion is
# skipped (preserve flags set), so the summary code can tell "skipped" apart
# from "nothing deleted".
my $del_schedule_results;
if($preserve_backups) {
2016-04-19 18:53:44 +02:00
INFO "Preserving all archives (option \"-p\" or \"-r\" present)";
}
else
{
2017-08-21 13:23:20 +02:00
$del_schedule_results = [];
2016-04-19 18:53:44 +02:00
# For every archive source/target pair, delete expired archives according
# to the "archive" preserve matrix of the target.
foreach my $sroot (vinfo_subsection($config, 'archive_source')) {
2018-02-13 21:36:21 +01:00
my $snapshot_name = config_key($sroot, "snapshot_name") // die;
2019-04-18 16:40:25 +02:00
foreach my $droot (vinfo_subsection($sroot, 'archive_target')) {
2016-04-19 18:53:44 +02:00
INFO "Cleaning archive: $droot->{PRINT}/${snapshot_name}.*";
macro_delete($droot, "", $snapshot_name, $droot,
{ preserve => config_preserve_hash($droot, "archive"),
results => $del_schedule_results,
result_hints => { topic => "archive", root_path => $droot->{PATH} },
},
commit => config_key($droot, "btrfs_commit_delete"),
type => "delete_archive",
2017-10-02 14:00:09 +02:00
qgroup => { destroy => config_key($droot, "archive_qgroup_destroy"),
type => "qgroup_destroy_archive" },
2016-04-19 18:53:44 +02:00
);
}
}
}
2016-04-07 15:33:32 +02:00
# Final accounting for the archive run: compute exit status, log duration,
# record the "finished" action, then print schedules and a summary.
my $exit_status = exit_status($config);
my $time_elapsed = time - $start_time;
INFO "Completed within: ${time_elapsed}s (" . localtime(time) . ")";
action("finished",
status => $exit_status ? "partial" : "success",
duration => $time_elapsed,
message => $exit_status ? "At least one backup task aborted" : undef,
);
close_transaction_log();
unless($quiet)
{
2016-04-14 15:39:50 +02:00
# print scheduling results
if($print_schedule) {
my @data = map { { %$_, vinfo_prefixed_keys("", $_->{value}) }; } @$schedule_results;
2018-10-31 14:33:07 +01:00
print_formatted("schedule", \@data, title => "ARCHIVE SCHEDULE", paragraph => 1);
2016-04-14 15:39:50 +02:00
}
2017-08-21 13:23:20 +02:00
# $del_schedule_results is undef when deletion was skipped (preserve flags)
if($print_schedule && $del_schedule_results) {
2016-04-19 18:53:44 +02:00
my @data = map { { %$_, vinfo_prefixed_keys("", $_->{value}) }; } @$del_schedule_results;
2018-10-31 14:33:07 +01:00
print_formatted("schedule", \@data, title => "DELETE SCHEDULE", paragraph => 1);
2016-04-19 18:53:44 +02:00
}
2016-04-07 15:33:32 +02:00
# print summary
$output_format ||= "custom";
if($output_format eq "custom")
{
my @out;
2016-04-19 18:53:44 +02:00
foreach my $sroot (vinfo_subsection($config, 'archive_source', 1)) {
2019-04-18 16:40:25 +02:00
foreach my $droot (vinfo_subsection($sroot, 'archive_target', 1)) {
2016-04-07 15:33:32 +02:00
my @subvol_out;
2016-04-16 17:13:19 +02:00
if($droot->{SUBDIR_CREATED}) {
push @subvol_out, "++. $droot->{PRINT}/";
}
2016-04-07 15:33:32 +02:00
# create_mode markers: "***" non-incremental, ">>>" incremental, "!!!" error
foreach(@{$droot->{SUBVOL_RECEIVED} // []}) {
my $create_mode = "***";
$create_mode = ">>>" if($_->{parent});
$create_mode = "!!!" if($_->{ERROR});
push @subvol_out, "$create_mode $_->{received_subvolume}->{PRINT}";
}
2017-09-27 20:41:51 +02:00
foreach(@{$droot->{SUBVOL_DELETED} // []}) {
2016-04-19 18:53:44 +02:00
push @subvol_out, "--- $_->{PRINT}";
}
2019-04-17 15:20:18 +02:00
if(IS_ABORTED($droot, "abort_") || IS_ABORTED($sroot, "abort_")) {
push @subvol_out, "!!! Target \"$droot->{PRINT}\" aborted: " . (ABORTED_TEXT($droot) || ABORTED_TEXT($sroot));
}
elsif(IS_ABORTED($sroot, "skip_archive_exclude")) {
push @subvol_out, "<archive_exclude>";
2016-04-07 15:33:32 +02:00
}
2018-02-14 00:12:36 +01:00
# always print at least one line per target
unless(@subvol_out) {
push @subvol_out, "[-] $droot->{PRINT}/$sroot->{CONFIG}->{snapshot_name}.*";
2016-04-07 15:33:32 +02:00
}
2018-02-14 00:12:36 +01:00
push @out, "$sroot->{PRINT}/$sroot->{CONFIG}->{snapshot_name}.*", @subvol_out, "";
2016-04-07 15:33:32 +02:00
}
}
2019-04-17 16:10:15 +02:00
my @cmdline_options = map { "exclude: $_" } @exclude_cmdline;
2017-08-21 13:23:20 +02:00
push @cmdline_options, "preserve: Preserved all archives" if($preserve_backups);
2016-04-16 01:09:17 +02:00
print_header(title => "Archive Summary",
2016-04-07 15:33:32 +02:00
time => $start_time,
2017-08-21 13:23:20 +02:00
options => \@cmdline_options,
2016-04-07 15:33:32 +02:00
legend => [
2016-04-16 17:13:19 +02:00
"++. created directory",
2016-04-19 18:53:44 +02:00
"--- deleted subvolume",
2016-04-07 15:33:32 +02:00
"*** received subvolume (non-incremental)",
">>> received subvolume (incremental)",
2018-02-14 00:12:36 +01:00
"[-] no action",
2016-04-07 15:33:32 +02:00
],
);
print join("\n", @out);
2019-07-15 18:19:33 +02:00
print_footer($config, $exit_status);
2016-04-07 15:33:32 +02:00
}
else
{
# print action log (without transaction start messages)
2017-09-27 19:35:43 +02:00
my @data = grep { $_->{status} !~ /starting$/ } @transaction_log;
2016-04-07 15:33:32 +02:00
print_formatted("transaction", \@data, title => "TRANSACTION LOG");
}
}
exit $exit_status;
}
2016-03-16 13:25:19 +01:00
#
# expand subvolume globs (wildcards)
#
# Replaces each "subvolume" config entry containing glob characters with one
# concrete entry per matching subvolume found on the volume (read-write and
# unambiguous paths only). Entries without GLOB_CONTEXT pass through as-is.
foreach my $config_vol (@{$config->{SUBSECTION}}) {
die unless($config_vol->{CONTEXT} eq "volume");
# read-in subvolume list (and expand globs) only if needed
next unless(grep defined($_->{GLOB_CONTEXT}), @{$config_vol->{SUBSECTION}});
my $sroot = vinfo($config_vol->{url}, $config_vol);
unless(vinfo_init_root($sroot)) {
ABORTED($sroot, "Failed to fetch subvolume detail" . ($err ? ": $err" : ""));
2019-04-17 15:20:18 +02:00
WARN "Skipping volume \"$sroot->{PRINT}\": " . ABORTED_TEXT($sroot);
2016-03-16 13:25:19 +01:00
next;
}
my @vol_subsection_expanded;
foreach my $config_subvol (@{$config_vol->{SUBSECTION}}) {
die unless($config_subvol->{CONTEXT} eq "subvolume");
if($config_subvol->{GLOB_CONTEXT}) {
my $globs = $config_subvol->{rel_path};
INFO "Expanding wildcards: $sroot->{PRINT}/$globs";
# support "*some*file*", "*/*"
# NOTE: "*" maps to "[^/]*", i.e. a wildcard never crosses a "/" boundary
my $match = join('[^\/]*', map(quotemeta($_), split(/\*+/, $globs, -1)));
TRACE "translated globs \"$globs\" to regex \"$match\"";
my $expand_count = 0;
2016-04-03 20:46:29 +02:00
foreach my $vol (@{vinfo_subvol_list($sroot, sort => 'path')})
2016-03-16 13:25:19 +01:00
{
# skip read-only subvolumes (e.g. snapshots created by btrbk itself)
if($vol->{node}{readonly}) {
TRACE "skipping readonly subvolume: $vol->{PRINT}";
next;
}
unless($vol->{SUBVOL_PATH} =~ /^$match$/) {
TRACE "skipping non-matching subvolume: $vol->{PRINT}";
next;
}
2017-09-11 18:49:14 +02:00
unless(defined(check_file($vol->{SUBVOL_PATH}, { relative => 1 }))) {
WARN "Ambiguous subvolume path \"$vol->{SUBVOL_PATH}\" while expanding \"$globs\", ignoring";
next;
}
2016-03-16 13:25:19 +01:00
INFO "Found source subvolume: $vol->{PRINT}";
# clone the glob entry, fixing it to this concrete subvolume
my %conf = ( %$config_subvol,
rel_path_glob => $globs,
rel_path => $vol->{SUBVOL_PATH},
url => $vol->{URL},
snapshot_name => $vol->{NAME}, # snapshot_name defaults to subvolume name
);
# deep copy of target subsection
my @subsection_copy = map { { %$_, PARENT => \%conf }; } @{$config_subvol->{SUBSECTION}};
$conf{SUBSECTION} = \@subsection_copy;
push @vol_subsection_expanded, \%conf;
$expand_count += 1;
}
unless($expand_count) {
WARN "No subvolumes found matching: $sroot->{PRINT}/$globs";
}
}
else {
push @vol_subsection_expanded, $config_subvol;
}
}
$config_vol->{SUBSECTION} = \@vol_subsection_expanded;
}
2016-04-18 16:40:49 +02:00
TRACE(Data::Dumper->Dump([$config], ["config"])) if($do_dumper);
2016-03-16 13:25:19 +01:00
2016-03-07 17:35:17 +01:00
#
# create vinfo nodes (no readin yet)
#
2016-03-07 17:46:53 +01:00
# Build vinfo objects for every volume / subvolume / target declared in the
# configuration and attach config back-references. No btrfs tree is read yet.
foreach my $config_vol (@{$config->{SUBSECTION}}) {
die unless($config_vol->{CONTEXT} eq "volume");
2016-03-07 17:35:17 +01:00
my $sroot = vinfo($config_vol->{url}, $config_vol);
2016-05-10 15:51:44 +02:00
vinfo_assign_config($sroot);
2016-03-07 17:46:53 +01:00
foreach my $config_subvol (@{$config_vol->{SUBSECTION}}) {
die unless($config_subvol->{CONTEXT} eq "subvolume");
2016-05-10 15:51:44 +02:00
my $svol = vinfo_child($sroot, $config_subvol->{rel_path}, $config_subvol);
2018-02-15 00:17:01 +01:00
# TODO: add config option "snapshot_path", reuse snaproot with same URL
# snapshot_dir is resolved relative to the volume URL (prefixed with '/')
my $snapshot_dir = config_key($svol, "snapshot_dir", prefix => '/') // "";
my $snaproot = vinfo($config_vol->{url} . $snapshot_dir, $config_subvol);
vinfo_assign_config($svol, $snaproot);
2016-03-07 17:46:53 +01:00
foreach my $config_target (@{$config_subvol->{SUBSECTION}}) {
die unless($config_target->{CONTEXT} eq "target");
2016-03-07 17:35:17 +01:00
my $droot = vinfo($config_target->{url}, $config_target);
2016-05-10 15:51:44 +02:00
vinfo_assign_config($droot);
2016-03-07 17:35:17 +01:00
}
}
}
2015-05-27 15:00:25 +02:00
#
2019-04-17 15:43:08 +02:00
# filter subvolumes matching command line arguments, handle noauto option
#
2019-04-17 15:56:35 +02:00
# With filter args: abort (skip) every volume/subvolume/target section that
# does not match any filter; a match on a volume keeps all its children, a
# match on a subvolume keeps all its targets. Exits with status 2 if any
# filter argument matched nothing at all.
if(scalar @filter_vf)
2015-05-27 15:00:25 +02:00
{
2016-03-07 21:54:51 +01:00
foreach my $sroot (vinfo_subsection($config, 'volume', 1)) {
2015-09-20 14:25:20 +02:00
my $found_vol = 0;
2019-04-17 15:56:35 +02:00
# volume-level match keeps the whole volume (and marks the filter as used)
if(vinfo_match(\@filter_vf, $sroot, flag_matched => '_matched')) {
next;
2015-05-27 15:00:25 +02:00
}
2016-03-07 21:54:51 +01:00
foreach my $svol (vinfo_subsection($sroot, 'subvolume', 1)) {
2015-09-20 14:25:20 +02:00
my $found_subvol = 0;
2019-04-17 15:56:35 +02:00
my $snaproot = vinfo_snapshot_root($svol);
2016-03-07 21:54:51 +01:00
my $snapshot_name = config_key($svol, "snapshot_name") // die;
2019-04-17 15:56:35 +02:00
# subvolume matches either by its own path or by its snapshot location
if(vinfo_match(\@filter_vf, $svol, flag_matched => '_matched') ||
vinfo_match(\@filter_vf, vinfo_child($snaproot, $snapshot_name), flag_matched => '_matched'))
{
$found_vol = 1;
next;
}
2016-03-07 21:54:51 +01:00
foreach my $droot (vinfo_subsection($svol, 'target', 1)) {
2019-04-17 15:56:35 +02:00
if(vinfo_match(\@filter_vf, $droot, flag_matched => '_matched') ||
vinfo_match(\@filter_vf, vinfo_child($droot, $snapshot_name), flag_matched => '_matched'))
{
$found_subvol = 1;
$found_vol = 1;
2015-09-20 14:25:20 +02:00
}
2019-04-17 15:56:35 +02:00
else {
2019-04-17 15:20:18 +02:00
ABORTED($droot, "skip_cmdline_filter", "No match on filter command line argument");
DEBUG "Skipping target \"$droot->{PRINT}\": " . ABORTED_TEXT($droot);
2015-09-20 14:25:20 +02:00
}
}
unless($found_subvol) {
2019-04-17 15:20:18 +02:00
ABORTED($svol, "skip_cmdline_filter", "No match on filter command line argument");
DEBUG "Skipping subvolume \"$svol->{PRINT}\": " . ABORTED_TEXT($svol);
2015-05-27 15:00:25 +02:00
}
}
2015-09-20 14:25:20 +02:00
unless($found_vol) {
2019-04-17 15:20:18 +02:00
ABORTED($sroot, "skip_cmdline_filter", "No match on filter command line argument");
DEBUG "Skipping volume \"$sroot->{PRINT}\": " . ABORTED_TEXT($sroot);
2015-05-27 15:00:25 +02:00
}
}
# make sure all args have a match
2019-04-17 15:56:35 +02:00
my @nomatch = map { $_->{_matched} ? () : $_->{unparsed} } @filter_vf;
2015-05-27 15:00:25 +02:00
if(@nomatch) {
foreach(@nomatch) {
2019-04-17 15:56:35 +02:00
ERROR "Filter argument \"$_\" does not match any volume, subvolume, target or group declaration";
2015-05-27 15:00:25 +02:00
}
2015-09-30 14:00:39 +02:00
exit 2;
2015-05-27 15:00:25 +02:00
}
2019-04-17 15:56:35 +02:00
$config->{CMDLINE_FILTER_LIST} = [ map { $_->{unparsed} } @filter_vf ];
2015-05-27 15:00:25 +02:00
}
2019-04-17 15:43:08 +02:00
elsif(not $action_config_print)
{
# no filter_args present, abort "noauto" contexts
if(config_key($config, "noauto")) {
WARN "Option \"noauto\" is set in root context, and no filter argument present, exiting";
exit 0;
}
foreach my $sroot (vinfo_subsection($config, 'volume')) {
if(config_key($sroot, "noauto")) {
ABORTED($sroot, "skip_noauto", 'option "noauto" is set');
DEBUG "Skipping volume \"$sroot->{PRINT}\": " . ABORTED_TEXT($sroot);
next;
}
foreach my $svol (vinfo_subsection($sroot, 'subvolume')) {
if(config_key($svol, "noauto")) {
ABORTED($svol, "skip_noauto", 'option "noauto" is set');
DEBUG "Skipping subvolume \"$svol->{PRINT}\": " . ABORTED_TEXT($svol);
next;
}
foreach my $droot (vinfo_subsection($svol, 'target')) {
if(config_key($droot, "noauto")) {
ABORTED($droot, "skip_noauto", 'option "noauto" is set');
DEBUG "Skipping target \"$droot->{PRINT}\": " . ABORTED_TEXT($droot);
}
}
}
}
}
2015-05-27 15:00:25 +02:00
2019-04-17 16:10:15 +02:00
# Abort (skip) every section matching an "--exclude" pattern; a volume whose
# subvolumes are all excluded is excluded as a whole.
if(scalar @exclude_vf)
{
# handle --exclude command line option
foreach my $sroot (vinfo_subsection($config, 'volume')) {
if(my $ff = vinfo_match(\@exclude_vf, $sroot)) {
ABORTED($sroot, "skip_cmdline_exclude", "command line argument \"--exclude=$ff->{unparsed}\"");
DEBUG "Skipping volume \"$sroot->{PRINT}\": " . ABORTED_TEXT($sroot);
next;
}
my $all_svol_aborted = 1;
foreach my $svol (vinfo_subsection($sroot, 'subvolume')) {
my $snaproot = vinfo_snapshot_root($svol);
my $snapshot_name = config_key($svol, "snapshot_name") // die;
# a subvolume is excluded by its own path or by its snapshot location
if(my $ff = (vinfo_match(\@exclude_vf, $svol) ||
vinfo_match(\@exclude_vf, vinfo_child($snaproot, $snapshot_name))))
{
ABORTED($svol, "skip_cmdline_exclude", "command line argument \"--exclude=$ff->{unparsed}\"");
DEBUG "Skipping subvolume \"$svol->{PRINT}\": " . ABORTED_TEXT($svol);
next;
}
$all_svol_aborted = 0;
foreach my $droot (vinfo_subsection($svol, 'target')) {
if(my $ff = (vinfo_match(\@exclude_vf, $droot) ||
vinfo_match(\@exclude_vf, vinfo_child($droot, $snapshot_name))))
{
ABORTED($droot, "skip_cmdline_exclude", "command line argument \"--exclude=$ff->{unparsed}\"");
DEBUG "Skipping target \"$droot->{PRINT}\": " . ABORTED_TEXT($droot);
next;
}
}
}
if($all_svol_aborted) {
ABORTED($sroot, "skip_cmdline_exclude", "All subvolumes excluded");
DEBUG "Skipping volume \"$sroot->{PRINT}\": " . ABORTED_TEXT($sroot);
}
}
}
2015-01-20 19:18:38 +01:00
2015-10-19 22:10:08 +02:00
if($action_usage)
{
  #
  # print filesystem usage information for all configured filesystems
  #
  # Each distinct URL is reported exactly once; the dedup set is shared
  # between the two passes below, so a target sharing a source's URL is
  # listed only once (as "source").
  my @data;
  my %processed;

  # helper: append one usage row for $vinfo unless its URL was seen already
  my $add_usage_row = sub {
    my ($vinfo, $type) = @_;
    return if($processed{$vinfo->{URL}});
    $processed{$vinfo->{URL}} = 1;
    my $usage = btrfs_filesystem_usage($vinfo) // {};
    push @data, { %$usage,
                  type => $type,
                  vinfo_prefixed_keys("", $vinfo),
                };
  };

  # first pass: source volumes
  $add_usage_row->($_, "source") foreach (vinfo_subsection($config, 'volume'));

  # second pass: all backup targets
  foreach my $sroot (vinfo_subsection($config, 'volume')) {
    foreach my $svol (vinfo_subsection($sroot, 'subvolume')) {
      $add_usage_row->($_, "target") foreach (vinfo_subsection($svol, 'target'));
    }
  }

  print_formatted("usage", \@data);
  exit exit_status($config);
}
2015-04-14 13:52:16 +02:00
2015-10-10 21:26:59 +02:00
if($action_config_print)
{
# "print-all" additionally resolves (expands) inherited/default option values
my $resolve = ($action_config_print eq "print-all");
#
# print configuration lines, machine readable
#
my @out;
2018-02-13 18:40:35 +01:00
push @out, config_dump_keys($config, resolve => $resolve);
2016-03-07 21:54:51 +01:00
foreach my $sroot (vinfo_subsection($config, 'volume')) {
2015-10-10 21:26:59 +02:00
push @out, "\nvolume $sroot->{URL}";
2016-03-07 21:54:51 +01:00
push @out, config_dump_keys($sroot, prefix => "\t", resolve => $resolve);
foreach my $svol (vinfo_subsection($sroot, 'subvolume')) {
2016-03-16 13:25:19 +01:00
push @out, ""; # newline
# if this entry resulted from glob expansion, print the original glob as comment
push @out, "\t# subvolume $svol->{CONFIG}->{rel_path_glob}" if(defined($svol->{CONFIG}->{rel_path_glob}));
push @out, "\tsubvolume $svol->{SUBVOL_PATH}";
2016-03-07 21:54:51 +01:00
push @out, config_dump_keys($svol, prefix => "\t\t", resolve => $resolve);
foreach my $droot (vinfo_subsection($svol, 'target')) {
push @out, "\n\t\ttarget $droot->{CONFIG}->{target_type} $droot->{URL}";
push @out, config_dump_keys($droot, prefix => "\t\t\t", resolve => $resolve);
2015-10-10 21:26:59 +02:00
}
}
}
print_header(title => "Configuration Dump",
config => $config,
2019-04-17 16:10:15 +02:00
options => [ map { "exclude: $_" } @exclude_cmdline ],
2015-10-10 21:26:59 +02:00
time => $start_time,
);
print join("\n", @out) . "\n";
2015-10-14 16:51:39 +02:00
exit exit_status($config);
2015-10-10 21:26:59 +02:00
}
2015-10-11 01:44:13 +02:00
2015-10-11 02:02:45 +02:00
if($action_list)
2015-09-24 13:51:15 +02:00
{
2015-10-11 19:01:59 +02:00
# Collect one row per volume / source subvolume / target; "mixed" holds
# targets plus target-less subvolumes and is used for the default format.
my @vol_data;
my @subvol_data;
my @target_data;
my @mixed_data;
2015-10-11 01:44:13 +02:00
my %target_uniq;
2015-09-24 13:51:15 +02:00
#
# print configuration lines, machine readable
#
2016-03-07 21:54:51 +01:00
foreach my $sroot (vinfo_subsection($config, 'volume')) {
2015-10-12 23:58:38 +02:00
my $volh = { vinfo_prefixed_keys("volume", $sroot) };
2015-10-11 19:01:59 +02:00
push @vol_data, $volh;
2015-09-24 13:51:15 +02:00
2016-03-07 21:54:51 +01:00
foreach my $svol (vinfo_subsection($sroot, 'subvolume')) {
2018-02-15 00:17:01 +01:00
my $snaproot = vinfo_snapshot_root($svol);
2015-10-11 01:44:13 +02:00
# subvolume rows inherit all keys of their volume row
my $subvolh = { %$volh,
2015-10-12 23:58:38 +02:00
vinfo_prefixed_keys("source", $svol),
2018-02-15 00:17:01 +01:00
snapshot_path => $snaproot->{PATH},
2016-03-07 21:54:51 +01:00
snapshot_name => config_key($svol, "snapshot_name"),
2016-03-08 18:22:58 +01:00
snapshot_preserve => format_preserve_matrix(config_preserve_hash($svol, "snapshot")),
2015-10-11 01:44:13 +02:00
};
2015-10-11 19:01:59 +02:00
push @subvol_data, $subvolh;
2015-09-24 13:51:15 +02:00
2015-10-11 19:01:59 +02:00
my $found = 0;
2016-03-07 21:54:51 +01:00
foreach my $droot (vinfo_subsection($svol, 'target')) {
2015-10-11 01:44:13 +02:00
# target rows inherit all keys of their subvolume row
my $targeth = { %$subvolh,
2015-10-12 23:58:38 +02:00
vinfo_prefixed_keys("target", $droot),
2016-03-08 18:22:58 +01:00
target_preserve => format_preserve_matrix(config_preserve_hash($droot, "target")),
2015-10-11 01:44:13 +02:00
};
2015-10-13 01:10:06 +02:00
# "list target" deduplicates targets by URL
if($action_list eq "target") {
2015-10-11 01:44:13 +02:00
next if($target_uniq{$droot->{URL}});
$target_uniq{$droot->{URL}} = 1;
2015-09-24 13:51:15 +02:00
}
2015-10-11 19:01:59 +02:00
push @target_data, $targeth;
push @mixed_data, $targeth;
$found = 1;
2015-09-24 13:51:15 +02:00
}
2015-10-11 19:01:59 +02:00
# make sure the subvol is always printed (even if no targets around)
push @mixed_data, $subvolh unless($found);
2015-09-24 13:51:15 +02:00
}
}
2015-10-12 14:59:02 +02:00
if($action_list eq "volume") {
2015-10-13 01:10:06 +02:00
print_formatted("list_volume", \@vol_data);
2015-10-12 14:59:02 +02:00
}
elsif($action_list eq "source") {
2015-10-13 01:10:06 +02:00
print_formatted("list_source", \@subvol_data);
2015-10-12 14:59:02 +02:00
}
elsif($action_list eq "target") {
2015-10-13 01:10:06 +02:00
print_formatted("list_target", \@target_data);
2015-10-12 14:59:02 +02:00
}
else {
# default format
2015-10-13 01:10:06 +02:00
print_formatted("list", \@mixed_data);
2015-10-12 14:59:02 +02:00
}
2015-10-14 16:51:39 +02:00
exit exit_status($config);
2015-09-24 13:51:15 +02:00
}
2015-04-14 13:52:16 +02:00
#
2015-04-19 11:36:40 +02:00
# fill vinfo hash, basic checks on configuration
2015-04-14 13:52:16 +02:00
#
2016-03-09 19:52:45 +01:00
# read volume btrfs tree, and make sure subvolume exist
# Sections failing any check below are flagged ABORTED and skipped by all
# subsequent processing (the run continues for the remaining sections).
2016-03-07 20:47:24 +01:00
foreach my $sroot (vinfo_subsection($config, 'volume')) {
2016-03-08 16:41:02 +01:00
DEBUG "Initializing volume section: $sroot->{PRINT}";
2016-03-09 19:52:45 +01:00
unless(vinfo_init_root($sroot)) {
2016-03-07 17:36:02 +01:00
ABORTED($sroot, "Failed to fetch subvolume detail" . ($err ? ": $err" : ""));
2019-04-17 15:20:18 +02:00
WARN "Skipping volume \"$sroot->{PRINT}\": " . ABORTED_TEXT($sroot);
2015-04-19 11:36:40 +02:00
next;
}
2016-03-07 20:47:24 +01:00
foreach my $svol (vinfo_subsection($sroot, 'subvolume')) {
2016-03-08 16:41:02 +01:00
DEBUG "Initializing subvolume section: $svol->{PRINT}";
2016-03-14 16:39:13 +01:00
unless(vinfo_init_root($svol)) {
ABORTED($svol, "Failed to fetch subvolume detail" . ($err ? ": $err" : ""));
2019-04-17 15:20:18 +02:00
WARN "Skipping subvolume \"$svol->{PRINT}\": " . ABORTED_TEXT($svol);
2017-07-29 19:03:23 +02:00
next;
}
2018-06-29 12:49:04 +02:00
# sanity: source must have a UUID (needed for parent/child correlation)
if((not $svol->{node}{uuid}) || ($svol->{node}{uuid} eq '-')) {
ABORTED($svol, "subvolume has no UUID");
2019-04-17 15:20:18 +02:00
ERROR "Skipping subvolume \"$svol->{PRINT}\": " . ABORTED_TEXT($svol);
2018-06-29 12:49:04 +02:00
next;
}
2017-07-29 19:03:23 +02:00
# sources must be read-write, and must not themselves be received backups
if($svol->{node}{readonly}) {
ABORTED($svol, "subvolume is readonly");
2019-04-17 15:20:18 +02:00
WARN "Skipping subvolume \"$svol->{PRINT}\": " . ABORTED_TEXT($svol);
2017-07-29 19:03:23 +02:00
next;
}
if($svol->{node}{received_uuid} ne '-') {
ABORTED($svol, "\"Received UUID\" is set");
2019-04-17 15:20:18 +02:00
WARN "Skipping subvolume \"$svol->{PRINT}\": " . ABORTED_TEXT($svol);
2016-03-14 16:39:13 +01:00
next;
2016-03-07 19:20:15 +01:00
}
2018-06-29 12:49:04 +02:00
# the subvolume must live inside (or be) the configured volume
if(_is_child_of($sroot->{node}, $svol->{node}{uuid}) ||
($svol->{node}{uuid} eq $sroot->{node}{uuid}))
2018-06-29 04:36:10 +02:00
{
DEBUG "Found \"$svol->{PRINT}\" (id=$svol->{node}{id}) in btrfs tree of: $sroot->{PRINT}";
2016-03-14 16:39:13 +01:00
} else {
ABORTED($svol, "Not a child subvolume of: $sroot->{PRINT}");
2019-04-17 15:20:18 +02:00
WARN "Skipping subvolume \"$svol->{PRINT}\": " . ABORTED_TEXT($svol);
2016-03-14 16:39:13 +01:00
next;
2015-04-19 11:36:40 +02:00
}
2018-02-15 00:17:01 +01:00
# snapshot destination must be reachable and on the same filesystem
my $snaproot = vinfo_snapshot_root($svol);
unless(vinfo_init_root($snaproot)) {
ABORTED($svol, "Failed to fetch subvolume detail for snapshot_dir" . ($err ? ": $err" : ""));
2019-04-17 15:20:18 +02:00
WARN "Skipping subvolume \"$svol->{PRINT}\": " . ABORTED_TEXT($svol);
2018-02-15 00:17:01 +01:00
next;
}
unless(_is_same_fs_tree($snaproot->{node}, $svol->{node})) {
ABORTED($svol, "Snapshot path is not on same filesystem");
2019-04-17 15:20:18 +02:00
WARN "Skipping subvolume \"$svol->{PRINT}\": " . ABORTED_TEXT($svol);
2018-02-15 00:17:01 +01:00
next;
}
2016-03-09 19:52:45 +01:00
}
}
2015-04-19 11:36:40 +02:00
2016-03-09 19:52:45 +01:00
# read target btrfs tree
2018-05-09 12:33:10 +02:00
if($action_run && $skip_backups && $preserve_snapshots && $preserve_backups) {
# if running "btrbk snapshot --preserve", there is no need to
# initialize targets, and we don't want to fail on missing targets.
DEBUG "Skipping target tree readin (preserving all snapshots and backups)";
}
else {
foreach my $sroot (vinfo_subsection($config, 'volume')) {
foreach my $svol (vinfo_subsection($sroot, 'subvolume')) {
foreach my $droot (vinfo_subsection($svol, 'target')) {
DEBUG "Initializing target section: $droot->{PRINT}";
my $target_type = $droot->{CONFIG}->{target_type} || die;
# "send-receive" targets are btrfs filesystems, "raw" targets hold
# send-streams as plain files with sidecar metadata
if($target_type eq "send-receive")
{
2019-04-24 23:46:44 +02:00
unless(vinfo_init_root($droot)) {
2018-05-09 12:33:10 +02:00
ABORTED($droot, "Failed to fetch subvolume detail" . ($err ? ": $err" : ""));
2019-04-17 15:20:18 +02:00
WARN "Skipping target \"$droot->{PRINT}\": " . ABORTED_TEXT($droot);
2018-05-09 12:33:10 +02:00
next;
}
2015-06-02 22:16:33 +02:00
}
2018-05-09 12:33:10 +02:00
elsif($target_type eq "raw")
{
unless(vinfo_init_raw_root($droot)) {
ABORTED($droot, "Failed to fetch raw target metadata" . ($err ? ": $err" : ""));
2019-04-17 15:20:18 +02:00
WARN "Skipping target \"$droot->{PRINT}\": " . ABORTED_TEXT($droot);
2018-05-09 12:33:10 +02:00
next;
}
2015-06-02 22:16:33 +02:00
}
2016-04-14 13:01:28 +02:00
2018-05-09 12:33:10 +02:00
# FAILSAFE_PRESERVE overrides everything: never touch targets
if($config_override{FAILSAFE_PRESERVE}) {
ABORTED($droot, $config_override{FAILSAFE_PRESERVE});
2019-04-17 15:20:18 +02:00
WARN "Skipping target \"$droot->{PRINT}\": " . ABORTED_TEXT($droot);
2018-05-09 12:33:10 +02:00
}
2016-04-14 13:01:28 +02:00
}
2016-03-06 17:46:46 +01:00
}
}
}
# check for duplicate snapshot locations
2016-03-07 19:20:15 +01:00
# Two subvolumes producing the same "<dir>/<snapshot_name>.*" pattern (either
# for snapshots or for backups on a target) would clobber each other; this is
# a configuration error, so bail out hard (exit 1) instead of flagging ABORTED.
foreach my $sroot (vinfo_subsection($config, 'volume')) {
foreach my $svol (vinfo_subsection($sroot, 'subvolume')) {
2016-03-06 17:46:46 +01:00
# check for duplicate snapshot locations
2018-02-15 00:17:01 +01:00
my $snaproot = vinfo_snapshot_root($svol);
2016-03-07 17:36:02 +01:00
my $snapshot_basename = config_key($svol, "snapshot_name") // die;
2019-04-01 00:33:02 +02:00
# key is relative to the filesystem node, so identical locations reached
# via different URLs still collide
my $snaproot_subdir_path = (defined($snaproot->{NODE_SUBDIR}) ? $snaproot->{NODE_SUBDIR} . '/' : "") . $snapshot_basename;
if(my $prev = $snaproot->{node}->{_SNAPSHOT_CHECK}->{$snaproot_subdir_path}) {
ERROR "Subvolume \"$prev\" and \"$svol->{PRINT}\" will create same snapshot: $snaproot->{PRINT}/${snapshot_basename}.*";
2016-03-06 17:46:46 +01:00
ERROR "Please fix \"snapshot_name\" configuration options!";
exit 1;
}
2019-04-01 00:33:02 +02:00
$snaproot->{node}->{_SNAPSHOT_CHECK}->{$snaproot_subdir_path} = $svol->{PRINT};
2016-03-06 17:46:46 +01:00
2016-03-07 19:20:15 +01:00
foreach my $droot (vinfo_subsection($svol, 'target')) {
2015-04-18 20:18:11 +02:00
# check for duplicate snapshot locations
2019-04-01 00:33:02 +02:00
my $droot_subdir_path = (defined($droot->{NODE_SUBDIR}) ? $droot->{NODE_SUBDIR} . '/' : "") . $snapshot_basename;
if(my $prev = $droot->{node}->{_BACKUP_CHECK}->{$droot_subdir_path}) {
ERROR "Subvolume \"$prev\" and \"$svol->{PRINT}\" will create same backup target: $droot->{PRINT}/${snapshot_basename}.*";
2015-04-20 18:19:55 +02:00
ERROR "Please fix \"snapshot_name\" or \"target\" configuration options!";
2015-04-18 20:18:11 +02:00
exit 1;
}
2019-04-01 00:33:02 +02:00
$droot->{node}->{_BACKUP_CHECK}->{$droot_subdir_path} = $svol->{PRINT};
2015-01-10 16:02:35 +01:00
}
2014-12-13 20:33:31 +01:00
}
2014-12-13 13:52:43 +01:00
}
2015-01-04 19:30:41 +01:00
2015-01-26 17:31:18 +01:00
if($action_origin)
{
  #
  # print origin information (parent / received-from tree) for a single
  # subvolume given as command line argument
  #
  my $url = $subvol_args[0] || die;
  my $vol = vinfo($url, $config);
  unless(vinfo_init_root($vol)) {
    ERROR "Failed to fetch subvolume detail for: $url" . ($err ? ": $err" : "");
    exit 1;
  }
  if($vol->{node}{is_root}) {
    # fix: removed stray trailing "\n" from the message (ERROR terminates
    # lines itself; every other ERROR call passes messages without "\n")
    ERROR "Subvolume is btrfs root: $url";
    exit 1;
  }

  # collect origin tree lines (one hash per line, key "tree" holds the text)
  my $lines = [];
  _origin_tree("", $vol->{node}, $lines);

  $output_format ||= "custom";
  if($output_format eq "custom") {
    print_header(title => "Origin Tree",
                 config => $config,
                 time => $start_time,
                 legend => [
                   "^-- : parent subvolume",
                   "newline : received-from relationship with subvolume (identical content)",
                 ]
                );
    print join("\n", map { $_->{tree} } @$lines) . "\n";
  }
  else {
    # machine readable formats (table/raw/...)
    print_formatted('origin_tree', $lines );
  }
  exit 0;
}
2015-10-14 17:02:25 +02:00
if($action_resolve)
2014-12-12 12:32:04 +01:00
{
2015-10-20 16:33:23 +02:00
my @data;
2016-01-15 02:06:03 +01:00
# accumulators for the "stats" sub-action
my @stats_data;
my $stats_snapshots_total = 0;
my $stats_backups_total = 0;
2017-06-25 21:16:08 +02:00
my $stats_backups_total_correlated = 0;
2016-01-15 02:06:03 +01:00
my $stats_backups_total_incomplete = 0;
my $stats_backups_total_orphaned = 0;
2015-10-20 16:33:23 +02:00
if($action_resolve eq "snapshots")
{
2015-10-14 17:02:25 +02:00
#
# print all snapshots and their receive targets
#
2016-03-07 21:54:51 +01:00
foreach my $sroot (vinfo_subsection($config, 'volume')) {
foreach my $svol (vinfo_subsection($sroot, 'subvolume')) {
my $snapshot_name = config_key($svol, "snapshot_name") // die;
2018-05-14 23:43:13 +02:00
# note: we list all snapshot children within $sroot here, not only the ones matching btrbk naming
2019-04-11 15:56:37 +02:00
# sorted by creation generation (cgen), i.e. oldest first
foreach my $snapshot (sort { $a->{node}{cgen} <=> $b->{node}{cgen} } get_related_snapshots($sroot, $svol)) {
2015-10-22 14:49:20 +02:00
my $snapshot_data = { type => "snapshot",
2016-03-14 16:39:13 +01:00
status => ($snapshot->{node}{cgen} == $svol->{node}{gen}) ? "up-to-date" : undef,
2015-10-22 14:49:20 +02:00
vinfo_prefixed_keys("source", $svol),
vinfo_prefixed_keys("snapshot", $snapshot),
snapshot_name => $snapshot_name,
};
2015-10-20 15:59:16 +02:00
my $found = 0;
2016-03-07 21:54:51 +01:00
# emit one row per receive target of this snapshot
foreach my $droot (vinfo_subsection($svol, 'target')) {
2015-10-14 17:02:25 +02:00
foreach (sort { $a->{SUBVOL_PATH} cmp $b->{SUBVOL_PATH} } get_receive_targets($droot, $snapshot)) {
2015-10-22 14:49:20 +02:00
push @data, { %$snapshot_data,
2015-10-20 15:59:16 +02:00
type => "received",
2019-04-01 16:27:39 +02:00
target_type => $_->{CONFIG}{target_type}, # "send-receive" or "raw"
2015-10-20 15:59:16 +02:00
vinfo_prefixed_keys("target", $_),
};
$found = 1;
2015-10-14 17:02:25 +02:00
}
}
2015-10-22 14:49:20 +02:00
# snapshot without any receive target: still print one row
push @data, $snapshot_data unless($found);
2015-10-14 17:02:25 +02:00
}
}
}
}
2016-01-15 02:06:03 +01:00
elsif(($action_resolve eq "backups") || ($action_resolve eq "stats"))
2015-10-20 16:33:23 +02:00
{
2015-10-14 17:02:25 +02:00
#
# print all targets and their corresponding source snapshots
#
2016-03-07 21:54:51 +01:00
foreach my $sroot (vinfo_subsection($config, 'volume')) {
foreach my $svol (vinfo_subsection($sroot, 'subvolume')) {
my $snapshot_name = config_key($svol, "snapshot_name") // die;
2018-05-14 23:43:13 +02:00
# note: we list all snapshot children within $sroot here, not only the ones matching btrbk naming
2019-04-11 15:56:37 +02:00
my @related_snapshots = get_related_snapshots($sroot, $svol);
2016-01-17 18:56:14 +01:00
my $stats_snapshot_uptodate = "";
2019-04-11 15:56:37 +02:00
foreach my $snapshot (@related_snapshots) {
2016-03-14 16:39:13 +01:00
if($snapshot->{node}{cgen} == $svol->{node}{gen}) {
2016-01-17 18:56:14 +01:00
$stats_snapshot_uptodate = " (up-to-date)";
last;
}
}
2019-04-11 15:56:37 +02:00
push @stats_data, [ $svol->{PRINT}, sprintf("%4u snapshots$stats_snapshot_uptodate", scalar(@related_snapshots)) ];
$stats_snapshots_total += scalar(@related_snapshots); # NOTE: this adds ALL related snaphots under $sroot (not only the ones created by btrbk!)
2016-01-17 18:56:14 +01:00
2016-03-07 21:54:51 +01:00
foreach my $droot (vinfo_subsection($svol, 'target')) {
2017-06-25 21:16:08 +02:00
my $stats_correlated = 0;
2016-01-15 02:06:03 +01:00
my $stats_orphaned = 0;
my $stats_incomplete = 0;
2017-06-25 20:17:25 +02:00
my $target_up_to_date = 0;
2016-04-03 20:46:29 +02:00
foreach my $target_vol (@{vinfo_subvol_list($droot, sort => 'path')}) {
2015-10-14 17:02:25 +02:00
my $parent_snapshot;
2016-01-13 15:32:22 +01:00
my $incomplete_backup;
2019-04-11 15:56:37 +02:00
foreach (@related_snapshots) {
2016-04-15 01:22:19 +02:00
if($target_vol->{node}{received_uuid} eq '-') {
# incomplete received (garbled) subvolumes have no received_uuid (as of btrfs-progs v4.3.1).
# a subvolume in droot matching our naming is considered incomplete if received_uuid is not set!
$parent_snapshot = undef;
$incomplete_backup = 1;
last;
}
2018-02-07 16:23:46 +01:00
if(_is_correlated($_->{node}, $target_vol->{node})) {
2016-04-15 01:22:19 +02:00
$parent_snapshot = $_;
last;
2015-10-14 17:02:25 +02:00
}
}
if($parent_snapshot) {
2017-06-25 21:16:08 +02:00
$stats_correlated++;
2017-06-25 20:17:25 +02:00
my $up_to_date = ($parent_snapshot->{node}{cgen} == $svol->{node}{gen});
2015-10-20 15:59:16 +02:00
push @data, { type => "received",
2019-04-01 16:27:39 +02:00
target_type => $_->{CONFIG}{target_type}, # "send-receive" or "raw"
2015-10-20 15:59:16 +02:00
vinfo_prefixed_keys("target", $target_vol),
vinfo_prefixed_keys("snapshot", $parent_snapshot),
vinfo_prefixed_keys("source", $svol),
2017-06-25 20:17:25 +02:00
status => $up_to_date ? "up-to-date" : undef,
2015-10-20 15:59:16 +02:00
};
2017-06-25 20:17:25 +02:00
$target_up_to_date ||= $up_to_date;
2015-10-14 17:02:25 +02:00
}
else {
2015-10-20 16:33:23 +02:00
# don't display all subvolumes in $droot, only the ones matching snapshot_name
2018-02-15 16:53:29 +01:00
if(vinfo_is_btrbk_snapshot($target_vol, $snapshot_name)) {
2016-01-15 02:06:03 +01:00
if($incomplete_backup) { $stats_incomplete++; } else { $stats_orphaned++; }
2015-10-20 16:33:23 +02:00
push @data, { type => "received",
2019-04-01 16:27:39 +02:00
target_type => $_->{CONFIG}{target_type}, # "send-receive" or "raw"
2016-04-30 13:01:12 +02:00
# suppress "orphaned" status here (snapshot column is empty anyways)
# status => ($incomplete_backup ? "incomplete" : "orphaned"),
status => ($incomplete_backup ? "incomplete" : undef),
2015-10-20 16:33:23 +02:00
vinfo_prefixed_keys("target", $target_vol),
vinfo_prefixed_keys("source", $svol),
};
}
2015-10-14 17:02:25 +02:00
}
2014-12-13 16:51:30 +01:00
}
2017-06-25 21:16:08 +02:00
my $stats_total = $stats_correlated + $stats_incomplete + $stats_orphaned;
2016-01-15 02:06:03 +01:00
$stats_backups_total += $stats_total;
2017-06-25 21:16:08 +02:00
$stats_backups_total_correlated += $stats_correlated;
2016-01-15 02:06:03 +01:00
$stats_backups_total_incomplete += $stats_incomplete;
$stats_backups_total_orphaned += $stats_orphaned;
my @stats_detail;
2017-06-25 20:17:25 +02:00
push @stats_detail, "up-to-date" if($target_up_to_date);
2017-06-25 21:16:08 +02:00
push @stats_detail, "$stats_correlated correlated" if($stats_correlated);
2016-01-15 02:06:03 +01:00
push @stats_detail, "$stats_incomplete incomplete" if($stats_incomplete);
my $stats_detail_print = join(', ', @stats_detail);
2016-01-17 18:56:14 +01:00
$stats_detail_print = " ($stats_detail_print)" if($stats_detail_print);
push @stats_data, [ "^-- $droot->{PRINT}/$snapshot_name.*", sprintf("%4u backups$stats_detail_print", $stats_total) ];
2014-12-13 16:51:30 +01:00
}
}
}
2015-10-14 17:02:25 +02:00
}
elsif($action_resolve eq "latest")
{
#
# print latest common
#
2016-03-07 21:54:51 +01:00
foreach my $sroot (vinfo_subsection($config, 'volume')) {
foreach my $svol (vinfo_subsection($sroot, 'subvolume')) {
2015-10-14 17:02:25 +02:00
my $found = 0;
2018-02-15 00:17:01 +01:00
my $snaproot = vinfo_snapshot_root($svol);
2018-02-15 17:42:41 +01:00
my $snapshot_basename = config_key($svol, "snapshot_name") // die;
2019-04-11 15:56:37 +02:00
my @related_snapshots = sort({ cmp_date($b->{node}{BTRBK_DATE}, $a->{node}{BTRBK_DATE}) } # sort descending
get_related_snapshots($snaproot, $svol, $snapshot_basename));
2016-03-07 21:54:51 +01:00
foreach my $droot (vinfo_subsection($svol, 'target')) {
2019-04-11 15:56:37 +02:00
foreach my $snapshot (@related_snapshots) {
my @receive_targets = get_receive_targets($droot, $snapshot, exact => 1);
2018-02-15 17:42:41 +01:00
if(scalar(@receive_targets)) {
foreach(@receive_targets) {
push @data, { type => "latest_common",
2019-04-01 16:27:39 +02:00
target_type => $_->{CONFIG}{target_type}, # "send-receive" or "raw"
2019-04-11 15:56:37 +02:00
status => ($snapshot->{node}{cgen} == $svol->{node}{gen}) ? "up-to-date" : undef,
2018-02-15 17:42:41 +01:00
vinfo_prefixed_keys("source", $svol),
2019-04-11 15:56:37 +02:00
vinfo_prefixed_keys("snapshot", $snapshot),
2018-02-15 17:42:41 +01:00
vinfo_prefixed_keys("target", $_),
};
}
$found = 1;
last;
}
2015-10-14 17:02:25 +02:00
}
}
2018-02-15 17:42:41 +01:00
if(!$found) {
2019-04-11 15:56:37 +02:00
my $latest_snapshot = $related_snapshots[0];
2015-10-20 15:59:16 +02:00
push @data, { type => "latest_snapshot",
2016-11-12 13:21:33 +01:00
status => ($latest_snapshot && ($latest_snapshot->{node}{cgen} == $svol->{node}{gen})) ? "up-to-date" : undef,
2015-10-22 14:49:20 +02:00
vinfo_prefixed_keys("source", $svol),
vinfo_prefixed_keys("snapshot", $latest_snapshot), # all unset if no $latest_snapshot
};
2015-10-14 17:02:25 +02:00
}
}
2015-03-24 13:13:00 +01:00
}
2015-10-11 15:38:43 +02:00
}
else {
2015-10-14 17:02:25 +02:00
die;
2015-09-23 11:27:36 +02:00
}
2015-10-20 16:33:23 +02:00
2016-01-15 02:06:03 +01:00
if($action_resolve eq "stats") {
2016-03-07 21:54:51 +01:00
print_header(title => "Statistics",
2016-01-15 02:06:03 +01:00
config => $config,
2016-03-07 21:54:51 +01:00
time => $start_time,
2016-04-30 13:01:12 +02:00
legend => [
2017-06-25 20:17:25 +02:00
"up-to-date: latest snapshot/backup is up to date with source subvolume",
2018-02-07 16:23:46 +01:00
"correlated: corresponding (received-from) source snapshot is present",
2016-04-30 13:01:12 +02:00
],
2016-01-15 02:06:03 +01:00
);
print_table(\@stats_data, " ");
print "\n";
my $stats_filter = $config->{CMDLINE_FILTER_LIST} ? join("; ", @{$config->{CMDLINE_FILTER_LIST}}) : "";
my @stats_total_detail;
2017-06-25 21:16:08 +02:00
push @stats_total_detail, "$stats_backups_total_correlated correlated" if($stats_backups_total_correlated);
2016-01-15 02:06:03 +01:00
push @stats_total_detail, "$stats_backups_total_incomplete incomplete" if($stats_backups_total_incomplete);
my $stats_total_detail_print = join(', ', @stats_total_detail);
$stats_total_detail_print = " ($stats_total_detail_print)" if($stats_total_detail_print);
print "Total" . ($stats_filter ? " ($stats_filter)" : "") . ":\n";
my $maxlen = ($stats_snapshots_total > $stats_backups_total) ? length($stats_snapshots_total) : length($stats_backups_total);
printf("%" . $maxlen . "u snapshots\n", $stats_snapshots_total);
printf("%" . $maxlen . "u backups$stats_total_detail_print\n", $stats_backups_total);
}
else {
print_formatted("resolved", \@data);
}
2015-10-20 16:33:23 +02:00
2015-10-14 16:51:39 +02:00
exit exit_status($config);
2014-12-13 13:52:43 +01:00
}
if($action_clean)
{
  #
  # "clean" action: identify and delete incomplete (garbled) backups in
  # all configured targets, then print a summary or the transaction log.
  #
  # NOTE(review): an interleaved VCS commit message and timestamp lines
  # (git-log artifacts) were removed from this block; they were not
  # valid Perl.
  #
  init_transaction_log(config_key($config, "transaction_log"),
                       config_key($config, "transaction_syslog"));

  my @out;  # lines for the "Cleanup Summary" output
  foreach my $sroot (vinfo_subsection($config, 'volume')) {
    foreach my $svol (vinfo_subsection($sroot, 'subvolume')) {
      my $snapshot_name = config_key($svol, "snapshot_name") // die;
      foreach my $droot (vinfo_subsection($svol, 'target')) {
        INFO "Cleaning incomplete backups in: $droot->{PRINT}/$snapshot_name.*";
        push @out, "$droot->{PRINT}/$snapshot_name.*";
        my @delete;
        foreach my $target_vol (@{vinfo_subvol_list($droot, btrbk_direct_leaf => $snapshot_name, sort => 'path')}) {
          # incomplete received (garbled) subvolumes are not readonly and have no received_uuid (as of btrfs-progs v4.3.1).
          # a subvolume in droot matching our naming is considered incomplete if received_uuid is not set!
          if($target_vol->{node}{received_uuid} eq '-') {
            DEBUG "Found incomplete target subvolume: $target_vol->{PRINT}";
            push(@delete, $target_vol);
          }
        }
        my @delete_success = btrfs_subvolume_delete(\@delete, commit => config_key($droot, "btrfs_commit_delete"), type => "delete_garbled");
        INFO "Deleted " . scalar(@delete_success) . " incomplete backups in: $droot->{PRINT}/$snapshot_name.*";
        $droot->{SUBVOL_DELETED} //= [];
        push @{$droot->{SUBVOL_DELETED}}, @delete_success;
        push @out, map("--- $_->{PRINT}", @delete_success);
        # abort the target section if any of the scheduled deletes failed
        if(scalar(@delete_success) != scalar(@delete)) {
          ABORTED($droot, "Failed to delete incomplete target subvolume");
          push @out, "!!! Target \"$droot->{PRINT}\" aborted: " . ABORTED_TEXT($droot);
        }
        push(@out, "<no_action>") unless(scalar(@delete));
        push(@out, "");
      }
    }
  }
  my $exit_status = exit_status($config);
  my $time_elapsed = time - $start_time;
  INFO "Completed within: ${time_elapsed}s (" . localtime(time) . ")";
  action("finished",
         status => $exit_status ? "partial" : "success",
         duration => $time_elapsed,
         message => $exit_status ? "At least one delete operation failed" : undef,
        );
  close_transaction_log();
  #
  # print summary
  #
  unless($quiet)
  {
    $output_format ||= "custom";
    if($output_format eq "custom")
    {
      print_header(title => "Cleanup Summary",
                   config => $config,
                   time => $start_time,
                   legend => [
                     "--- deleted subvolume (incomplete backup)",
                   ],
                  );
      print join("\n", @out);
      print_footer($config, $exit_status);
    }
    else
    {
      # print action log (without transaction start messages)
      my @data = grep { $_->{status} !~ /starting$/ } @transaction_log;
      print_formatted("transaction", \@data, title => "TRANSACTION LOG");
    }
  }
  exit $exit_status;
}
if($action_run)
{
  #
  # "run" action (also "snapshot" / "resume"): create snapshots, create
  # missing backups (send-receive), apply the retention policy, and
  # finally print a summary or the transaction log.
  #
  # NOTE(review): an interleaved VCS commit message and timestamp lines
  # (git-log artifacts) were removed from this block; they were not
  # valid Perl.
  #
  init_transaction_log(config_key($config, "transaction_log"),
                       config_key($config, "transaction_syslog"));

  if($skip_snapshots) {
    INFO "Skipping snapshot creation (btrbk resume)";
  }
  else
  {
    #
    # create snapshots
    #
    foreach my $sroot (vinfo_subsection($config, 'volume')) {
      foreach my $svol (vinfo_subsection($sroot, 'subvolume')) {
        my $snaproot = vinfo_snapshot_root($svol);
        my $snapshot_basename = config_key($svol, "snapshot_name") // die;
        DEBUG "Evaluating snapshot creation for: $svol->{PRINT}";

        # check if we need to create a snapshot
        my $snapshot_create = config_key($svol, "snapshot_create");
        if(not $snapshot_create) {
          DEBUG "Snapshot creation disabled (snapshot_create=no)";
          next;
        }
        elsif($snapshot_create eq "always") {
          DEBUG "Snapshot creation enabled (snapshot_create=always)";
        }
        elsif($snapshot_create eq "onchange") {
          # check if latest (btrbk only!) snapshot is up-to-date with source subvolume (by generation)
          my $latest = get_latest_related_snapshot($snaproot, $svol, $snapshot_basename);
          if($latest) {
            if($latest->{node}{cgen} == $svol->{node}{gen}) {
              INFO "Snapshot creation skipped: snapshot_create=onchange, snapshot is up-to-date: $latest->{PRINT}";
              $svol->{SNAPSHOT_UP_TO_DATE} = $latest;
              next;
            }
            DEBUG "Snapshot creation enabled: snapshot_create=onchange, gen=$svol->{node}{gen} > snapshot_cgen=$latest->{node}{cgen}";
          }
          else {
            DEBUG "Snapshot creation enabled: snapshot_create=onchange, no snapshots found";
          }
        }
        elsif($snapshot_create eq "ondemand") {
          # check if at least one target is present
          if(scalar vinfo_subsection($svol, 'target')) {
            DEBUG "Snapshot creation enabled (snapshot_create=ondemand): at least one target is present";
          }
          else {
            INFO "Snapshot creation skipped: snapshot_create=ondemand, and no target is present for: $svol->{PRINT}";
            next;
          }
        }
        else {
          die "illegal value for snapshot_create configuration option: $snapshot_create";
        }

        # find unique snapshot name (append "_N" postfix on timestamp collision)
        my $timestamp = timestamp(\@tm_now, config_key($svol, "timestamp_format"));
        my @unconfirmed_target_name;
        my @lookup = map { $_->{SUBVOL_PATH} } @{vinfo_subvol_list($snaproot)};
        foreach my $droot (vinfo_subsection($svol, 'target', 1)) {
          if(IS_ABORTED($droot)) {
            # aborted targets cannot be checked for name collisions
            push(@unconfirmed_target_name, $droot);
            next;
          }
          push(@lookup, map { $_->{SUBVOL_PATH} } @{vinfo_subvol_list($droot)});
        }
        @lookup = grep /^\Q$snapshot_basename.$timestamp\E(_[0-9]+)?$/ ,@lookup;
        TRACE "Present snapshot names for \"$svol->{PRINT}\": " . join(', ', @lookup);
        @lookup = map { /_([0-9]+)$/ ? $1 : 0 } @lookup;
        @lookup = sort { $b <=> $a } @lookup;
        my $postfix_counter = $lookup[0] // -1;
        $postfix_counter++;
        my $snapshot_name = $snapshot_basename . '.' . $timestamp . ($postfix_counter ? "_$postfix_counter" : "");
        if(@unconfirmed_target_name) {
          INFO "Assuming non-present subvolume \"$snapshot_name\" in skipped targets: " . join(", ", map { "\"$_->{PRINT}\"" } @unconfirmed_target_name);
        }
        # finally create the snapshot
        INFO "Creating subvolume snapshot for: $svol->{PRINT}";
        my $snapshot = vinfo_child($snaproot, "$snapshot_name");
        if(btrfs_subvolume_snapshot($svol, $snapshot))
        {
          # register the new snapshot in the in-memory subvolume tree
          vinfo_inject_child($snaproot, $snapshot, {
            parent_uuid => $svol->{node}{uuid},
            received_uuid => '-',
            readonly => 1,
            FORCE_PRESERVE => 'preserve forced: created just now',
          });
          $svol->{SNAPSHOT_CREATED} = $snapshot;
        }
        else {
          ABORTED($svol, "Failed to create snapshot: $svol->{PRINT} -> $snapshot->{PRINT}");
          WARN "Skipping subvolume section: " . ABORTED_TEXT($svol);
        }
      }
    }
  }

  #
  # create backups
  #
  if($skip_backups) {
    INFO "Skipping backup creation (btrbk snapshot)";
  }
  else {
    foreach my $sroot (vinfo_subsection($config, 'volume')) {
      foreach my $svol (vinfo_subsection($sroot, 'subvolume')) {
        my $snaproot = vinfo_snapshot_root($svol);
        my $snapshot_basename = config_key($svol, "snapshot_name") // die;
        my @related_snapshots = sort({ cmp_date($a->{node}{BTRBK_DATE}, $b->{node}{BTRBK_DATE}) }
                                     get_related_snapshots($snaproot, $svol, $snapshot_basename));
        foreach my $droot (vinfo_subsection($svol, 'target')) {
          INFO "Checking for missing backups of subvolume \"$svol->{PRINT}\" in \"$droot->{PRINT}/\"";
          my @schedule;
          my $resume_total = 0;
          my $resume_success = 0;

          # collect snapshots without a correlated receive target
          foreach my $snapshot (@related_snapshots)
          {
            if(get_receive_targets($droot, $snapshot, exact => 1, warn => 1)){
              DEBUG "Found correlated target of: $snapshot->{PRINT}";
              next;
            }
            DEBUG "Adding backup candidate: $snapshot->{PRINT}";
            push(@schedule, { value => $snapshot,
                              btrbk_date => $snapshot->{node}{BTRBK_DATE},
                              # not enforcing resuming of latest snapshot anymore (since v0.23.0)
                              # preserve => $snapshot->{node}{FORCE_PRESERVE},
                            });
          }
          if(scalar @schedule)
          {
            DEBUG "Checking schedule for backup candidates";
            # add all present backups as informative_only: these are needed for correct results of schedule()
            foreach my $vol (@{vinfo_subvol_list($droot, btrbk_direct_leaf => $snapshot_basename)}) {
              push(@schedule, { informative_only => 1,
                                value => $vol,
                                btrbk_date => $vol->{node}{BTRBK_DATE},
                              });
            }
            my ($preserve, undef) = schedule(
              schedule => \@schedule,
              preserve => config_preserve_hash($droot, "target"),
            );
            my @resume = grep defined, @$preserve; # remove entries with no value from list (target subvolumes)
            $resume_total = scalar @resume;
            foreach my $snapshot (sort { $a->{node}{cgen} <=> $b->{node}{cgen} } @resume)
            {
              # Continue gracefully (skip instead of abort) on existing (possibly garbled) target
              if(my $err_vol = vinfo_subvol($droot, $snapshot->{NAME})) {
                my $err_msg = "Please delete stray subvolumes: \"btrbk clean $droot->{PRINT}\"";
                FIX_MANUALLY($droot, $err_msg);
                WARN "Target subvolume \"$err_vol->{PRINT}\" exists, but is not a receive target of \"$snapshot->{PRINT}\"";
                WARN $err_msg;
                WARN "Skipping backup of: $snapshot->{PRINT}";
                $droot->{SUBVOL_RECEIVED} //= [];
                push(@{$droot->{SUBVOL_RECEIVED}}, { ERROR => 1, received_subvolume => $err_vol });
                next;
              }
              INFO "Creating subvolume backup (send-receive) for: $snapshot->{PRINT}";
              my ($clone_src, $clone_src_extra, $target_parent_node);
              my $parent = get_best_parent($snapshot, $snaproot, $droot,
                                           strict_related => ((config_key($droot, "incremental") // "") eq "strict"),
                                           clone_src => \$clone_src,
                                           clone_src_extra => \$clone_src_extra,
                                           target_parent_node => \$target_parent_node);
              if(macro_send_receive(source => $snapshot,
                                    target => $droot,
                                    parent => $parent, # this is <undef> if no suitable parent found
                                    clone_src => $clone_src,
                                    clone_src_extra => $clone_src_extra,
                                    target_parent_node => $target_parent_node,
                                   ))
              {
                $resume_success++;
              }
              else {
                # note: ABORTED flag is already set by macro_send_receive()
                ERROR("Error while resuming backups, aborting");
                last;
              }
            }
          }

          if($resume_total) {
            INFO "Created $resume_success/$resume_total missing backups";
          } else {
            INFO "No missing backups found";
          }
        }
      }
    }
  }

  #
  # remove backups following a preserve daily/weekly/monthly scheme
  #
  my $schedule_results;
  if($preserve_snapshots && $preserve_backups) {
    INFO "Preserving all snapshots and backups";
  }
  else
  {
    $schedule_results = [];
    foreach my $sroot (vinfo_subsection($config, 'volume')) {
      foreach my $svol (vinfo_subsection($sroot, 'subvolume')) {
        my $snaproot = vinfo_snapshot_root($svol);
        my $snapshot_basename = config_key($svol, "snapshot_name") // die;
        my $target_aborted = 0;
        my @related_snapshots = sort({ cmp_date($b->{node}{BTRBK_DATE}, $a->{node}{BTRBK_DATE}) } # sort descending
                                     get_related_snapshots($snaproot, $svol, $snapshot_basename));

        foreach my $droot (vinfo_subsection($svol, 'target', 1)) {
          if(IS_ABORTED($droot)) {
            # -1: skipped by command line (softer than a hard abort)
            if(IS_ABORTED($droot, "skip_cmdline_")) {
              $target_aborted ||= -1;
            } else {
              $target_aborted = 1;
            }
            next;
          }

          # preserve latest common snapshot/backup (for incremental targets)
          if(config_key($droot, "incremental")) {
            foreach my $snapshot (@related_snapshots) {
              my @receive_targets = get_receive_targets($droot, $snapshot, exact => 1);
              if(scalar(@receive_targets)) {
                DEBUG "Force preserve for latest common snapshot: $snapshot->{PRINT}";
                $snapshot->{node}{FORCE_PRESERVE} = 'preserve forced: latest common snapshot';
                foreach(@receive_targets) {
                  DEBUG "Force preserve for latest common target: $_->{PRINT}";
                  $_->{node}{FORCE_PRESERVE} = 'preserve forced: latest common target';
                }
                last;
              }
            }
          }

          if($preserve_backups) {
            INFO "Preserving all backups";
          }
          else {
            #
            # delete backups
            #
            INFO "Cleaning backups of subvolume \"$svol->{PRINT}\": $droot->{PRINT}/$snapshot_basename.*";
            unless(macro_delete($droot, "", $snapshot_basename, $droot,
                                { preserve => config_preserve_hash($droot, "target"),
                                  results => $schedule_results,
                                  result_hints => { topic => "backup", root_path => $droot->{PATH} },
                                },
                                commit => config_key($droot, "btrfs_commit_delete"),
                                type => "delete_target",
                                qgroup => { destroy => config_key($droot, "target_qgroup_destroy"),
                                            type => "qgroup_destroy_target" },
                               ))
            {
              $target_aborted = 1;
            }
          }
        }
        #
        # delete snapshots
        #
        if($preserve_snapshots) {
          INFO "Preserving all snapshots";
        }
        elsif($target_aborted) {
          if($target_aborted == -1) {
            INFO "Skipping cleanup of snapshots for subvolume \"$svol->{PRINT}\", as at least one target is skipped by command line argument";
          } else {
            WARN "Skipping cleanup of snapshots for subvolume \"$svol->{PRINT}\", as at least one target aborted earlier";
          }
        }
        else {
          INFO "Cleaning snapshots" . ($wipe_snapshots ? " (wipe)" : "") . ": $snaproot->{PRINT}/$snapshot_basename.*";
          macro_delete($snaproot, "", $snapshot_basename, $svol,
                       { preserve => config_preserve_hash($svol, "snapshot", wipe => $wipe_snapshots),
                         results => $schedule_results,
                         result_hints => { topic => "snapshot", root_path => $snaproot->{PATH} },
                       },
                       commit => config_key($svol, "btrfs_commit_delete"),
                       type => "delete_snapshot",
                       qgroup => { destroy => config_key($svol, "snapshot_qgroup_destroy"),
                                   type => "qgroup_destroy_snapshot" },
                      );
        }
      }
    }
  }

  my $exit_status = exit_status($config);
  my $time_elapsed = time - $start_time;
  INFO "Completed within: ${time_elapsed}s (" . localtime(time) . ")";
  action("finished",
         status => $exit_status ? "partial" : "success",
         duration => $time_elapsed,
         message => $exit_status ? "At least one backup task aborted" : undef,
        );
  close_transaction_log();

  unless($quiet)
  {
    #
    # print scheduling results
    #
    if($print_schedule && $schedule_results) {
      my @data = map { { %$_, vinfo_prefixed_keys("", $_->{value}) }; } @$schedule_results;
      my @data_snapshot = grep { $_->{topic} eq "snapshot" } @data;
      my @data_backup = grep { $_->{topic} eq "backup" } @data;

      if(scalar(@data_snapshot)) {
        print_formatted("schedule", \@data_snapshot, title => "SNAPSHOT SCHEDULE", paragraph => 1);
      }
      if(scalar(@data_backup)) {
        print_formatted("schedule", \@data_backup, title => "BACKUP SCHEDULE", paragraph => 1);
      }
    }
    #
    # print summary
    #
    $output_format ||= "custom";
    if($output_format eq "custom")
    {
      my @out;
      foreach my $sroot (vinfo_subsection($config, 'volume', 1)) {
        foreach my $svol (vinfo_subsection($sroot, 'subvolume', 1)) {
          my @subvol_out;
          if($svol->{SNAPSHOT_UP_TO_DATE}) {
            push @subvol_out, "=== $svol->{SNAPSHOT_UP_TO_DATE}->{PRINT}";
          }
          if($svol->{SNAPSHOT_CREATED}) {
            push @subvol_out, "+++ $svol->{SNAPSHOT_CREATED}->{PRINT}";
          }
          foreach(@{$svol->{SUBVOL_DELETED} // []}) {
            push @subvol_out, "--- $_->{PRINT}";
          }
          foreach my $droot (vinfo_subsection($svol, 'target', 1)) {
            foreach(@{$droot->{SUBVOL_RECEIVED} // []}) {
              my $create_mode = "***";
              $create_mode = ">>>" if($_->{parent});
              # substr($create_mode, 0, 1, '%') if($_->{resume});
              $create_mode = "!!!" if($_->{ERROR});
              push @subvol_out, "$create_mode $_->{received_subvolume}->{PRINT}";
            }

            foreach(@{$droot->{SUBVOL_DELETED} // []}) {
              push @subvol_out, "--- $_->{PRINT}";
            }
            if(IS_ABORTED($droot, "abort_")) {
              push @subvol_out, "!!! Target \"$droot->{PRINT}\" aborted: " . ABORTED_TEXT($droot);
            }
          }

          if(IS_ABORTED($sroot, "abort_")) {
            # repeat volume errors in subvolume context
            push @subvol_out, "!!! Volume \"$sroot->{PRINT}\" aborted: " . ABORTED_TEXT($sroot);
          }
          if(IS_ABORTED($svol, "abort_")) {
            # don't print "<no_action>" on skip_cmdline or skip_noauto
            push @subvol_out, "!!! Aborted: " . ABORTED_TEXT($svol);
          }

          # print "<no_action>" for subvolume, unless aborted by "skip_"
          unless(scalar(@subvol_out) || IS_ABORTED($sroot, "skip_") || IS_ABORTED($svol, "skip_")) {
            @subvol_out = "<no_action>";
          }

          if(@subvol_out) {
            push @out, "$svol->{PRINT}", @subvol_out, "";
          }
        }
      }

      my @cmdline_options = map { "exclude: $_" } @exclude_cmdline;
      push @cmdline_options, "$skip_snapshots: No snapshots created" if($skip_snapshots);
      push @cmdline_options, "$skip_backups: No backups created" if($skip_backups);
      push @cmdline_options, "$preserve_snapshots: Preserved all snapshots" if($preserve_snapshots);
      push @cmdline_options, "$preserve_backups: Preserved all backups" if($preserve_backups);

      print_header(title => "Backup Summary",
                   config => $config,
                   time => $start_time,
                   options => \@cmdline_options,
                   legend => [
                     "=== up-to-date subvolume (source snapshot)",
                     "+++ created subvolume (source snapshot)",
                     "--- deleted subvolume",
                     "*** received subvolume (non-incremental)",
                     ">>> received subvolume (incremental)",
                   ],
                  );
      print join("\n", @out);
      print_footer($config, $exit_status);
    }
    else
    {
      # print action log (without transaction start messages)
      my @data = grep { $_->{status} !~ /starting$/ } @transaction_log;
      print_formatted("transaction", \@data, title => "TRANSACTION LOG");
    }
  }

  exit $exit_status if($exit_status);
}
}
2014-12-11 18:03:10 +01:00
1;