def main(args=None):
    """
    The main script entry point

    :param list[str] args: the raw arguments list. When not provided
        it defaults to sys.args[1:]
    """
    config = parse_arguments(args)
    configure_logging(config)

    try:
        cloud_interface = get_cloud_interface(config)
        with closing(cloud_interface):
            catalog = CloudBackupCatalog(
                cloud_interface=cloud_interface, server_name=config.server_name
            )
            if not cloud_interface.test_connectivity():
                raise SystemExit(1)
            # If test is requested, just exit after connectivity test
            elif config.test:
                raise SystemExit(0)

            if not cloud_interface.bucket_exists:
                logging.error("Bucket %s does not exist", cloud_interface.bucket_name)
                raise SystemExit(1)

            backup_list = catalog.get_backup_list()

            # Render the catalog either as a fixed-width console table or as
            # a JSON document, depending on the requested output format
            if config.format == "console":
                row_format = "{:<20}{:<25}{:<30}"
                print(row_format.format("Backup ID", "End Time", "Begin Wal"))
                for backup_id in sorted(backup_list):
                    backup = backup_list[backup_id]
                    # Only completed backups are listed on the console
                    if not backup or backup.status != BackupInfo.DONE:
                        continue
                    print(
                        row_format.format(
                            backup.backup_id,
                            backup.end_time.strftime("%Y-%m-%d %H:%M:%S"),
                            backup.begin_wal,
                        )
                    )
            else:
                # JSON output includes every backup in the catalog
                payload = {
                    "backups_list": [
                        backup_list[backup_id].to_json()
                        for backup_id in sorted(backup_list)
                    ]
                }
                print(json.dumps(payload))
    except Exception as exc:
        logging.error("Barman cloud backup list exception: %s", force_str(exc))
        logging.debug("Exception details:", exc_info=exc)
        raise SystemExit(1)
def __init__(self, cloud_interface, server_name):
    """
    Object responsible for handling interactions with cloud storage

    :param CloudInterface cloud_interface: The interface to use to
      upload the backup
    :param str server_name: The name of the server as configured in Barman
    """
    self.server_name = server_name
    self.cloud_interface = cloud_interface
    # Catalog used to resolve backups stored for this server in the cloud
    self.catalog = CloudBackupCatalog(cloud_interface, server_name)
def main(args=None):
    """
    The main script entry point

    :param list[str] args: the raw arguments list. When not provided
        it defaults to sys.args[1:]
    """
    config = parse_arguments(args)
    configure_logging(config)
    try:
        cloud_interface = get_cloud_interface(config)
        with closing(cloud_interface):
            if not cloud_interface.test_connectivity():
                raise NetworkErrorExit()
            # If test is requested, just exit after connectivity test
            elif config.test:
                raise SystemExit(0)

            if not cloud_interface.bucket_exists:
                logging.error("Bucket %s does not exist", cloud_interface.bucket_name)
                raise OperationErrorExit()

            catalog = CloudBackupCatalog(cloud_interface, config.server_name)
            if config.release:
                # Drop any keep annotation for this backup
                catalog.release_keep(config.backup_id)
            elif config.status:
                # Report the current keep target, or "nokeep" when unset
                target = catalog.get_keep_target(config.backup_id)
                print("Keep: %s" % target if target else "Keep: nokeep")
            else:
                backup_info = catalog.get_backup_info(config.backup_id)
                if backup_info.status != BackupInfo.DONE:
                    logging.error(
                        "Cannot add keep to backup %s because it has status %s. "
                        "Only backups with status DONE can be kept.",
                        config.backup_id,
                        backup_info.status,
                    )
                    raise OperationErrorExit()
                catalog.keep_backup(config.backup_id, config.target)
    except Exception as exc:
        logging.error("Barman cloud keep exception: %s", force_str(exc))
        logging.debug("Exception details:", exc_info=exc)
        raise GeneralErrorExit()
def main(args=None):
    """
    The main script entry point

    :param list[str] args: the raw arguments list. When not provided
        it defaults to sys.args[1:]
    """
    config = parse_arguments(args)
    configure_logging(config)
    try:
        cloud_interface = get_cloud_interface(config)
        # Guarantee the cloud interface is closed on exit, consistent with the
        # other cloud client entry points (previously it was never closed)
        with closing(cloud_interface):
            if not cloud_interface.test_connectivity():
                # Deliberately raise an error if we cannot connect
                raise NetworkErrorExit()
            # If test is requested, just exit after connectivity test
            elif config.test:
                raise SystemExit(0)

            if not cloud_interface.bucket_exists:
                # If the bucket does not exist then the check should pass
                return

            catalog = CloudBackupCatalog(cloud_interface, config.server_name)
            wals = list(catalog.get_wal_paths().keys())
            check_archive_usable(
                wals,
                timeline=config.timeline,
            )
    except WalArchiveContentError as err:
        logging.error(
            "WAL archive check failed for server %s: %s",
            config.server_name,
            force_str(err),
        )
        raise OperationErrorExit()
    except Exception as exc:
        logging.error("Barman cloud WAL archive check exception: %s", force_str(exc))
        logging.debug("Exception details:", exc_info=exc)
        raise GeneralErrorExit()
def main(args=None):
    """
    The main script entry point

    :param list[str] args: the raw arguments list. When not provided
        it defaults to sys.args[1:]
    """
    config = parse_arguments(args)
    configure_logging(config)
    try:
        cloud_interface = get_cloud_interface(config)
        with closing(cloud_interface):
            if not cloud_interface.test_connectivity():
                raise SystemExit(1)
            # If test is requested, just exit after connectivity test
            elif config.test:
                raise SystemExit(0)

            if not cloud_interface.bucket_exists:
                logging.error("Bucket %s does not exist", cloud_interface.bucket_name)
                raise SystemExit(1)

            catalog = CloudBackupCatalog(
                cloud_interface=cloud_interface, server_name=config.server_name
            )
            # Call catalog.get_backup_list now so we know we can read the whole
            # catalog (the results are cached so this does not result in extra
            # calls to cloud storage)
            catalog.get_backup_list()
            if len(catalog.unreadable_backups) > 0:
                # Use logging's lazy argument interpolation rather than eagerly
                # formatting with the % operator (safer and idiomatic)
                logging.error(
                    "Cannot read the following backups: %s\n"
                    "Unsafe to proceed with deletion due to failure "
                    "reading backup catalog",
                    catalog.unreadable_backups,
                )
                raise SystemExit(1)

            if config.backup_id:
                # Because we only care about one backup, skip the annotation
                # cache because it is only helpful when dealing with multiple
                # backups
                if catalog.should_keep_backup(config.backup_id, use_cache=False):
                    logging.error(
                        "Skipping delete of backup %s for server %s "
                        "as it has a current keep request. If you really "
                        "want to delete this backup please remove the keep "
                        "and try again.",
                        config.backup_id,
                        config.server_name,
                    )
                    raise SystemExit(1)
                _delete_backup(
                    cloud_interface, catalog, config.backup_id, config.dry_run
                )
            elif config.retention_policy:
                retention_policy = RetentionPolicyFactory.create(
                    "retention_policy",
                    config.retention_policy,
                    server_name=config.server_name,
                    catalog=catalog,
                )
                # Sort to ensure that we delete the backups in ascending order,
                # that is from oldest to newest. This ensures that the relevant
                # WALs will be cleaned up after each backup is deleted.
                backups_to_delete = sorted(
                    backup_id
                    for backup_id, status in retention_policy.report().items()
                    if status == "OBSOLETE"
                )
                for backup_id in backups_to_delete:
                    _delete_backup(
                        cloud_interface,
                        catalog,
                        backup_id,
                        config.dry_run,
                        skip_wal_cleanup_if_standalone=False,
                    )
    except Exception as exc:
        logging.error("Barman cloud backup delete exception: %s", force_str(exc))
        logging.debug("Exception details:", exc_info=exc)
        raise SystemExit(1)
class CloudBackupDownloader(object):
    """
    Cloud storage download client
    """

    def __init__(self, cloud_interface, server_name):
        """
        Object responsible for handling interactions with cloud storage

        :param CloudInterface cloud_interface: The interface to use to
          upload the backup
        :param str server_name: The name of the server as configured in Barman
        """
        self.cloud_interface = cloud_interface
        self.server_name = server_name
        self.catalog = CloudBackupCatalog(cloud_interface, server_name)

    def download_backup(self, backup_id, destination_dir, tablespaces):
        """
        Download a backup from cloud storage

        :param str backup_id: The backup id to restore
        :param str destination_dir: Path to the destination directory
        :param dict[str,str] tablespaces: Mapping of tablespace name to a new
            location on disk, overriding the location recorded in the backup
            metadata
        :raises SystemExit: If the backup does not exist or a target directory
            is non-empty
        """
        backup_info = self.catalog.get_backup_info(backup_id)

        if not backup_info:
            logging.error(
                "Backup %s for server %s does not exists",
                backup_id,
                self.server_name,
            )
            raise SystemExit(1)

        backup_files = self.catalog.get_backup_files(backup_info)

        # We must download and restore a bunch of .tar files that contain PGDATA
        # and each tablespace. First, we determine a target directory to extract
        # each tar file into and record these in copy_jobs. For each tablespace,
        # the location may be overriden by `--tablespace name:/new/location` on
        # the command-line; and we must also add an entry to link_jobs to create
        # a symlink from $PGDATA/pg_tblspc/oid to the correct location after the
        # downloads.
        copy_jobs = []
        link_jobs = []
        for oid in backup_files:
            file_info = backup_files[oid]
            # PGDATA is restored where requested (destination_dir)
            if oid is None:
                target_dir = destination_dir
            else:
                for tblspc in backup_info.tablespaces:
                    if oid == tblspc.oid:
                        target_dir = tblspc.location
                        if tblspc.name in tablespaces:
                            target_dir = os.path.realpath(tablespaces[tblspc.name])
                        logging.debug(
                            "Tablespace %s (oid=%s) will be located at %s",
                            tblspc.name,
                            oid,
                            target_dir,
                        )
                        link_jobs.append(
                            ["%s/pg_tblspc/%s" % (destination_dir, oid), target_dir]
                        )
                        break
                else:
                    # Bug fix: the oid was never interpolated into the message,
                    # leaving a literal '%s' in the raised error
                    raise AssertionError(
                        "The backup file oid '%s' must be present "
                        "in backupinfo.tablespaces list" % oid
                    )

            # Validate the destination directory before starting recovery
            if os.path.exists(target_dir) and os.listdir(target_dir):
                logging.error(
                    "Destination %s already exists and it is not empty", target_dir
                )
                raise SystemExit(1)
            copy_jobs.append([file_info, target_dir])
            for additional_file in file_info.additional_files:
                copy_jobs.append([additional_file, target_dir])

        # Now it's time to download the files
        for file_info, target_dir in copy_jobs:
            # Download the file
            logging.debug(
                "Extracting %s to %s (%s)",
                file_info.path,
                target_dir,
                "decompressing " + file_info.compression
                if file_info.compression
                else "no compression",
            )
            self.cloud_interface.extract_tar(file_info.path, target_dir)

        for link, target in link_jobs:
            os.symlink(target, link)

        # If we did not restore the pg_wal directory from one of the uploaded
        # backup files, we must recreate it here. (If pg_wal was originally a
        # symlink, it would not have been uploaded.)
        wal_path = os.path.join(destination_dir, backup_info.wal_directory())
        if not os.path.exists(wal_path):
            os.mkdir(wal_path)