def main(args=None):
    """
    The main script entry point

    :param list[str] args: the raw arguments list. When not provided
        it defaults to sys.args[1:]
    """
    config = parse_arguments(args)
    configure_logging(config)
    try:
        cloud_interface = get_cloud_interface(config)
        with closing(cloud_interface):
            # Bail out early if the cloud provider is unreachable
            if not cloud_interface.test_connectivity():
                raise NetworkErrorExit()
            # If test is requested, just exit after connectivity test
            if config.test:
                raise SystemExit(0)
            if not cloud_interface.bucket_exists:
                logging.error("Bucket %s does not exist", cloud_interface.bucket_name)
                raise OperationErrorExit()
            catalog = CloudBackupCatalog(cloud_interface, config.server_name)
            if config.release:
                # Remove any keep annotation from the backup
                catalog.release_keep(config.backup_id)
            elif config.status:
                # Report the current keep target, if one is set
                target = catalog.get_keep_target(config.backup_id)
                print("Keep: %s" % target if target else "Keep: nokeep")
            else:
                # Adding a keep: only completed backups may be kept
                backup_info = catalog.get_backup_info(config.backup_id)
                if backup_info.status != BackupInfo.DONE:
                    logging.error(
                        "Cannot add keep to backup %s because it has status %s. "
                        "Only backups with status DONE can be kept.",
                        config.backup_id,
                        backup_info.status,
                    )
                    raise OperationErrorExit()
                catalog.keep_backup(config.backup_id, config.target)
    except Exception as exc:
        logging.error("Barman cloud keep exception: %s", force_str(exc))
        logging.debug("Exception details:", exc_info=exc)
        raise GeneralErrorExit()
def main(args=None):
    """
    The main script entry point

    :param list[str] args: the raw arguments list. When not provided
        it defaults to sys.args[1:]
    """
    config = parse_arguments(args)
    configure_logging(config)
    # Validate the destination directory before starting recovery
    dest = config.recovery_dir
    if os.path.exists(dest) and os.listdir(dest):
        logging.error("Destination %s already exists and it is not empty", dest)
        raise OperationErrorExit()
    try:
        storage = get_cloud_interface(config)
        with closing(storage):
            backup_downloader = CloudBackupDownloader(
                cloud_interface=storage, server_name=config.server_name
            )
            if not storage.test_connectivity():
                raise NetworkErrorExit()
            # If test is requested, just exit after connectivity test
            if config.test:
                raise SystemExit(0)
            if not storage.bucket_exists:
                logging.error("Bucket %s does not exist", storage.bucket_name)
                raise OperationErrorExit()
            backup_downloader.download_backup(
                config.backup_id,
                config.recovery_dir,
                tablespace_map(config.tablespace),
            )
    except KeyboardInterrupt as exc:
        logging.error("Barman cloud restore was interrupted by the user")
        logging.debug("Exception details:", exc_info=exc)
        raise OperationErrorExit()
    except Exception as exc:
        logging.error("Barman cloud restore exception: %s", force_str(exc))
        logging.debug("Exception details:", exc_info=exc)
        raise GeneralErrorExit()
def main(args=None):
    """
    The main script entry point

    :param list[str] args: the raw arguments list. When not provided
        it defaults to sys.args[1:]
    """
    config = parse_arguments(args)
    configure_logging(config)
    # Validate the WAL file name before downloading it
    if not is_any_xlog_file(config.wal_name):
        logging.error("%s is an invalid name for a WAL file" % config.wal_name)
        raise CLIErrorExit()
    try:
        storage = get_cloud_interface(config)
        with closing(storage):
            wal_downloader = CloudWalDownloader(
                cloud_interface=storage, server_name=config.server_name
            )
            if not storage.test_connectivity():
                raise NetworkErrorExit()
            # If test is requested, just exit after connectivity test
            if config.test:
                raise SystemExit(0)
            if not storage.bucket_exists:
                logging.error("Bucket %s does not exist", storage.bucket_name)
                raise OperationErrorExit()
            wal_downloader.download_wal(config.wal_name, config.wal_dest)
    except Exception as exc:
        logging.error("Barman cloud WAL restore exception: %s", force_str(exc))
        logging.debug("Exception details:", exc_info=exc)
        raise GeneralErrorExit()
def main(args=None):
    """
    The main script entry point

    :param list[str] args: the raw arguments list. When not provided
        it defaults to sys.args[1:]
    """
    config = parse_arguments(args)
    configure_logging(config)
    try:
        storage = get_cloud_interface(config)
        if not storage.test_connectivity():
            # Deliberately raise an error if we cannot connect
            raise NetworkErrorExit()
        # If test is requested, just exit after connectivity test
        if config.test:
            raise SystemExit(0)
        if not storage.bucket_exists:
            # If the bucket does not exist then the check should pass
            return
        catalog = CloudBackupCatalog(storage, config.server_name)
        wal_names = list(catalog.get_wal_paths().keys())
        check_archive_usable(wal_names, timeline=config.timeline)
    except WalArchiveContentError as content_err:
        logging.error(
            "WAL archive check failed for server %s: %s",
            config.server_name,
            force_str(content_err),
        )
        raise OperationErrorExit()
    except Exception as exc:
        logging.error("Barman cloud WAL archive check exception: %s", force_str(exc))
        logging.debug("Exception details:", exc_info=exc)
        raise GeneralErrorExit()
def _delete_backup(
    cloud_interface,
    catalog,
    backup_id,
    dry_run=True,
    skip_wal_cleanup_if_standalone=True,
):
    """
    Delete the cloud objects belonging to a single backup.

    Deletes the backup files first, then the backup.info file, then cleans up
    any WALs which are no longer required, and finally removes the backup from
    the in-memory catalog cache. The ordering is deliberate (see inline
    comments) so that a partially failed deletion can be retried.

    :param cloud_interface: Cloud provider interface used to delete objects
    :param catalog: CloudBackupCatalog for the server owning the backup
    :param str backup_id: ID of the backup to delete
    :param bool dry_run: When True, only print what would be deleted
    :param bool skip_wal_cleanup_if_standalone: Forwarded to
        _remove_wals_for_backup to control WAL cleanup behaviour
    """
    backup_info = catalog.get_backup_info(backup_id)
    if not backup_info:
        # A missing backup is treated as a no-op, not an error
        logging.warning("Backup %s does not exist", backup_id)
        return
    objects_to_delete = _get_files_for_backup(catalog, backup_info)
    backup_info_path = os.path.join(
        catalog.prefix, backup_info.backup_id, "backup.info"
    )
    logging.debug("Will delete backup.info file at %s" % backup_info_path)
    if not dry_run:
        try:
            cloud_interface.delete_objects(objects_to_delete)
            # Do not try to delete backup.info until we have successfully deleted
            # everything else so that it is possible to retry the operation should
            # we fail to delete any backup file
            cloud_interface.delete_objects([backup_info_path])
        except Exception as exc:
            logging.error("Could not delete backup %s: %s", backup_id, force_str(exc))
            raise OperationErrorExit()
    else:
        print(
            "Skipping deletion of objects %s due to --dry-run option"
            % (objects_to_delete + [backup_info_path])
        )
    _remove_wals_for_backup(
        cloud_interface, catalog, backup_info, dry_run, skip_wal_cleanup_if_standalone
    )
    # It is important that the backup is removed from the catalog after cleaning
    # up the WALs because the code in _remove_wals_for_backup depends on the
    # deleted backup existing in the backup catalog
    catalog.remove_backup_from_cache(backup_id)
def main(args=None):
    """
    The main script entry point

    Lists the backups in the cloud catalog either as a formatted console
    table (DONE backups only, with their keep status) or as a JSON document
    containing every catalog entry.

    :param list[str] args: the raw arguments list. When not provided
        it defaults to sys.args[1:]
    """
    config = parse_arguments(args)
    configure_logging(config)
    try:
        cloud_interface = get_cloud_interface(config)
        with closing(cloud_interface):
            catalog = CloudBackupCatalog(
                cloud_interface=cloud_interface, server_name=config.server_name
            )
            if not cloud_interface.test_connectivity():
                raise NetworkErrorExit()
            # If test is requested, just exit after connectivity test
            elif config.test:
                raise SystemExit(0)
            if not cloud_interface.bucket_exists:
                logging.error("Bucket %s does not exist", cloud_interface.bucket_name)
                raise OperationErrorExit()
            backup_list = catalog.get_backup_list()
            # Output
            if config.format == "console":
                COLUMNS = "{:<20}{:<25}{:<30}{:<16}"
                print(
                    COLUMNS.format(
                        "Backup ID", "End Time", "Begin Wal", "Archival Status"
                    )
                )
                for backup_id in sorted(backup_list):
                    item = backup_list[backup_id]
                    # Only completed (DONE) backups are shown on the console
                    if item and item.status == BackupInfo.DONE:
                        keep_target = catalog.get_keep_target(item.backup_id)
                        # Conditional expression instead of the fragile
                        # `x and y or z` idiom used previously
                        keep_status = (
                            "KEEP:%s" % keep_target.upper() if keep_target else ""
                        )
                        print(
                            COLUMNS.format(
                                item.backup_id,
                                item.end_time.strftime("%Y-%m-%d %H:%M:%S"),
                                item.begin_wal,
                                keep_status,
                            )
                        )
            else:
                # JSON output includes every backup in the catalog
                print(
                    json.dumps(
                        {
                            "backups_list": [
                                backup_list[backup_id].to_json()
                                for backup_id in sorted(backup_list)
                            ]
                        }
                    )
                )
    except Exception as exc:
        logging.error("Barman cloud backup list exception: %s", force_str(exc))
        logging.debug("Exception details:", exc_info=exc)
        raise GeneralErrorExit()
def main(args=None):
    """
    The main script entry point

    Deletes either a single backup (``--backup-id``) or every backup made
    OBSOLETE by a retention policy (``--retention-policy``), refusing to
    delete backups that carry a keep annotation.

    :param list[str] args: the raw arguments list. When not provided
        it defaults to sys.args[1:]
    """
    config = parse_arguments(args)
    configure_logging(config)
    try:
        cloud_interface = get_cloud_interface(config)
        with closing(cloud_interface):
            if not cloud_interface.test_connectivity():
                raise NetworkErrorExit()
            # If test is requested, just exit after connectivity test
            elif config.test:
                raise SystemExit(0)
            if not cloud_interface.bucket_exists:
                logging.error("Bucket %s does not exist", cloud_interface.bucket_name)
                raise OperationErrorExit()
            catalog = CloudBackupCatalog(
                cloud_interface=cloud_interface, server_name=config.server_name
            )
            # Call catalog.get_backup_list now so we know we can read the whole catalog
            # (the results are cached so this does not result in extra calls to cloud
            # storage)
            catalog.get_backup_list()
            if len(catalog.unreadable_backups) > 0:
                # Refuse to delete anything if any part of the catalog is unreadable
                logging.error(
                    "Cannot read the following backups: %s\n"
                    "Unsafe to proceed with deletion due to failure reading backup catalog"
                    % catalog.unreadable_backups
                )
                raise OperationErrorExit()
            if config.backup_id:
                # Because we only care about one backup, skip the annotation cache
                # because it is only helpful when dealing with multiple backups
                if catalog.should_keep_backup(config.backup_id, use_cache=False):
                    logging.error(
                        "Skipping delete of backup %s for server %s "
                        "as it has a current keep request. If you really "
                        "want to delete this backup please remove the keep "
                        "and try again.",
                        config.backup_id,
                        config.server_name,
                    )
                    raise OperationErrorExit()
                _delete_backup(
                    cloud_interface, catalog, config.backup_id, config.dry_run
                )
            elif config.retention_policy:
                try:
                    retention_policy = RetentionPolicyFactory.create(
                        "retention_policy",
                        config.retention_policy,
                        server_name=config.server_name,
                        catalog=catalog,
                    )
                except InvalidRetentionPolicy as exc:
                    # A malformed policy string is a usage error, not a runtime one
                    logging.error(
                        "Could not create retention policy %s: %s",
                        config.retention_policy,
                        force_str(exc),
                    )
                    raise CLIErrorExit()
                # Sort to ensure that we delete the backups in ascending order, that is
                # from oldest to newest. This ensures that the relevant WALs will be cleaned
                # up after each backup is deleted.
                backups_to_delete = sorted(
                    [
                        backup_id
                        for backup_id, status in retention_policy.report().items()
                        if status == "OBSOLETE"
                    ]
                )
                for backup_id in backups_to_delete:
                    _delete_backup(
                        cloud_interface,
                        catalog,
                        backup_id,
                        config.dry_run,
                        skip_wal_cleanup_if_standalone=False,
                    )
    except Exception as exc:
        logging.error("Barman cloud backup delete exception: %s", force_str(exc))
        logging.debug("Exception details:", exc_info=exc)
        raise GeneralErrorExit()
def main(args=None):
    """
    The main script entry point

    Uploads a backup to cloud storage. Runs in one of two modes: as a
    barman post-backup hook script (reading BARMAN_* environment variables
    and uploading an existing backup directory) or standalone (connecting
    to PostgreSQL and taking the backup itself).

    :param list[str] args: the raw arguments list. When not provided
        it defaults to sys.args[1:]
    """
    config = parse_arguments(args)
    configure_logging(config)
    tempdir = tempfile.mkdtemp(prefix="barman-cloud-backup-")
    try:
        # Create any temporary file in the `tempdir` subdirectory
        tempfile.tempdir = tempdir
        cloud_interface = get_cloud_interface(config)
        if not cloud_interface.test_connectivity():
            raise NetworkErrorExit()
        # If test is requested, just exit after connectivity test
        elif config.test:
            raise SystemExit(0)
        with closing(cloud_interface):
            # TODO: Should the setup be optional?
            cloud_interface.setup_bucket()
            # Perform the backup
            uploader_kwargs = {
                "server_name": config.server_name,
                "compression": config.compression,
                "max_archive_size": config.max_archive_size,
                "cloud_interface": cloud_interface,
            }
            if __is_hook_script():
                # Hook-script mode: the backup was already taken by barman and
                # its location/status are passed via environment variables
                if "BARMAN_BACKUP_DIR" not in os.environ:
                    raise BarmanException(
                        "BARMAN_BACKUP_DIR environment variable not set"
                    )
                if "BARMAN_BACKUP_ID" not in os.environ:
                    raise BarmanException(
                        "BARMAN_BACKUP_ID environment variable not set"
                    )
                if os.getenv("BARMAN_STATUS") != "DONE":
                    raise UnrecoverableHookScriptError(
                        "backup in '%s' has status '%s' (status should be: DONE)"
                        % (os.getenv("BARMAN_BACKUP_DIR"), os.getenv("BARMAN_STATUS"))
                    )
                uploader = CloudBackupUploaderBarman(
                    backup_dir=os.getenv("BARMAN_BACKUP_DIR"),
                    backup_id=os.getenv("BARMAN_BACKUP_ID"),
                    **uploader_kwargs
                )
                uploader.backup()
            else:
                # Standalone mode: connect to PostgreSQL and take the backup
                conninfo = build_conninfo(config)
                postgres = PostgreSQLConnection(
                    conninfo,
                    config.immediate_checkpoint,
                    application_name="barman_cloud_backup",
                )
                try:
                    postgres.connect()
                except PostgresConnectionError as exc:
                    logging.error("Cannot connect to postgres: %s", force_str(exc))
                    logging.debug("Exception details:", exc_info=exc)
                    raise OperationErrorExit()
                with closing(postgres):
                    uploader = CloudBackupUploaderPostgres(
                        postgres=postgres, **uploader_kwargs
                    )
                    uploader.backup()
    except KeyboardInterrupt as exc:
        logging.error("Barman cloud backup was interrupted by the user")
        logging.debug("Exception details:", exc_info=exc)
        raise OperationErrorExit()
    except UnrecoverableHookScriptError as exc:
        logging.error("Barman cloud backup exception: %s", force_str(exc))
        logging.debug("Exception details:", exc_info=exc)
        # Exit code 63 signals barman an unrecoverable hook-script failure
        raise SystemExit(63)
    except Exception as exc:
        logging.error("Barman cloud backup exception: %s", force_str(exc))
        logging.debug("Exception details:", exc_info=exc)
        raise GeneralErrorExit()
    finally:
        # Remove the temporary directory and all the contained files
        rmtree(tempdir, ignore_errors=True)
def download_wal(self, wal_name, wal_dest): """ Download a WAL file from cloud storage :param str wal_name: Name of the WAL file :param str wal_dest: Full path of the destination WAL file """ # Correctly format the source path on s3 source_dir = os.path.join(self.cloud_interface.path, self.server_name, "wals", hash_dir(wal_name)) # Add a path separator if needed if not source_dir.endswith(os.path.sep): source_dir += os.path.sep wal_path = os.path.join(source_dir, wal_name) remote_name = None # Automatically detect compression based on the file extension compression = None for item in self.cloud_interface.list_bucket(source_dir): # perfect match (uncompressed file) if item == wal_path: remote_name = item # look for compressed files or .partial files elif item.startswith(wal_path): # Detect compression basename = item for e, c in ALLOWED_COMPRESSIONS.items(): if item[-len(e):] == e: # Strip extension basename = basename[:-len(e)] compression = c break # Check basename is a known xlog file (.partial?) if not is_any_xlog_file(basename): logging.warning("Unknown WAL file: %s", item) continue # Exclude backup informative files (not needed in recovery) elif is_backup_file(basename): logging.info("Skipping backup file: %s", item) continue # Found candidate remote_name = item logging.info( "Found WAL %s for server %s as %s", wal_name, self.server_name, remote_name, ) break if not remote_name: logging.info("WAL file %s for server %s does not exists", wal_name, self.server_name) raise OperationErrorExit() if compression and sys.version_info < (3, 0, 0): raise BarmanException( "Compressed WALs cannot be restored with Python 2.x - " "please upgrade to a supported version of Python 3") # Download the file logging.debug( "Downloading %s to %s (%s)", remote_name, wal_dest, "decompressing " + compression if compression else "no compression", ) self.cloud_interface.download_file(remote_name, wal_dest, compression)
def download_backup(self, backup_id, destination_dir, tablespaces):
    """
    Download a backup from cloud storage

    Restores PGDATA into ``destination_dir`` and each tablespace into its
    recorded location (or an override from ``tablespaces``), creating the
    ``pg_tblspc`` symlinks and, if necessary, an empty WAL directory.

    :param str backup_id: The backup id to restore
    :param str destination_dir: Path to the destination directory
    :param dict tablespaces: Mapping of tablespace name to an override
        location for that tablespace
    """
    backup_info = self.catalog.get_backup_info(backup_id)
    if not backup_info:
        logging.error(
            "Backup %s for server %s does not exists", backup_id, self.server_name
        )
        raise OperationErrorExit()
    backup_files = self.catalog.get_backup_files(backup_info)
    # We must download and restore a bunch of .tar files that contain PGDATA
    # and each tablespace. First, we determine a target directory to extract
    # each tar file into and record these in copy_jobs. For each tablespace,
    # the location may be overridden by `--tablespace name:/new/location` on
    # the command-line; and we must also add an entry to link_jobs to create
    # a symlink from $PGDATA/pg_tblspc/oid to the correct location after the
    # downloads.
    copy_jobs = []
    link_jobs = []
    for oid in backup_files:
        file_info = backup_files[oid]
        # PGDATA is restored where requested (destination_dir)
        if oid is None:
            target_dir = destination_dir
        else:
            for tblspc in backup_info.tablespaces:
                if oid == tblspc.oid:
                    target_dir = tblspc.location
                    if tblspc.name in tablespaces:
                        target_dir = os.path.realpath(tablespaces[tblspc.name])
                    logging.debug(
                        "Tablespace %s (oid=%s) will be located at %s",
                        tblspc.name,
                        oid,
                        target_dir,
                    )
                    link_jobs.append(
                        ["%s/pg_tblspc/%s" % (destination_dir, oid), target_dir]
                    )
                    break
            else:
                # Fix: the original message contained an unfilled '%s'
                # placeholder; format the offending oid into the message
                raise AssertionError(
                    "The backup file oid '%s' must be present "
                    "in backupinfo.tablespaces list" % oid
                )
        # Validate the destination directory before starting recovery
        if os.path.exists(target_dir) and os.listdir(target_dir):
            logging.error(
                "Destination %s already exists and it is not empty", target_dir
            )
            raise OperationErrorExit()
        copy_jobs.append([file_info, target_dir])
        for additional_file in file_info.additional_files:
            copy_jobs.append([additional_file, target_dir])
    # Now it's time to download the files
    for file_info, target_dir in copy_jobs:
        # Download the file
        logging.debug(
            "Extracting %s to %s (%s)",
            file_info.path,
            target_dir,
            "decompressing " + file_info.compression
            if file_info.compression
            else "no compression",
        )
        self.cloud_interface.extract_tar(file_info.path, target_dir)
    for link, target in link_jobs:
        os.symlink(target, link)
    # If we did not restore the pg_wal directory from one of the uploaded
    # backup files, we must recreate it here. (If pg_wal was originally a
    # symlink, it would not have been uploaded.)
    wal_path = os.path.join(destination_dir, backup_info.wal_directory())
    if not os.path.exists(wal_path):
        os.mkdir(wal_path)