def compress_directly_to_a_file(self, proc, basebackup_path):
    """Compress the basebackup stream from *proc*'s stdout straight into *basebackup_path*.

    Encrypts with the site's configured RSA public key when an
    ``encryption_key_id`` is set for the site; otherwise compresses only.

    :param proc: subprocess whose ``stdout`` carries the raw basebackup data
    :param basebackup_path: target path for the compressed (and possibly
        encrypted) output file
    :return: tuple of (original_input_size, compressed_file_size, metadata)
        where metadata records the compression algorithm and encryption key id
    """
    site_config = self.config["backup_sites"][self.site]
    encryption_key_id = site_config.get("encryption_key_id", None)
    # Only resolve a public key when encryption is configured for this site.
    if encryption_key_id:
        rsa_public_key = site_config["encryption_keys"][encryption_key_id]["public"]
    else:
        rsa_public_key = None
    # Default to snappy when no compression algorithm is configured.
    compression_algorithm = self.config.get("compression", {}).get("algorithm", "snappy")
    self.log.debug("Compressing basebackup directly to file: %r", basebackup_path)
    compressor = Compressor()
    original_input_size, compressed_file_size = compressor.compress_filepath(
        fileobj=proc.stdout,
        targetfilepath=basebackup_path,
        compression_algorithm=compression_algorithm,
        rsa_public_key=rsa_public_key)
    metadata = {
        "compression-algorithm": compression_algorithm,
        "encryption-key-id": encryption_key_id,
    }
    return original_input_size, compressed_file_size, metadata
def compress_directly_to_a_file(self, proc, basebackup_path):
    """Compress the basebackup stream from *proc*'s stdout straight into *basebackup_path*.

    Encrypts with the site's configured RSA public key when an
    ``encryption_key_id`` is set for the site; otherwise compresses only.

    :param proc: subprocess whose ``stdout`` carries the raw basebackup data
        and whose ``stderr`` is monitored during compression
    :param basebackup_path: target path for the compressed output file
    :return: tuple of (original_input_size, compressed_file_size, metadata)
        where metadata records the compression algorithm and encryption key id
    """
    site_config = self.config["backup_sites"][self.site]
    # Use .get() so sites without encryption configured yield None instead of
    # raising KeyError (matches the lookup style used elsewhere in this file).
    encryption_key_id = site_config.get("encryption_key_id", None)
    rsa_public_key = None
    if encryption_key_id:
        rsa_public_key = site_config["encryption_keys"][encryption_key_id]["public"]
    c = Compressor()
    compression_algorithm = self.config["compression"]["algorithm"]
    self.log.debug("Compressing basebackup directly to file: %r", basebackup_path)
    # Make stderr non-blocking — presumably so compress_filepath can poll it
    # while streaming stdout; set_stream_nonblocking is defined elsewhere.
    set_stream_nonblocking(proc.stderr)
    original_input_size, compressed_file_size = c.compress_filepath(
        fileobj=proc.stdout,
        stderr=proc.stderr,
        compressed_filepath=basebackup_path,
        compression_algorithm=compression_algorithm,
        rsa_public_key=rsa_public_key)
    metadata = {
        "compression-algorithm": compression_algorithm,
        "encryption-key-id": encryption_key_id,
    }
    return original_input_size, compressed_file_size, metadata
def compress_directly_to_a_file(self, proc, basebackup_path):
    """Stream *proc*'s stdout through the compressor into *basebackup_path*.

    Applies the site's RSA public-key encryption when an ``encryption_key_id``
    is configured. Returns (original_input_size, compressed_file_size,
    metadata), metadata carrying the algorithm and key id used.
    """
    encryption_key_id = self.config["backup_sites"][self.site]["encryption_key_id"]
    # Resolve the public key only when encryption is enabled for this site.
    rsa_public_key = (
        self.config["backup_sites"][self.site]["encryption_keys"][encryption_key_id]["public"]
        if encryption_key_id else None
    )
    compression_algorithm = self.config["compression"]["algorithm"]
    self.log.debug("Compressing basebackup directly to file: %r", basebackup_path)
    # stderr is switched to non-blocking before handing it to the compressor.
    set_stream_nonblocking(proc.stderr)
    compressor = Compressor()
    original_input_size, compressed_file_size = compressor.compress_filepath(
        fileobj=proc.stdout,
        stderr=proc.stderr,
        compressed_filepath=basebackup_path,
        compression_algorithm=compression_algorithm,
        rsa_public_key=rsa_public_key)
    return original_input_size, compressed_file_size, {
        "compression-algorithm": compression_algorithm,
        "encryption-key-id": encryption_key_id,
    }
def _get_basebackup(self, pgdata, basebackup, site,
                    primary_conninfo=None,
                    recovery_end_command=None,
                    recovery_target_action=None,
                    recovery_target_name=None,
                    recovery_target_time=None,
                    recovery_target_xid=None,
                    restore_to_master=None,
                    overwrite=False):
    """Download, decrypt/decompress and extract a basebackup into *pgdata*.

    Resolves ``basebackup == "latest"`` (or a recovery_target_time) to a
    concrete backup, extracts it into a freshly created *pgdata* directory,
    and writes a recovery.conf with the given recovery options.

    :param pgdata: target PostgreSQL data directory (created mode 0700)
    :param basebackup: backup name, or "latest" to pick the newest
    :param site: backup site name, used for encryption key lookup
    :param overwrite: remove an existing *pgdata* instead of aborting
    :raises RestoreError: on conflicting recovery targets, an unparseable
        recovery_target_time, or an existing *pgdata* without --overwrite
    """
    targets = [recovery_target_name, recovery_target_time, recovery_target_xid]
    if sum(0 if flag is None else 1 for flag in targets) > 1:
        raise RestoreError("Specify at most one of recovery_target_name, "
                           "recovery_target_time or recovery_target_xid")

    # If basebackup that we want it set as latest, figure out which one it is
    if recovery_target_time:
        try:
            recovery_target_time = dateutil.parser.parse(recovery_target_time)
        except (TypeError, ValueError) as ex:
            # parse() raises TypeError for non-string input and ValueError for
            # malformed strings; catch both so callers always see RestoreError.
            raise RestoreError("recovery_target_time {!r}: {}".format(recovery_target_time, ex))
        basebackup = self._find_nearest_basebackup(recovery_target_time)
    elif basebackup == "latest":
        basebackup = self._find_nearest_basebackup()

    if os.path.exists(pgdata):
        if overwrite:
            shutil.rmtree(pgdata)
        else:
            raise RestoreError("Target directory '{}' exists and --overwrite not specified, aborting.".format(pgdata))
    os.makedirs(pgdata)
    os.chmod(pgdata, 0o700)  # PostgreSQL requires a private data directory

    # Spool the (possibly compressed/encrypted) backup into a temporary file
    tmp = tempfile.TemporaryFile(dir=self.config["backup_location"],
                                 prefix="basebackup.", suffix=".pghoard")
    metadata = self.storage.get_basebackup_file_to_fileobj(basebackup, tmp)
    rsa_private_key = None
    key_id = metadata.get("encryption-key-id")
    if key_id:
        site_keys = self.config["backup_sites"][site]["encryption_keys"]
        rsa_private_key = site_keys[key_id]["private"]
    c = Compressor()
    tmp = c.decompress_from_fileobj_to_fileobj(tmp, metadata, rsa_private_key)
    # NOTE(review): extractall() trusts archive member paths; acceptable here
    # since the archive comes from our own backup storage.
    with tarfile.open(fileobj=tmp, mode="r|") as tar:  # "r|" prevents seek()ing
        tar.extractall(pgdata)

    create_recovery_conf(
        dirpath=pgdata,
        site=site,
        primary_conninfo=primary_conninfo,
        recovery_end_command=recovery_end_command,
        recovery_target_action=recovery_target_action,
        recovery_target_name=recovery_target_name,
        recovery_target_time=recovery_target_time,
        recovery_target_xid=recovery_target_xid,
        restore_to_master=restore_to_master,
    )

    print("Basebackup complete.")
    print("You can start PostgreSQL by running pg_ctl -D %s start" % pgdata)
    print("On systemd based systems you can run systemctl start postgresql")
    print("On SYSV Init based systems you can run /etc/init.d/postgresql start")
def _get_basebackup(self, pgdata, basebackup, site,
                    primary_conninfo=None,
                    recovery_end_command=None,
                    recovery_target_action=None,
                    recovery_target_name=None,
                    recovery_target_time=None,
                    recovery_target_xid=None,
                    restore_to_master=None,
                    overwrite=False):
    """Restore a basebackup into *pgdata* and write its recovery.conf.

    Picks the right backup (nearest to recovery_target_time, or the newest
    when *basebackup* is "latest"), downloads it to a temporary file,
    decrypts/decompresses it, and unpacks the tar into a newly created
    *pgdata* directory with mode 0700.

    :raises RestoreError: when more than one recovery target is given, the
        recovery_target_time cannot be parsed, or *pgdata* already exists
        and *overwrite* is false
    """
    # Reject ambiguous requests: only a single recovery target makes sense.
    recovery_targets = (recovery_target_name, recovery_target_time, recovery_target_xid)
    if sum(1 for target in recovery_targets if target is not None) > 1:
        raise RestoreError("Specify at most one of recovery_target_name, "
                           "recovery_target_time or recovery_target_xid")

    # Figure out which basebackup we are actually restoring.
    if recovery_target_time:
        try:
            recovery_target_time = dateutil.parser.parse(recovery_target_time)
        except (TypeError, ValueError) as ex:
            raise RestoreError("recovery_target_time {!r}: {}".format(recovery_target_time, ex))
        basebackup = self._find_nearest_basebackup(recovery_target_time)
    elif basebackup == "latest":
        basebackup = self._find_nearest_basebackup()

    if os.path.exists(pgdata):
        if not overwrite:
            raise RestoreError("Target directory '{}' exists and --overwrite not specified, aborting.".format(pgdata))
        shutil.rmtree(pgdata)
    os.makedirs(pgdata)
    os.chmod(pgdata, 0o700)  # PostgreSQL insists on a private data directory

    # Fetch the stored (compressed, possibly encrypted) backup into a tempfile.
    tmp = tempfile.TemporaryFile(dir=self.config["backup_location"],
                                 prefix="basebackup.", suffix=".pghoard")
    metadata = self.storage.get_basebackup_file_to_fileobj(basebackup, tmp)
    key_id = metadata.get("encryption-key-id")
    rsa_private_key = None
    if key_id:
        site_keys = self.config["backup_sites"][site]["encryption_keys"]
        rsa_private_key = site_keys[key_id]["private"]

    decompressor = Compressor()
    tmp = decompressor.decompress_from_fileobj_to_fileobj(tmp, metadata, rsa_private_key)
    tar = tarfile.open(fileobj=tmp, mode="r|")  # "r|" prevents seek()ing
    tar.extractall(pgdata)
    tar.close()

    create_recovery_conf(
        dirpath=pgdata,
        site=site,
        port=self.config["http_port"],
        primary_conninfo=primary_conninfo,
        recovery_end_command=recovery_end_command,
        recovery_target_action=recovery_target_action,
        recovery_target_name=recovery_target_name,
        recovery_target_time=recovery_target_time,
        recovery_target_xid=recovery_target_xid,
        restore_to_master=restore_to_master,
    )

    print("Basebackup complete.")
    print("You can start PostgreSQL by running pg_ctl -D %s start" % pgdata)
    print("On systemd based systems you can run systemctl start postgresql")
    print("On SYSV Init based systems you can run /etc/init.d/postgresql start")