Example No. 1
    def delete_remote_basebackup(self, site, basebackup, metadata):
        start_time = time.monotonic()
        storage = self.site_transfers.get(site)
        main_backup_key = os.path.join(self.config["backup_sites"][site]["prefix"], "basebackup", basebackup)
        basebackup_data_files = [main_backup_key]

        # pghoard-bb-v2 basebackups store their data in separate chunk objects;
        # the main object only carries compressed (and possibly encrypted)
        # metadata listing those chunks
        if metadata.get("format") == "pghoard-bb-v2":
            bmeta_compressed = storage.get_contents_to_string(main_backup_key)[0]
            with rohmufile.file_reader(fileobj=io.BytesIO(bmeta_compressed), metadata=metadata,
                                       key_lookup=config.key_lookup_for_site(self.config, site)) as input_obj:
                bmeta = extract_pghoard_bb_v2_metadata(input_obj)
                self.log.debug("PGHoard chunk metadata: %r", bmeta)
                for chunk in bmeta["chunks"]:
                    basebackup_data_files.append(os.path.join(
                        self.config["backup_sites"][site]["prefix"],
                        "basebackup_chunk",
                        chunk["chunk_filename"],
                    ))

        self.log.debug("Deleting basebackup datafiles: %r", ', '.join(basebackup_data_files))
        for obj_key in basebackup_data_files:
            try:
                storage.delete_key(obj_key)
            except FileNotFoundFromStorageError:
                self.log.info("Tried to delete non-existent basebackup %r", obj_key)
            except Exception as ex:  # FIXME: don't catch all exceptions; pylint: disable=broad-except
                self.log.exception("Problem deleting: %r", obj_key)
                self.metrics.unexpected_exception(ex, where="delete_remote_basebackup")
        self.log.info("Deleted basebackup datafiles: %r, took: %.2fs",
                      ', '.join(basebackup_data_files), time.monotonic() - start_time)
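The method above first assembles the complete list of object keys belonging to one basebackup, then deletes them one by one: the main basebackup object always comes first, and for pghoard-bb-v2 backups one key is added per data chunk. A minimal, self-contained sketch of that key layout, assuming the bmeta shape shown above (the function name and sample values are illustrative, not pghoard API):

import os

def basebackup_object_keys(prefix, basebackup, bmeta=None):
    # The main basebackup object always exists; v2 backups add one object per chunk
    keys = [os.path.join(prefix, "basebackup", basebackup)]
    for chunk in (bmeta or {}).get("chunks", []):
        keys.append(os.path.join(prefix, "basebackup_chunk", chunk["chunk_filename"]))
    return keys

print(basebackup_object_keys(
    "mysite", "2024-01-01_0",
    bmeta={"chunks": [{"chunk_filename": "2024-01-01_0/00000001.pghoard"}]},
))
# On POSIX: ['mysite/basebackup/2024-01-01_0', 'mysite/basebackup_chunk/2024-01-01_0/00000001.pghoard']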
Example No. 2
    def handle_decompression_event(self, event):
        with open(event["local_path"], "wb") as output_obj:
            rohmufile.read_file(
                input_obj=BytesIO(event["blob"]),
                output_obj=output_obj,
                metadata=event.get("metadata"),
                key_lookup=config.key_lookup_for_site(self.config, event["site"]),
                log_func=self.log.debug,
            )

        if "callback_queue" in event:
            event["callback_queue"].put({"success": True, "opaque": event.get("opaque")})
Example No. 3
    def fetch(self, site, key, target_path):
        try:
            lookup = key_lookup_for_site(self.config, site)
            data, metadata = self.transfer.get_contents_to_string(key)
            if isinstance(data, str):
                data = data.encode("latin1")
            file_size = len(data)
            with open(target_path, "wb") as target_file:
                output = create_sink_pipeline(
                    output=target_file, file_size=file_size, metadata=metadata, key_lookup=lookup, throttle_time=0)
                output.write(data)
            return file_size, metadata
        except Exception:
            # Never leave a partially written file behind
            if os.path.isfile(target_path):
                os.unlink(target_path)
            raise
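Note the error path: if anything fails after the target file has been created, fetch() unlinks the partial file before re-raising, so a half-written restore target is never left behind. A self-contained sketch of just that pattern (FlakySource is a made-up stand-in for the storage transfer):

import os

class FlakySource:
    def read_chunks(self):
        yield b"first chunk"
        raise IOError("connection reset mid-transfer")

def fetch_to_file(source, target_path):
    try:
        with open(target_path, "wb") as target_file:
            for chunk in source.read_chunks():
                target_file.write(chunk)
    except Exception:
        # Remove the partial file, then let the caller see the original error
        if os.path.isfile(target_path):
            os.unlink(target_path)
        raise

try:
    fetch_to_file(FlakySource(), "/tmp/demo.restore")
except IOError:
    print(os.path.isfile("/tmp/demo.restore"))  # False: the partial file was removed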
Example No. 4
    def _get_delta_basebackup_files(self, site, storage, metadata,
                                    basebackup_name_to_delete, backups_to_keep):
        all_hexdigests = set()
        keep_hexdigests = set()

        basebackup_data_files = []
        for backup_name in [basebackup_name_to_delete] + [back["name"] for back in backups_to_keep]:
            delta_backup_key = os.path.join(self._get_site_prefix(site), "basebackup", backup_name)
            bmeta_compressed = storage.get_contents_to_string(delta_backup_key)[0]
            with rohmufile.file_reader(fileobj=io.BytesIO(bmeta_compressed), metadata=metadata,
                                       key_lookup=config.key_lookup_for_site(self.config, site)) as input_obj:
                meta = extract_pghoard_delta_v1_metadata(input_obj)

            manifest = meta["manifest"]
            snapshot_result = manifest["snapshot_result"]
            backup_state = snapshot_result["state"]
            files = backup_state["files"]

            backup_hexdigests = {delta_file["hexdigest"] for delta_file in files if delta_file["hexdigest"]}
            all_hexdigests |= backup_hexdigests

            if backup_name != basebackup_name_to_delete:
                # Keep the data file if any backup we retain still references it
                keep_hexdigests |= backup_hexdigests

        # Digests referenced only by the backup being deleted are safe to remove
        extra_hexdigests = all_hexdigests - keep_hexdigests
        for hexdigest in extra_hexdigests:
            basebackup_data_files.append(os.path.join(self._get_site_prefix(site), "basebackup_delta", hexdigest))

        return basebackup_data_files
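The set arithmetic is the heart of this method: a delta data file may be deleted only if no surviving backup still references its hexdigest. A minimal sketch with made-up digests:

backup_manifests = {
    "backup_a": {"abc123", "def456"},  # the backup being deleted
    "backup_b": {"def456", "0a1b2c"},  # a backup we keep
}
to_delete = "backup_a"

all_hexdigests = set().union(*backup_manifests.values())
keep_hexdigests = set().union(
    *(digests for name, digests in backup_manifests.items() if name != to_delete)
)
# Only digests referenced solely by the deleted backup may be removed
print(sorted(all_hexdigests - keep_hexdigests))  # ['abc123']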
Example No. 5
    def _get_basebackup(self, pgdata, basebackup, site,
                        primary_conninfo=None,
                        recovery_end_command=None,
                        recovery_target_action=None,
                        recovery_target_name=None,
                        recovery_target_time=None,
                        recovery_target_xid=None,
                        restore_to_master=None,
                        overwrite=False,
                        tablespace_mapping=None):
        targets = [recovery_target_name, recovery_target_time, recovery_target_xid]
        if sum(flag is not None for flag in targets) > 1:
            raise RestoreError("Specify at most one of recovery_target_name, "
                               "recovery_target_time or recovery_target_xid")

        # If the basebackup to restore was given as "latest" or is implied by a
        # recovery target time, figure out which one it actually is
        if recovery_target_time:
            try:
                recovery_target_time = dateutil.parser.parse(recovery_target_time)
            except (TypeError, ValueError) as ex:
                raise RestoreError("recovery_target_time {!r}: {}".format(recovery_target_time, ex))
            basebackup = self._find_nearest_basebackup(recovery_target_time)
        elif basebackup == "latest":
            basebackup = self._find_nearest_basebackup()

        # Grab basebackup metadata to make sure it exists and to look up tablespace requirements
        metadata = self.storage.get_basebackup_metadata(basebackup)

        # Make sure we have a proper place to write the $PGDATA and possible tablespaces
        dirs_to_create = []
        dirs_to_recheck = []
        dirs_to_wipe = []

        if not os.path.exists(pgdata):
            dirs_to_create.append(pgdata)
        elif overwrite:
            dirs_to_create.append(pgdata)
            dirs_to_wipe.append(pgdata)
        elif os.listdir(pgdata) in ([], ["lost+found"]):
            # Allow empty directories as well as ext3/4 mount points to be used, but check that we can write to them
            dirs_to_recheck.append(["$PGDATA", pgdata])
        else:
            raise RestoreError("$PGDATA target directory {!r} exists, is not empty and --overwrite not specified, aborting."
                               .format(pgdata))

        tablespaces = {}
        tablespace_mapping = tablespace_mapping or {}  # caller may pass None
        tsmetare = re.compile("^tablespace-name-([0-9]+)$")
        for kw, value in metadata.items():
            match = tsmetare.match(kw)
            if not match:
                continue
            tsoid = match.group(1)
            tsname = value
            tspath = tablespace_mapping.pop(tsname, metadata["tablespace-path-{}".format(tsoid)])
            if not os.path.exists(tspath):
                raise RestoreError("Tablespace {!r} target directory {!r} does not exist, aborting."
                                   .format(tsname, tspath))
            if os.listdir(tspath) not in ([], ["lost+found"]):
                # Only empty directories (or bare ext3/4 mount points containing just lost+found) may be reused
                raise RestoreError("Tablespace {!r} target directory {!r} exists but is not empty, aborting."
                                   .format(tsname, tspath))

            print("Using existing empty directory {!r} for tablespace {!r}".format(tspath, tsname))
            tablespaces[tsname] = {
                "oid": int(tsoid),
                "path": tspath,
            }
            dirs_to_recheck.append(["Tablespace {!r}".format(tsname), tspath])

        # We .pop() the elements of tablespace_mapping above - if mappings are given they must all exist or the
        # user probably made a typo with tablespace names, abort in that case.
        if tablespace_mapping:
            raise RestoreError("Tablespace mapping for {} was requested, but the tablespaces are not present in the backup"
                               .format(sorted(tablespace_mapping)))

        # First check that the existing (empty) directories are writable, then possibly wipe any directories as
        # requested by --overwrite and finally create the new dirs
        for diruse, dirname in dirs_to_recheck:
            try:
                tempfile.TemporaryFile(dir=dirname).close()
            except PermissionError:
                raise RestoreError("{} target directory {!r} is empty, but not writable, aborting."
                                   .format(diruse, dirname))

        for dirname in dirs_to_wipe:
            shutil.rmtree(dirname)
        for dirname in dirs_to_create:
            os.makedirs(dirname)
            os.chmod(dirname, 0o700)

        with tempfile.TemporaryFile(dir=self.config["backup_location"], prefix="basebackup.", suffix=".pghoard") as tmp:
            self.storage.get_basebackup_file_to_fileobj(basebackup, tmp)
            tmp.seek(0)

            with rohmufile.file_reader(fileobj=tmp, metadata=metadata,
                                       key_lookup=config.key_lookup_for_site(self.config, site)) as input_obj:
                if metadata.get("format") == "pghoard-bb-v1":
                    self._extract_pghoard_bb_v1(input_obj, pgdata, tablespaces)
                else:
                    self._extract_basic(input_obj, pgdata)

        create_recovery_conf(
            dirpath=pgdata,
            site=site,
            port=self.config["http_port"],
            primary_conninfo=primary_conninfo,
            recovery_end_command=recovery_end_command,
            recovery_target_action=recovery_target_action,
            recovery_target_name=recovery_target_name,
            recovery_target_time=recovery_target_time,
            recovery_target_xid=recovery_target_xid,
            restore_to_master=restore_to_master,
        )

        print("Basebackup restoration complete.")
        print("You can start PostgreSQL by running pg_ctl -D %s start" % pgdata)
        print("On systemd based systems you can run systemctl start postgresql")
        print("On SYSV Init based systems you can run /etc/init.d/postgresql start")