Example 1
    def wal_archive(self, wal_path, concurrency=1):
        """
        Uploads a WAL file to S3 or Windows Azure Blob Service

        This code is intended to typically be called from Postgres's
        archive_command feature.

        :param wal_path: filesystem path of the WAL segment to archive.
        :param concurrency: maximum number of segments uploaded in
            parallel; additional candidates are discovered by scanning
            the Postgres archive_status directory.
        """

        # Upload the segment expressly indicated.  It's special
        # relative to other uploads when parallel wal-push is enabled,
        # in that it's not desirable to tweak its .ready/.done files
        # in archive_status.
        xlog_dir = os.path.dirname(wal_path)
        segment = WalSegment(wal_path, explicit=True)
        uploader = WalUploader(self.layout, self.creds, self.gpg_key_id)
        group = WalTransferGroup(uploader)
        group.start(segment)

        # Upload any additional wal segments up to the specified
        # concurrency by scanning the Postgres archive_status
        # directory.
        started = 1
        seg_stream = WalSegment.from_ready_archive_status(xlog_dir)
        while started < concurrency:
            try:
                # Use the next() builtin rather than the iterator's
                # .next() method: the latter exists only on Python 2.
                other_segment = next(seg_stream)
            except StopIteration:
                break

            # The explicitly-indicated segment is already in flight;
            # do not start it a second time.
            if other_segment.path != wal_path:
                group.start(other_segment)
                started += 1

        # Wait for uploads to finish.
        group.join()
Example 2
    def wal_archive(self, wal_path, concurrency=1):
        """
        Uploads a WAL file to S3 or Windows Azure Blob Service

        This code is intended to typically be called from Postgres's
        archive_command feature.

        :param wal_path: filesystem path of the WAL segment to archive.
        :param concurrency: maximum number of segments uploaded in
            parallel; additional candidates are discovered by scanning
            the Postgres archive_status directory.
        """

        # Upload the segment expressly indicated.  It's special
        # relative to other uploads when parallel wal-push is enabled,
        # in that it's not desirable to tweak its .ready/.done files
        # in archive_status.
        xlog_dir = os.path.dirname(wal_path)
        segment = WalSegment(wal_path, explicit=True)
        uploader = WalUploader(self.layout, self.creds, self.gpg_key_id)
        group = WalTransferGroup(uploader)
        group.start(segment)

        # Upload any additional wal segments up to the specified
        # concurrency by scanning the Postgres archive_status
        # directory.
        started = 1
        seg_stream = WalSegment.from_ready_archive_status(xlog_dir)
        while started < concurrency:
            try:
                # next() builtin, not seg_stream.next(): the method
                # form is Python 2 only.
                other_segment = next(seg_stream)
            except StopIteration:
                break

            # Skip the explicitly-indicated segment; it was already
            # started above.
            if other_segment.path != wal_path:
                group.start(other_segment)
                started += 1

        # Wait for uploads to finish.
        group.join()
Example 3
    def wal_prefetch(self, base, segment_name):
        """
        Download one WAL segment into the prefetch area rooted at *base*.

        :param base: directory under which the prefetch directories live.
        :param segment_name: name of the WAL segment to fetch.
        :raises SystemExit: when the download fails, so the download
            context manager does not link a partial file into place.
        """
        url = '{0}://{1}/{2}'.format(self.layout.scheme,
                                     self.layout.store_name(),
                                     self.layout.wal_path(segment_name))
        pd = prefetch.Dirs(base)
        seg = WalSegment(segment_name)
        pd.create(seg)
        with pd.download(seg) as d:
            logger.info(msg='begin wal restore',
                        structured={
                            'action': 'wal-prefetch',
                            'key': url,
                            'seg': segment_name,
                            'prefix': self.layout.path_prefix,
                            'state': 'begin'
                        })

            ret = do_lzop_get(self.creds,
                              url,
                              d.dest,
                              self.gpg_key_id is not None,
                              do_retry=False)
            if not ret:
                # If the download failed, AtomicDownload.__exit__()
                # must be informed so that it does not link an empty
                # archive file into place.
                #
                # We thus raise SystemExit. This is acceptable for
                # prefetch since prefetch execution is daemonized,
                # i.e. PostgreSQL has no knowledge of prefetch exit
                # codes.
                raise SystemExit('Failed to prefetch %s' % segment_name)

            logger.info(msg='complete wal restore',
                        structured={
                            'action': 'wal-prefetch',
                            'key': url,
                            'seg': segment_name,
                            'prefix': self.layout.path_prefix,
                            'state': 'complete'
                        })

            return ret
Example 4
    def wal_archive(self, wal_path, concurrency=1):
        """
        Uploads a WAL file to S3 or Windows Azure Blob Service

        This code is intended to typically be called from Postgres's
        archive_command feature.

        :param wal_path: filesystem path of the WAL segment to archive.
        :param concurrency: maximum number of segments uploaded in
            parallel; additional candidates are discovered by scanning
            the Postgres archive_status directory.
        :raises UserException: when the indicated file cannot be found.
        """

        # Upload the segment expressly indicated.  It's special
        # relative to other uploads when parallel wal-push is enabled,
        # in that it's not desirable to tweak its .ready/.done files
        # in archive_status.
        xlog_dir = os.path.dirname(wal_path)
        segment = WalSegment(wal_path, explicit=True)
        uploader = WalUploader(self.layout, self.creds, self.gpg_key_id)
        group = WalTransferGroup(uploader)
        group.start(segment)

        # Upload any additional wal segments up to the specified
        # concurrency by scanning the Postgres archive_status
        # directory.
        started = 1
        seg_stream = WalSegment.from_ready_archive_status(xlog_dir)
        while started < concurrency:
            try:
                other_segment = next(seg_stream)
            except StopIteration:
                break

            # The explicit segment is already in flight; do not start
            # it a second time.
            if other_segment.path != wal_path:
                group.start(other_segment)
                started += 1

        try:
            # Wait for uploads to finish.
            group.join()
        except EnvironmentError as e:
            if e.errno == errno.ENOENT:
                # NOTE(review): a stray debug print(e) was removed
                # here; the UserException below already carries the
                # operating-system error details.
                raise UserException(
                    msg='could not find file for wal-push',
                    detail=('The operating system reported: {0} {1}'.format(
                        e.strerror, repr(e.filename))))
            raise
Example 5
    def wal_archive(self, wal_path, concurrency=1):
        """
        Uploads a WAL file to S3 or Windows Azure Blob Service

        This code is intended to typically be called from Postgres's
        archive_command feature.
        """

        # The expressly-indicated segment is special when parallel
        # wal-push is enabled: its .ready/.done files in
        # archive_status must not be tweaked, hence explicit=True.
        transfer_group = WalTransferGroup(
            WalUploader(self.layout, self.creds, self.gpg_key_id))
        transfer_group.start(WalSegment(wal_path, explicit=True))

        # Scan the Postgres archive_status directory for more ready
        # segments, starting additional uploads until the concurrency
        # budget is exhausted.
        in_flight = 1
        ready_segments = WalSegment.from_ready_archive_status(
            os.path.dirname(wal_path))
        while in_flight < concurrency:
            try:
                candidate = next(ready_segments)
            except StopIteration:
                break

            # The explicit segment is already uploading; skip it.
            if candidate.path == wal_path:
                continue

            transfer_group.start(candidate)
            in_flight += 1

        try:
            # Block until every started upload has finished.
            transfer_group.join()
        except EnvironmentError as e:
            if e.errno == errno.ENOENT:
                print(e)
                raise UserException(
                    msg='could not find file for wal-push',
                    detail=('The operating system reported: {0} {1}'
                            .format(e.strerror, repr(e.filename))))
            raise
Example 6
    def wal_prefetch(self, base, segment_name):
        """
        Download one WAL segment into the prefetch area rooted at *base*.

        Aborts via SystemExit on download failure so that the download
        context manager does not link a partial file into place.
        """
        url = '{0}://{1}/{2}'.format(self.layout.scheme,
                                     self.layout.store_name(),
                                     self.layout.wal_path(segment_name))

        def log_restore(message, state):
            # Emit a structured progress record for this prefetch.
            logger.info(msg=message,
                        structured={
                            'action': 'wal-prefetch',
                            'key': url,
                            'seg': segment_name,
                            'prefix': self.layout.path_prefix,
                            'state': state
                        })

        prefetch_dirs = prefetch.Dirs(base)
        segment = WalSegment(segment_name)
        prefetch_dirs.create(segment)

        with prefetch_dirs.download(segment) as download:
            log_restore('begin wal restore', 'begin')

            ret = do_lzop_get(self.creds, url, download.dest,
                              self.gpg_key_id is not None, do_retry=False)

            if not ret:
                # If the download failed, AtomicDownload.__exit__()
                # must be informed so that it does not link an empty
                # archive file into place.
                #
                # We thus raise SystemExit. This is acceptable for
                # prefetch since prefetch execution is daemonized.
                # I.e., PostgreSQL has no knowledge of prefetch
                # exit codes.
                raise SystemExit('Failed to prefetch %s' % segment_name)

            log_restore('complete wal restore', 'complete')
            return ret
Example 7
    def wal_restore(self, wal_name, wal_destination, prefetch_max):
        """
        Downloads a WAL file from S3 or Windows Azure Blob Service

        This code is intended to typically be called from Postgres's
        restore_command feature.

        NB: Postgres doesn't guarantee that wal_name ==
        basename(wal_path), so both are required.

        :param wal_name: logical WAL segment name as given by Postgres.
        :param wal_destination: path where Postgres wants the segment.
        :param prefetch_max: number of segments to prefetch ahead;
            values <= 0 disable the prefetch fast-path entirely.
        :returns: result of do_lzop_get, or True on a prefetch hit
            (presumably truthy on success -- confirm against
            do_lzop_get's contract).
        """
        url = '{0}://{1}/{2}'.format(self.layout.scheme,
                                     self.layout.store_name(),
                                     self.layout.wal_path(wal_name))

        if prefetch_max > 0:
            # Check for prefetch-hit.
            base = os.path.dirname(os.path.realpath(wal_destination))
            pd = prefetch.Dirs(base)
            seg = WalSegment(wal_name)

            # Kick off prefetches of upcoming segments; 'started'
            # identifies the batch so that stale prefetch state can be
            # cleared later without discarding in-progress work.
            started = start_prefetches(seg, pd, prefetch_max)
            last_size = 0

            while True:
                if pd.contains(seg):
                    # Prefetch hit: move the already-downloaded
                    # segment into place and return without an
                    # in-band download.
                    pd.promote(seg, wal_destination)
                    logger.info(msg='promoted prefetched wal segment',
                                structured={
                                    'action': 'wal-fetch',
                                    'key': url,
                                    'seg': wal_name,
                                    'prefix': self.layout.path_prefix
                                })

                    pd.clear_except(started)
                    return True

                # If there is a 'running' download, wait a bit for it
                # to make progress or finish.  However, if it doesn't
                # make progress in some amount of time, assume that
                # the prefetch process has died and go on with the
                # in-band downloading code.
                #
                # Progress is detected by comparing running_size
                # between polls; last_size starts at 0, so a first
                # poll reporting no bytes also falls through to the
                # in-band path.
                sz = pd.running_size(seg)
                if sz <= last_size:
                    break

                last_size = sz
                gevent.sleep(0.5)

            # Prefetch missed or stalled: drop state not belonging to
            # the most recent batch before downloading in-band.
            pd.clear_except(started)

        logger.info(msg='begin wal restore',
                    structured={
                        'action': 'wal-fetch',
                        'key': url,
                        'seg': wal_name,
                        'prefix': self.layout.path_prefix,
                        'state': 'begin'
                    })

        # In-band (synchronous) download of the requested segment.
        # The fourth argument (self.gpg_key_id is not None) is
        # presumably a decryption flag -- confirm against
        # do_lzop_get's signature.
        ret = do_lzop_get(self.creds, url, wal_destination, self.gpg_key_id
                          is not None)

        logger.info(msg='complete wal restore',
                    structured={
                        'action': 'wal-fetch',
                        'key': url,
                        'seg': wal_name,
                        'prefix': self.layout.path_prefix,
                        'state': 'complete'
                    })

        return ret