Example #1
    def backup_list(self, query, detail):
        """
        Lists base backups and basic information about them

        """
        import csv

        from wal_e.storage.s3_storage import BackupInfo

        s3_conn = self.new_connection()

        bl = s3_worker.BackupList(s3_conn,
                                  s3_storage.StorageLayout(self.s3_prefix),
                                  detail)

        # If there is no query, return an exhaustive listing; otherwise
        # find backups matching the query instead.
        if query is None:
            bl_iter = bl
        else:
            bl_iter = bl.find_all(query)

        # TODO: support switchable formats for different needs.
        w_csv = csv.writer(sys.stdout, dialect='excel-tab')
        w_csv.writerow(BackupInfo._fields)

        for backup_info in bl_iter:
            w_csv.writerow(backup_info)

        sys.stdout.flush()
Example #2
    def delete_all(self, dry_run, layout=None):
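        """Delete all backups and WAL data in the given storage layout.

        When dry_run is set, nothing is actually removed.
        """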
        if layout is None:
            layout = s3_storage.StorageLayout(self.s3_prefix)

        s3_conn = self.new_connection()
        delete_cxt = s3_worker.DeleteFromContext(s3_conn, layout, dry_run)
        delete_cxt.delete_everything()
Example #3
    def delete_old_versions(self, dry_run):
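        """Delete backups laid out under obsolete storage versions."""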
        assert s3_storage.CURRENT_VERSION not in s3_storage.OBSOLETE_VERSIONS

        for obsolete_version in s3_storage.OBSOLETE_VERSIONS:
            layout = s3_storage.StorageLayout(self.s3_prefix,
                                              version=obsolete_version)
            self.delete_all(dry_run, layout)
Example #4
    def delete_old_versions(self, dry_run):
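        """Delete backups under a hard-coded list of obsolete versions."""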
        obsolete_versions = ('004', '003', '002', '001', '000')
        assert s3_storage.CURRENT_VERSION not in obsolete_versions

        for obsolete_version in obsolete_versions:
            layout = s3_storage.StorageLayout(self.s3_prefix,
                                              version=obsolete_version)
            self.delete_all(dry_run, layout)
Example #5
def test_empty_latest_listing():
    """Test listing a 'backup-list LATEST' on an empty prefix."""

    bucket_name = 'wal-e-test-empty-listing'
    layout = s3_storage.StorageLayout(
        's3://{0}/test-prefix'.format(bucket_name))

    with FreshBucket(bucket_name,
                     host='s3.amazonaws.com',
                     calling_format=OrdinaryCallingFormat()) as fb:
        fb.create()
        bl = BackupList(fb.conn, layout, False)
        found = list(bl.find_all('LATEST'))
        assert len(found) == 0
Example #6
    def delete_before(self, dry_run, segment_info):
        """Delete backups and WAL data older than segment_info."""
        layout = s3_storage.StorageLayout(self.s3_prefix)
        s3_conn = self.new_connection()
        delete_cxt = s3_worker.DeleteFromContext(s3_conn, layout, dry_run)
        delete_cxt.delete_before(segment_info)
Example #7
    def database_s3_fetch(self, pg_cluster_dir, backup_name, pool_size):
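        """Restore the named base backup from S3 into pg_cluster_dir.

        Refuses to overwrite a directory that appears to hold a running
        cluster, resolves backup_name to exactly one backup, then
        fetches its tar partitions with pool_size concurrent workers.
        """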

        if os.path.exists(os.path.join(pg_cluster_dir, 'postmaster.pid')):
            raise UserException(
                msg='attempting to overwrite a live data directory',
                detail='Found a postmaster.pid lockfile; aborting',
                hint='Shut down postgres. If there is a stale lockfile, '
                'then remove it after being very sure postgres is not '
                'running.')

        layout = s3_storage.StorageLayout(self.s3_prefix)

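        # Open one S3 connection per worker so the fetchers can
        # download partitions concurrently.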
        s3_connections = []
        for i in xrange(pool_size):
            s3_connections.append(self.new_connection())

        bl = s3_worker.BackupList(s3_connections[0],
                                  s3_storage.StorageLayout(self.s3_prefix),
                                  detail=False)

        # Find the backup matching backup_name, e.g. "LATEST" or a
        # specific base backup name.
        backups = list(bl.find_all(backup_name))
        if len(backups) == 0:
            raise UserException(
                msg='no backups found for fetching',
                detail='No backup matching the query {0} '
                'could be located.'.format(backup_name))
        elif len(backups) > 1:
            raise UserException(
                msg='more than one backup found for fetching',
                detail='More than one backup matching the query '
                '{0} was found.'.format(backup_name),
                hint='To list qualifying backups, '
                'try "wal-e backup-list QUERY".')

        # There must be exactly one qualifying backup at this point.
        assert len(backups) == 1
        backup_info = backups[0]
        layout.basebackup_tar_partition_directory(backup_info)

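        # Enumerate every tar partition that makes up the base backup.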
        partition_iter = s3_worker.TarPartitionLister(
            s3_connections[0], layout, backup_info)

        assert len(s3_connections) == pool_size
        fetchers = []
        for i in xrange(pool_size):
            fetchers.append(s3_worker.BackupFetcher(
                    s3_connections[i], layout, backup_info, pg_cluster_dir,
                    (self.gpg_key_id is not None)))
        assert len(fetchers) == pool_size

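        # Spawn one download per partition, cycling through the
        # fetchers so at most pool_size downloads run at once.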
        p = gevent.pool.Pool(size=pool_size)
        fetcher_cycle = itertools.cycle(fetchers)
        for part_name in partition_iter:
            p.spawn(
                self._exception_gather_guard(
                    fetcher_cycle.next().fetch_partition),
                part_name)

        p.join(raise_error=True)