Example #1
    def backup_list(self, query, detail):
        """
        Lists base backups and basic information about them

        """
        import csv

        from wal_e.storage.s3_storage import BackupInfo

        s3_conn = self.new_connection()

        bl = s3_worker.BackupList(s3_conn,
                                  s3_storage.StorageLayout(self.s3_prefix),
                                  detail)

        # If there is no query, list every backup; otherwise, list only
        # the backups matching the query.
        if query is None:
            bl_iter = bl
        else:
            bl_iter = bl.find_all(query)

        # TODO: support switchable formats for different needs.
        w_csv = csv.writer(sys.stdout, dialect='excel-tab')
        w_csv.writerow(BackupInfo._fields)

        for backup_info in bl_iter:
            w_csv.writerow(backup_info)

        sys.stdout.flush()
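
backup_list emits its listing with csv's built-in 'excel-tab' dialect, i.e. tab-separated rows on stdout. A self-contained sketch of that output path follows; the header and row values are illustrative stand-ins, not WAL-E's actual BackupInfo fields:

    import csv
    import sys

    # Illustrative field names and values; the real columns come from
    # wal_e.storage.s3_storage.BackupInfo._fields.
    w_csv = csv.writer(sys.stdout, dialect='excel-tab')
    w_csv.writerow(['name', 'last_modified'])
    w_csv.writerow(['base_000000010000000000000002_00000032',
                    '2017-01-01T00:00:00Z'])
    sys.stdout.flush()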
Example #2
    def database_s3_fetch(self, pg_cluster_dir, backup_name, pool_size):
        """Fetch the base backup matching backup_name into pg_cluster_dir."""

        if os.path.exists(os.path.join(pg_cluster_dir, 'postmaster.pid')):
            raise UserException(
                msg='attempting to overwrite a live data directory',
                detail='Found a postmaster.pid lockfile; aborting.',
                hint='Shut down postgres. If there is a stale lockfile, '
                'then remove it after being very sure postgres is not '
                'running.')

        layout = s3_storage.StorageLayout(self.s3_prefix)

        # One S3 connection per worker, so concurrent fetchers never
        # share a connection.
        s3_connections = []
        for i in xrange(pool_size):
            s3_connections.append(self.new_connection())

        bl = s3_worker.BackupList(s3_connections[0],
                                  s3_storage.StorageLayout(self.s3_prefix),
                                  detail=False)

        # Find the backups matching the query; exactly one must qualify,
        # and the branches below report anything else as a user error.
        backups = list(bl.find_all(backup_name))
        if len(backups) == 0:
            raise UserException(
                msg='no backups found for fetching',
                detail='No backup matching the query {0} could be '
                'located.'.format(backup_name))
        elif len(backups) > 1:
            raise UserException(
                msg='more than one backup found for fetching',
                detail='More than one backup matching the query {0} could '
                'be located.'.format(backup_name),
                hint='To list qualifying backups, '
                'try "wal-e backup-list QUERY".')

        # There must be exactly one qualifying backup at this point.
        assert len(backups) == 1
        backup_info = backups[0]
        # The return value here is discarded; the TarPartitionLister below
        # is handed the layout and backup_info directly.
        layout.basebackup_tar_partition_directory(backup_info)

        # Enumerate the tar partitions that make up the base backup.
        partition_iter = s3_worker.TarPartitionLister(
            s3_connections[0], layout, backup_info)

        assert len(s3_connections) == pool_size
        # One fetcher per pooled connection; the final argument signals
        # whether GPG decryption is needed.
        fetchers = []
        for i in xrange(pool_size):
            fetchers.append(s3_worker.BackupFetcher(
                s3_connections[i], layout, backup_info, pg_cluster_dir,
                (self.gpg_key_id is not None)))
        assert len(fetchers) == pool_size

        # Fan the partition fetches out over a bounded greenlet pool,
        # assigning partitions to fetchers round-robin.
        p = gevent.pool.Pool(size=pool_size)
        fetcher_cycle = itertools.cycle(fetchers)
        for part_name in partition_iter:
            p.spawn(
                self._exception_gather_guard(
                    fetcher_cycle.next().fetch_partition),
                part_name)

        p.join(raise_error=True)
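
Two pieces of the loop above are worth calling out: gevent.pool.Pool caps the number of concurrent greenlets at pool_size, and itertools.cycle deals partitions out to the fetchers round-robin, so each S3 connection carries roughly the same load. (_exception_gather_guard is a WAL-E helper not shown in this listing; from its use here it evidently wraps each callable so that worker failures surface when the pool is joined.) A stripped-down sketch of the same dispatch pattern, with a print standing in for BackupFetcher.fetch_partition:

    import itertools

    import gevent.pool

    def fetch_partition(worker_id, part_name):
        # Stand-in for BackupFetcher.fetch_partition.
        print('worker {0} fetching {1}'.format(worker_id, part_name))

    pool_size = 3
    p = gevent.pool.Pool(size=pool_size)
    worker_cycle = itertools.cycle(range(pool_size))
    for part_name in ['part_00000000', 'part_00000001',
                      'part_00000002', 'part_00000003']:
        p.spawn(fetch_partition, next(worker_cycle), part_name)

    # raise_error=True re-raises a failing greenlet's exception in the
    # caller, instead of leaving it recorded on the greenlet object.
    p.join(raise_error=True)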