Example 1
import os
from subprocess import list2cmdline

# query_ndb and ssh are helpers defined elsewhere in the source module.
def purge_backup(dsn, backup_id, ssh_user, keyfile):
    """Purge the backups matching ``backup_id`` on every data node"""
    nodes = query_ndb(dsn, query=['backupdatadir'])
    for node, info in nodes.items():
        host = '%s@%s' % (ssh_user, node)
        remote_path = os.path.join(info.backupdatadir,
                                   'BACKUP',
                                   'BACKUP-%d' % backup_id)
        ssh(host,
            'rm -fr %s' % list2cmdline([remote_path]),
            keyfile=keyfile)
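
A minimal usage sketch for the function above; the management DSN, backup id, ssh user, and key path are illustrative placeholders, not values from the original source.

# Hypothetical call: remove backup 42 from every data node in the cluster.
# 1186 is the default NDB management port; all other values are made up.
purge_backup(dsn='localhost:1186',
             backup_id=42,
             ssh_user='backup',
             keyfile='/root/.ssh/id_rsa')
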
Example 2
import logging
import os
from subprocess import list2cmdline

LOG = logging.getLogger(__name__)

# query_ndb, ssh, rsync, and ClusterCommandError are defined elsewhere in
# the source module.
def archive_data_nodes(dsn,
                       backup_id,
                       ssh_user,
                       keyfile,
                       target_path):
    """Archive the backups specified by ``backup_id`` on the data nodes

    :param dsn: connection string used to query the data nodes involved
    :param backup_id: id of the backup to archive on each node
    :param ssh_user: ssh user to use when archiving data
    :param keyfile: ssh keyfile to use for authentication
    :param target_path: local path each node's backup is copied to

    :raises: ClusterError on failure
    """
    nodes = query_ndb(dsn, query=['nodegroup', 'nodeid', 'backupdatadir'])
    results = []
    for node, info in nodes.items():
        host = '%s@%s' % (ssh_user, node)
        remote_path = os.path.join(info.backupdatadir,
                                   'BACKUP',
                                   'BACKUP-%d' % backup_id)
        try:
            # Probe that the backup directory actually exists on the node.
            ssh(host,
                'ls -lah ' + list2cmdline([remote_path]),
                keyfile=keyfile)
        except ClusterCommandError as exc:
            if exc.status != 255:
                # The remote command failed, most likely because this node
                # has no backup with this id; skip it and keep going.
                LOG.error("Error when checking backup path. "
                          "Skipping backups for node %d", info.nodeid)
                continue
            # ssh itself exits with status 255 on connection/transport
            # errors, which are probably fatal for the whole archive run.
            raise

        rsync(host, keyfile, list2cmdline([remote_path]), target_path)
        LOG.info("Archived node %s with backup id %d", node, backup_id)
        results.append(info)
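
As above, a hypothetical call sketch; every concrete value is a placeholder, and ClusterError is assumed to come from the same module as the other helpers.

# Hypothetical usage: archive backup 42 from all data nodes into a local
# staging directory, treating cluster-level failures as fatal.
try:
    archive_data_nodes(dsn='localhost:1186',
                       backup_id=42,
                       ssh_user='backup',
                       keyfile='/root/.ssh/id_rsa',
                       target_path='/var/backups/ndb/')
except ClusterError as exc:
    LOG.error("Archiving failed: %s", exc)
    raise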