def pssh_run(self, hosts, command, hosts_variables=None, ssh_client=None):
        """
        Runs a command on a list of hosts using pssh under the hood.
        Returns True on success or False on error.
        """
        if ssh_client is None:
            ssh_client = ParallelSSHClient
        pssh_run_success = False
        success = []
        error = []
        i = 1

        username = self.config.ssh.username if self.config.ssh.username != '' else None
        port = int(self.config.ssh.port)
        pkey = self.config.ssh.key_file if self.config.ssh.key_file != '' else None
        cert_file = self.config.ssh.cert_file if self.config.ssh.cert_file != '' else None

        logging.info('Executing "{command}" on the following nodes {hosts} with a parallelism/pool size of {pool_size}'
                     .format(command=command, hosts=hosts, pool_size=self.pool_size))

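        # Split the host list into chunks of at most pool_size and run the command on each chunk in parallel.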
        for parallel_hosts in divide_chunks(hosts, self.pool_size):

            client = ssh_client(parallel_hosts,
                                forward_ssh_agent=True,
                                pool_size=len(parallel_hosts),
                                user=username,
                                port=port,
                                pkey=pkey,
                                cert_file=cert_file)
            logging.debug('Batch #{i}: Running "{command}" on nodes {hosts} with a parallelism of {pool_size}'
                          .format(i=i, command=command, hosts=parallel_hosts, pool_size=len(parallel_hosts)))
            output = client.run_command(command, host_args=hosts_variables,
                                        sudo=medusa.utils.evaluate_boolean(self.config.cassandra.use_sudo))
            client.join(output)

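            # Partition per-host outputs by exit code to track which nodes succeeded and which failed.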
            success = success + list(filter(lambda host_output: host_output.exit_code == 0, output))
            error = error + list(filter(lambda host_output: host_output.exit_code != 0, output))
            i += 1

        # Report on execution status
        if len(success) == len(hosts):
            logging.info('Job executing "{}" ran and finished successfully on all nodes.'
                         .format(command))
            pssh_run_success = True
        elif len(error) > 0:
            logging.error('Job executing "{}" ran and finished with errors on the following nodes: {}'
                          .format(command, sorted(set(map(lambda host_output: host_output.host, error)))))
            display_output(error)
        else:
            err_msg = 'Something unexpected happened while running the pssh command'
            logging.error(err_msg)
            raise Exception(err_msg)

        return pssh_run_success
def download_data(storageconfig, backup, fqtns_to_restore, destination):
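    """
    Downloads the files listed in the backup manifest into the destination directory,
    limited to the tables in fqtns_to_restore (all tables when that set is empty).
    """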
    storage = Storage(config=storageconfig)
    manifest = json.loads(backup.manifest)

    for section in manifest:

        fqtn = "{}.{}".format(section['keyspace'], section['columnfamily'])
        dst = destination / section['keyspace'] / section['columnfamily']
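        # Build the list of remote source paths for this table from the manifest section.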
        srcs = [
            '{}{}'.format(
                storage.storage_driver.get_path_prefix(backup.data_path),
                obj['path']) for obj in section['objects']
        ]

        if len(srcs) > 0 and (len(fqtns_to_restore) == 0
                              or fqtn in fqtns_to_restore):
            logging.debug('Downloading %s files to %s', len(srcs), dst)

            dst.mkdir(parents=True)

            # check for hidden sub-folders in the table directory
            # (e.g. secondary indices which live in table/.table_idx)
            dst_subfolders = {
                dst / src.parent.name
                for src in map(pathlib.Path, srcs)
                if src.parent.name.startswith('.')
            }
            # create the sub-folders so the downloads actually work
            for subfolder in dst_subfolders:
                subfolder.mkdir(parents=False)

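            # Download in fixed-size batches so a single call stays within the gsutil per-invocation file limit.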
            for src_batch in divide_chunks(srcs, GSUTIL_MAX_FILES_PER_CHUNK):
                storage.storage_driver.download_blobs(src_batch, dst)
        elif len(srcs) == 0 and (len(fqtns_to_restore) == 0
                                 or fqtn in fqtns_to_restore):
            logging.debug('There is nothing to download for {}'.format(fqtn))
        else:
            logging.debug(
                'Download of {} was not requested, skipping'.format(fqtn))

    logging.info('Downloading backup metadata...')
    storage.storage_driver.download_blobs(
        srcs=['{}'.format(path) for path in
              [backup.manifest_path, backup.schema_path, backup.tokenmap_path]],
        dest=destination)
def backup_snapshots(storage, manifest, node_backup, node_backup_cache,
                     snapshot):
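    """
    Uploads the files found in the snapshot directories, reusing objects already present
    in the node backup cache, and appends one manifest entry per table.
    Returns the total number of files processed.
    """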
    try:
        num_files = 0
        for snapshot_path in snapshot.find_dirs():
            logging.debug("Backing up {}".format(snapshot_path))

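            # Ask the cache which files still need uploading and which were already backed up previously.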
            needs_backup, already_backed_up = node_backup_cache.replace_or_remove_if_cached(
                keyspace=snapshot_path.keyspace,
                columnfamily=snapshot_path.columnfamily,
                srcs=list(snapshot_path.list_files()))

            num_files += len(needs_backup) + len(already_backed_up)

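            # Compute the remote destination path for this keyspace/table.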
            dst_path = str(
                node_backup.datapath(keyspace=snapshot_path.keyspace,
                                     columnfamily=snapshot_path.columnfamily))
            logging.debug("destination path: {}".format(dst_path))

            manifest_objects = list()
            if len(needs_backup) > 0:
                # If there are many files to upload, split them into batches
                # because 'gsutil cp' cannot handle too many source files via STDIN.
                for src_batch in divide_chunks(needs_backup,
                                               GSUTIL_MAX_FILES_PER_CHUNK):
                    manifest_objects += storage.storage_driver.upload_blobs(
                        src_batch, dst_path)

            # Reintroduce objects that were already backed up into the manifest (differential backups)
            for obj in already_backed_up:
                manifest_objects.append(obj)

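            # Record every object (newly uploaded and cached) for this table in the backup manifest.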
            manifest.append(
                make_manifest_object(node_backup.fqdn, snapshot_path,
                                     manifest_objects, storage))

        return num_files
    except Exception as e:
        logging.error('An error occurred during the backup: {}'.format(str(e)))
        traceback.print_exc()
        raise e