def restore_node(config,
                 temp_dir,
                 backup_name,
                 in_place,
                 keep_auth,
                 seeds,
                 verify,
                 keyspaces,
                 tables,
                 use_sstableloader=False):

    if in_place and keep_auth:
        logging.error(
            'Cannot keep system_auth when restoring in-place. It would be overwritten'
        )
        sys.exit(1)

    storage = Storage(config=config.storage)

    if not use_sstableloader:
        restore_node_locally(config, temp_dir, backup_name, in_place,
                             keep_auth, seeds, storage, keyspaces, tables)
    else:
        restore_node_sstableloader(config, temp_dir, backup_name, in_place,
                                   keep_auth, seeds, storage, keyspaces,
                                   tables)

    if verify:
        hostname_resolver = HostnameResolver(
            medusa.config.evaluate_boolean(
                config.cassandra.resolve_ip_addresses))
        verify_restore([hostname_resolver.resolve_fqdn()], config)
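A minimal caller sketch, assuming a loaded Medusa config object is passed in (the config-loading helper is not shown here and all literal values are illustrative):

from pathlib import Path


def restore_local_node(config):
    # hedged illustration: restore this node in place from backup 'backup1', keeping verification on
    restore_node(config,
                 temp_dir=Path('/tmp'),
                 backup_name='backup1',
                 in_place=True,
                 keep_auth=False,
                 seeds=None,
                 verify=True,
                 keyspaces={},
                 tables={},
                 use_sstableloader=False)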
Example #2
def orchestrate(config, backup_name, seed_target, temp_dir, host_list, keep_auth, bypass_checks,
                verify, keyspaces, tables, parallel_restores, use_sstableloader=False):
    monitoring = Monitoring(config=config.monitoring)
    try:
        restore_start_time = datetime.datetime.now()
        if seed_target is None and host_list is None:
            # if no target node is provided, nor a host list file, default to the local node as seed target
            hostname_resolver = HostnameResolver(medusa.utils.evaluate_boolean(config.cassandra.resolve_ip_addresses))
            seed_target = hostname_resolver.resolve_fqdn(socket.gethostbyname(socket.getfqdn()))
            logging.warning("Seed target was not provided, using the local hostname: {}".format(seed_target))

        if seed_target is not None and host_list is not None:
            err_msg = 'You must either provide a seed target or a list of hosts, not both'
            logging.error(err_msg)
            raise Exception(err_msg)

        if not temp_dir.is_dir():
            err_msg = '{} is not a directory'.format(temp_dir)
            logging.error(err_msg)
            raise Exception(err_msg)

        storage = Storage(config=config.storage)

        try:
            cluster_backup = storage.get_cluster_backup(backup_name)
        except KeyError:
            err_msg = 'No such backup --> {}'.format(backup_name)
            logging.error(err_msg)
            raise Exception(err_msg)

        restore = RestoreJob(cluster_backup, config, temp_dir, host_list, seed_target, keep_auth, verify,
                             parallel_restores, keyspaces, tables, bypass_checks, use_sstableloader)
        restore.execute()

        restore_end_time = datetime.datetime.now()
        restore_duration = restore_end_time - restore_start_time

        logging.debug('Emitting metrics')

        logging.info('Restore duration: {} seconds'.format(restore_duration.seconds))
        tags = ['medusa-cluster-restore', 'restore-duration', backup_name]
        monitoring.send(tags, restore_duration.seconds)

        tags = ['medusa-cluster-restore', 'restore-error', backup_name]
        monitoring.send(tags, 0)

        logging.debug('Done emitting metrics')
        logging.info('Successfully restored the cluster')

    except Exception as e:
        tags = ['medusa-cluster-restore', 'restore-error', backup_name]
        monitoring.send(tags, 1)

        logging.error('This error happened during the cluster restore: {}'.format(str(e)))
        traceback.print_exc()
        sys.exit(1)
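A hedged usage sketch of the host-list path: orchestrate() expects exactly one of seed_target or host_list, so restoring onto new hardware passes a host-list file and no seed target (the file path, backup name and other values below are illustrative assumptions):

from pathlib import Path


def restore_cluster_on_new_hardware(config):
    # restore the whole cluster onto different hardware, mapping nodes via a host-list file
    orchestrate(config,
                backup_name='nightly-2021-01-01',
                seed_target=None,
                temp_dir=Path('/tmp'),
                host_list='/etc/medusa/nodes.list',
                keep_auth=False,
                bypass_checks=True,
                verify=False,
                keyspaces={},
                tables={},
                parallel_restores=2)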
Example #3
def invoke_sstableloader(config, download_dir, keep_auth, fqtns_to_restore,
                         storage_port):
    hostname_resolver = HostnameResolver(
        medusa.utils.evaluate_boolean(config.cassandra.resolve_ip_addresses))
    cassandra_is_ccm = int(shlex.split(config.cassandra.is_ccm)[0])
    keyspaces = os.listdir(str(download_dir))
    for keyspace in keyspaces:
        ks_path = os.path.join(str(download_dir), keyspace)
        if os.path.isdir(ks_path) and keyspace_is_allowed_to_restore(
                keyspace, keep_auth, fqtns_to_restore):
            logging.info(
                'Restoring keyspace {} with sstableloader...'.format(ks_path))
            for table in os.listdir(str(ks_path)):
                table_path = os.path.join(str(ks_path), table)
                if os.path.isdir(table_path) and table_is_allowed_to_restore(
                        keyspace, table, fqtns_to_restore):
                    logging.debug(
                        'Restoring table {} with sstableloader...'.format(
                            table))
                    # the original default literals were masked as '******' in the source page;
                    # 'cassandra' is assumed here as it is Cassandra's stock superuser default
                    cql_username = 'cassandra' if config.cassandra.cql_username is None else config.cassandra.cql_username
                    cql_password = 'cassandra' if config.cassandra.cql_password is None else config.cassandra.cql_password
                    sstableloader_args = [
                        config.cassandra.sstableloader_bin, '-d',
                        hostname_resolver.resolve_fqdn()
                        if cassandra_is_ccm == 0 else '127.0.0.1',
                        '--conf-path', config.cassandra.config_file,
                        '--username', cql_username, '--password', cql_password,
                        '--no-progress',
                        os.path.join(ks_path, table)
                    ]
                    if storage_port != "7000":
                        sstableloader_args.append("--storage-port")
                        sstableloader_args.append(storage_port)
                    if config.cassandra.sstableloader_ts is not None and \
                       config.cassandra.sstableloader_tspw is not None and \
                       config.cassandra.sstableloader_ks is not None and \
                       config.cassandra.sstableloader_kspw is not None:
                        sstableloader_args.append("-ts")
                        sstableloader_args.append(
                            config.cassandra.sstableloader_ts)
                        sstableloader_args.append("-tspw")
                        sstableloader_args.append(
                            config.cassandra.sstableloader_tspw)
                        sstableloader_args.append("-ks")
                        sstableloader_args.append(
                            config.cassandra.sstableloader_ks)
                        sstableloader_args.append("-kspw")
                        sstableloader_args.append(
                            config.cassandra.sstableloader_kspw)

                    output = subprocess.check_output(sstableloader_args)
                    for line in output.decode('utf-8').split('\n'):
                        logging.debug(line)
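For reference, the argument list assembled above corresponds roughly to the command line below (the host name, paths and credentials are illustrative assumptions; the --storage-port and TLS flags are only appended when configured):

    sstableloader -d target-node.example.com --conf-path /etc/cassandra/cassandra.yaml \
        --username cassandra --password cassandra --no-progress \
        /tmp/medusa-restore/my_keyspace/my_table --storage-port 7001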
Example #4
class RestoreJob(object):
    def __init__(self,
                 cluster_backup,
                 config,
                 temp_dir,
                 host_list,
                 seed_target,
                 keep_auth,
                 verify,
                 pssh_pool_size,
                 keyspaces={},
                 tables={},
                 bypass_checks=False,
                 use_sstableloader=False):
        self.id = uuid.uuid4()
        self.ringmap = None
        self.cluster_backup = cluster_backup
        self.session_provider = None
        self.config = config
        self.host_list = host_list
        self.seed_target = seed_target
        self.keep_auth = keep_auth
        self.verify = verify
        self.in_place = None
        self.temp_dir = temp_dir  # temporary files
        self.work_dir = self.temp_dir / 'medusa-job-{id}'.format(id=self.id)
        self.host_map = {
        }  # Map of backup host/target host for the restore process
        self.keyspaces = keyspaces
        self.tables = tables
        self.bypass_checks = bypass_checks
        self.use_sstableloader = use_sstableloader
        self.pssh_pool_size = pssh_pool_size
        self.cassandra = Cassandra(config.cassandra)
        fqdn_resolver = medusa.config.evaluate_boolean(
            self.config.cassandra.resolve_ip_addresses)
        self.fqdn_resolver = HostnameResolver(fqdn_resolver)

    def execute(self):
        logging.info('Ensuring the backup is found and is complete')
        if not self.cluster_backup.is_complete():
            raise Exception('Backup is not complete')

        # CASE 1 : We're restoring using a seed target. Source/target mapping will be built based on tokenmap.
        if self.seed_target is not None:
            self.session_provider = CqlSessionProvider([self.seed_target],
                                                       self.config.cassandra)

            with self.session_provider.new_session() as session:
                self._populate_ringmap(self.cluster_backup.tokenmap,
                                       session.tokenmap())

        # CASE 2 : We're restoring a backup on a different cluster
        if self.host_list is not None:
            logging.info('Restore will happen on new hardware')
            self.in_place = False
            self._populate_hostmap()
            logging.info(
                'Starting Restore on all the nodes in this list: {}'.format(
                    self.host_list))

        self._restore_data()

    def _pssh_run(self, hosts, command, hosts_variables=None):
        """
        Runs a command on the given list of hosts using parallel-ssh under the hood.
        Per-host output is not returned; display_output() reports it separately.
        Return: True (success) or False (error)
        """
        logging.debug("Running pssh command on {}".format(hosts))
        pssh_run_success = False
        username = self.config.ssh.username if self.config.ssh.username != '' else None
        port = self.config.ssh.port
        pkey = None
        if self.config.ssh.key_file is not None and self.config.ssh.key_file != '':
            pkey = paramiko.RSAKey.from_private_key_file(
                self.config.ssh.key_file, None)

        client = ParallelSSHClient(hosts,
                                   forward_ssh_agent=True,
                                   pool_size=self.pssh_pool_size,
                                   user=username,
                                   port=port,
                                   pkey=pkey)
        logging.info('Executing "{}" on all nodes.'.format(command))
        output = client.run_command(command,
                                    host_args=hosts_variables,
                                    sudo=True)
        client.join(output)

        success = list(
            filter(
                lambda host_output: host_output.exit_code == 0,
                list(map(lambda host_output: host_output[1], output.items()))))
        error = list(
            filter(
                lambda host_output: host_output.exit_code != 0,
                list(map(lambda host_output: host_output[1], output.items()))))

        # Report on execution status
        if len(success) == len(hosts):
            logging.info(
                'Job executing "{}" ran and finished successfully on all nodes.'
                .format(command))
            pssh_run_success = True
        elif len(error) > 0:
            logging.info(
                'Job executing "{}" ran and finished with errors on the following nodes: {}'
                .format(
                    command,
                    sorted(
                        set(map(lambda host_output: host_output.host,
                                error)))))
            self.display_output(error)
        else:
            err_msg = 'Something unexpected happened while running pssh command'
            logging.error(err_msg)
            raise Exception(err_msg)

        return pssh_run_success

    def display_output(self, host_outputs):
        for host_out in host_outputs:
            for line in host_out.stdout:
                logging.info("{}-stdout: {}".format(host_out.host, line))
            for line in host_out.stderr:
                logging.info("{}-stderr: {}".format(host_out.host, line))

    def _validate_ringmap(self, tokenmap, target_tokenmap):
        for host, ring_item in target_tokenmap.items():
            if not ring_item.get('is_up'):
                raise Exception('Target {host} is not up!'.format(host=host))
        if len(target_tokenmap) != len(tokenmap):
            return False
        return True

    def _populate_ringmap(self, tokenmap, target_tokenmap):
        def _tokens_from_ringitem(ringitem):
            return ','.join(map(str, ringitem['tokens']))

        def _token_counts_per_host(tokenmap):
            for host, ringitem in tokenmap.items():
                return len(ringitem['tokens'])

        def _hosts_from_tokenmap(tokenmap):
            hosts = set()
            for host, ringitem in tokenmap.items():
                hosts.add(host)
            return hosts

        def _chunk(my_list, nb_chunks):
            groups = []
            for i in range(nb_chunks):
                groups.append([])
            for i in range(len(my_list)):
                groups[i % nb_chunks].append(my_list[i])
            return groups
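        # illustration (not in the original): _chunk([1, 2, 3, 4, 5], 2) -> [[1, 3, 5], [2, 4]],
        # i.e. items are distributed round-robin across nb_chunks groups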

        topology_matches = self._validate_ringmap(tokenmap, target_tokenmap)
        self.in_place = self._is_restore_in_place(tokenmap, target_tokenmap)
        if self.in_place:
            logging.info(
                "Restoring on the same cluster that was the backup was taken on (in place fashion)"
            )
            self.keep_auth = False
        else:
            logging.info(
                "Restoring on a different cluster than the backup one (remote fashion)"
            )
            if self.keep_auth:
                logging.info(
                    'system_auth keyspace will be left untouched on the target nodes'
                )
            else:
                # ops might not be aware of the underlying behavior towards auth. Let's ask what to do...
                really_keep_auth = None
                while (really_keep_auth != 'Y'
                       and really_keep_auth != 'n') and not self.bypass_checks:
                    really_keep_auth = input(
                        'Do you want to skip restoring the system_auth keyspace and keep the'
                        + ' credentials of the target cluster? (Y/n)')
                self.keep_auth = (really_keep_auth == 'Y')

        if topology_matches:
            target_tokens = {
                _tokens_from_ringitem(ringitem): host
                for host, ringitem in target_tokenmap.items()
            }
            backup_tokens = {
                _tokens_from_ringitem(ringitem): host
                for host, ringitem in tokenmap.items()
            }

            backup_tokens_per_host = _token_counts_per_host(tokenmap)
            target_tokens_per_host = _token_counts_per_host(target_tokenmap)

            # we must have the same number of tokens per host in both vnode and normal clusters
            if target_tokens_per_host != backup_tokens_per_host:
                logging.info(
                    'Source/target rings have a different number of tokens per node: {}/{}'
                    .format(backup_tokens_per_host, target_tokens_per_host))
                topology_matches = False

            # if not using vnodes, the tokens must match exactly
            if backup_tokens_per_host == 1 and target_tokens.keys() != backup_tokens.keys():
                extras = target_tokens.keys() ^ backup_tokens.keys()
                logging.info(
                    'Tokenmap is differently distributed. Extra items: {}'.format(extras))
                topology_matches = False

        if topology_matches:
            # We can associate each restore node with exactly one backup node
            backup_ringmap = collections.defaultdict(list)
            target_ringmap = collections.defaultdict(list)
            for token, host in backup_tokens.items():
                backup_ringmap[token].append(host)
            for token, host in target_tokens.items():
                target_ringmap[token].append(host)

            self.ringmap = backup_ringmap
            i = 0
            for token, hosts in backup_ringmap.items():
                # take the node that has the same token list or pick the one with the same position in the map.
                restore_host = target_ringmap.get(
                    token,
                    list(target_ringmap.values())[i])[0]
                is_seed = self.fqdn_resolver.resolve_fqdn(
                    restore_host) in self._get_seeds_fqdn()
                self.host_map[restore_host] = {
                    'source': [hosts[0]],
                    'seed': is_seed
                }
                i += 1
        else:
            # Topologies are different between backup and restore clusters. Using the sstableloader for restore.
            self.use_sstableloader = True
            backup_hosts = _hosts_from_tokenmap(tokenmap)
            restore_hosts = list(_hosts_from_tokenmap(target_tokenmap))
            if len(backup_hosts) >= len(restore_hosts):
                grouped_backups = _chunk(list(backup_hosts),
                                         len(restore_hosts))
            else:
                grouped_backups = _chunk(list(backup_hosts), len(backup_hosts))
            for i in range(min([len(grouped_backups), len(restore_hosts)])):
                # associate one restore host with several backups as we don't have the same number of nodes.
                self.host_map[restore_hosts[i]] = {
                    'source': grouped_backups[i],
                    'seed': False
                }

    def _is_restore_in_place(self, backup_tokenmap, target_tokenmap):
        # If at least one node is part of both tokenmaps, then we're restoring in place
        # Otherwise we're restoring a remote cluster
        return len(set(backup_tokenmap.keys())
                   & set(target_tokenmap.keys())) > 0

    def _get_seeds_fqdn(self):
        seeds = list()
        for seed in self.cassandra.seeds:
            seeds.append(self.fqdn_resolver.resolve_fqdn(seed))
        return seeds

    def _populate_hostmap(self):
        with open(self.host_list, 'r') as f:
            for line in f.readlines():
                seed, target, source = line.replace('\n', '').split(
                    self.config.storage.host_file_separator)
                # in python, bool('False') evaluates to True. Need to test the membership as below
                self.host_map[self.fqdn_resolver.resolve_fqdn(target.strip())] \
                    = {'source': [self.fqdn_resolver.resolve_fqdn(source.strip())], 'seed': seed in ['True']}

    def _restore_data(self):
        # create workdir on each target host
        # Later: distribute a credential
        # construct command for each target host
        # invoke `nohup medusa-wrapper #{command}` on each target host
        # wait for exit on each
        logging.info('Starting cluster restore...')
        logging.info('Working directory for this execution: {}'.format(
            self.work_dir))
        for target, sources in self.host_map.items():
            logging.info(
                'About to restore on {} using {} as backup source'.format(
                    target, sources))

        logging.info(
            'This will delete all data on the target nodes and replace it with backup {}.'
            .format(self.cluster_backup.name))

        proceed = None
        while (proceed != 'Y' and proceed != 'n') and not self.bypass_checks:
            proceed = input('Are you sure you want to proceed? (Y/n)')

        if proceed == 'n':
            err_msg = 'Restore manually cancelled'
            logging.error(err_msg)
            raise Exception(err_msg)

        # work out which nodes are seeds in the target cluster
        target_seeds = [t for t, s in self.host_map.items() if s['seed']]
        logging.info("target seeds : {}".format(target_seeds))
        # work out the full list of target hosts in the cluster
        target_hosts = self.host_map.keys()
        logging.info("target hosts : {}".format(target_hosts))

        if self.use_sstableloader is False:
            # stop all target nodes
            logging.info('Stopping Cassandra on all nodes currently up')

            # Generate a Job ID for this run
            job_id = str(uuid.uuid4())
            logging.debug('Job id is: {}'.format(job_id))
            # Define command to run
            command = self.config.cassandra.stop_cmd
            logging.debug('Command to run is: {}'.format(command))

            self._pssh_run(target_hosts, command, hosts_variables={})

        else:
            # we're using the sstableloader, which requires (re)creating the schema and emptying the tables
            logging.info("Restoring schema on the target cluster")
            self._restore_schema()

        # trigger restores everywhere at once
        # pass in seed info so that non-seeds can wait for seeds before starting
        # seeds, naturally, don't wait for anything

        # Build the per-host arguments (backup sources and seed list) for the restore command
        hosts_variables = []
        for target, source in [(t, s['source'])
                               for t, s in self.host_map.items()]:
            logging.info('Restoring data on {}...'.format(target))
            seeds = '' if target in target_seeds or len(target_seeds) == 0 \
                    else '--seeds {}'.format(','.join(target_seeds))
            hosts_variables.append((','.join(source), seeds))
            command = self._build_restore_cmd(target, source, seeds)

        pssh_run_success = self._pssh_run(target_hosts,
                                          command,
                                          hosts_variables=hosts_variables)

        if not pssh_run_success:
            # we could implement a retry.
            err_msg = 'Some nodes failed to restore. Exiting'
            logging.error(err_msg)
            raise Exception(err_msg)

        logging.info(
            'Restore process is complete. The cluster should be up shortly.')

        if self.verify:
            verify_restore(target_hosts, self.config)

    def _build_restore_cmd(self, target, source, seeds):
        in_place_option = '--in-place' if self.in_place else '--remote'
        keep_auth_option = '--keep-auth' if self.keep_auth else ''
        keyspace_options = expand_repeatable_option('keyspace', self.keyspaces)
        table_options = expand_repeatable_option('table', self.tables)
        # We explicitly set --no-verify since we are doing verification here in this module
        # from the control node
        verify_option = '--no-verify'

        # %s placeholders in the below command will get replaced by pssh using per host command substitution
        command = 'nohup sh -c "mkdir {work}; cd {work} && medusa-wrapper sudo medusa --fqdn=%s -vvv restore-node ' \
                  '{in_place} {keep_auth} %s {verify} --backup-name {backup} --temp-dir {temp_dir} ' \
                  '{use_sstableloader} {keyspaces} {tables}"' \
            .format(work=self.work_dir,
                    in_place=in_place_option,
                    keep_auth=keep_auth_option,
                    verify=verify_option,
                    backup=self.cluster_backup.name,
                    temp_dir=self.temp_dir,
                    use_sstableloader='--use-sstableloader' if self.use_sstableloader is True else '',
                    keyspaces=keyspace_options,
                    tables=table_options)

        logging.debug(
            'Restoring on node {} with the following command {}'.format(
                target, command))

        return command

    def _restore_schema(self):
        schema = parse_schema(self.cluster_backup.schema)
        with self.session_provider.new_session() as session:
            for keyspace in schema.keys():
                if keyspace.startswith("system"):
                    continue
                else:
                    self._create_or_recreate_schema_objects(
                        session, keyspace, schema[keyspace])

    def _create_or_recreate_schema_objects(self, session, keyspace,
                                           keyspace_schema):
        logging.info("(Re)creating schema for keyspace {}".format(keyspace))
        if (keyspace not in session.cluster.metadata.keyspaces):
            # Keyspace doesn't exist on the target cluster. We need to create it, along with all its tables.
            session.execute(keyspace_schema['create_statement'])
        for mv in keyspace_schema['materialized_views']:
            # MVs need to be dropped before we drop the tables
            logging.debug("Dropping MV {}.{}".format(keyspace, mv[0]))
            session.execute("DROP MATERIALIZED VIEW IF EXISTS {}.{}".format(
                keyspace, mv[0]))
        for table in keyspace_schema['tables'].items():
            logging.debug("Dropping table {}.{}".format(keyspace, table[0]))
            session.execute("DROP TABLE IF EXISTS {}.{}".format(
                keyspace, table[0]))
        for udt in keyspace_schema['udt'].items():
            # drop user-defined types next, since they can be referenced by tables
            session.execute("DROP TYPE IF EXISTS {}.{}".format(
                keyspace, udt[0]))
            # then recreate them from the backup schema
            session.execute(udt[1])
        for table in keyspace_schema['tables'].items():
            logging.debug("Creating table {}.{}".format(keyspace, table[0]))
            # Create the tables
            session.execute(table[1])
        for index in keyspace_schema['indices'].items():
            # indices were dropped with their base tables
            logging.debug("Creating index {}.{}".format(keyspace, index[0]))
            session.execute(index[1])
        for mv in keyspace_schema['materialized_views']:
            # Base tables are created now, we can create the MVs
            logging.debug("Creating MV {}.{}".format(keyspace, mv[0]))
            session.execute(mv[1])
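For orientation, once _populate_ringmap() or _populate_hostmap() has run, self.host_map associates each restore target with its backup source(s) and a seed flag; a hedged illustration of its shape (host names are made up):

    {
        'new-node1.example.com': {'source': ['old-node1.example.com'], 'seed': True},
        'new-node2.example.com': {'source': ['old-node2.example.com'], 'seed': False}
    }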
Example #5
def test_address_resolving(self):
    hostname_resolver = HostnameResolver(resolve_addresses=True)
    self.assertNotEqual("127.0.0.1", hostname_resolver.resolve_fqdn("127.0.0.1"))
Example #6
class CqlSession(object):
    EXCLUDED_KEYSPACES = ['system_traces']

    def __init__(self, session, resolve_ip_addresses=True):
        self._session = session
        self.hostname_resolver = HostnameResolver(resolve_ip_addresses)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.shutdown()

    def shutdown(self):
        self.session.shutdown()
        self.cluster.shutdown()

    @property
    def cluster(self):
        return self._session.cluster

    @property
    def session(self):
        return self._session

    def token(self):
        listen_address = self.cluster.contact_points[0]
        token_map = self.cluster.metadata.token_map
        for token, host in token_map.token_to_host_owner.items():
            if host.address == listen_address:
                return token.value
        raise RuntimeError('Unable to get current token')

    def datacenter(self):
        logging.debug('Checking datacenter...')
        listen_address = socket.gethostbyname(self.cluster.contact_points[0])
        token_map = self.cluster.metadata.token_map

        for host in token_map.token_to_host_owner.values():
            socket_host = self.hostname_resolver.resolve_fqdn(listen_address)
            logging.debug('Checking host {} against {}/{}'.format(
                host.address, listen_address, socket_host))
            if host.address == listen_address or self.hostname_resolver.resolve_fqdn(
                    host.address) == socket_host:
                return host.datacenter

        raise RuntimeError('Unable to determine the current datacenter')

    def tokenmap(self):
        token_map = self.cluster.metadata.token_map
        datacenter = self.datacenter()

        def get_host(host_token_pair):
            return host_token_pair[0]

        def get_host_address(host_token_pair):
            return host_token_pair[0].address

        def get_token(host_token_pair):
            return host_token_pair[1]

        host_token_pairs = sorted(
            [(host, token.value)
             for token, host in token_map.token_to_host_owner.items()],
            key=get_host_address)
        host_tokens_groups = itertools.groupby(host_token_pairs, key=get_host)
        host_tokens_pairs = [(host, list(map(get_token, tokens)))
                             for host, tokens in host_tokens_groups]

        return {
            self.hostname_resolver.resolve_fqdn(host.address): {
                'tokens': tokens,
                'is_up': host.is_up
            }
            for host, tokens in host_tokens_pairs
            if host.datacenter == datacenter
        }

    def dump_schema(self):
        keyspaces = self.session.cluster.metadata.keyspaces
        return '\n\n'.join(metadata.export_as_string()
                           for keyspace, metadata in keyspaces.items()
                           if keyspace not in self.EXCLUDED_KEYSPACES)

    def schema_path_mapping(self):
        query = 'SELECT keyspace_name, columnfamily_name, cf_id FROM system.schema_columnfamilies'

        return (row for row in self.session.execute(query)
                if row.keyspace_name not in self.EXCLUDED_KEYSPACES)

    def execute(self, query):
        return self.session.execute(query)
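The tokenmap() method above returns a dict keyed by resolved FQDN, limited to the local datacenter; a hedged illustration of the shape it produces (hosts and tokens are made up):

    {
        'node1.example.com': {'tokens': [-9223372036854775808], 'is_up': True},
        'node2.example.com': {'tokens': [-3074457345618258603], 'is_up': True},
        'node3.example.com': {'tokens': [3074457345618258602], 'is_up': True}
    }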
Example #7
class RestoreJob(object):
    def __init__(self,
                 cluster_backup,
                 config,
                 temp_dir,
                 host_list,
                 seed_target,
                 keep_auth,
                 verify,
                 parallel_restores,
                 keyspaces=None,
                 tables=None,
                 bypass_checks=False,
                 use_sstableloader=False,
                 version_target=None):

        self.id = uuid.uuid4()
        self.ringmap = None
        self.cluster_backup = cluster_backup
        self.session_provider = None
        self.orchestration = Orchestration(config, parallel_restores)
        self.config = config
        self.host_list = host_list
        self.seed_target = seed_target
        self.keep_auth = keep_auth
        self.verify = verify
        self.in_place = None
        self.temp_dir = temp_dir  # temporary files
        self.work_dir = self.temp_dir / 'medusa-job-{id}'.format(id=self.id)
        self.host_map = {
        }  # Map of backup host/target host for the restore process
        self.keyspaces = keyspaces if keyspaces else {}
        self.tables = tables if tables else {}
        self.bypass_checks = bypass_checks
        self.use_sstableloader = use_sstableloader
        self.pssh_pool_size = parallel_restores
        self.cassandra = Cassandra(config)
        fqdn_resolver = medusa.utils.evaluate_boolean(
            self.config.cassandra.resolve_ip_addresses)
        self.fqdn_resolver = HostnameResolver(fqdn_resolver)
        self._version_target = version_target

    def execute(self):
        logging.info('Ensuring the backup is found and is complete')
        if not self.cluster_backup.is_complete():
            raise RuntimeError('Backup is not complete')

        # CASE 1 : We're restoring using a seed target. Source/target mapping will be built based on tokenmap.
        if self.seed_target is not None:
            self.session_provider = CqlSessionProvider([self.seed_target],
                                                       self.config.cassandra)
            with self.session_provider.new_session() as session:
                self._populate_ringmap(self.cluster_backup.tokenmap,
                                       session.tokenmap())
                self._capture_release_version(session)

        # CASE 2 : We're restoring a backup on a different cluster
        if self.host_list is not None:
            logging.info('Restore will happen on new hardware')
            self.in_place = False
            self._populate_hostmap()
            self._capture_release_version(session=None)
            logging.info(
                'Starting Restore on all the nodes in this list: {}'.format(
                    self.host_list))

        self._restore_data()

    @staticmethod
    def _validate_ringmap(tokenmap, target_tokenmap):
        for host, ring_item in target_tokenmap.items():
            if not ring_item.get('is_up'):
                raise RuntimeError(
                    'Target {host} is not up!'.format(host=host))
        if len(target_tokenmap) != len(tokenmap):
            return False
        return True

    def _populate_ringmap(self, tokenmap, target_tokenmap):
        def _tokens_from_ringitem(ringitem):
            return ','.join(map(str, ringitem['tokens']))

        def _token_counts_per_host(tokenmap):
            for host, ringitem in tokenmap.items():
                return len(ringitem['tokens'])

        def _hosts_from_tokenmap(tokenmap):
            hosts = set()
            for host, ringitem in tokenmap.items():
                hosts.add(host)
            return hosts

        def _chunk(my_list, nb_chunks):
            groups = []
            for i in range(nb_chunks):
                groups.append([])
            for i in range(len(my_list)):
                groups[i % nb_chunks].append(my_list[i])
            return groups

        target_tokens = {}
        backup_tokens = {}
        topology_matches = self._validate_ringmap(tokenmap, target_tokenmap)
        self.in_place = self._is_restore_in_place(tokenmap, target_tokenmap)
        if self.in_place:
            logging.info(
                "Restoring on the same cluster that was the backup was taken on (in place fashion)"
            )
            self.keep_auth = False
        else:
            logging.info(
                "Restoring on a different cluster than the backup one (remote fashion)"
            )
            if self.keep_auth:
                logging.info(
                    'system_auth keyspace will be left untouched on the target nodes'
                )
            else:
                # ops might not be aware of the underlying behavior towards auth. Let's ask what to do...
                really_keep_auth = None
                while (really_keep_auth != 'Y'
                       and really_keep_auth != 'n') and not self.bypass_checks:
                    really_keep_auth = input(
                        'Do you want to skip restoring the system_auth keyspace and keep the'
                        + ' credentials of the target cluster? (Y/n)')
                self.keep_auth = (really_keep_auth == 'Y')

        if topology_matches:
            target_tokens = {
                _tokens_from_ringitem(ringitem): host
                for host, ringitem in target_tokenmap.items()
            }
            backup_tokens = {
                _tokens_from_ringitem(ringitem): host
                for host, ringitem in tokenmap.items()
            }

            backup_tokens_per_host = _token_counts_per_host(tokenmap)
            target_tokens_per_host = _token_counts_per_host(target_tokenmap)

            # we must have the same number of tokens per host in both vnode and normal clusters
            if target_tokens_per_host != backup_tokens_per_host:
                logging.info(
                    'Source/target rings have a different number of tokens per node: {}/{}'
                    .format(backup_tokens_per_host, target_tokens_per_host))
                topology_matches = False

            # if not using vnodes, the tokens must match exactly
            if backup_tokens_per_host == 1 and target_tokens.keys() != backup_tokens.keys():
                extras = target_tokens.keys() ^ backup_tokens.keys()
                logging.info(
                    'Tokenmap is differently distributed. Extra items: {}'.format(extras))
                topology_matches = False

        if topology_matches:
            # We can associate each restore node with exactly one backup node
            backup_ringmap = collections.defaultdict(list)
            target_ringmap = collections.defaultdict(list)
            for token, host in backup_tokens.items():
                backup_ringmap[token].append(host)
            for token, host in target_tokens.items():
                target_ringmap[token].append(host)

            self.ringmap = backup_ringmap
            i = 0
            for token, hosts in backup_ringmap.items():
                # take the node that has the same token list or pick the one with the same position in the map.
                restore_host = target_ringmap.get(
                    token,
                    list(target_ringmap.values())[i])[0]
                is_seed = self.fqdn_resolver.resolve_fqdn(
                    restore_host) in self._get_seeds_fqdn()
                self.host_map[restore_host] = {
                    'source': [hosts[0]],
                    'seed': is_seed
                }
                i += 1
            logging.debug("self.host_map: {}".format(self.host_map))
        else:
            # Topologies are different between backup and restore clusters. Using the sstableloader for restore.
            self.use_sstableloader = True
            backup_hosts = _hosts_from_tokenmap(tokenmap)
            restore_hosts = list(_hosts_from_tokenmap(target_tokenmap))
            if len(backup_hosts) >= len(restore_hosts):
                grouped_backups = _chunk(list(backup_hosts),
                                         len(restore_hosts))
            else:
                grouped_backups = _chunk(list(backup_hosts), len(backup_hosts))
            for i in range(min([len(grouped_backups), len(restore_hosts)])):
                # associate one restore host with several backups as we don't have the same number of nodes.
                self.host_map[restore_hosts[i]] = {
                    'source': grouped_backups[i],
                    'seed': False
                }

    @staticmethod
    def _is_restore_in_place(backup_tokenmap, target_tokenmap):
        # If at least one node is part of both tokenmaps, then we're restoring in place
        # Otherwise we're restoring a remote cluster
        return len(set(backup_tokenmap.keys())
                   & set(target_tokenmap.keys())) > 0

    def _get_seeds_fqdn(self):
        seeds = list()
        for seed in self.cassandra.seeds:
            seeds.append(self.fqdn_resolver.resolve_fqdn(seed))
        logging.debug("seeds are: {}".format(seeds))
        return seeds

    def _populate_hostmap(self):
        """
        When there are no seed nodes to pull cluster topology from, the essential information required for a restore
            can be passed in via a simple file using the --host-list CLI argument.

        Each line in the file must have three pieces of information in this order:
            - the string `True` or `False`; This indicates if the source node was a seed node
            - the host/ip that the restore operation is to take place on / destination node
            - the host/ip where the data came from / source node
        Each field is separated by a comma.

        E.G.: Using medusa to restore a 4 node cluster from a previous backup taken of that same cluster:
            medusa@cassandra-node01:~$ cat nodes.list
            True,10.10.1.127,10.10.1.127
            True,10.10.1.128,10.10.1.128
            False,10.10.1.129,10.10.1.129
            False,10.10.1.130,10.10.1.130

        :return:
        """
        with open(self.host_list, 'r') as f:
            for line in f.readlines():
                # Remove leading/trailing whitespace
                _line = line.strip()
                # Ignore comment lines
                if _line.startswith('#'):
                    continue
                seed, target, source = _line.split(
                    self.config.storage.host_file_separator)
                # in python, bool('False') evaluates to True. Need to test the membership as below
                target_resolved = self.fqdn_resolver.resolve_fqdn(
                    target.strip())
                source_resolved = self.fqdn_resolver.resolve_fqdn(
                    source.strip())
                self.host_map[target_resolved] = {
                    'source': [source_resolved],
                    'seed': seed in ['True']
                }

    def _restore_data(self):
        # create workdir on each target host
        # Later: distribute a credential
        # construct command for each target host
        # invoke `nohup medusa-wrapper #{command}` on each target host
        # wait for exit on each
        logging.info('Starting cluster restore...')
        logging.info('Working directory for this execution: {}'.format(
            self.work_dir))
        for target, sources in self.host_map.items():
            logging.info(
                'About to restore on {} using {} as backup source'.format(
                    target, sources))

        logging.info(
            "This will delete all data on the target nodes and replace it with backup '{}'."
            .format(self.cluster_backup.name))

        proceed = None
        while (proceed != 'Y' and proceed != 'n') and not self.bypass_checks:
            proceed = input('Are you sure you want to proceed? (Y/n)')

        if proceed == 'n':
            err_msg = 'Restore manually cancelled'
            logging.error(err_msg)
            raise RuntimeError(err_msg)

        # work out which nodes are seeds in the target cluster
        target_seeds = [t for t, s in self.host_map.items() if s['seed']]
        logging.info("target seeds : {}".format(target_seeds))
        # work out the full list of target hosts in the cluster
        target_hosts = [host for host in self.host_map.keys()]
        logging.info("target hosts : {}".format(target_hosts))

        if self.use_sstableloader is False:
            # stop all target nodes
            logging.info('Stopping Cassandra on all nodes currently up')

            # Generate a Job ID for this run
            job_id = str(uuid.uuid4())
            logging.debug('Job id is: {}'.format(job_id))
            # Define command to run
            command = self.config.cassandra.stop_cmd
            logging.debug('Command to run is: {}'.format(command))

            self.orchestration.pssh_run(target_hosts,
                                        command,
                                        hosts_variables={})

        else:
            # we're using the sstableloader, which requires (re)creating the schema and emptying the tables
            logging.info("Restoring schema on the target cluster")
            self._restore_schema()

        # trigger restores everywhere at once
        # pass in seed info so that non-seeds can wait for seeds before starting
        # seeds, naturally, don't wait for anything

        # Build the per-host arguments (backup sources and seed list) for the restore command
        hosts_variables = []
        for target, source in [(t, s['source'])
                               for t, s in self.host_map.items()]:
            logging.info('Restoring data on {}...'.format(target))
            seeds = '' if target in target_seeds or len(target_seeds) == 0 \
                else '--seeds {}'.format(','.join(target_seeds))
            hosts_variables.append((','.join(source), seeds))

        command = self._build_restore_cmd()
        pssh_run_success = self.orchestration.pssh_run(
            target_hosts, command, hosts_variables=hosts_variables)

        if not pssh_run_success:
            # we could implement a retry.
            err_msg = 'Some nodes failed to restore. Exiting'
            logging.error(err_msg)
            raise RuntimeError(err_msg)

        logging.info(
            'Restore process is complete. The cluster should be up shortly.')

        if self.verify:
            verify_restore(target_hosts, self.config)

    def _build_restore_cmd(self):
        in_place_option = '--in-place' if self.in_place else '--remote'
        keep_auth_option = '--keep-auth' if self.keep_auth else ''
        keyspace_options = expand_repeatable_option('keyspace', self.keyspaces)
        table_options = expand_repeatable_option('table', self.tables)
        # We explicitly set --no-verify since we are doing verification here in this module
        # from the control node
        verify_option = '--no-verify'

        # %s placeholders in the below command will get replaced by pssh using per host command substitution
        command = 'mkdir -p {work}; cd {work} && medusa-wrapper {sudo} medusa {config} ' \
                  '--fqdn=%s -vvv restore-node ' \
                  '{in_place} {keep_auth} %s {verify} --backup-name {backup} --temp-dir {temp_dir} ' \
                  '{use_sstableloader} {keyspaces} {tables}' \
            .format(work=self.work_dir,
                    sudo='sudo' if medusa.utils.evaluate_boolean(self.config.cassandra.use_sudo) else '',
                    config=f'--config-file {self.config.file_path}' if self.config.file_path else '',
                    in_place=in_place_option,
                    keep_auth=keep_auth_option,
                    verify=verify_option,
                    backup=self.cluster_backup.name,
                    temp_dir=self.temp_dir,
                    use_sstableloader='--use-sstableloader' if self.use_sstableloader else '',
                    keyspaces=keyspace_options,
                    tables=table_options)

        logging.debug(
            'Preparing to restore on all nodes with the following command: {}'.
            format(command))

        return command
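    # Hedged illustration (values are made up, not emitted by the code): with default options the
    # rendered command looks roughly like
    #   mkdir -p /tmp/medusa-job-<uuid>; cd /tmp/medusa-job-<uuid> && medusa-wrapper sudo medusa \
    #     --config-file /etc/medusa/medusa.ini --fqdn=%s -vvv restore-node --in-place %s --no-verify \
    #     --backup-name backup1 --temp-dir /tmp
    # pssh then substitutes the two %s placeholders per host with the comma-joined backup source
    # fqdn(s) and that host's --seeds option.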

    def _restore_schema(self):
        schema = parse_schema(self.cluster_backup.schema)
        with self.session_provider.new_session() as session:
            for keyspace in schema.keys():
                if keyspace.startswith("system"):
                    continue
                else:
                    self._create_or_recreate_schema_objects(
                        session, keyspace, schema[keyspace])

    def _create_or_recreate_schema_objects(self, session, keyspace,
                                           keyspace_schema):
        logging.info("(Re)creating schema for keyspace {}".format(keyspace))
        if keyspace not in session.cluster.metadata.keyspaces:
            # Keyspace doesn't exist on the target cluster. We need to create it, along with all its tables.
            session.execute(keyspace_schema['create_statement'])
        for mv in keyspace_schema['materialized_views']:
            # MVs need to be dropped before we drop the tables
            logging.debug("Dropping MV {}.{}".format(keyspace, mv[0]))
            session.execute("DROP MATERIALIZED VIEW IF EXISTS {}.{}".format(
                keyspace, mv[0]))
        for table in keyspace_schema['tables'].items():
            logging.debug("Dropping table {}.{}".format(keyspace, table[0]))
            session.execute("DROP TABLE IF EXISTS {}.{}".format(
                keyspace, table[0]))
        for udt in keyspace_schema['udt'].items():
            # drop user-defined types next, since they can be referenced by tables
            session.execute("DROP TYPE IF EXISTS {}.{}".format(
                keyspace, udt[0]))
            # then recreate them from the backup schema
            session.execute(udt[1])
        for table in keyspace_schema['tables'].items():
            logging.debug("Creating table {}.{}".format(keyspace, table[0]))
            # Create the tables
            session.execute(table[1])
        for index in keyspace_schema['indices'].items():
            # indices were dropped with their base tables
            logging.debug("Creating index {}.{}".format(keyspace, index[0]))
            session.execute(index[1])
        for mv in keyspace_schema['materialized_views']:
            # Base tables are created now, we can create the MVs
            logging.debug("Creating MV {}.{}".format(keyspace, mv[0]))
            session.execute(mv[1])

    # Capture the release version as specified, from the driver, or fall back to the default.
    # This is needed by logic that must distinguish between Cassandra versions 2, 3, and 4.
    def _capture_release_version(self, session):
        # If no version specified via CLI, but have a session, get version from driver.
        if not self._version_target and session:
            driver_app_version = session.cluster.application_version
            if driver_app_version:
                logging.debug('Driver version provided as: {}'.format(
                    driver_app_version))
                HostMan.set_release_version(driver_app_version)
            else:
                logging.debug(
                    'Unable to obtain app_version via driver or command line, '
                    'using default: {}'.format(
                        HostMan.DEFAULT_RELEASE_VERSION))
                # Using default as target wasn't found by driver or provided to RestoreJob
                HostMan.set_release_version(HostMan.DEFAULT_RELEASE_VERSION)
        # If no session available or specified version from CLI, use default.
        elif not self._version_target:
            # Use default
            HostMan.set_release_version(HostMan.DEFAULT_RELEASE_VERSION)
        else:
            # Use what is specified from CLI as version.
            logging.debug('Target version provided as: {}'.format(
                self._version_target))
            HostMan.set_release_version(self._version_target)
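    # Hedged summary of the fallback order implemented above:
    #   1. version_target passed to RestoreJob (e.g. supplied on the command line)
    #   2. session.cluster.application_version reported by the driver, when a CQL session exists
    #   3. HostMan.DEFAULT_RELEASE_VERSION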