def restore_node(config, temp_dir, backup_name, in_place, keep_auth, seeds, verify, keyspaces, tables,
                 use_sstableloader=False):
    """Restore a single node from a backup.

    Dispatches to a local (direct file copy) restore or an sstableloader-based
    restore, then optionally verifies the restored node.

    :param config: medusa configuration object (provides .storage and .cassandra sections)
    :param temp_dir: working directory for the restore
    :param backup_name: name of the backup to restore
    :param in_place: whether the restore targets the node the backup came from
    :param keep_auth: whether to preserve the existing system_auth keyspace
    :param seeds: seed nodes to use for the restore
    :param verify: run a post-restore verification against this node when True
    :param keyspaces: keyspaces to restore (filter)
    :param tables: tables to restore (filter)
    :param use_sstableloader: restore via sstableloader instead of copying files in place
    """
    # Restoring in place overwrites system_auth, so the two flags are mutually exclusive.
    if in_place and keep_auth:
        logging.error(
            'Cannot keep system_auth when restoring in-place. It would be overwritten'
        )
        sys.exit(1)

    storage = Storage(config=config.storage)

    if not use_sstableloader:
        restore_node_locally(config, temp_dir, backup_name, in_place, keep_auth, seeds,
                             storage, keyspaces, tables)
    else:
        restore_node_sstableloader(config, temp_dir, backup_name, in_place, keep_auth, seeds,
                                   storage, keyspaces, tables)

    if verify:
        # Consistency fix: the sibling functions in this file resolve this flag via
        # medusa.utils.evaluate_boolean (the original called medusa.config here).
        hostname_resolver = HostnameResolver(
            medusa.utils.evaluate_boolean(config.cassandra.resolve_ip_addresses))
        verify_restore([hostname_resolver.resolve_fqdn()], config)
def orchestrate(config, backup_name, seed_target, temp_dir, host_list, keep_auth, bypass_checks, verify,
                keyspaces, tables, parallel_restores, use_sstableloader=False):
    """Run a full-cluster restore and report its outcome to monitoring.

    Validates the target selection (seed target XOR host list), locates the
    backup in storage, executes a RestoreJob, and emits duration/error metrics.
    Exits the process with status 1 on any failure.
    """
    monitoring = Monitoring(config=config.monitoring)
    try:
        started_at = datetime.datetime.now()

        # No explicit target at all: fall back to this very node as the seed.
        if seed_target is None and host_list is None:
            # if no target node is provided, nor a host list file, default to the local node as seed target
            resolver = HostnameResolver(medusa.utils.evaluate_boolean(config.cassandra.resolve_ip_addresses))
            seed_target = resolver.resolve_fqdn(socket.gethostbyname(socket.getfqdn()))
            logging.warning("Seed target was not provided, using the local hostname: {}".format(seed_target))

        # Both a seed target and a host list is ambiguous — refuse.
        if seed_target is not None and host_list is not None:
            err_msg = 'You must either provide a seed target or a list of host, not both'
            logging.error(err_msg)
            raise Exception(err_msg)

        if not temp_dir.is_dir():
            err_msg = '{} is not a directory'.format(temp_dir)
            logging.error(err_msg)
            raise Exception(err_msg)

        storage = Storage(config=config.storage)
        try:
            cluster_backup = storage.get_cluster_backup(backup_name)
        except KeyError:
            # Storage raises KeyError for unknown backups; surface it as a restore failure.
            err_msg = 'No such backup --> {}'.format(backup_name)
            logging.error(err_msg)
            raise Exception(err_msg)

        job = RestoreJob(cluster_backup, config, temp_dir, host_list, seed_target, keep_auth, verify,
                         parallel_restores, keyspaces, tables, bypass_checks, use_sstableloader)
        job.execute()

        elapsed = datetime.datetime.now() - started_at
        logging.debug('Emitting metrics')
        logging.info('Restore duration: {}'.format(elapsed.seconds))
        monitoring.send(['medusa-cluster-restore', 'restore-duration', backup_name], elapsed.seconds)
        monitoring.send(['medusa-cluster-restore', 'restore-error', backup_name], 0)
        logging.debug('Done emitting metrics')
        logging.info('Successfully restored the cluster')
    except Exception as e:
        # Record the failure metric, log the cause, and terminate with a non-zero status.
        monitoring.send(['medusa-cluster-restore', 'restore-error', backup_name], 1)
        logging.error('This error happened during the cluster restore: {}'.format(str(e)))
        traceback.print_exc()
        sys.exit(1)
def invoke_sstableloader(config, download_dir, keep_auth, fqtns_to_restore, storage_port):
    """Stream downloaded SSTables back into the cluster with sstableloader.

    Walks download_dir as <keyspace>/<table> directories and runs one
    sstableloader subprocess per table that passes the restore filters.

    :param config: medusa configuration (cassandra section is used throughout)
    :param download_dir: directory containing the downloaded backup data
    :param keep_auth: forwarded to keyspace_is_allowed_to_restore (auth keyspace filtering)
    :param fqtns_to_restore: fully-qualified table names allowed to be restored
    :param storage_port: Cassandra storage port; passed to sstableloader when non-default
    """
    hostname_resolver = HostnameResolver(
        medusa.utils.evaluate_boolean(config.cassandra.resolve_ip_addresses))
    # is_ccm is a space-separated config value; only the first token is the 0/1 flag.
    cassandra_is_ccm = int(shlex.split(config.cassandra.is_ccm)[0])
    # Top-level entries of the download dir are keyspace directories.
    keyspaces = os.listdir(str(download_dir))
    for keyspace in keyspaces:
        ks_path = os.path.join(str(download_dir), keyspace)
        if os.path.isdir(ks_path) and keyspace_is_allowed_to_restore(
                keyspace, keep_auth, fqtns_to_restore):
            logging.info(
                'Restoring keyspace {} with sstableloader...'.format(ks_path))
            # Each subdirectory of a keyspace directory is one table's SSTable set.
            for table in os.listdir(str(ks_path)):
                table_path = os.path.join(str(ks_path), table)
                if os.path.isdir(table_path) and table_is_allowed_to_restore(
                        keyspace, table, fqtns_to_restore):
                    logging.debug(
                        'Restoring table {} with sstableloader...'.format(
                            table))
                    # '******' placeholders stand in when no credentials are configured
                    # (NOTE(review): presumably sstableloader ignores them then — confirm).
                    cql_username = '******' if config.cassandra.cql_username is None else config.cassandra.cql_username
                    cql_password = '******' if config.cassandra.cql_password is None else config.cassandra.cql_password
                    sstableloader_args = [
                        # NOTE(review): the replace() escapes ':' and '/' in a
                        # "github:apache/..." binary path — verify who un-escapes it.
                        config.cassandra.sstableloader_bin.replace(
                            "github:apache/", "githubCOLONapacheSLASH"),
                        '-d',
                        # Under CCM everything runs on loopback; otherwise target this node.
                        hostname_resolver.resolve_fqdn()
                        if cassandra_is_ccm == 0 else '127.0.0.1',
                        '--conf-path',
                        config.cassandra.config_file,
                        '--username',
                        cql_username,
                        '--password',
                        cql_password,
                        '--no-progress',
                        os.path.join(ks_path, table)
                    ]
                    # Only pass --storage-port when it deviates from Cassandra's default 7000.
                    if storage_port != "7000":
                        sstableloader_args.append("--storage-port")
                        sstableloader_args.append(storage_port)
                    # TLS flags are all-or-nothing: only added when the full
                    # truststore/keystore quartet is configured.
                    if config.cassandra.sstableloader_ts is not None and \
                            config.cassandra.sstableloader_tspw is not None and \
                            config.cassandra.sstableloader_ks is not None and \
                            config.cassandra.sstableloader_kspw is not None:
                        sstableloader_args.append("-ts")
                        sstableloader_args.append(
                            config.cassandra.sstableloader_ts)
                        sstableloader_args.append("-tspw")
                        sstableloader_args.append(
                            config.cassandra.sstableloader_tspw)
                        sstableloader_args.append("-ks")
                        sstableloader_args.append(
                            config.cassandra.sstableloader_ks)
                        sstableloader_args.append("-kspw")
                        sstableloader_args.append(
                            config.cassandra.sstableloader_kspw)
                    # Run sstableloader and relay its output to the debug log line by line.
                    output = subprocess.check_output(sstableloader_args)
                    for line in output.decode('utf-8').split('\n'):
                        logging.debug(line)
def __init__(self, config, backup_name, stagger, mode, temp_dir, parallel_snapshots, parallel_uploads):
    """Initialize a cluster backup job.

    :param config: medusa configuration object
    :param backup_name: name of the backup being created
    :param stagger: delay/staggering setting between node backups
    :param mode: backup mode (e.g. full or differential)
    :param temp_dir: base directory for per-job working files
    :param parallel_snapshots: max concurrent snapshot operations
    :param parallel_uploads: max concurrent upload operations
    """
    self.id = uuid.uuid4()
    # TODO expose the argument below (Note that min(1000, <number_of_hosts>) will be used)
    self.orchestration_snapshots = Orchestration(config, parallel_snapshots)
    self.orchestration_uploads = Orchestration(config, parallel_uploads)
    self.config = config
    self.backup_name = backup_name
    self.stagger = stagger
    self.mode = mode
    self.temp_dir = temp_dir
    # Per-job scratch directory, unique via the job's uuid.
    self.work_dir = self.temp_dir / 'medusa-job-{id}'.format(id=self.id)
    self.hosts = {}
    self.cassandra = Cassandra(config)
    self.snapshot_tag = '{}{}'.format(self.cassandra.SNAPSHOT_PREFIX, self.backup_name)
    # Consistency fix: the other jobs in this file resolve this flag via
    # medusa.utils.evaluate_boolean (the original called medusa.config here).
    fqdn_resolver = medusa.utils.evaluate_boolean(self.config.cassandra.resolve_ip_addresses)
    self.fqdn_resolver = HostnameResolver(fqdn_resolver)
def __init__(self, cluster_backup, config, temp_dir, host_list, seed_target, keep_auth, verify,
             parallel_restores, keyspaces=None, tables=None, bypass_checks=False, use_sstableloader=False,
             version_target=None):
    """Set up a cluster restore job from an existing cluster backup.

    Records the restore parameters, prepares a per-job work directory and
    the orchestration/Cassandra helpers used during execution.
    """
    self.id = uuid.uuid4()

    # Backup being restored and connection state (filled in later).
    self.cluster_backup = cluster_backup
    self.ringmap = None
    self.session_provider = None

    # Restore settings as passed by the caller.
    self.config = config
    self.host_list = host_list
    self.seed_target = seed_target
    self.keep_auth = keep_auth
    self.verify = verify
    self.in_place = None
    self.keyspaces = keyspaces or {}
    self.tables = tables or {}
    self.bypass_checks = bypass_checks
    self.use_sstableloader = use_sstableloader
    self.pssh_pool_size = parallel_restores

    # temporary files
    self.temp_dir = temp_dir
    self.work_dir = self.temp_dir / 'medusa-job-{id}'.format(id=self.id)

    # Map of backup host/target host for the restore process
    self.host_map = {}

    # Helpers for parallel execution and node interaction.
    self.orchestration = Orchestration(config, parallel_restores)
    self.cassandra = Cassandra(config)
    self.fqdn_resolver = HostnameResolver(
        medusa.utils.evaluate_boolean(self.config.cassandra.resolve_ip_addresses))
    self._version_target = version_target
def test_address_resolving(self):
    # With address resolution enabled, a loopback IP must resolve to a
    # hostname rather than being echoed back unchanged.
    resolver = HostnameResolver(resolve_addresses=True)
    resolved = resolver.resolve_fqdn("127.0.0.1")
    self.assertNotEqual("127.0.0.1", resolved)
def __init__(self, session, resolve_ip_addresses=True):
    """Wrap a driver session and a hostname resolver configured per the flag."""
    self.hostname_resolver = HostnameResolver(resolve_ip_addresses)
    self._session = session