def restore_mysql(cfg, dst, backup_copy, cache):
    """Restore from mysql backup"""
    LOG.debug('mysql: %r', cfg)

    if not backup_copy:
        LOG.info('No backup copy specified. Choose one from below:')
        list_available_backups(cfg)
        exit(1)

    try:
        ensure_empty(dst)

        dst_storage = get_destination(
            cfg,
            get_hostname_from_backup_copy(backup_copy)
        )
        key = dst_storage.basename(backup_copy)
        copy = dst_storage.status()[key]

        if cache:
            restore_from_mysql(cfg, copy, dst, cache=Cache(cache))
        else:
            restore_from_mysql(cfg, copy, dst)

    except (TwinDBBackupError, CacheException) as err:
        LOG.error(err)
        exit(1)
    except (OSError, IOError) as err:
        LOG.error(err)
        exit(1)
def backup(cfg, run_type, lock_file, binlogs_only):
    """Run backup job"""
    try:
        run_backup_job(
            cfg,
            run_type,
            lock_file=lock_file,
            binlogs_only=binlogs_only
        )
    except (LockWaitTimeoutError, OperationError) as err:
        LOG.error(err)
        LOG.debug(traceback.format_exc())
        exit(1)
    except ModifierException as err:
        LOG.error('Error in modifier class')
        LOG.error(err)
        LOG.debug(traceback.format_exc())
        exit(1)
    except SourceError as err:
        LOG.error(err)
        LOG.debug(traceback.format_exc())
        exit(1)
    except KeyboardInterrupt:
        LOG.info('Exiting...')
        kill_children()
        exit(1)
def storage_server(docker_client, container_network):
    bootstrap_script = '/twindb-backup/support/bootstrap/storage_server.sh'
    container = get_container(
        'storage_server',
        docker_client,
        container_network,
        bootstrap_script=bootstrap_script,
        image="centos:centos7",
        last_n=2
    )

    timeout = time.time() + 30 * 60
    while time.time() < timeout:
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        if sock.connect_ex((container['ip'], 22)) == 0:
            break
        time.sleep(1)

    yield container

    if container:
        LOG.info('Removing container %s', container['Id'])
        docker_client.api.remove_container(container=container['Id'],
                                           force=True)
def container_network(docker_client):
    api = docker_client.api
    network = None
    network_params = {
        "NAME": NETWORK_NAME,
        "subnet": None,
        "second_octet": None
    }
    ipam_config = _ipam_config()

    subnet = ipam_config["Config"][0]["Subnet"]
    network_params["subnet"] = subnet
    network_params["second_octet"] = int(subnet.split(".")[1])

    try:
        network = api.create_network(
            name=NETWORK_NAME,
            driver="bridge",
            ipam=ipam_config,
            check_duplicate=True
        )
        LOG.info("Created subnet %s", network_params["subnet"])
        LOG.debug(network)
    except APIError as err:
        if err.status_code == 500:
            LOG.info("Network %r already exists", network)
        else:
            raise

    yield network_params

    if network:
        api.remove_network(net_id=network["Id"])
def get_container(name, bootstrap_script, client, network, last_n=1):
    api = client.api
    api.pull(NODE_IMAGE)
    cwd = os.getcwd()
    host_config = api.create_host_config(
        binds={
            cwd: {
                'bind': '/twindb-backup',
                'mode': 'rw',
            }
        },
        dns=['8.8.8.8'])

    ip = '172.%d.3.%d' % (network['second_octet'], last_n)

    networking_config = api.create_networking_config(
        {network['NAME']: api.create_endpoint_config(ipv4_address=ip)})

    LOG.debug(networking_config)

    container = api.create_container(
        image=NODE_IMAGE,
        name='%s_%d' % (name, last_n),
        ports=[22, 3306],
        host_config=host_config,
        networking_config=networking_config,
        volumes=['/twindb-backup'],
        command='bash %s' % bootstrap_script)
    container['ip'] = ip
    LOG.info('Created container %r', container)
    try:
        api.start(container['Id'])
        LOG.info('Started %r', container)
        return container
    except APIError as err:
        LOG.error(err)
        client.api.remove_container(container=container['Id'], force=True)
def storage_server(docker_client, container_network):
    bootstrap_script = "/twindb-backup/support/bootstrap/storage_server.sh"
    container = get_container(
        "storage_server",
        docker_client,
        container_network,
        bootstrap_script=bootstrap_script,
        image="centos:centos7",
        last_n=3,
    )

    timeout = time.time() + 30 * 60
    while time.time() < timeout:
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        if sock.connect_ex((container["ip"], 22)) == 0:
            break
        time.sleep(1)

    yield container

    if container:
        LOG.info("Removing container %s", container["Id"])
        docker_client.api.remove_container(container=container["Id"],
                                           force=True)
def restore_from_file(twindb_config, copy, dst_dir):
    """
    Restore a directory from a backup copy in the directory

    :param twindb_config: tool configuration
    :type twindb_config: TwinDBBackupConfig
    :param copy: Instance of BaseCopy or an inheriting class.
    :type copy: BaseCopy
    :param dst_dir: Path to destination directory. Must exist and be empty.
    :type dst_dir: str
    """
    LOG.info('Restoring %s in %s', copy.key, dst_dir)
    mkdir_p(dst_dir)
    restore_start = time.time()
    keep_local_path = twindb_config.keep_local_path

    if keep_local_path and os.path.exists(osp.join(keep_local_path, copy.key)):
        dst = Local(osp.join(keep_local_path, copy.key))
        stream = dst.get_stream(copy)
    else:
        dst = twindb_config.destination()
        stream = dst.get_stream(copy)

    # GPG modifier
    if twindb_config.gpg:
        gpg = Gpg(
            stream,
            twindb_config.gpg.recipient,
            twindb_config.gpg.keyring,
            secret_keyring=twindb_config.gpg.secret_keyring
        )
        LOG.debug('Decrypting stream')
        stream = gpg.revert_stream()
    else:
        LOG.debug('Not decrypting the stream')

    with stream as handler:
        try:
            LOG.debug('handler type: %s', type(handler))
            LOG.debug('stream type: %s', type(stream))
            cmd = ["tar", "zvxf", "-"]
            LOG.debug('Running %s', ' '.join(cmd))
            proc = Popen(cmd, stdin=handler, cwd=dst_dir)
            cout, cerr = proc.communicate()
            ret = proc.returncode
            if ret:
                LOG.error('%s exited with code %d', cmd, ret)
                if cout:
                    LOG.error('STDOUT: %s', cout)
                if cerr:
                    LOG.error('STDERR: %s', cerr)
                return
            LOG.info('Successfully restored %s in %s', copy.key, dst_dir)
        except (OSError, DestinationError) as err:
            LOG.error('Failed to decompress %s: %s', copy.key, err)
            exit(1)

    export_info(
        twindb_config,
        data=time.time() - restore_start,
        category=ExportCategory.files,
        measure_type=ExportMeasureType.restore
    )
def _print_binlog(dst):
    dst_files = dst.list_files(
        dst.remote_path,
        pattern="/binlog/",
        recursive=True,
        files_only=True
    )
    if dst_files:
        LOG.info("Binary logs:")
        for copy in dst_files:
            print(copy)
def restore_from_file(config, backup_copy, dst_dir):
    """
    Restore a directory from a backup copy in the directory

    :param config: Tool configuration.
    :type config: ConfigParser.ConfigParser
    :param backup_copy: Backup name.
    :type backup_copy: str
    :param dst_dir: Path to destination directory. Must exist and be empty.
    :type dst_dir: str
    """
    LOG.info('Restoring %s in %s', backup_copy, dst_dir)
    mkdir_p(dst_dir)
    restore_start = time.time()

    if os.path.exists(backup_copy):
        dst = Local(backup_copy)
        stream = dst.get_stream(backup_copy)
    else:
        dst = get_destination(config)
        stream = dst.get_stream(backup_copy)

    # GPG modifier
    try:
        gpg = Gpg(stream,
                  config.get('gpg', 'recipient'),
                  config.get('gpg', 'keyring'),
                  secret_keyring=config.get('gpg', 'secret_keyring'))
        LOG.debug('Decrypting stream')
        stream = gpg.revert_stream()
    except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
        LOG.debug('Not decrypting the stream')

    with stream as handler:
        try:
            LOG.debug('handler type: %s', type(handler))
            LOG.debug('stream type: %s', type(stream))
            cmd = ["tar", "zvxf", "-"]
            LOG.debug('Running %s', ' '.join(cmd))
            proc = Popen(cmd, stdin=handler, cwd=dst_dir)
            cout, cerr = proc.communicate()
            ret = proc.returncode
            if ret:
                LOG.error('%s exited with code %d', cmd, ret)
                if cout:
                    LOG.error('STDOUT: %s', cout)
                if cerr:
                    LOG.error('STDERR: %s', cerr)
                return
            LOG.info('Successfully restored %s in %s', backup_copy, dst_dir)
        except (OSError, DestinationError) as err:
            LOG.error('Failed to decompress %s: %s', backup_copy, err)
            exit(1)

    export_info(config,
                data=time.time() - restore_start,
                category=ExportCategory.files,
                measure_type=ExportMeasureType.restore)
def restore_mysql(ctx, dst, backup_copy, cache):
    """Restore from mysql backup"""
    LOG.debug('mysql: %r', ctx.obj['twindb_config'])

    if not backup_copy:
        LOG.info('No backup copy specified. Choose one from below:')
        list_available_backups(ctx.obj['twindb_config'])
        exit(1)

    try:
        ensure_empty(dst)

        incomplete_copy = MySQLCopy(
            path=backup_copy
        )
        dst_storage = ctx.obj['twindb_config'].destination(
            backup_source=incomplete_copy.host
        )
        mysql_status = MySQLStatus(dst=dst_storage)

        copies = [
            cp for cp in mysql_status
            if backup_copy.endswith(cp.name)
        ]
        try:
            copy = copies.pop(0)
        except IndexError:
            raise TwinDBBackupError(
                'Can not find copy %s in MySQL status. '
                'Inspect output of `twindb-backup status` and verify '
                'that correct copy is specified.' % backup_copy
            )
        if copies:
            raise TwinDBBackupError(
                'Multiple copies match pattern %s. Make sure you give unique '
                'copy name for restore.' % backup_copy
            )

        if cache:
            restore_from_mysql(
                ctx.obj['twindb_config'],
                copy,
                dst,
                cache=Cache(cache)
            )
        else:
            restore_from_mysql(ctx.obj['twindb_config'], copy, dst)

    except (TwinDBBackupError, CacheException) as err:
        LOG.error(err)
        LOG.debug(traceback.format_exc())
        exit(1)
    except (OSError, IOError) as err:
        LOG.error(err)
        LOG.debug(traceback.format_exc())
        exit(1)
def _print_media_type(dst, media_type):
    for run_type in INTERVALS:
        pattern = "/%s/%s/" % (run_type, media_type)
        dst_files = dst.list_files(
            dst.remote_path,
            pattern=pattern,
            recursive=True,
            files_only=True
        )
        if dst_files:
            LOG.info("%s %s copies:", media_type, run_type)
            for copy in dst_files:
                print(copy)
def test_get_stream(gs):
    status = MySQLStatus(dst=gs)
    copy = status['master1/daily/mysql/mysql-2019-04-04_05_29_05.xbstream.gz']
    with gs.get_stream(copy) as stream:
        LOG.debug('starting reading from pipe')
        content = stream.read()
        LOG.debug('finished reading from pipe')
    assert len(content), 'Failed to read from GS'
    LOG.info('Read %d bytes', len(content))
def kill_children():
    """
    Kill child processes
    """
    for proc in multiprocessing.active_children():
        LOG.info('Terminating %r [%d] ...', proc, proc.pid)
        proc.terminate()

    parent = psutil.Process(os.getpid())
    for child in parent.children(recursive=True):
        LOG.info('Terminating process %r', child)
        child.kill()
def share_backup(ctx, s3_url):
    """Share backup copy for download"""
    if not s3_url:
        LOG.info('No backup copy specified. Choose one from below:')
        list_available_backups(ctx.obj['twindb_config'])
        exit(1)
    try:
        share(ctx.obj['twindb_config'], s3_url)
    except TwinDBBackupError as err:
        LOG.error(err)
        exit(1)
def _ipam_config():
    for octet in xrange(16, 31):
        subnet = "172.%d.0.0/16" % octet
        try:
            ipam_pool = IPAMPool(subnet=subnet)
            ipam_config = IPAMConfig(pool_configs=[ipam_pool])
            return ipam_config
        except APIError as err:
            if err.status_code == 409:
                LOG.info('Subnet %s already exists', subnet)
                continue
            else:
                raise
def create_bucket(self):
    """Creates the bucket in gcs that will store the backups.

    :raises GCSDestinationError: if failed to create the bucket.
    :raises GCSDestinationError: If authentication error.
    """
    try:
        self._gcs_client.create_bucket(bucket_name=self.bucket)
    except (GoogleAPIError, GoogleAuthError) as err:
        raise GCSDestinationError(err)

    LOG.info('Created bucket %s', self.bucket)
def master1(docker_client, container_network, tmpdir_factory):
    try:
        platform = os.environ["PLATFORM"]
    except KeyError:
        raise EnvironmentError(
            """The environment variable PLATFORM must be defined.
            Allowed values are:
            * centos
            * debian
            * ubuntu
            """
        )
    bootstrap_script = (
        "/twindb-backup/support/bootstrap/master/%s/master1.sh" % platform
    )
    datadir = tmpdir_factory.mktemp("mysql")
    twindb_config_dir = tmpdir_factory.mktemp("twindb")
    container = get_container(
        "master1",
        docker_client,
        container_network,
        str(datadir),
        twindb_config_dir=str(twindb_config_dir),
        last_n=1,
    )
    try:
        timeout = time.time() + 30 * 60
        LOG.info("Waiting until port TCP/3306 becomes available")
        while time.time() < timeout:
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            if sock.connect_ex((container["ip"], 3306)) == 0:
                break
            time.sleep(1)
            LOG.info("Still waiting")
        LOG.info("Port TCP/3306 is ready")

        privileges_file = (
            "/twindb-backup/vagrant/environment/puppet/"
            "modules/profile/files/mysql_grants.sql"
        )
        cmd = ["bash", "-c", "mysql -uroot mysql < %s" % privileges_file]
        ret, cout = docker_execute(docker_client, container["Id"], cmd)
        print(cout)
        assert ret == 0

        ret, _ = docker_execute(docker_client, container["Id"], ["ls"])
        assert ret == 0

        ret, cout = docker_execute(
            docker_client, container["Id"], ["bash", bootstrap_script]
        )
        print(cout)
        assert ret == 0

        yield container

    finally:
        if container:
            LOG.info("Removing container %s", container["Id"])
            docker_client.api.remove_container(
                container=container["Id"], force=True
            )
def delete_bucket(self, force=False):
    """Delete the bucket in gcs that was storing the backups.

    :param force: If the bucket is non-empty then delete the objects
        before deleting the bucket.
    :type force: bool
    :raise GCSDestinationError: if failed to delete the bucket.
    """
    try:
        self._bucket_obj.delete(force=force)
    except (GoogleAPIError, GoogleAuthError) as err:
        raise GCSDestinationError(err)

    LOG.info('Deleted bucket %s', self.bucket)
def restore_file(cfg, dst, backup_copy):
    """Restore from file backup"""
    LOG.debug('file: %r', cfg)

    if not backup_copy:
        LOG.info('No backup copy specified. Choose one from below:')
        list_available_backups(cfg)
        exit(1)

    try:
        ensure_empty(dst)
        restore_from_file(cfg, backup_copy, dst)
    except TwinDBBackupError as err:
        LOG.error(err)
        exit(1)
def master2(docker_client, container_network):
    container = _get_master(2, docker_client, container_network)

    timeout = time.time() + 30 * 60
    while time.time() < timeout:
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        if sock.connect_ex((container['ip'], 22)) == 0:
            break
        time.sleep(1)

    yield container

    if container:
        LOG.info('Removing container %s', container['Id'])
        docker_client.api.remove_container(container=container['Id'],
                                           force=True)
def _get_master(n, client, network):
    """
    :param n: 1 or 2
    :return: Container
    """
    api = client.api
    api.pull(NODE_IMAGE)
    cwd = os.getcwd()
    host_config = api.create_host_config(
        binds={
            cwd: {
                'bind': '/twindb-backup',
                'mode': 'rw',
            }
        },
        dns=['8.8.8.8']
    )

    ip = '172.%d.3.%d' % (network['second_octet'], n)

    networking_config = api.create_networking_config({
        network['NAME']: api.create_endpoint_config(ipv4_address=ip)
    })

    LOG.debug(networking_config)

    container = api.create_container(
        image=NODE_IMAGE,
        name='master%d' % n,
        ports=[22, 3306],
        host_config=host_config,
        networking_config=networking_config,
        volumes=['/twindb-backup'],
        command='bash /twindb-backup/support/clone/master%d.sh' % n
        # command='/bin/sleep 36000'
    )
    container['ip'] = ip
    LOG.info('Created container %r', container)
    try:
        api.start(container['Id'])
        LOG.info('Started %r', container)
        return container
    except APIError as err:
        LOG.error(err)
        client.api.remove_container(container=container['Id'], force=True)
def restore_mysql(ctx, dst, backup_copy, cache):
    """Restore from mysql backup"""
    LOG.debug('mysql: %r', ctx.obj['twindb_config'])

    if not backup_copy:
        LOG.info('No backup copy specified. Choose one from below:')
        list_available_backups(ctx.obj['twindb_config'])
        exit(1)

    try:
        ensure_empty(dst)

        incomplete_copy = MySQLCopy(path=backup_copy)
        dst_storage = ctx.obj['twindb_config'].destination(
            backup_source=incomplete_copy.host)
        mysql_status = MySQLStatus(dst=dst_storage,
                                   status_directory=incomplete_copy.host)

        copies = [cp for cp in mysql_status
                  if backup_copy.endswith(cp.name)]
        try:
            copy = copies.pop(0)
        except IndexError:
            raise TwinDBBackupError(
                'Can not find copy %s in MySQL status. '
                'Inspect output of `twindb-backup status` and verify '
                'that correct copy is specified.' % backup_copy)
        if copies:
            raise TwinDBBackupError(
                'Multiple copies match pattern %s. Make sure you give unique '
                'copy name for restore.' % backup_copy)

        if cache:
            restore_from_mysql(ctx.obj['twindb_config'], copy, dst,
                               cache=Cache(cache))
        else:
            restore_from_mysql(ctx.obj['twindb_config'], copy, dst)

    except (TwinDBBackupError, CacheException) as err:
        LOG.error(err)
        LOG.debug(traceback.format_exc())
        exit(1)
    except (OSError, IOError) as err:
        LOG.error(err)
        LOG.debug(traceback.format_exc())
        exit(1)
def master2(docker_client, container_network):
    bootstrap_script = '/twindb-backup/support/bootstrap/master2.sh'
    container = get_container('master2', bootstrap_script,
                              docker_client, container_network, 2)

    timeout = time.time() + 30 * 60
    while time.time() < timeout:
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        if sock.connect_ex((container['ip'], 22)) == 0:
            break
        time.sleep(1)

    yield container

    if container:
        LOG.info('Removing container %s', container['Id'])
        docker_client.api.remove_container(container=container['Id'],
                                           force=True)
def master1(docker_client, container_network, tmpdir_factory):
    try:
        platform = os.environ['PLATFORM']
    except KeyError:
        raise EnvironmentError(
            """The environment variable PLATFORM must be defined.
            Allowed values are:
            * centos
            * debian
            * ubuntu
            """
        )
    bootstrap_script = '/twindb-backup/support/bootstrap/master/' \
                       '%s/master1.sh' % platform
    datadir = tmpdir_factory.mktemp('mysql')
    twindb_config_dir = tmpdir_factory.mktemp('twindb')
    container = get_container(
        'master1',
        docker_client,
        container_network,
        str(datadir),
        twindb_config_dir=str(twindb_config_dir),
        last_n=1
    )
    try:
        timeout = time.time() + 30 * 60
        LOG.info('Waiting until port TCP/3306 becomes available')
        while time.time() < timeout:
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            if sock.connect_ex((container['ip'], 3306)) == 0:
                break
            time.sleep(1)
            LOG.info('Still waiting')
        LOG.info('Port TCP/3306 is ready')

        privileges_file = "/twindb-backup/vagrant/environment/puppet/" \
                          "modules/profile/files/mysql_grants.sql"
        cmd = ["bash", "-c", "mysql -uroot mysql < %s" % privileges_file]
        ret, cout = docker_execute(docker_client, container['Id'], cmd)
        print(cout)
        assert ret == 0

        ret, _ = docker_execute(docker_client, container['Id'], ['ls'])
        assert ret == 0

        ret, cout = docker_execute(docker_client, container['Id'],
                                   ['bash', bootstrap_script])
        print(cout)
        assert ret == 0

        yield container

    finally:
        LOG.info('Removing container %s', container['Id'])
        docker_client.api.remove_container(container=container['Id'],
                                           force=True)
def backup(ctx, run_type, lock_file, binlogs_only):
    """Run backup job"""
    try:
        run_backup_job(
            ctx.obj['twindb_config'],
            run_type,
            lock_file=lock_file,
            binlogs_only=binlogs_only
        )
    except TwinDBBackupError as err:
        LOG.error(err)
        LOG.debug(traceback.format_exc())
        exit(1)
    except KeyboardInterrupt:
        LOG.info('Exiting...')
        kill_children()
        exit(1)
def clone(self, dest_host, port, compress=False):
    """
    Send backup to destination host

    :param dest_host: Destination host
    :type dest_host: str
    :param port: Port to send the backup to
    :type port: int
    :param compress: If True compress stream
    :type compress: bool
    :raise RemoteMySQLSourceError: if any error
    """
    retry = 1
    retry_time = 2
    error_log = "/tmp/{src}_{src_port}-{dst}_{dst_port}.log".format(
        src=self._ssh_client.host,
        src_port=self._ssh_client.port,
        dst=dest_host,
        dst_port=port,
    )
    if compress:
        compress_cmd = "| gzip -c - "
    else:
        compress_cmd = ""

    cmd = (
        'bash -c "sudo %s '
        "--stream=xbstream "
        "--host=127.0.0.1 "
        "--backup "
        "--target-dir ./ 2> %s"
        ' %s | ncat %s %d --send-only"'
        % (self._xtrabackup, error_log, compress_cmd, dest_host, port)
    )
    while retry < 3:
        try:
            return self._ssh_client.execute(cmd)
        except SshClientException as err:
            LOG.warning(err)
            LOG.info("Will try again in %d seconds", retry_time)
            time.sleep(retry_time)
            retry_time *= 2
            retry += 1
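# A minimal illustration (not part of the tool) of the shell pipeline that
# clone() assembles. All values below are made up: dest_host 10.10.0.2,
# port 9999, self._xtrabackup simply 'xtrabackup', and a source SSH endpoint
# source1:22 with compress=True. xtrabackup's stderr goes to the log file,
# while the xbstream output is piped through gzip into ncat towards the
# destination host.
example_cmd = (
    'bash -c "sudo xtrabackup --stream=xbstream --host=127.0.0.1 --backup '
    '--target-dir ./ 2> /tmp/source1_22-10.10.0.2_9999.log '
    '| gzip -c -  | ncat 10.10.0.2 9999 --send-only"'
)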
def list_available_backups(config, copy_type=None):
    """
    Print known backup copies on a destination specified in the configuration.

    :param config: tool configuration
    :type config: ConfigParser.ConfigParser
    :param copy_type: Limit list to specific type of backups.
    :type copy_type: files|mysql
    """
    dsts = [get_destination(config)]
    if config.has_option('destination', 'keep_local_path'):
        dsts.insert(0, Local(config.get('destination', 'keep_local_path')))

    for dst in dsts:
        LOG.info('Destination %s', dst)
        for mtype in MEDIA_TYPES:
            if copy_type in [None, mtype]:
                func = "_print_%s" % mtype
                globals()[func](dst)
def list_available_backups(twindb_config, copy_type=None):
    """
    Print known backup copies on a destination specified in the configuration.

    :param twindb_config: tool configuration
    :type twindb_config: TwinDBBackupConfig
    :param copy_type: Limit list to specific type of backups.
    :type copy_type: files|mysql
    """
    dsts = [twindb_config.destination()]
    if twindb_config.keep_local_path:
        dsts.insert(0, Local(twindb_config.keep_local_path))

    for dst in dsts:
        LOG.info('Destination %s', dst)
        for mtype in MEDIA_TYPES:
            if copy_type in [None, mtype]:
                func = "_print_%s" % mtype
                globals()[func](dst)
def backup(cfg, run_type, lock_file):
    """Run backup job"""
    try:
        run_backup_job(cfg, run_type, lock_file=lock_file)
    except IOError as err:
        LOG.error(err)
        LOG.debug(traceback.format_exc())
        exit(1)
    except ModifierException as err:
        LOG.error('Error in modifier class')
        LOG.error(err)
        LOG.debug(traceback.format_exc())
        exit(1)
    except KeyboardInterrupt:
        LOG.info('Exiting...')
        kill_children()
        exit(1)
def restore_file(ctx, dst, backup_copy):
    """Restore from file backup"""
    LOG.debug('file: %r', ctx.obj['twindb_config'])

    if not backup_copy:
        LOG.info('No backup copy specified. Choose one from below:')
        list_available_backups(ctx.obj['twindb_config'])
        exit(1)

    try:
        ensure_empty(dst)
        copy = FileCopy(path=backup_copy)
        restore_from_file(ctx.obj['twindb_config'], copy, dst)
    except TwinDBBackupError as err:
        LOG.error(err)
        exit(1)
    except KeyboardInterrupt:
        LOG.info('Exiting...')
        kill_children()
        exit(1)
def _get_file_content(self, path):
    attempts = 10  # up to 1024 seconds
    sleep_time = 2
    while sleep_time <= 2 ** attempts:
        try:
            response = self.s3_client.get_object(Bucket=self.bucket,
                                                 Key=path)
            self.validate_client_response(response)

            content = response['Body'].read()
            return content
        except ClientError as err:
            LOG.warning('Failed to read s3://%s/%s', self.bucket, path)
            LOG.warning(err)
            LOG.info('Will try again in %d seconds', sleep_time)
            time.sleep(sleep_time)
            sleep_time *= 2
    msg = 'Failed to read s3://%s/%s after %d attempts' \
          % (self.bucket, path, attempts)
    raise TwinDBBackupError(msg)
def delete_bucket(self, force=False):
    """Delete the bucket in s3 that was storing the backups.

    :param force: If the bucket is non-empty then delete the objects
        before deleting the bucket.
    :type force: bool
    :raise S3DestinationError: if failed to delete the bucket.
    """
    bucket_exists = True
    try:
        self.s3_client.head_bucket(Bucket=self._bucket)
    except ClientError as err:
        # We come here meaning we did not find the bucket
        if err.response['ResponseMetadata']['HTTPStatusCode'] == 404:
            bucket_exists = False
        else:
            raise

    if bucket_exists:
        LOG.info('Deleting bucket %s', self._bucket)

        if force:
            LOG.info('Deleting the objects in the bucket %s', self._bucket)
            self.delete_all_objects()

        response = self.s3_client.delete_bucket(Bucket=self._bucket)
        self.validate_client_response(response)

        LOG.info('Bucket %s successfully deleted', self._bucket)

    return True
def create_bucket(self):
    """Creates the bucket in s3 that will store the backups.

    :raise S3DestinationError: if failed to create the bucket.
    """
    bucket_exists = True
    try:
        self.s3_client.head_bucket(Bucket=self._bucket)
    except ClientError as err:
        # We come here meaning we did not find the bucket
        if err.response['ResponseMetadata']['HTTPStatusCode'] == 404:
            bucket_exists = False
        else:
            raise

    if not bucket_exists:
        LOG.info('Created bucket %s', self._bucket)
        response = self.s3_client.create_bucket(Bucket=self._bucket)
        self.validate_client_response(response)

    return True
def _get_file_content(self, path):
    attempts = 10  # up to 1024 seconds
    sleep_time = 2
    while sleep_time <= 2 ** attempts:
        try:
            response = self.s3_client.get_object(
                Bucket=self._bucket,
                Key=path
            )
            self.validate_client_response(response)

            content = response['Body'].read()
            return content
        except ClientError as err:
            LOG.warning('Failed to read s3://%s/%s', self._bucket, path)
            LOG.warning(err)
            LOG.info('Will try again in %d seconds', sleep_time)
            time.sleep(sleep_time)
            sleep_time *= 2
    msg = 'Failed to read s3://%s/%s after %d attempts' \
          % (self._bucket, path, attempts)
    raise OperationError(msg)
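# The retry loop in _get_file_content() above is plain exponential backoff.
# Below is a minimal, generic sketch of the same pattern; the names
# fetch_with_backoff and read_once are illustrative only and are not part of
# the tool.
import time


def fetch_with_backoff(read_once, attempts=10, first_delay=2):
    """Retry read_once() with delays 2, 4, 8, ... up to 2**attempts seconds."""
    delay = first_delay
    while delay <= 2 ** attempts:
        try:
            return read_once()
        except Exception as err:  # the tool narrows this to botocore's ClientError
            print('Read failed (%s), retrying in %d seconds' % (err, delay))
            time.sleep(delay)
            delay *= 2
    raise RuntimeError('All read attempts failed')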
def test_clone(
        runner,
        master1,
        slave,
        docker_client,
        config_content_clone,
        client_my_cnf,
        rsa_private_key):
    twindb_config_dir = get_twindb_config_dir(docker_client, runner['Id'])

    twindb_config_host = "%s/twindb-backup-1.cfg" % twindb_config_dir
    twindb_config_guest = '/etc/twindb/twindb-backup-1.cfg'
    my_cnf_path = "%s/my.cnf" % twindb_config_dir

    private_key_host = "%s/private_key" % twindb_config_dir
    private_key_guest = "/etc/twindb/private_key"

    with open(my_cnf_path, "w") as my_cnf:
        my_cnf.write(client_my_cnf)

    with open(private_key_host, "w") as key_fd:
        key_fd.write(rsa_private_key)

    with open(twindb_config_host, 'w') as fp:
        content = config_content_clone.format(
            PRIVATE_KEY=private_key_guest,
            MY_CNF='/etc/twindb/my.cnf'
        )
        fp.write(content)

    cmd = '/usr/sbin/sshd'
    LOG.info('Run SSH daemon on master1_1')
    ret, cout = docker_execute(docker_client, master1['Id'], cmd)
    print(cout)

    cmd = [
        'twindb-backup',
        '--debug',
        '--config', twindb_config_guest,
        'clone', 'mysql',
        '%s:3306' % master1['ip'],
        '%s:3306' % slave['ip']
    ]
    pause_test(' '.join(cmd))
    ret, cout = docker_execute(docker_client, runner['Id'], cmd)
    print(cout)
    assert ret == 0

    sql_master_2 = RemoteMySQLSource({
        "ssh_host": slave['ip'],
        "ssh_user": '******',
        "ssh_key": private_key_guest,
        "mysql_connect_info": MySQLConnectInfo(
            my_cnf_path,
            hostname=slave['ip']
        ),
        "run_type": INTERVALS[0],
        "backup_type": 'full'
    })

    timeout = time.time() + 30
    while time.time() < timeout:
        with sql_master_2.get_connection() as conn:
            with conn.cursor() as cursor:
                cursor.execute('SHOW SLAVE STATUS')
                row = cursor.fetchone()
                if row['Slave_IO_Running'] == 'Yes' \
                        and row['Slave_SQL_Running'] == 'Yes':
                    LOG.info('Replication is up and running')
                    return

    LOG.error('Replication is not running after 30 seconds timeout')
    assert False
def test_verify_on_master(
        master1,
        slave,
        storage_server,
        config_content_ssh,
        docker_client,
        rsa_private_key):
    twindb_config_guest = '/etc/twindb/twindb-backup-1.cfg'

    for cont in master1, slave:
        twindb_config_dir = get_twindb_config_dir(docker_client, cont['Id'])
        twindb_config_host = "%s/twindb-backup-1.cfg" % twindb_config_dir
        my_cnf_path = "%s/my.cnf" % twindb_config_dir
        ssh_key_host = "%s/id_rsa" % twindb_config_dir
        ssh_key_guest = '/etc/twindb/id_rsa'

        contents = dedent(
            """
            [client]
            user=dba
            password=qwerty
            """
        )

        with open(my_cnf_path, "w") as my_cnf:
            my_cnf.write(contents)
            my_cnf.flush()

        with open(ssh_key_host, "w") as ssh_fd:
            ssh_fd.write(rsa_private_key)
            ssh_fd.flush()

        with open(twindb_config_host, 'w') as fp:
            content = config_content_ssh.format(
                PRIVATE_KEY=ssh_key_guest,
                HOST_IP=storage_server['ip'],
                MY_CNF='/etc/twindb/my.cnf'
            )
            fp.write(content)
            fp.flush()

    cmd = [
        'twindb-backup', '--debug',
        '--config', twindb_config_guest,
        'backup', 'daily'
    ]
    ret, cout = docker_execute(docker_client, master1['Id'], cmd)
    LOG.info(cout)
    assert ret == 0

    cmd = [
        "bash", "-c",
        "twindb-backup --config %s ls | grep /tmp/backup "
        "| grep mysql | sort | tail -1" % twindb_config_guest
    ]
    ret, cout = docker_execute(docker_client, master1['Id'], cmd)
    url = cout.strip()
    LOG.info(cout)
    assert ret == 0

    cmd = [
        'twindb-backup', '--debug',
        '--config', twindb_config_guest,
        'verify', 'mysql',
        url,
    ]
    ret, cout = docker_execute(docker_client, master1['Id'], cmd)
    LOG.info(cout)
    assert ret == 0

    cmd = [
        'twindb-backup', '--debug',
        '--config', twindb_config_guest,
        'verify', 'mysql',
        'latest',
    ]
    ret, cout = docker_execute(docker_client, master1['Id'], cmd)
    LOG.info(cout)
    assert ret == 0

    cmd = [
        'twindb-backup', '--debug',
        '--config', twindb_config_guest,
        'verify', 'mysql',
        '--hostname', 'master1_1',
        'latest',
    ]
    ret, cout = docker_execute(docker_client, master1['Id'], cmd)
    LOG.info(cout)
    assert ret == 0
def test_restore(master1,
                 storage_server,
                 config_content_ssh,
                 docker_client,
                 rsa_private_key):
    twindb_config_dir = get_twindb_config_dir(docker_client, master1['Id'])
    twindb_config_host = "%s/twindb-backup-1.cfg" % twindb_config_dir
    twindb_config_guest = '/etc/twindb/twindb-backup-1.cfg'
    my_cnf_path = "%s/my.cnf" % twindb_config_dir

    ssh_key_host = "%s/id_rsa" % twindb_config_dir
    ssh_key_guest = '/etc/twindb/id_rsa'

    contents = """
[client]
user=dba
password=qwerty
"""
    with open(my_cnf_path, "w") as my_cnf:
        my_cnf.write(contents)

    with open(ssh_key_host, "w") as ssh_fd:
        ssh_fd.write(rsa_private_key)

    with open(twindb_config_host, 'w') as fp:
        content = config_content_ssh.format(
            PRIVATE_KEY=ssh_key_guest,
            HOST_IP=storage_server['ip'],
            MY_CNF='/etc/twindb/my.cnf'
        )
        fp.write(content)

    cmd = [
        'twindb-backup', '--debug',
        '--config', twindb_config_guest,
        'backup', 'daily'
    ]
    ret, cout = docker_execute(docker_client, master1['Id'], cmd)
    LOG.info(cout)
    assert ret == 0

    cmd = [
        "bash", "-c",
        "twindb-backup --config %s ls | grep /tmp/backup "
        "| grep mysql | sort | tail -1" % twindb_config_guest
    ]
    ret, cout = docker_execute(docker_client, master1['Id'], cmd)
    url = cout.strip()
    LOG.info(cout)
    assert ret == 0

    dst_dir = "/tmp/ssh_dest_restore/"
    cmd = ['twindb-backup', '--debug',
           '--config', twindb_config_guest,
           'restore', 'mysql', url,
           "--dst", dst_dir]
    # print('Test paused')
    # print(' '.join(cmd))
    # import time
    # time.sleep(36000)
    ret, cout = docker_execute(docker_client, master1['Id'], cmd)
    LOG.info(cout)
    assert ret == 0

    cmd = ['find', dst_dir]
    ret, cout = docker_execute(docker_client, master1['Id'], cmd)
    LOG.info(cout)
    assert ret == 0

    cmd = ['test', '-f', '%s/backup-my.cnf' % dst_dir]
    ret, cout = docker_execute(docker_client, master1['Id'], cmd)
    LOG.info(cout)
    assert ret == 0

    cmd = ['test', '-f', '%s/ibdata1' % dst_dir]
    ret, cout = docker_execute(docker_client, master1['Id'], cmd)
    LOG.info(cout)
    assert ret == 0

    cmd = ['test', '-f', '%s/ib_logfile0' % dst_dir]
    ret, cout = docker_execute(docker_client, master1['Id'], cmd)
    LOG.info(cout)
    assert ret == 0

    cmd = ['test', '-f', '%s/ib_logfile1' % dst_dir]
    ret, cout = docker_execute(docker_client, master1['Id'], cmd)
    LOG.info(cout)
    assert ret == 0

    cmd = ['test', '-f', '%s/mysql/user.MYD' % dst_dir]
    ret, cout = docker_execute(docker_client, master1['Id'], cmd)
    LOG.info(cout)
    assert ret == 0

    cmd = ['test', '-f', '%s/xtrabackup_logfile' % dst_dir]
    ret, cout = docker_execute(docker_client, master1['Id'], cmd)
    LOG.info(cout)
    assert ret == 0

    cmd = [
        "bash", "-c",
        'test -f %s/_config/etc/my.cnf || test -f %s/_config/etc/mysql/my.cnf'
        % (dst_dir, dst_dir)
    ]
    ret, cout = docker_execute(docker_client, master1['Id'], cmd)
    LOG.info(cout)
    assert ret == 0
def restore_from_mysql(twindb_config, copy, dst_dir,
                       tmp_dir=None, cache=None, hostname=None):
    """
    Restore MySQL datadir in a given directory

    :param twindb_config: tool configuration
    :type twindb_config: TwinDBBackupConfig
    :param copy: Backup copy instance.
    :type copy: MySQLCopy
    :param dst_dir: Destination directory. Must exist and be empty.
    :type dst_dir: str
    :param tmp_dir: Path to temp directory
    :type tmp_dir: str
    :param cache: Local cache object.
    :type cache: Cache
    :param hostname: Hostname
    :type hostname: str
    """
    LOG.info('Restoring %s in %s', copy, dst_dir)
    mkdir_p(dst_dir)

    dst = None
    restore_start = time.time()
    keep_local_path = twindb_config.keep_local_path

    if keep_local_path and osp.exists(osp.join(keep_local_path, copy.key)):
        dst = Local(twindb_config.keep_local_path)

    if not dst:
        if not hostname:
            hostname = copy.host
            if not hostname:
                raise DestinationError(
                    'Failed to get hostname from %s' % copy
                )
        dst = twindb_config.destination(backup_source=hostname)

    key = copy.key
    status = MySQLStatus(dst=dst)

    stream = dst.get_stream(copy)

    if status[key].type == "full":
        cache_key = os.path.basename(key)
        if cache:
            if cache_key in cache:
                # restore from cache
                cache.restore_in(cache_key, dst_dir)
            else:
                restore_from_mysql_full(
                    stream,
                    dst_dir,
                    twindb_config,
                    redo_only=False
                )
                cache.add(dst_dir, cache_key)
        else:
            restore_from_mysql_full(
                stream,
                dst_dir,
                twindb_config,
                redo_only=False)
    else:
        full_copy = status.candidate_parent(
            copy.run_type
        )
        full_stream = dst.get_stream(full_copy)
        LOG.debug("Full parent copy is %s", full_copy.key)
        cache_key = os.path.basename(full_copy.key)

        if cache:
            if cache_key in cache:
                # restore from cache
                cache.restore_in(cache_key, dst_dir)
            else:
                restore_from_mysql_full(
                    full_stream,
                    dst_dir,
                    twindb_config,
                    redo_only=True
                )
                cache.add(dst_dir, cache_key)
        else:
            restore_from_mysql_full(
                full_stream,
                dst_dir,
                twindb_config,
                redo_only=True
            )

        restore_from_mysql_incremental(
            stream,
            dst_dir,
            twindb_config,
            tmp_dir
        )

    config_dir = os.path.join(dst_dir, "_config")

    for path, content in get_my_cnf(status, key):
        config_sub_dir = os.path.join(
            config_dir,
            os.path.dirname(path).lstrip('/')
        )
        # 0o755 replaces the Python 2-only literal 0755
        mkdir_p(config_sub_dir, mode=0o755)

        with open(os.path.join(config_sub_dir,
                               os.path.basename(path)), 'w') as mysql_config:
            mysql_config.write(content)

    update_grastate(dst_dir, status, key)
    export_info(
        twindb_config,
        data=time.time() - restore_start,
        category=ExportCategory.mysql,
        measure_type=ExportMeasureType.restore
    )
    LOG.info('Successfully restored %s in %s.', copy.key, dst_dir)
    LOG.info('Now copy content of %s to MySQL datadir: '
             'cp -R %s /var/lib/mysql/', dst_dir, osp.join(dst_dir, '*'))
    LOG.info('Fix permissions: chown -R mysql:mysql /var/lib/mysql/')
    LOG.info('Make sure innodb_log_file_size and innodb_log_files_in_group '
             'in %s/backup-my.cnf and in /etc/my.cnf are same.', dst_dir)

    if osp.exists(config_dir):
        LOG.info('Original my.cnf is restored in %s.', config_dir)

    LOG.info('Then you can start MySQL normally.')