def innobackup_decompress(port, threads=8):
    """ Decompress an unpacked backup compressed with xbstream.

    Args:
    port - The port of the instance on which to act
    threads - An int specifying the amount of parallelism. Default is 8
    """
    datadir = host_utils.get_cnf_setting('datadir', port)

    cmd = ' '.join(('/usr/bin/innobackupex',
                    '--parallel={threads}',
                    '--decompress',
                    datadir)).format(threads=threads)

    err_log = os.path.join(datadir, 'xtrabackup-decompress.err')
    out_log = os.path.join(datadir, 'xtrabackup-decompress.log')

    with open(err_log, 'w+') as err_handle, open(out_log, 'w') as out_handle:
        verbose = '{cmd} 2>{err_log} >{out_log}'.format(cmd=cmd,
                                                        err_log=err_log,
                                                        out_log=out_log)
        log.info(verbose)
        decompress = subprocess.Popen(cmd,
                                      shell=True,
                                      stdout=out_handle,
                                      stderr=err_handle)
        if decompress.wait() != 0:
            raise Exception('Fatal error: innobackupex decompress '
                            'did not return 0')

        err_handle.seek(0)
        log_data = err_handle.readlines()
        if 'innobackupex: completed OK!' not in log_data[-1]:
            msg = ('Fatal error: innobackupex decompress did not end with '
                   '"innobackupex: completed OK"')
            raise Exception(msg)
def apply_log(port, memory='10G'):
    """ Apply redo logs for an unpacked and uncompressed instance

    Args:
    port - The port of the instance on which to act
    memory - A string of how much memory can be used to apply logs.
             Default 10G
    """
    datadir = host_utils.get_cnf_setting('datadir', port)

    cmd = ' '.join(('/usr/bin/innobackupex',
                    '--apply-log',
                    '--use-memory={memory}',
                    datadir)).format(memory=memory)

    log_file = os.path.join(datadir, 'xtrabackup-apply-logs.log')

    with open(log_file, 'w+') as log_handle:
        verbose = '{cmd} 2>{log_file}'.format(cmd=cmd, log_file=log_file)
        log.info(verbose)
        apply_logs = subprocess.Popen(cmd, shell=True, stderr=log_handle)
        if apply_logs.wait() != 0:
            raise Exception('Fatal error: innobackupex apply-logs did not '
                            'return 0')

        log_handle.seek(0)
        log_data = log_handle.readlines()
        if 'innobackupex: completed OK!' not in log_data[-1]:
            msg = ('Fatal error: innobackupex apply-log did not end with '
                   '"innobackupex: completed OK"')
            raise Exception(msg)
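
# Taken together, innobackup_decompress and apply_log are the back half of
# restoring a compressed xbstream backup: unpack the stream, decompress the
# files, then apply the redo log. A minimal sketch of how the steps chain,
# assuming the functions in this module (xbstream_unpack is defined later
# in this section); the driver function itself is hypothetical.
def restore_backup_sketch(port, xbstream_path, restore_source):
    # Stream the backup into the instance's datadir
    xbstream_unpack(xbstream_path, port, restore_source)
    # Decompress the qpress-compressed files inside the datadir
    innobackup_decompress(port)
    # Roll the redo log forward so the datadir is consistent
    apply_log(port, memory='10G')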
def parse_xtrabackup_slave_info(port):
    """ Pull master_log and master_log_pos from a xtrabackup_slave_info file

    NOTE: This file has its data as a CHANGE MASTER command. Example:
    CHANGE MASTER TO MASTER_LOG_FILE='mysql-bin.006233', MASTER_LOG_POS=863

    Args:
    port - the port of the instance on localhost

    Returns:
    binlog_file - Binlog file to start reading from
    binlog_pos - Position in binlog_file to start reading
    """
    datadir = host_utils.get_cnf_setting('datadir', port)
    file_path = os.path.join(datadir, 'xtrabackup_slave_info')
    with open(file_path) as f:
        data = f.read()

    file_pattern = ".*MASTER_LOG_FILE='([a-z0-9-.]+)'.*"
    pos_pattern = ".*MASTER_LOG_POS=([0-9]+).*"
    res = re.match(file_pattern, data)
    binlog_file = res.group(1)
    res = re.match(pos_pattern, data)
    binlog_pos = int(res.group(1))

    log.info('Master info: binlog_file: {binlog_file},'
             ' binlog_pos: {binlog_pos}'.format(binlog_file=binlog_file,
                                                binlog_pos=binlog_pos))
    return (binlog_file, binlog_pos)
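
# As a quick sanity check, the two patterns above can be exercised against
# the docstring's example line using only the stdlib:
#
#   >>> import re
#   >>> data = ("CHANGE MASTER TO MASTER_LOG_FILE='mysql-bin.006233',"
#   ...         " MASTER_LOG_POS=863")
#   >>> re.match(".*MASTER_LOG_FILE='([a-z0-9-.]+)'.*", data).group(1)
#   'mysql-bin.006233'
#   >>> int(re.match(".*MASTER_LOG_POS=([0-9]+).*", data).group(1))
#   863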
def xbstream_unpack(xbstream, port, restore_source, size=None):
    """ Decompress an xbstream filename into a directory.

    Args:
    xbstream - A string which is the path to the xbstream file
    port - The port on which to act on localhost
    restore_source - A string which is a hostname if the xbstream exists on
                     a remote host
    size - An int for the size in bytes for remote unpacks for a progress bar
    """
    datadir = host_utils.get_cnf_setting('datadir', port)

    cmd = ('{s3_script} get --no-md5 -b {bucket} -k {xbstream} '
           '2>/dev/null ').format(s3_script=S3_SCRIPT,
                                  bucket=environment_specific.S3_BUCKET,
                                  xbstream=urllib.quote_plus(xbstream))
    if size:
        cmd = ' | '.join((cmd, '{pv} -s {size}'.format(pv=PV,
                                                       size=str(size))))
    # And finally pipe everything into xbstream to unpack it
    cmd = ' | '.join((cmd, '/usr/bin/xbstream -x -C {datadir}'.format(
        datadir=datadir)))

    log.info(cmd)
    extract = subprocess.Popen(cmd, shell=True)
    if extract.wait() != 0:
        raise Exception("Error: Xbstream decompress did not succeed, aborting")
def connect_mysql(instance, role="admin"):
    """Connect to a MySQL instance as admin

    Args:
    instance - object describing which mysql instance to connect to
    role - a string of the name of the mysql role to use. A bootstrap role
           can be called for MySQL instances lacking any grants. This user
           does not exist in zk.

    Returns:
    a connection to the server as administrator
    """
    if role == "bootstrap":
        socket = host_utils.get_cnf_setting("socket", instance.port)
        username = "******"
        password = ""
        db = MySQLdb.connect(unix_socket=socket,
                             user=username,
                             passwd=password,
                             cursorclass=MySQLdb.cursors.DictCursor)
    else:
        username, password = get_mysql_user_for_role(role)
        db = MySQLdb.connect(host=instance.hostname,
                             port=instance.port,
                             user=username,
                             passwd=password,
                             cursorclass=MySQLdb.cursors.DictCursor,
                             connect_timeout=CONNECT_TIMEOUT)
    return db
def create_xtrabackup_command(instance, timestamp, tmp_log):
    """ Create a xtrabackup command

    Args:
    instance - A hostAddr object
    timestamp - A timestamp
    tmp_log - A path to where xtrabackup should log

    Returns:
    a list that can be easily ingested by subprocess
    """
    if host_utils.get_hiera_role() in host_utils.MASTERFUL_PUPPET_ROLES:
        cnf = host_utils.OLD_CONF_ROOT.format(port=instance.port)
        cnf_group = 'mysqld'
    else:
        cnf = host_utils.MYSQL_CNF_FILE
        cnf_group = 'mysqld{port}'.format(port=instance.port)
    datadir = host_utils.get_cnf_setting('datadir', instance.port)
    xtra_user, xtra_pass = mysql_lib.get_mysql_user_for_role(
        USER_ROLE_XTRABACKUP)
    return XTRABACKUP_CMD.format(datadir=datadir,
                                 xtra_user=xtra_user,
                                 xtra_pass=xtra_pass,
                                 cnf=cnf,
                                 cnf_group=cnf_group,
                                 port=instance.port,
                                 tmp_log=tmp_log).split()
def connect_mysql(instance, role='admin'):
    """Connect to a MySQL instance as admin

    Args:
    instance - object describing which mysql instance to connect to
    role - a string of the name of the mysql role to use. A bootstrap role
           can be called for MySQL instances lacking any grants.

    Returns:
    a connection to the server as administrator
    """
    log.info('Connecting to {}, please be patient. '
             'Timeout is 30 seconds.'.format(instance))
    if role == 'bootstrap':
        socket = host_utils.get_cnf_setting('socket', instance.port)
        username = '******'
        password = ''
        # When super_read_only is enabled and autocommit is false,
        # `change master to` is not allowed to execute, so we have to
        # enable autocommit.
        db = pymysql.connect(unix_socket=socket,
                             user=username,
                             passwd=password,
                             cursorclass=pymysql.cursors.DictCursor)
    else:
        username, password = get_mysql_user_for_role(role)
        db = pymysql.connect(host=instance.hostname,
                             port=instance.port,
                             user=username,
                             passwd=password,
                             cursorclass=pymysql.cursors.DictCursor,
                             connect_timeout=CONNECT_TIMEOUT)
    return db
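
# A hedged usage sketch for connect_mysql: `inst` stands for any object
# exposing .hostname and .port (the hostaddr objects used throughout this
# module); the helper itself is hypothetical. With DictCursor, rows come
# back as dicts.
def is_read_only_sketch(inst):
    conn = connect_mysql(inst, role='admin')
    try:
        with conn.cursor() as cursor:
            cursor.execute('SELECT @@global.read_only AS ro')
            return bool(cursor.fetchone()['ro'])
    finally:
        conn.close()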
def parse_xtrabackup_binlog_info(port):
    """ Pull master_log and master_log_pos from a xtrabackup_binlog_info file

    Note: This file stores its data as two strings in a file
    delimited by a tab. Example: "mysql-bin.006231\t1619"

    Args:
    port - the port of the instance on localhost

    Returns:
    binlog_file - Binlog file to start reading from
    binlog_pos - Position in binlog_file to start reading
    """
    datadir = host_utils.get_cnf_setting('datadir', port)
    file_path = os.path.join(datadir, 'xtrabackup_binlog_info')
    with open(file_path) as f:
        data = f.read()

    fields = data.strip().split("\t")
    if len(fields) != 2:
        raise Exception(('Error: Invalid format in '
                         'file {file_path}').format(file_path=file_path))
    binlog_file = fields[0].strip()
    binlog_pos = int(fields[1].strip())

    log.info('Master info: binlog_file: {binlog_file},'
             ' binlog_pos: {binlog_pos}'.format(binlog_file=binlog_file,
                                                binlog_pos=binlog_pos))
    return (binlog_file, binlog_pos)
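
# The coordinates recovered above are typically fed into a CHANGE MASTER
# statement on the freshly restored replica. An illustrative construction
# (hypothetical helper, standard MySQL syntax; master_hostname comes from
# the caller):
def coordinate_change_master_sketch(master_hostname, binlog_file, binlog_pos):
    return ("CHANGE MASTER TO "
            "MASTER_HOST='{host}', "
            "MASTER_LOG_FILE='{binlog_file}', "
            "MASTER_LOG_POS={binlog_pos}").format(host=master_hostname,
                                                  binlog_file=binlog_file,
                                                  binlog_pos=binlog_pos)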
def xtrabackup_instance(instance):
    """ Take a compressed mysql backup

    Args:
    instance - A hostaddr instance

    Returns:
    A string of the path to the finished backup
    """
    # Prevent issues with too many open files
    resource.setrlimit(resource.RLIMIT_NOFILE, (131072, 131072))
    (temp_path, target_path) = get_paths(port=str(instance.port))
    backup_file = ("mysql-{host}-{port}-{timestamp}.xbstream").format(
        host=instance.hostname,
        port=str(instance.port),
        timestamp=time.strftime('%Y-%m-%d-%H:%M:%S'))
    tmp_xtra_path = os.path.join(temp_path, backup_file)
    target_xtra_path = os.path.join(target_path, backup_file)
    tmp_log = ''.join((tmp_xtra_path, '.log'))
    target_log = ''.join((target_xtra_path, '.log'))

    if host_utils.get_hiera_role() in host_utils.MASTERFUL_PUPPET_ROLES:
        cnf = host_utils.OLD_CONF_ROOT.format(port=instance.port)
        cnf_group = 'mysqld'
    else:
        cnf = host_utils.MYSQL_CNF_FILE
        cnf_group = 'mysqld{port}'.format(port=instance.port)
    datadir = host_utils.get_cnf_setting('datadir', instance.port)
    xtra_user, xtra_pass = mysql_lib.get_mysql_user_for_role('xtrabackup')
    cmd = ('/bin/bash -c "/usr/bin/innobackupex {datadir} {XTRA_DEFAULTS} '
           '--user={xtra_user} --password={xtra_pass} '
           '--defaults-file={cnf} --defaults-group={cnf_group} '
           '--port={port} 2>{tmp_log} '
           '>{dest}"').format(datadir=datadir,
                              XTRA_DEFAULTS=XTRA_DEFAULTS,
                              xtra_user=xtra_user,
                              xtra_pass=xtra_pass,
                              cnf=cnf,
                              cnf_group=cnf_group,
                              port=instance.port,
                              tmp_log=tmp_log,
                              dest=tmp_xtra_path)
    log.info(cmd)
    xtra = subprocess.Popen(cmd, shell=True)
    xtra.wait()
    with open(tmp_log, 'r') as log_file:
        xtra_log = log_file.readlines()
        if 'innobackupex: completed OK!' not in xtra_log[-1]:
            raise Exception('innobackupex failed. '
                            'log_file: {tmp_log}'.format(tmp_log=tmp_log))

    log.info('Moving backup and log to {target}'.format(target=target_path))
    os.rename(tmp_xtra_path, target_xtra_path)
    os.rename(tmp_log, target_log)
    log.info('Xtrabackup was successful')
    return target_xtra_path
def xtrabackup_instance(instance, timestamp):
    """ Take a compressed mysql backup

    Args:
    instance - A hostaddr instance
    timestamp - A timestamp which will be used to create the backup filename

    Returns:
    A string of the path to the finished backup
    """
    # Prevent issues with too many open files
    resource.setrlimit(resource.RLIMIT_NOFILE, (131072, 131072))
    (temp_path, target_path) = get_paths(port=str(instance.port))
    backup_file = BACKUP_FILE.format(hostname=instance.hostname,
                                     port=instance.port,
                                     timestamp=time.strftime(
                                         '%Y-%m-%d-%H:%M:%S', timestamp),
                                     backup_type=BACKUP_TYPE_XBSTREAM)
    tmp_xtra_path = os.path.join(temp_path, backup_file)
    target_xtra_path = os.path.join(target_path, backup_file)
    tmp_log = ''.join((tmp_xtra_path, '.log'))
    target_log = ''.join((target_xtra_path, '.log'))

    if host_utils.get_hiera_role() in host_utils.MASTERFUL_PUPPET_ROLES:
        cnf = host_utils.OLD_CONF_ROOT.format(port=instance.port)
        cnf_group = 'mysqld'
    else:
        cnf = host_utils.MYSQL_CNF_FILE
        cnf_group = 'mysqld{port}'.format(port=instance.port)
    datadir = host_utils.get_cnf_setting('datadir', instance.port)
    xtra_user, xtra_pass = mysql_lib.get_mysql_user_for_role(
        USER_ROLE_XTRABACKUP)
    cmd = XTRABACKUP_CMD.format(datadir=datadir,
                                xtra_user=xtra_user,
                                xtra_pass=xtra_pass,
                                cnf=cnf,
                                cnf_group=cnf_group,
                                port=instance.port,
                                tmp_log=tmp_log,
                                tmp_xtra_path=tmp_xtra_path)
    log.info(cmd)
    xtra = subprocess.Popen(cmd, shell=True)
    xtra.wait()
    with open(tmp_log, 'r') as log_file:
        xtra_log = log_file.readlines()
        if INNOBACKUP_OK not in xtra_log[-1]:
            raise Exception('innobackupex failed. '
                            'log_file: {tmp_log}'.format(tmp_log=tmp_log))

    log.info('Moving backup and log to {target}'.format(target=target_path))
    os.rename(tmp_xtra_path, target_xtra_path)
    os.rename(tmp_log, target_log)
    log.info('Xtrabackup was successful')
    return target_xtra_path
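
# XTRABACKUP_CMD, BACKUP_FILE, BACKUP_TYPE_XBSTREAM and INNOBACKUP_OK are
# module-level constants not shown in this section. Judging from the inline
# command in the earlier xtrabackup_instance, XTRABACKUP_CMD is presumably
# a template along these lines -- a reconstruction for readability, not the
# repo's literal definition:
#
#   XTRABACKUP_CMD = ('/bin/bash -c "/usr/bin/innobackupex {datadir} '
#                     '--user={xtra_user} --password={xtra_pass} '
#                     '--defaults-file={cnf} --defaults-group={cnf_group} '
#                     '--port={port} 2>{tmp_log} >{tmp_xtra_path}"')
#   INNOBACKUP_OK = 'innobackupex: completed OK!'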
def parse_xtrabackup_slave_info(port):
    """ Pull master_log, master_log_pos, or gtid_purged from the
    xtrabackup_slave_info file

    NOTE: This file has its data as a CHANGE MASTER command and may also
    have a list of GTID sets that have been seen.

    With no GTID, we have this:
    CHANGE MASTER TO MASTER_LOG_FILE='mysql-bin.006233', MASTER_LOG_POS=863

    With GTID, we have this:
    SET GLOBAL gtid_purged='b27a8edf-eca1-11e6-99e4-0e695f0e3b16:1-3862';
    CHANGE MASTER TO MASTER_AUTO_POSITION=1

    Args:
    port - the port of the instance on localhost

    Returns:
    binlog_file - Binlog file to start reading from
    binlog_pos - Position in binlog_file to start reading
    gtid_purged - The gtid sets that have been applied to this data

    NOTE: If the backup comes from a non-GTID server, only file and pos
    will be populated. If it's from a GTID server, only the purge list
    will be populated.
    """
    datadir = host_utils.get_cnf_setting('datadir', port)
    file_path = os.path.join(datadir, 'xtrabackup_slave_info')
    with open(file_path) as f:
        data = f.read()

    file_pattern = ".*MASTER_LOG_FILE='([a-z0-9-.]+)'.*"
    pos_pattern = ".*MASTER_LOG_POS=([0-9]+).*"
    gtid_pattern = ".*gtid_purged='([a-z0-9-:,\s]+)';.*"

    res = re.match(gtid_pattern, data)
    if res:
        # this is GTID-style replication. we check for this first.
        gtid_purged = res.group(1)
        log.info('Master info: GTID purged: {}'.format(gtid_purged))
        return (None, None, gtid_purged)
    else:
        # and this is coordinate-style replication
        res = re.match(file_pattern, data)
        binlog_file = res.group(1)
        res = re.match(pos_pattern, data)
        binlog_pos = int(res.group(1))

        log.info('Master info: binlog_file: {binlog_file},'
                 ' binlog_pos: {binlog_pos}'.format(binlog_file=binlog_file,
                                                    binlog_pos=binlog_pos))
        return (binlog_file, binlog_pos, None)
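
# When the three-tuple above carries gtid_purged instead of coordinates, a
# caller seeds the GTID state before starting replication. A hedged sketch
# (hypothetical helper, standard MySQL syntax; RESET MASTER is included
# because gtid_purged can only be set while gtid_executed is empty):
def gtid_change_master_sketch(master_hostname, gtid_purged):
    return ["RESET MASTER",
            "SET GLOBAL gtid_purged='{}'".format(gtid_purged),
            ("CHANGE MASTER TO MASTER_HOST='{}', "
             "MASTER_AUTO_POSITION=1").format(master_hostname)]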
def xbstream_unpack(xbstream, port, restore_source, restore_type, size=None):
    """ Decompress an xbstream filename into a directory.

    Args:
    xbstream - A string which is the path to the xbstream file
    port - The port on which to act on localhost
    restore_source - A hostaddr object for the host the xbstream lives on
                     (used when restore_type is 'remote_server')
    restore_type - One of 's3', 'local_file' or 'remote_server'
    size - An int for the size in bytes for remote unpacks for a progress bar
    """
    (temp_path, target_path) = get_paths(port)
    temp_backup = os.path.join(temp_path, os.path.basename(xbstream))
    datadir = host_utils.get_cnf_setting('datadir', port)

    if restore_type == 's3':
        cmd = ('{s3_script} get --no-md5 -b {bucket} -k {xbstream} '
               '2>/dev/null ').format(s3_script=S3_SCRIPT,
                                      bucket=environment_specific.S3_BUCKET,
                                      xbstream=urllib.quote_plus(xbstream))
    elif restore_type == 'local_file':
        cmd = '{pv} {xbstream}'.format(pv=PV, xbstream=xbstream)
    elif restore_type == 'remote_server':
        cmd = ("ssh {ops} {auth}@{host} '/bin/cat {xbstream}' "
               "").format(ops=SSH_OPTIONS,
                          auth=SSH_AUTH,
                          host=restore_source.hostname,
                          xbstream=xbstream)
    else:
        raise Exception('Restore type {restore_type} is not supported'
                        ''.format(restore_type=restore_type))

    # local_file restores already run through pv above, so only add the
    # progress meter for the other restore types
    if size and restore_type != 'local_file':
        cmd = ' | '.join((cmd, '{pv} -s {size}'.format(pv=PV,
                                                       size=str(size))))
    # And finally pipe everything into xbstream to unpack it
    cmd = ' | '.join((cmd, '/usr/bin/xbstream -x -C {datadir}'.format(
        datadir=datadir)))

    log.info(cmd)
    extract = subprocess.Popen(cmd, shell=True)
    if extract.wait() != 0:
        raise Exception("Error: Xbstream decompress did not succeed, aborting")
def parse_xtrabackup_binlog_info(port):
    """ Pull master_log, master_log_pos, and, optionally, GTID purged
    from an xtrabackup_binlog_info file

    Note: In non-GTID mode, this file stores its data as two strings
    delimited by a tab. Example: "mysql-bin.006231\t1619"
    In GTID mode, there are four tab-delimited fields - the binlog file,
    the position, an empty column, and the purged GTID set, which may
    continue on additional lines.

    Args:
    port - the port of the instance on localhost

    Returns:
    binlog_file - Binlog file to start reading from
    binlog_pos - Position in binlog_file to start reading
    gtid_purged - Purged GTID sets
    """
    datadir = host_utils.get_cnf_setting('datadir', port)
    file_path = os.path.join(datadir, 'xtrabackup_binlog_info')
    with open(file_path) as f:
        data = f.read()

    fields = data.strip().split("\t")
    if len(fields) != 2 and len(fields) != 4:
        raise Exception(('Error: Invalid format in '
                         'file {}').format(file_path))
    binlog_file = fields[0].strip()
    binlog_pos = int(fields[1].strip())
    gtid_purged = fields[3].replace('\n', ' ').strip() \
        if len(fields) > 2 else None

    log.info('Master info: binlog_file: {binlog_file},'
             ' binlog_pos: {binlog_pos},'
             ' gtid_purged_set: {g}'.format(binlog_file=binlog_file,
                                            binlog_pos=binlog_pos,
                                            g=gtid_purged))
    return (binlog_file, binlog_pos, gtid_purged)
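
# Field layout the parser above expects, illustrated with made-up values.
# Note the empty third column that some xtrabackup versions emit between
# the position and the GTID set:
#
#   >>> sample = ('mysql-bin.006231\t1619\t\t'
#   ...           'b27a8edf-eca1-11e6-99e4-0e695f0e3b16:1-3862')
#   >>> fields = sample.strip().split('\t')
#   >>> len(fields)
#   4
#   >>> fields[3]
#   'b27a8edf-eca1-11e6-99e4-0e695f0e3b16:1-3862'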