def xbstream_restore(xbstream, port):
    """ Restore an xtrabackup file

    xbstream - An xbstream file in S3
    port - The port on which to act on localhost
    """
    datadir = host_utils.get_cnf_setting('datadir', port)

    log.info('Shutting down MySQL')
    host_utils.stop_mysql(port)

    log.info('Removing any existing MySQL data')
    mysql_init_server.delete_mysql_data(port)

    log.info('Downloading and unpacking backup')
    backup.xbstream_unpack(xbstream, datadir)

    log.info('Decompressing compressed ibd files')
    backup.innobackup_decompress(datadir)

    log.info('Applying logs')
    backup.apply_log(datadir)

    log.info('Removing old innodb redo logs')
    mysql_init_server.delete_innodb_log_files(port)

    log.info('Setting permissions for MySQL on {dir}'.format(dir=datadir))
    host_utils.change_owner(datadir, 'mysql', 'mysql')

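# Hedged usage sketch for xbstream_restore. The S3 object name and port
# below are placeholders, and a real invocation is destructive (it stops
# MySQL and wipes the datadir), so treat this as illustration only:
#
#   xbstream_restore('mysql-backups/shard0/backup.xbstream', 3306)
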
def delete_innodb_log_files(port):
    """ Purge ib_log files

    Args:
    port - the port on which to act on localhost
    """
    try:
        ib_logs_dir = host_utils.get_cnf_setting('innodb_log_group_home_dir',
                                                 port)
    except ConfigParser.NoOptionError:
        ib_logs_dir = host_utils.get_cnf_setting('datadir', port)

    glob_path = os.path.join(ib_logs_dir, 'ib_logfile')
    final_glob = ''.join((glob_path, '*'))
    for del_file in glob.glob(final_glob):
        log.info('Clearing {del_file}'.format(del_file=del_file))
        os.remove(del_file)

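# A self-contained sketch of the glob built above, run against a
# temporary directory rather than a live datadir. The file names mirror
# the default InnoDB redo logs; only they match 'ib_logfile*', while
# ibdata1 is left alone.
import glob
import os
import tempfile

def _demo_ib_logfile_glob():
    ib_logs_dir = tempfile.mkdtemp()
    for name in ('ib_logfile0', 'ib_logfile1', 'ibdata1'):
        open(os.path.join(ib_logs_dir, name), 'w').close()
    # Same pattern construction as delete_innodb_log_files
    final_glob = ''.join((os.path.join(ib_logs_dir, 'ib_logfile'), '*'))
    return sorted(glob.glob(final_glob))  # the two redo logs, no ibdata1
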
def create_and_chown_dirs(port):
    """ Create and chown any missing directories needed for mysql """
    for variable in DIRS_TO_CREATE:
        try:
            path = os.path.dirname(host_utils.get_cnf_setting(variable, port))
        except ConfigParser.NoOptionError:
            # Not defined for this instance, so must not matter; move on
            # to the next directory rather than bailing out entirely.
            continue
        if not os.path.isdir(path):
            log.info('Creating and chowning {path}'.format(path=path))
            os.makedirs(path)
            host_utils.change_owner(path, 'mysql', 'mysql')

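# DIRS_TO_CREATE is referenced above but defined elsewhere. A purely
# illustrative definition, consistent with how the loop uses it: my.cnf
# settings whose parent directories must exist (and be owned by mysql)
# before mysqld will start.
DIRS_TO_CREATE = ['datadir', 'log_bin', 'log_error',
                  'slow_query_log_file', 'socket']
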
def archive_mysql_binlogs(port, dry_run):
    """ Flush logs and upload all binary logs that don't exist to s3

    Arguments:
    port - Port of the MySQL instance on which to act
    dry_run - Display output but do not upload
    """
    binlog_rotator.rotate_binlogs_if_needed(port, dry_run)
    zk = host_utils.MysqlZookeeper()
    instance = host_utils.HostAddr(':'.join((host_utils.HOSTNAME,
                                             str(port))))
    if zk.get_replica_set_from_instance(instance)[0] is None:
        log.info('Instance is not in production, exiting')
        return

    lock_handle = None
    ensure_binlog_archiving_table_sanity(instance)
    try:
        log.info('Taking binlog archiver lock')
        lock_handle = host_utils.take_flock_lock(BINLOG_LOCK_FILE)
        log_bin_dir = host_utils.get_cnf_setting('log_bin', port)
        bin_logs = mysql_lib.get_master_logs(instance)
        logged_uploads = get_logged_binlog_uploads(instance)
        for binlog in bin_logs[:-1]:
            err_count = 0
            local_file = os.path.join(os.path.dirname(log_bin_dir),
                                      binlog['Log_name'])
            if already_uploaded(instance, local_file, logged_uploads):
                continue

            success = False
            while not success:
                try:
                    upload_binlog(instance, local_file, dry_run)
                    success = True
                except Exception:
                    if err_count > MAX_ERRORS:
                        log.error('Error count > MAX_ERRORS. '
                                  'Aborting :(')
                        raise
                    log.error('error: {e}'.format(e=traceback.format_exc()))
                    err_count = err_count + 1
                    time.sleep(err_count * 2)

        log.info('Archiving complete')
    finally:
        if lock_handle:
            log.info('Releasing lock')
            host_utils.release_flock_lock(lock_handle)

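# Worked example of the retry cadence above: err_count starts at 0 and
# is incremented after each failed upload, with a linearly growing sleep
# of err_count * 2 seconds. With MAX_ERRORS = 5 (illustrative; the real
# constant is defined elsewhere in this module), a persistently failing
# binlog upload sleeps 2, 4, 6, 8, 10, then 12 seconds, and the seventh
# consecutive failure re-raises.
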
def delete_mysql_data(port):
    """ Purge all data on disk for a MySQL instance

    Args:
    port - The port on which to act on localhost
    """
    for dir_key in DIRS_TO_CLEAR:
        directory = host_utils.get_cnf_setting(dir_key, port)
        if not os.path.isdir(directory):
            directory = os.path.dirname(directory)
        log.info('Removing contents of {dir}'.format(dir=directory))
        host_utils.clean_directory(directory)

    # This should not bomb if one of the files to truncate
    # isn't specified in the config file.
    for file_key in FILES_TO_CLEAR:
        try:
            del_file = host_utils.get_cnf_setting(file_key, port)
            log.info('Truncating {del_file}'.format(del_file=del_file))
            open(del_file, 'w').close()
            host_utils.change_owner(del_file, 'mysql', 'mysql')
        except ConfigParser.NoOptionError:
            log.warning('Option {f} not specified '
                        'in my.cnf - continuing.'.format(f=file_key))

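# DIRS_TO_CLEAR and FILES_TO_CLEAR are referenced above but defined
# elsewhere. Hypothetical values consistent with how delete_mysql_data
# uses them: directory-valued my.cnf settings whose contents get wiped,
# and file-valued settings that get truncated in place.
DIRS_TO_CLEAR = ['datadir', 'innodb_log_group_home_dir', 'log_bin']
FILES_TO_CLEAR = ['log_error', 'slow_query_log_file']
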
def rotate_binlogs_if_needed(port, dry_run):
    instance = host_utils.HostAddr(':'.join((host_utils.HOSTNAME,
                                             str(port))))
    log_bin_dir = host_utils.get_cnf_setting('log_bin', port)
    binlog = os.path.join(os.path.dirname(log_bin_dir),
                          mysql_lib.get_master_status(instance)['File'])
    # We don't update access time, so this is effectively creation time.
    # Compare UTC to UTC, and use total_seconds() so that binlogs more
    # than a day old don't wrap around to a small age.
    creation = datetime.datetime.utcfromtimestamp(os.stat(binlog).st_atime)
    age = (datetime.datetime.utcnow() - creation).total_seconds()
    if age > MAX_AGE:
        log.info('Age of current binlog is {age} which is greater than '
                 'MAX_AGE ({MAX_AGE})'.format(age=age,
                                              MAX_AGE=MAX_AGE))
        if not dry_run:
            log.info('Flushing bin log')
            mysql_lib.flush_master_log(instance)
    else:
        log.info('Age of current binlog is {age} which is less than '
                 'MAX_AGE ({MAX_AGE})'.format(age=age,
                                              MAX_AGE=MAX_AGE))

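# A small, self-contained restatement of the age check above. MAX_AGE is
# defined elsewhere in binlog_rotator; 86400 seconds (one day) below is
# illustrative only.
import datetime

def _binlog_older_than(st_atime, max_age=86400):
    # UTC on both sides, and total_seconds() so binlogs older than a day
    # are not truncated by the .seconds attribute.
    creation = datetime.datetime.utcfromtimestamp(st_atime)
    return (datetime.datetime.utcnow() - creation).total_seconds() > max_age
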
def init_privileges_tables(port):
    """ Bootstrap a MySQL instance

    Args:
    port - the port on which to act on localhost
    """
    datadir = host_utils.get_cnf_setting('datadir', port)
    cmd = ('{MYSQL_INSTALL_DB} --datadir={datadir}'
           ' --user=mysql'.format(MYSQL_INSTALL_DB=MYSQL_INSTALL_DB,
                                  datadir=datadir))
    log.info(cmd)
    (std_out, std_err, return_code) = host_utils.shell_exec(cmd)
    if return_code:
        raise Exception("Return {return_code} != 0 \n"
                        "std_err:{std_err}\n"
                        "std_out:{std_out}".format(return_code=return_code,
                                                   std_err=std_err,
                                                   std_out=std_out))

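# For illustration, with MYSQL_INSTALL_DB = '/usr/bin/mysql_install_db'
# (the constant is defined elsewhere) and a datadir of
# /var/lib/mysql-3306, the command built above renders as:
#
#   /usr/bin/mysql_install_db --datadir=/var/lib/mysql-3306 --user=mysql
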
def get_all_table_sizes(instance):
    """ Get size of all innodb tables

    NOTE: At this point tables should always be innodb
    NOTE2: file per table should always be on.

    Args:
    instance - A hostAddr object
    """
    datadir = host_utils.get_cnf_setting('datadir', instance.port)

    ret = dict()
    for db in mysql_lib.get_dbs(instance):
        ret[db] = dict()
        db_dir = os.path.join(datadir, db)
        for table_path in glob.glob(''.join([db_dir, '/*',
                                             INNODB_EXTENSION])):
            (table, partition) = parse_table_file_name(table_path)
            if table not in ret[db]:
                ret[db][table] = dict()
            ret[db][table][partition] = \
                os.stat(table_path).st_size / 1024 / 1024
    return ret

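# parse_table_file_name is referenced above but not defined in this
# section. A hypothetical implementation consistent with the call site:
# InnoDB file-per-table partitions are named 'table#P#partition.ibd', so
# split on that marker and return None for unpartitioned tables.
import os

def parse_table_file_name(table_path):
    file_name = os.path.splitext(os.path.basename(table_path))[0]
    if '#P#' in file_name:
        (table, partition) = file_name.split('#P#', 1)
        return (table, partition)
    return (file_name, None)
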
def restore_instance(restore_source, destination,
                     no_repl, date,
                     add_to_zk, skip_production_check):
    """ Restore a MySQL backup onto localhost

    Args:
    restore_source - A hostaddr object for where to pull a backup from
    destination - A hostaddr object for where to restore the backup
    no_repl - Should replication not be started. It will always be set up.
    date - What date should the backup be from
    add_to_zk - Should the instance be added to zk. If so, the log from the
                host being launched will be consulted.
    skip_production_check - Do not check if the host is already in zk for
                            production use.
    """
    log.info('Supplied source is {source}'.format(source=restore_source))
    log.info('Supplied destination is {dest}'.format(dest=destination))
    log.info('Desired date of restore {date}'.format(date=date))

    # Try to prevent unintentional destruction of prod servers
    zk = host_utils.MysqlZookeeper()
    try:
        (_, replica_type) = zk.get_replica_set_from_instance(destination)
    except Exception:
        # instance is not in production
        replica_type = None
    if replica_type == host_utils.REPLICA_ROLE_MASTER:
        # If the instance is a master, we will refuse to run.
        # No ifs, ands, or buts.
        raise Exception('Restore script must never run on a master')
    if replica_type:
        if skip_production_check:
            log.info('Ignoring production check. We hope you know what you '
                     'are doing and we will try to take a backup in case '
                     'you are wrong.')
            try:
                mysql_backup.mysql_backup(destination)
            except Exception as e:
                log.error(e)
                log.warning('Unable to take a backup. We will give you '
                            '{time} seconds to change your mind and ^c.'
                            ''.format(time=SCARY_TIMEOUT))
                time.sleep(SCARY_TIMEOUT)
        else:
            raise Exception("It appears {instance} is in use. This is"
                            " very dangerous!".format(instance=destination))

    # Take a lock to prevent multiple restores from running concurrently
    log.info('Taking a flock to block another restore from starting')
    lock_handle = host_utils.take_flock_lock(backup.BACKUP_LOCK_FILE)

    log.info('Rebuilding cnf files just in case')
    mysql_cnf_builder.build_cnf()

    mysql_init_server.create_and_chown_dirs(destination.port)

    # load some data from the mysql conf file
    datadir = host_utils.get_cnf_setting('datadir', destination.port)

    (restore_source,
     restore_file,
     restore_size) = find_a_backup_to_restore(restore_source, destination,
                                              date)
    if restore_source.get_zk_replica_set():
        replica_set = restore_source.get_zk_replica_set()[0]
        master = zk.get_mysql_instance_from_replica_set(
            replica_set, host_utils.REPLICA_ROLE_MASTER)
    else:
        # ZK has no idea what this replica set is, probably a new replica
        # set.
        master = restore_source

    # Start logging
    row_id = backup.start_restore_log(
        master,
        {'restore_source': restore_source,
         'restore_port': destination.port,
         'restore_file': restore_file,
         'source_instance': destination.hostname,
         'restore_date': date,
         'replication': no_repl,
         'zookeeper': add_to_zk})

    # Giant try to allow logging if anything goes wrong.
    try:
        # If we hit an exception, this status will be used. If not, it will
        # be overwritten
        restore_log_update = {'restore_status': 'BAD'}
        log.info('Quick sanity check')
        mysql_init_server.basic_host_sanity()

        log.info('Shutting down MySQL')
        host_utils.stop_mysql(destination.port)

        log.info('Removing any existing MySQL data')
        mysql_init_server.delete_mysql_data(destination.port)

        log.info('Unpacking {rfile} into {ddir}'.format(rfile=restore_file,
                                                        ddir=datadir))
        backup.xbstream_unpack(restore_file, destination.port,
                               restore_source, restore_size)

        log.info('Decompressing files in {path}'.format(path=datadir))
        backup.innobackup_decompress(destination.port)

        # Determine how much RAM to use for applying logs based on the
        # system's total RAM size; all our boxes have 32G or more, so
        # this will always be better than before, but not absurdly high.
        log_apply_ram = psutil.phymem_usage()[0] / 1024 / 1024 / 1024 / 3
        log.info('Applying logs')
        backup.apply_log(destination.port,
                         memory='{}G'.format(log_apply_ram))

        log.info('Removing old innodb redo logs')
        mysql_init_server.delete_innodb_log_files(destination.port)

        log.info('Setting permissions for MySQL on {dir}'.format(dir=datadir))
        host_utils.change_owner(datadir, 'mysql', 'mysql')

        log.info('Starting MySQL')
        host_utils.start_mysql(
            destination.port,
            options=host_utils.DEFAULTS_FILE_EXTRA_ARG.format(
                defaults_file=host_utils.MYSQL_NOREPL_CNF_FILE))

        log.info('Running MySQL upgrade')
        host_utils.upgrade_auth_tables(destination.port)
        restore_log_update = {'restore_status': 'OK'}

        if master == backup.get_metadata_from_backup_file(restore_file)[0]:
            log.info('Pulling replication info from restore to backup source')
            (binlog_file,
             binlog_pos) = backup.parse_xtrabackup_binlog_info(datadir)
        else:
            log.info('Pulling replication info from restore to '
                     'master of backup source')
            (binlog_file,
             binlog_pos) = backup.parse_xtrabackup_slave_info(datadir)

        log.info('Setting up MySQL replication')
        restore_log_update['replication'] = 'FAIL'
        # Since we haven't started the slave yet, make sure we've got these
        # plugins installed, whether we use them or not.
        mysql_lib.setup_semisync_plugins(destination)

        # Try to configure replication.
        mysql_lib.change_master(destination,
                                master,
                                binlog_file,
                                binlog_pos,
                                no_start=(no_repl == 'SKIP'))
        mysql_lib.wait_replication_catch_up(destination)
        host_utils.restart_pt_daemons(destination.port)
        restore_log_update['replication'] = 'OK'

        mysql_lib.setup_response_time_metrics(destination)
    except Exception as e:
        log.error(e)
        if row_id is not None:
            restore_log_update['status_message'] = str(e)
            restore_log_update['finished_at'] = True
        raise
    finally:
        if lock_handle:
            log.info('Releasing lock')
            host_utils.release_flock_lock(lock_handle)
        backup.update_restore_log(master, row_id, restore_log_update)

    try:
        if add_to_zk == 'REQ':
            log.info('Adding instance to zk')
            modify_mysql_zk.auto_add_instance_to_zk(destination,
                                                    dry_run=False)
            backup.update_restore_log(master, row_id, {'zookeeper': 'OK'})
        else:
            log.info('add_to_zk is not set, therefore not adding to zk')
    except Exception as e:
        log.warning("An exception occurred: {e}".format(e=e))
        log.warning("If this is a DB issue, that's fine. "
                    "Otherwise, you should check ZK.")
    backup.update_restore_log(master, row_id, {'finished_at': True})

    log.info('Starting a new backup')
    mysql_backup.mysql_backup(destination)

def restore_instance(restore_source, destination,
                     restore_type, restore_file,
                     no_repl, date,
                     add_to_zk, skip_production_check,
                     test_restore):
    """ Restore a MySQL backup onto localhost

    Args:
    restore_source - A hostaddr object for where to pull a backup from
    destination - A hostaddr object for where to restore the backup
    restore_type - How to pull the backup, options are 's3',
                   'remote_server' and 'local_file'
    restore_file - A backup file to restore from (used when restore_type
                   is 'local_file')
    no_repl - Should replication not be started. It will always be set up.
    date - What date should the backup be from
    add_to_zk - Should the instance be added to zk. If so, the log from the
                host being launched will be consulted.
    skip_production_check - Do not check if the host is already in zk for
                            production use.
    test_restore - Use less ram and shut down the instance after going
                   through the motions of a restore.
    """
    (temp_dir, target_dir) = backup.get_paths(str(destination.port))
    log.info('Supplied source is {source}'.format(source=restore_source))
    log.info('Supplied destination is {dest}'.format(dest=destination))
    log.info('Restore type is {rest}'.format(rest=restore_type))
    log.info('Local restore file is {file}'.format(file=restore_file))
    log.info('Desired date of restore {date}'.format(date=date))
    if test_restore == 'test':
        log.info('Running restore in test mode')

    # Try to prevent unintentional destruction of prod servers
    zk = host_utils.MysqlZookeeper()
    try:
        (_, replica_type) = zk.get_replica_set_from_instance(destination)
    except Exception:
        # instance is not in production
        replica_type = None
    if replica_type == host_utils.REPLICA_ROLE_MASTER:
        # If the instance is a master, we will refuse to run.
        # No ifs, ands, or buts.
        raise Exception('Restore script must never run on a master')
    if replica_type:
        if skip_production_check:
            log.info('Ignoring production check. We hope you know what you '
                     'are doing and we will try to take a backup in case '
                     'you are wrong.')
            try:
                mysql_backup.mysql_backup(destination)
            except Exception as e:
                log.error(e)
                log.warning('Unable to take a backup. We will give you '
                            '{time} seconds to change your mind and ^c.'
                            ''.format(time=SCARY_TIMEOUT))
                time.sleep(SCARY_TIMEOUT)
        else:
            raise Exception("It appears {instance} is in use. This is"
                            " very dangerous!".format(instance=destination))

    # Take a lock to prevent multiple restores from running concurrently
    log.info('Taking a flock to block another restore from starting')
    lock_handle = host_utils.take_flock_lock(backup.BACKUP_LOCK_FILE)

    log.info('Rebuilding cnf files just in case')
    mysql_cnf_builder.build_cnf()

    mysql_init_server.create_and_chown_dirs(destination.port)

    # load some data from the mysql conf file
    datadir = host_utils.get_cnf_setting('datadir', destination.port)

    # Where will we look for a backup?
    if restore_type != 'local_file':
        (restore_type,
         restore_source,
         restore_file,
         restore_size) = find_a_backup_to_restore(restore_type,
                                                  restore_source,
                                                  destination,
                                                  date)

    # Not using an if/else because find_a_backup_to_restore could set to
    # local_file if the file has already been downloaded.
    if restore_type == 'local_file':
        restore_source = backup.get_host_from_backup(restore_file)
        # restore_size will be computed in the unpack function
        restore_size = None

    log.info('Detected the source of backup as '
             '{src}'.format(src=restore_source))

    if restore_source.get_zk_replica_set():
        replica_set = restore_source.get_zk_replica_set()[0]
        master = zk.get_mysql_instance_from_replica_set(
            replica_set, host_utils.REPLICA_ROLE_MASTER)
    else:
        # ZK has no idea what this replica set is, probably a new replica
        # set.
        master = restore_source

    # Start logging
    row_id = backup.start_restore_log(
        master,
        {'restore_type': restore_type,
         'test_restore': test_restore,
         'restore_source': restore_source,
         'restore_port': destination.port,
         'restore_file': restore_file,
         'source_instance': destination.hostname,
         'restore_date': date,
         'replication': no_repl,
         'zookeeper': add_to_zk})

    # Giant try to allow logging if anything goes wrong.
    try:
        # If we hit an exception, this status will be used. If not, it will
        # be overwritten
        restore_log_update = {'restore_status': 'BAD'}
        log.info('Quick sanity check')
        mysql_init_server.basic_host_sanity()

        log.info('Shutting down MySQL')
        host_utils.stop_mysql(destination.port)

        log.info('Removing any existing MySQL data')
        mysql_init_server.delete_mysql_data(destination.port)

        log.info('Unpacking {rfile} into {ddir}'.format(rfile=restore_file,
                                                        ddir=datadir))
        backup.xbstream_unpack(restore_file, destination.port,
                               restore_source, restore_type, restore_size)

        log.info('Decompressing files in {path}'.format(path=datadir))
        backup.innobackup_decompress(destination.port)

        log.info('Applying logs')
        if test_restore == 'test':
            # We don't really need a lot of memory if we're just
            # verifying that it works.
            backup.apply_log(destination.port, memory='1G')
        else:
            backup.apply_log(destination.port, memory='10G')

        log.info('Removing old innodb redo logs')
        mysql_init_server.delete_innodb_log_files(destination.port)

        log.info('Setting permissions for MySQL on {dir}'.format(dir=datadir))
        host_utils.change_owner(datadir, 'mysql', 'mysql')

        log.info('Starting MySQL')
        host_utils.start_mysql(
            destination.port,
            options=host_utils.DEFAULTS_FILE_EXTRA_ARG.format(
                defaults_file=host_utils.MYSQL_NOREPL_CNF_FILE))

        log.info('Running MySQL upgrade')
        host_utils.upgrade_auth_tables(destination.port)
        restore_log_update = {'restore_status': 'OK'}

        if master == backup.get_host_from_backup(restore_file):
            log.info('Pulling replication info from restore to backup source')
            (binlog_file,
             binlog_pos) = backup.parse_xtrabackup_binlog_info(datadir)
        else:
            log.info('Pulling replication info from restore to '
                     'master of backup source')
            (binlog_file,
             binlog_pos) = backup.parse_xtrabackup_slave_info(datadir)

        log.info('Setting up MySQL replication')
        restore_log_update['replication'] = 'FAIL'
        # Since we haven't started the slave yet, make sure we've got these
        # plugins installed, whether we use them or not.
        mysql_lib.setup_semisync_plugins(destination)

        # Try to configure replication. If this was just a test restore,
        # don't wait for it to catch up - don't even start the slave.
        if test_restore == 'test':
            mysql_lib.change_master(destination,
                                    master,
                                    binlog_file,
                                    binlog_pos,
                                    no_start=True)
            backup.quick_test_replication(destination)
        else:
            mysql_lib.change_master(destination,
                                    master,
                                    binlog_file,
                                    binlog_pos,
                                    no_start=(no_repl == 'SKIP'))
            mysql_lib.wait_replication_catch_up(destination)
            host_utils.restart_pt_daemons(destination.port)
        restore_log_update['replication'] = 'OK'

        mysql_lib.setup_response_time_metrics(destination)
    except Exception as e:
        log.error(e)
        if row_id is not None:
            restore_log_update['status_message'] = str(e)
            restore_log_update['finished_at'] = True
        raise
    finally:
        if lock_handle:
            log.info('Releasing lock')
            host_utils.release_flock_lock(lock_handle)
        backup.update_restore_log(master, row_id, restore_log_update)

    # If this was a test restore, we don't need to keep the 3307
    # instance running, so let's shut it off.
    if test_restore == 'test':
        log.info('Shutting down MySQL backup/restore test instance')
        host_utils.stop_mysql(destination.port)
        backup.update_restore_log(master, row_id, {'finished_at': True})
        return

    try:
        if add_to_zk == 'REQ':
            log.info('Adding instance to zk')
            modify_mysql_zk.auto_add_instance_to_zk(destination,
                                                    dry_run=False)
            backup.update_restore_log(master, row_id, {'zookeeper': 'OK'})
        else:
            log.info('add_to_zk is not set, therefore not adding to zk')
    except Exception as e:
        log.warning("An exception occurred: {e}".format(e=e))
        log.warning("If this is a DB issue, that's fine. "
                    "Otherwise, you should check ZK.")
    backup.update_restore_log(master, row_id, {'finished_at': True})

    log.info('Starting a new backup')
    mysql_backup.mysql_backup(destination)

def archive_mysql_binlogs(port, dry_run):
    """ Flush logs and upload all binary logs that don't exist to s3

    Arguments:
    port - Port of the MySQL instance on which to act
    dry_run - Display output but do not upload
    """
    lock_handle = None
    try:
        log.info('Taking binlog archiver lock')
        lock_handle = host_utils.take_flock_lock(BINLOG_LOCK_FILE)
        log_bin_dir = host_utils.get_cnf_setting('log_bin', port)
        instance = host_utils.HostAddr(':'.join((host_utils.HOSTNAME,
                                                 str(port))))
        s3_conn = boto.connect_s3()
        bucket = s3_conn.get_bucket(environment_specific.S3_BUCKET,
                                    validate=False)
        mysql_conn = mysql_lib.connect_mysql(instance)
        bin_logs = mysql_lib.get_master_logs(mysql_conn)
        prefix = os.path.join(BINLOG_S3_DIR,
                              instance.hostname,
                              str(instance.port))
        uploaded_binlogs = bucket.get_all_keys(prefix=prefix)

        for binlog in bin_logs[:-1]:
            compressed_file = ''.join((binlog['Log_name'], '.gz'))
            local_file = os.path.join(os.path.dirname(log_bin_dir),
                                      binlog['Log_name'])
            local_file_gz = os.path.join(TMP_DIR, compressed_file)
            remote_path = os.path.join(BINLOG_S3_DIR,
                                       instance.hostname,
                                       str(instance.port),
                                       compressed_file)
            log.info('Local file {local_file} will compress to '
                     '{local_file_gz} and upload to '
                     '{remote_path}'.format(local_file=local_file,
                                            local_file_gz=local_file_gz,
                                            remote_path=remote_path))

            new_key = boto.s3.key.Key(bucket)
            new_key.key = remote_path
            if already_uploaded(remote_path, uploaded_binlogs):
                log.info('Binlog has already been uploaded')
                continue
            if dry_run:
                log.info('In dry_run mode, skipping compression and upload')
                continue

            log.info('Compressing file')
            # Binary mode keeps the binlog bytes intact.
            f_in = open(local_file, 'rb')
            f_out = gzip.open(local_file_gz, 'wb', compresslevel=2)
            f_out.writelines(f_in)
            f_out.close()
            f_in.close()

            log.info('Uploading file')
            new_key.set_contents_from_filename(local_file_gz)
            log.info('Deleting local compressed file')
            os.remove(local_file_gz)
        log.info('Archiving complete')
    finally:
        if lock_handle:
            log.info('Releasing lock')
            host_utils.release_flock_lock(lock_handle)

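# already_uploaded is referenced above but not defined in this section.
# A minimal sketch consistent with this call site (a remote S3 path plus
# the key listing fetched via bucket.get_all_keys); boto Key objects
# expose the key string as .name.
def already_uploaded(remote_path, uploaded_binlogs):
    for key in uploaded_binlogs:
        if key.name == remote_path:
            return True
    return False
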