def logical_restore(dump, destination):
    """ Restore a compressed mysqldump file from s3 to localhost, port 3306

    Args:
    dump - a mysqldump file in s3
    destination - a hostaddr object describing where on localhost the data
                  should be loaded
    """
    (user, password) = mysql_lib.get_mysql_user_for_role('admin')
    if dump.name.startswith(backup.BACKUP_TYPE_PARTIAL_LOGICAL):
        # TODO: check if db is empty before applying rate limit
        rate_limit = backup.MAX_TRANSFER_RATE
    else:
        log.info('Restarting MySQL to turn off enforce_storage_engine')
        host_utils.stop_mysql(destination.port)
        host_utils.start_mysql(destination.port,
                               host_utils.DEFAULTS_FILE_ARG.format(
                                   defaults_file=host_utils.MYSQL_UPGRADE_CNF_FILE))
        rate_limit = None

    log.info('Downloading, decompressing and importing backup')
    procs = dict()
    procs['s3_download'] = backup.create_s3_download_proc(dump)
    procs['pv'] = backup.create_pv_proc(procs['s3_download'].stdout,
                                        size=dump.size,
                                        rate_limit=rate_limit)
    log.info('zcat |')
    procs['zcat'] = subprocess.Popen(['zcat'],
                                     stdin=procs['pv'].stdout,
                                     stdout=subprocess.PIPE)
    mysql_cmd = ['mysql',
                 '--port={}'.format(str(destination.port)),
                 '--host={}'.format(destination.hostname),
                 '--user={}'.format(user),
                 '--password={}'.format(password)]
    log.info(' '.join(mysql_cmd))
    procs['mysql'] = subprocess.Popen(mysql_cmd,
                                      stdin=procs['zcat'].stdout)
    while not host_utils.check_dict_of_procs(procs):
        time.sleep(.5)
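
# A minimal, self-contained sketch of the pipe-chaining pattern used above
# (generic commands stand in for s3_download | pv | zcat | mysql; this is an
# illustration, not one of the repo's helpers, and it relies on the
# module-level subprocess import used elsewhere in this file). Each stage's
# stdout feeds the next stage's stdin, and every proc is checked for a clean
# exit.
def _pipeline_pattern_sketch():
    procs = dict()
    procs['producer'] = subprocess.Popen(['echo', 'example data'],
                                         stdout=subprocess.PIPE)
    procs['filter'] = subprocess.Popen(['cat'],
                                       stdin=procs['producer'].stdout,
                                       stdout=subprocess.PIPE)
    procs['consumer'] = subprocess.Popen(['wc', '-c'],
                                         stdin=procs['filter'].stdout)
    for name, proc in procs.items():
        if proc.wait() != 0:
            raise Exception('{} exited non-zero'.format(name))
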
def logical_restore(dump, destination):
    """ Restore a compressed mysqldump file from s3 to localhost, port 3306

    Args:
    dump - a mysqldump file in s3
    destination - a hostaddr object describing where on localhost the data
                  should be loaded
    """
    log.info('Preparing replication')
    (restore_source, _) = backup.get_metadata_from_backup_file(dump.name)
    # We are importing a mysqldump which was created with --master-data
    # so there will be a CHANGE MASTER statement at the start of the dump.
    # MySQL will basically just ignore a CHANGE MASTER command if
    # master_host is not already setup. So we are setting master_host,
    # username and password here. We use BOGUS for master_log_file so that
    # the IO thread is intentionally broken. With no argument for
    # master_log_file, the IO thread would start downloading the first bin log
    # and the SQL thread would start executing...
    mysql_lib.change_master(destination, restore_source, 'BOGUS', 0,
                            no_start=True)
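    # Roughly the same as issuing on the destination (sketch only; the exact
    # statement depends on mysql_lib.change_master):
    #   CHANGE MASTER TO MASTER_HOST='<restore_source>', MASTER_USER=...,
    #     MASTER_PASSWORD=..., MASTER_LOG_FILE='BOGUS', MASTER_LOG_POS=0;
    # followed by *not* running START SLAVE, per no_start=True.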
    log.info('Restarting MySQL to turn off enforce_storage_engine')
    host_utils.stop_mysql(destination.port)
    host_utils.start_mysql(destination.port,
                           host_utils.DEFAULTS_FILE_ARG.format(
                               defaults_file=host_utils.MYSQL_UPGRADE_CNF_FILE))
    log.info('Downloading, decompressing and importing backup')
    procs = dict()
    procs['s3_download'] = backup.create_s3_download_proc(dump)
    procs['pv'] = backup.create_pv_proc(procs['s3_download'].stdout,
                                        size=dump.size)
    log.info('zcat |')
    procs['zcat'] = subprocess.Popen(['zcat'],
                                     stdin=procs['pv'].stdout,
                                     stdout=subprocess.PIPE)
    mysql_cmd = ['mysql', '--port', str(destination.port)]
    log.info(' '.join(mysql_cmd))
    procs['mysql'] = subprocess.Popen(mysql_cmd,
                                      stdin=procs['zcat'].stdout,
                                      stdout=subprocess.PIPE)
    while not host_utils.check_dict_of_procs(procs):
        time.sleep(.5)
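
# host_utils.check_dict_of_procs is a repo helper not shown here. Based on how
# it is used above, it presumably polls every proc in the dict, raises if any
# exited non-zero, and returns True only once all have exited cleanly. A
# hypothetical sketch of that contract (an assumption, not the real code):
def _check_dict_of_procs_sketch(proc_dict):
    all_done = True
    for name, proc in proc_dict.items():
        ret = proc.poll()
        if ret is None:
            # Still running; keep waiting.
            all_done = False
        elif ret != 0:
            raise Exception('{} exited with return code {}'.format(name, ret))
    return all_done
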
def safe_upload(precursor_procs, stdin, bucket, key,
                check_func=None, check_arg=None):
    """ For sures, safely upload a file to s3

    Args:
    precursor_procs - A dict of procs that will be monitored
    stdin - The stdout from the last proc in precursor_procs that will be
             uploaded
    bucket - The s3 bucket where we should upload the data
    key - The name of the key which will be the destination of the data
    check_func - An optional function that, if supplied, will be run after all
                 procs in precursor_procs have finished.
    check_arg - The argument to supply to check_func
    """
    upload_procs = dict()
    devnull = open(os.devnull, 'w')
    # Create the termination sentinel before the try block so the cleanup in
    # the finally clause always has a valid term_path, even if setup fails.
    term_path = get_term_file()
    try:
        upload_procs['repeater'] = subprocess.Popen(
                                       [get_exec_path(), term_path],
                                       stdin=stdin,
                                       stdout=subprocess.PIPE)
        upload_procs['uploader'] = subprocess.Popen(
                                       [S3_SCRIPT, 'put',
                                        '-k', urllib.quote_plus(key),
                                        '-b', bucket],
                                       stdin=upload_procs['repeater'].stdout,
                                       stderr=devnull)

        # While the precursor procs are running, we need to make sure
        # none of them have errors and also check that the upload procs
        # also don't have errors.
        while not host_utils.check_dict_of_procs(precursor_procs):
            host_utils.check_dict_of_procs(upload_procs)
            time.sleep(SLEEP_TIME)

        # Once the precursor procs have exited successfully, we will run
        # any defined check function
        if check_func:
            check_func(check_arg)

        # And then create the term file which will cause the repeater and
        # uploader to exit
        with open(term_path, 'w') as term_handle:
            term_handle.write(TERM_STRING)

        # And finally we will wait for the uploader procs to exit without error
        while not host_utils.check_dict_of_procs(upload_procs):
            time.sleep(SLEEP_TIME)
    except:
        # Something has gone wrong. Make sure we kill the uploader so that
        # under no circumstances does the upload succeed with bad data.
        if 'uploader' in upload_procs and\
                psutil.pid_exists(upload_procs['uploader'].pid):
            try:
                upload_procs['uploader'].kill()
            except:
                pass

        if 'repeater' in upload_procs and\
                psutil.pid_exists(upload_procs['repeater'].pid):
            try:
                upload_procs['repeater'].kill()
            except:
                pass
        raise
    finally:
        devnull.close()
        os.remove(term_path)
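
# Hypothetical usage sketch (the bucket, key, and path are made up for
# illustration): feed a compression step's stdout into safe_upload, which then
# owns the repeater/uploader pair and tears everything down on failure.
def _safe_upload_usage_sketch():
    procs = dict()
    procs['gzip'] = subprocess.Popen(['gzip', '-c', '/tmp/example_dump.sql'],
                                     stdout=subprocess.PIPE)
    safe_upload(precursor_procs=procs,
                stdin=procs['gzip'].stdout,
                bucket='example-backup-bucket',
                key='mysql/example_dump.sql.gz')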