Beispiel #1
0
def create_xtrabackup_command(instance, timestamp, tmp_log):
    """ Create a xtrabackup command

    Args:
    instance - A hostAddr object
    timestamp - A timestamp
    tmp_log - A path to where xtrabackup should log

    Returns:
    a list that can be easily ingested by subprocess
    """
    # Masterful puppet hosts keep a legacy per-port cnf with a plain
    # [mysqld] group; everything else shares one cnf with per-port groups.
    masterful = (host_utils.get_hiera_role() in
                 host_utils.MASTERFUL_PUPPET_ROLES)
    if masterful:
        cnf = host_utils.OLD_CONF_ROOT.format(port=instance.port)
        cnf_group = 'mysqld'
    else:
        cnf = host_utils.MYSQL_CNF_FILE
        cnf_group = 'mysqld{port}'.format(port=instance.port)
    datadir = host_utils.get_cnf_setting('datadir', instance.port)
    (xtra_user,
     xtra_pass) = mysql_lib.get_mysql_user_for_role(USER_ROLE_XTRABACKUP)
    cmd = XTRABACKUP_CMD.format(datadir=datadir,
                                xtra_user=xtra_user,
                                xtra_pass=xtra_pass,
                                cnf=cnf,
                                cnf_group=cnf_group,
                                port=instance.port,
                                tmp_log=tmp_log)
    # Split into an argv list for subprocess consumption.
    return cmd.split()
Beispiel #2
0
def create_xtrabackup_command(instance, timestamp, tmp_log):
    """ Create a xtrabackup command

    Args:
    instance - A hostAddr object
    timestamp - A timestamp
    tmp_log - A path to where xtrabackup should log

    Returns:
    a list that can be easily ingested by subprocess
    """
    port = instance.port
    # Pick config file / group based on the host's puppet role.
    if host_utils.get_hiera_role() not in host_utils.MASTERFUL_PUPPET_ROLES:
        cnf = host_utils.MYSQL_CNF_FILE
        cnf_group = 'mysqld{port}'.format(port=port)
    else:
        cnf = host_utils.OLD_CONF_ROOT.format(port=port)
        cnf_group = 'mysqld'
    datadir = host_utils.get_cnf_setting('datadir', port)
    xtra_user, xtra_pass = mysql_lib.get_mysql_user_for_role(
        USER_ROLE_XTRABACKUP)
    return XTRABACKUP_CMD.format(datadir=datadir,
                                 xtra_user=xtra_user,
                                 xtra_pass=xtra_pass,
                                 cnf=cnf,
                                 cnf_group=cnf_group,
                                 port=port,
                                 tmp_log=tmp_log).split()
Beispiel #3
0
def xtrabackup_instance(instance):
    """ Take a compressed mysql backup

    Args:
    instance - A hostaddr instance

    Returns:
    A string of the path to the finished backup
    """
    # Prevent issues with too many open files
    resource.setrlimit(resource.RLIMIT_NOFILE, (131072, 131072))
    (temp_path, target_path) = get_paths(port=str(instance.port))
    backup_file = ("mysql-{host}-{port}-{timestamp}.xbstream").format(
        host=instance.hostname,
        port=str(instance.port),
        timestamp=time.strftime('%Y-%m-%d-%H:%M:%S'))
    tmp_xtra_path = os.path.join(temp_path, backup_file)
    target_xtra_path = os.path.join(target_path, backup_file)
    tmp_log = ''.join((tmp_xtra_path, '.log'))
    # BUG FIX: the log's final destination must be derived from the target
    # path. Previously it was built from tmp_xtra_path, making the rename
    # below a no-op so the log never left the temp directory.
    target_log = ''.join((target_xtra_path, '.log'))

    if host_utils.get_hiera_role() in host_utils.MASTERFUL_PUPPET_ROLES:
        cnf = host_utils.OLD_CONF_ROOT.format(port=instance.port)
        cnf_group = 'mysqld'
    else:
        cnf = host_utils.MYSQL_CNF_FILE
        cnf_group = 'mysqld{port}'.format(port=instance.port)
    datadir = host_utils.get_cnf_setting('datadir', instance.port)
    xtra_user, xtra_pass = mysql_lib.get_mysql_user_for_role('xtrabackup')

    # NOTE(review): the password is embedded in a shell string and logged
    # below, so it is visible in logs and `ps` output — consider a
    # defaults file or env var for credentials.
    cmd = ('/bin/bash -c "/usr/bin/innobackupex {datadir} {XTRA_DEFAULTS} '
           '--user={xtra_user} --password={xtra_pass} '
           '--defaults-file={cnf} --defaults-group={cnf_group} '
           '--port={port} 2>{tmp_log} '
           '>{dest}"').format(datadir=datadir,
                              XTRA_DEFAULTS=XTRA_DEFAULTS,
                              xtra_user=xtra_user,
                              xtra_pass=xtra_pass,
                              cnf=cnf,
                              cnf_group=cnf_group,
                              port=instance.port,
                              tmp_log=tmp_log,
                              dest=tmp_xtra_path)

    log.info(cmd)
    xtra = subprocess.Popen(cmd, shell=True)
    xtra.wait()
    with open(tmp_log, 'r') as log_file:
        xtra_log = log_file.readlines()
        # Guard against an empty log (innobackupex died before writing
        # anything); previously xtra_log[-1] would raise IndexError and
        # mask the real failure.
        if not xtra_log or 'innobackupex: completed OK!' not in xtra_log[-1]:
            raise Exception('innobackupex failed. '
                            'log_file: {tmp_log}'.format(tmp_log=tmp_log))

    log.info('Moving backup and log to {target}'.format(target=target_path))
    os.rename(tmp_xtra_path, target_xtra_path)
    os.rename(tmp_log, target_log)
    log.info('Xtrabackup was successful')
    return target_xtra_path
Beispiel #4
0
def xtrabackup_instance(instance, timestamp):
    """ Take a compressed mysql backup

    Args:
    instance - A hostaddr instance
    timestamp - A timestamp which will be used to create the backup filename

    Returns:
    A string of the path to the finished backup
    """
    # Prevent issues with too many open files
    resource.setrlimit(resource.RLIMIT_NOFILE, (131072, 131072))
    (temp_path, target_path) = get_paths(port=str(instance.port))
    backup_file = BACKUP_FILE.format(hostname=instance.hostname,
                                     port=instance.port,
                                     timestamp=time.strftime(
                                         '%Y-%m-%d-%H:%M:%S', timestamp),
                                     backup_type=BACKUP_TYPE_XBSTREAM)
    tmp_xtra_path = os.path.join(temp_path, backup_file)
    target_xtra_path = os.path.join(target_path, backup_file)
    tmp_log = ''.join((tmp_xtra_path, '.log'))
    # BUG FIX: derive the log's destination from the target path. It was
    # previously built from tmp_xtra_path, so the os.rename below renamed
    # the log onto itself and it never reached the target directory.
    target_log = ''.join((target_xtra_path, '.log'))

    if host_utils.get_hiera_role() in host_utils.MASTERFUL_PUPPET_ROLES:
        cnf = host_utils.OLD_CONF_ROOT.format(port=instance.port)
        cnf_group = 'mysqld'
    else:
        cnf = host_utils.MYSQL_CNF_FILE
        cnf_group = 'mysqld{port}'.format(port=instance.port)
    datadir = host_utils.get_cnf_setting('datadir', instance.port)
    xtra_user, xtra_pass = mysql_lib.get_mysql_user_for_role(
        USER_ROLE_XTRABACKUP)
    cmd = XTRABACKUP_CMD.format(datadir=datadir,
                                xtra_user=xtra_user,
                                xtra_pass=xtra_pass,
                                cnf=cnf,
                                cnf_group=cnf_group,
                                port=instance.port,
                                tmp_log=tmp_log,
                                tmp_xtra_path=tmp_xtra_path)
    log.info(cmd)
    xtra = subprocess.Popen(cmd, shell=True)
    xtra.wait()
    with open(tmp_log, 'r') as log_file:
        xtra_log = log_file.readlines()
        # Guard against an empty log file so a dead innobackupex raises
        # the informative exception instead of IndexError.
        if not xtra_log or INNOBACKUP_OK not in xtra_log[-1]:
            raise Exception('innobackupex failed. '
                            'log_file: {tmp_log}'.format(tmp_log=tmp_log))

    log.info('Moving backup and log to {target}'.format(target=target_path))
    os.rename(tmp_xtra_path, target_xtra_path)
    os.rename(tmp_log, target_log)
    log.info('Xtrabackup was successful')
    return target_xtra_path
Beispiel #5
0
def xtrabackup_instance(instance, timestamp):
    """ Take a compressed mysql backup

    Args:
    instance - A hostaddr instance
    timestamp - A timestamp which will be used to create the backup filename

    Returns:
    A string of the path to the finished backup
    """
    # Prevent issues with too many open files
    resource.setrlimit(resource.RLIMIT_NOFILE, (131072, 131072))
    (temp_path, target_path) = get_paths(port=str(instance.port))
    backup_file = BACKUP_FILE.format(hostname=instance.hostname,
                                     port=instance.port,
                                     timestamp=time.strftime('%Y-%m-%d-%H:%M:%S',
                                                             timestamp),
                                     backup_type=BACKUP_TYPE_XBSTREAM)
    tmp_xtra_path = os.path.join(temp_path, backup_file)
    target_xtra_path = os.path.join(target_path, backup_file)
    tmp_log = ''.join((tmp_xtra_path, '.log'))
    # BUG FIX: target_log must be rooted at target_xtra_path; building it
    # from tmp_xtra_path made the final os.rename a self-rename, leaving
    # the log stranded in the temp directory.
    target_log = ''.join((target_xtra_path, '.log'))

    if host_utils.get_hiera_role() in host_utils.MASTERFUL_PUPPET_ROLES:
        cnf = host_utils.OLD_CONF_ROOT.format(port=instance.port)
        cnf_group = 'mysqld'
    else:
        cnf = host_utils.MYSQL_CNF_FILE
        cnf_group = 'mysqld{port}'.format(port=instance.port)
    datadir = host_utils.get_cnf_setting('datadir', instance.port)
    xtra_user, xtra_pass = mysql_lib.get_mysql_user_for_role(USER_ROLE_XTRABACKUP)
    cmd = XTRABACKUP_CMD.format(datadir=datadir,
                                xtra_user=xtra_user,
                                xtra_pass=xtra_pass,
                                cnf=cnf,
                                cnf_group=cnf_group,
                                port=instance.port,
                                tmp_log=tmp_log,
                                tmp_xtra_path=tmp_xtra_path)
    log.info(cmd)
    xtra = subprocess.Popen(cmd, shell=True)
    xtra.wait()
    with open(tmp_log, 'r') as log_file:
        xtra_log = log_file.readlines()
        # Empty log means innobackupex never ran; avoid IndexError on [-1].
        if not xtra_log or INNOBACKUP_OK not in xtra_log[-1]:
            raise Exception('innobackupex failed. '
                            'log_file: {tmp_log}'.format(tmp_log=tmp_log))

    log.info('Moving backup and log to {target}'.format(target=target_path))
    os.rename(tmp_xtra_path, target_xtra_path)
    os.rename(tmp_log, target_log)
    log.info('Xtrabackup was successful')
    return target_xtra_path
Beispiel #6
0
def logical_backup_instance(instance, timestamp):
    """ Take a compressed mysqldump backup

    Args:
    instance - A hostaddr instance
    timestamp - A timestamp which will be used to create the backup filename

    Returns:
    A string of the path to the finished backup
    """
    (temp_path, target_path) = get_paths(port=instance.port)
    dump_file = BACKUP_FILE.format(hostname=instance.hostname,
                                   port=instance.port,
                                   timestamp=time.strftime(
                                       '%Y-%m-%d-%H:%M:%S', timestamp),
                                   backup_type=BACKUP_TYPE_LOGICAL)
    dump_tmp_path = os.path.join(temp_path, dump_file)
    dump_path = os.path.join(target_path, dump_file)
    dump_user, dump_pass = mysql_lib.get_mysql_user_for_role(
        USER_ROLE_MYSQLDUMP)
    dump_cmd = MYSQLDUMP_CMD.format(dump_user=dump_user,
                                    dump_pass=dump_pass,
                                    host=instance.hostname,
                                    port=instance.port)

    with open(dump_tmp_path, "w") as out:
        log.info('Running dump')
        log.info('{dump_cmd} | {pigz} > {dump_tmp_path}'
                 ''.format(dump_cmd=dump_cmd,
                           pigz=PIGZ,
                           dump_tmp_path=dump_tmp_path))
        dump = subprocess.Popen(dump_cmd,
                                shell=True,
                                stdout=subprocess.PIPE,
                                stderr=sys.stderr)
        pigz = subprocess.Popen(PIGZ,
                                shell=True,
                                stdin=dump.stdout,
                                stdout=out,
                                stderr=sys.stderr)
        # Close our copy of the pipe so mysqldump gets SIGPIPE if pigz
        # exits early instead of blocking forever on a full pipe.
        dump.stdout.close()
        dump.wait()
        pigz.wait()
        if dump.returncode != 0:
            raise Exception("Error: mysqldump did not succeed, aborting")
        # A failed compressor silently produced a truncated/corrupt
        # backup before; treat it as fatal too.
        if pigz.returncode != 0:
            raise Exception("Error: compression did not succeed, aborting")

    log.info('Moving backup and to {target}'.format(target=target_path))
    os.rename(dump_tmp_path, dump_path)
    log.info('mysqldump was successful')
    return dump_path
Beispiel #7
0
def logical_backup_instance(instance, timestamp):
    """ Take a compressed mysqldump backup

    Args:
    instance - A hostaddr instance
    timestamp - A timestamp which will be used to create the backup filename

    Returns:
    A string of the path to the finished backup
    """
    (temp_path, target_path) = get_paths(port=instance.port)
    dump_file = BACKUP_FILE.format(hostname=instance.hostname,
                                   port=instance.port,
                                   timestamp=time.strftime('%Y-%m-%d-%H:%M:%S',
                                                           timestamp),
                                   backup_type=BACKUP_TYPE_LOGICAL)
    dump_tmp_path = os.path.join(temp_path, dump_file)
    dump_path = os.path.join(target_path, dump_file)
    dump_user, dump_pass = mysql_lib.get_mysql_user_for_role(USER_ROLE_MYSQLDUMP)
    dump_cmd = MYSQLDUMP_CMD.format(dump_user=dump_user,
                                    dump_pass=dump_pass,
                                    host=instance.hostname,
                                    port=instance.port)

    with open(dump_tmp_path, "w") as out:
        log.info('Running dump')
        log.info('{dump_cmd} | {pigz} > {dump_tmp_path}'
                 ''.format(dump_cmd=dump_cmd,
                           pigz=PIGZ,
                           dump_tmp_path=dump_tmp_path))
        dump = subprocess.Popen(dump_cmd,
                                shell=True,
                                stdout=subprocess.PIPE,
                                stderr=sys.stderr)
        pigz = subprocess.Popen(PIGZ,
                                shell=True,
                                stdin=dump.stdout,
                                stdout=out,
                                stderr=sys.stderr)
        # Drop the parent's handle on the pipe so a dying pigz delivers
        # SIGPIPE to mysqldump rather than deadlocking the pipeline.
        dump.stdout.close()
        dump.wait()
        pigz.wait()
        if dump.returncode != 0:
            raise Exception("Error: mysqldump did not succeed, aborting")
        # Previously a pigz failure went unnoticed and a corrupt backup
        # was reported as successful.
        if pigz.returncode != 0:
            raise Exception("Error: compression did not succeed, aborting")

    log.info('Moving backup and to {target}'.format(target=target_path))
    os.rename(dump_tmp_path, dump_path)
    log.info('mysqldump was successful')
    return dump_path
Beispiel #8
0
def logical_backup_instance(instance, timestamp, initial_build):
    """ Take a compressed mysqldump backup

    Args:
    instance - A hostaddr instance
    timestamp - A timestamp which will be used to create the backup filename
    initial_build - Boolean, if this is being created right after the server
                    was built

    Returns:
    A string of the path to the finished backup
    """
    backup_file = create_backup_file_name(instance, timestamp,
                                          initial_build,
                                          BACKUP_TYPE_LOGICAL)
    dump_user, dump_pass = mysql_lib.get_mysql_user_for_role(
        USER_ROLE_MYSQLDUMP)
    dump_cmd = MYSQLDUMP_CMD.format(dump_user=dump_user,
                                    dump_pass=dump_pass,
                                    host=instance.hostname,
                                    port=instance.port).split()

    # Pipeline: mysqldump | pv | pigz | s3 upload. Track every stage so
    # a failure can tear the whole chain down.
    procs = dict()
    try:
        log.info(' '.join(dump_cmd + ['|']))
        procs['mysqldump'] = subprocess.Popen(dump_cmd,
                                              stdout=subprocess.PIPE)
        procs['pv'] = create_pv_proc(procs['mysqldump'].stdout)
        log.info(' '.join(PIGZ + ['|']))
        procs['pigz'] = subprocess.Popen(PIGZ,
                                         stdin=procs['pv'].stdout,
                                         stdout=subprocess.PIPE)
        upload_bucket = environment_specific.BACKUP_BUCKET_UPLOAD_MAP[
            host_utils.get_iam_role()]
        log.info('Uploading backup to s3://{buk}/{key}'
                 ''.format(buk=upload_bucket,
                           key=backup_file))
        safe_uploader.safe_upload(precursor_procs=procs,
                                  stdin=procs['pigz'].stdout,
                                  bucket=upload_bucket,
                                  key=backup_file)
        log.info('mysqldump was successful')
        return backup_file
    except:
        # Reap any half-started pipeline stages before re-raising.
        safe_uploader.kill_precursor_procs(procs)
        raise
Beispiel #9
0
def upgrade_auth_tables(port):
    """ Run mysql_upgrade

    Args:
    port - the port of the instance on localhost to act on
    """
    start_mysql(port,
                DEFAULTS_FILE_ARG.format(defaults_file=MYSQL_UPGRADE_CNF_FILE))
    socket = get_cnf_setting('socket', port)
    username, password = mysql_lib.get_mysql_user_for_role('admin')
    # NOTE(review): the password also rides on the command line and is
    # visible in `ps`; a defaults file would be safer — confirm feasibility.
    cmd = ''.join((MYSQL_UPGRADE, ' ', '--upgrade-system-tables ', '-S ',
                   socket, ' ', '-u ', username, ' ', '-p', password))
    # Security fix: never write the plaintext admin password to the logs.
    log.info(cmd.replace(password, 'REDACTED'))
    (std_out, std_err, return_code) = shell_exec(cmd)
    log.info(std_out)
    if return_code != 0:
        log.warning(std_err)
        raise Exception('MySQL Upgrade failed with return code '
                        'of: {ret}'.format(ret=return_code))
    stop_mysql(port)
Beispiel #10
0
def logical_backup_instance(instance, timestamp):
    """ Take a compressed mysqldump backup and upload it to S3.

    Args:
    instance - A hostaddr instance
    timestamp - A timestamp which will be used to create the backup filename

    Returns:
    None. The backup is streamed to S3 (bucket environment_specific.S3_BUCKET
    under key dump_file) rather than written to a local path.
    """
    dump_file = BACKUP_FILE.format(hostname=instance.hostname,
                                   port=instance.port,
                                   timestamp=time.strftime('%Y-%m-%d-%H:%M:%S',
                                                           timestamp),
                                   backup_type=BACKUP_TYPE_LOGICAL)
    (dump_user,
     dump_pass) = mysql_lib.get_mysql_user_for_role(USER_ROLE_MYSQLDUMP)
    dump_cmd = MYSQLDUMP_CMD.format(dump_user=dump_user,
                                    dump_pass=dump_pass,
                                    host=instance.hostname,
                                    port=instance.port)
    # Pipeline stages keyed by name: mysqldump | pigz | upload.
    procs = dict()
    try:
        procs['mysqldump'] = subprocess.Popen(dump_cmd.split(),
                                              stdout=subprocess.PIPE)
        procs['pigz'] = subprocess.Popen(PIGZ,
                                         stdin=procs['mysqldump'].stdout,
                                         stdout=subprocess.PIPE)
        log.info('Uploading backup to {buk}/{key}'
                 ''.format(buk=environment_specific.S3_BUCKET,
                           key=dump_file))
        # safe_upload is handed the precursor procs so it can verify the
        # whole pipeline succeeded before finalizing the upload.
        safe_uploader.safe_upload(precursor_procs=procs,
                                  stdin=procs['pigz'].stdout,
                                  bucket=environment_specific.S3_BUCKET,
                                  key=dump_file)
        log.info('mysqldump was successful')
    except:
        # Tear down any still-running pipeline stages, then re-raise.
        safe_uploader.kill_precursor_procs(procs)
        raise
Beispiel #11
0
def logical_backup_instance(instance, timestamp):
    """ Take a compressed mysqldump backup and upload it to S3.

    Args:
    instance - A hostaddr instance
    timestamp - A timestamp which will be used to create the backup filename

    Returns:
    None. The backup is streamed to S3 (bucket environment_specific.S3_BUCKET
    under key dump_file) rather than written to a local path.
    """
    dump_file = BACKUP_FILE.format(hostname=instance.hostname,
                                   port=instance.port,
                                   timestamp=time.strftime(
                                       '%Y-%m-%d-%H:%M:%S', timestamp),
                                   backup_type=BACKUP_TYPE_LOGICAL)
    dump_user, dump_pass = mysql_lib.get_mysql_user_for_role(
        USER_ROLE_MYSQLDUMP)
    dump_cmd = MYSQLDUMP_CMD.format(dump_user=dump_user,
                                    dump_pass=dump_pass,
                                    host=instance.hostname,
                                    port=instance.port)
    # Pipeline stages keyed by name: mysqldump | pigz | upload.
    procs = dict()
    try:
        procs['mysqldump'] = subprocess.Popen(dump_cmd.split(),
                                              stdout=subprocess.PIPE)
        procs['pigz'] = subprocess.Popen(PIGZ,
                                         stdin=procs['mysqldump'].stdout,
                                         stdout=subprocess.PIPE)
        log.info('Uploading backup to {buk}/{key}'
                 ''.format(buk=environment_specific.S3_BUCKET, key=dump_file))
        # safe_upload is handed the precursor procs so it can verify the
        # whole pipeline succeeded before finalizing the upload.
        safe_uploader.safe_upload(precursor_procs=procs,
                                  stdin=procs['pigz'].stdout,
                                  bucket=environment_specific.S3_BUCKET,
                                  key=dump_file)
        log.info('mysqldump was successful')
    except:
        # Tear down any still-running pipeline stages, then re-raise.
        safe_uploader.kill_precursor_procs(procs)
        raise
Beispiel #12
0
def upgrade_auth_tables(port):
    """ Run mysql_upgrade

    Args:
    port - the port of the instance on localhost to act on
    """
    start_mysql(port,
                DEFAULTS_FILE_ARG.format(defaults_file=MYSQL_UPGRADE_CNF_FILE))
    socket = get_cnf_setting('socket', port)
    username, password = mysql_lib.get_mysql_user_for_role('admin')
    # NOTE(review): the password also rides on the command line and is
    # visible in `ps`; a defaults file would be safer — confirm feasibility.
    cmd = ''.join((MYSQL_UPGRADE, ' ',
                   '--upgrade-system-tables ',
                   '-S ', socket, ' ',
                   '-u ', username, ' ',
                   '-p', password))
    # Security fix: never write the plaintext admin password to the logs.
    log.info(cmd.replace(password, 'REDACTED'))
    (std_out, std_err, return_code) = shell_exec(cmd)
    log.info(std_out)
    if return_code != 0:
        log.warning(std_err)
        raise Exception('MySQL Upgrade failed with return code '
                        'of: {ret}'.format(ret=return_code))
    stop_mysql(port)
Beispiel #13
0
def logical_backup_instance(instance, timestamp, blackhole=False,
                            initial_build=False, databases=None):
    """ Take a compressed mysqldump backup

    Args:
        instance - A hostaddr instance
        timestamp - A timestamp which will be used to create the backup filename
        blackhole - Boolean, if set will backup DBs as blackhole tables
                    with no indexes or data
        initial_build - Boolean, if this is being created right after the server
                      was built
        databases - List, if set backup only a subset of databases

    Returns:
        An S3 key of the backup.
    """
    zk = host_utils.MysqlZookeeper()
    try:
        replica_type = zk.get_replica_type_from_instance(instance)
    except:
        # instance is not in production
        replica_type = host_utils.REPLICA_ROLE_MASTER

    # Record master vs slave coordinates depending on the replica's role.
    arg_repl_coordinate = ARG_MASTER_DATA \
        if replica_type == host_utils.REPLICA_ROLE_MASTER else ARG_SLAVE_DATA

    arg_no_data = ARG_NO_DATA if blackhole else ''
    if databases:
        backup_type = BACKUP_TYPE_PARTIAL_LOGICAL
        db_args = ARG_DATABASES.format(dbs=' '.join(databases))
    else:
        backup_type = BACKUP_TYPE_LOGICAL
        db_args = ARG_ALL_DATABASES

    arg_replace = ARG_REPLACE if databases == [mysql_lib.METADATA_DB] else ''
    dump_user, dump_pass = mysql_lib.get_mysql_user_for_role(USER_ROLE_MYSQLDUMP)

    dump_cmd = MYSQLDUMP_CMD.format(dump_user=dump_user,
                                    dump_pass=dump_pass,
                                    host=instance.hostname,
                                    port=instance.port,
                                    db_args=db_args,
                                    net_buffer_length=MAX_INSERT_LENGTH,
                                    arg_repl_coordinate=arg_repl_coordinate,
                                    arg_replace=arg_replace,
                                    arg_no_data=arg_no_data).split()

    backup_file = create_backup_file_name(instance, timestamp,
                                          initial_build,
                                          backup_type)
    procs = dict()
    # Fix: wrap the whole pipeline in try/except so any half-started
    # stage is killed instead of leaked (matches the other backup
    # helpers that call kill_precursor_procs on failure).
    try:
        log.info(' '.join(dump_cmd + ['|']))
        procs['mysqldump'] = subprocess.Popen(dump_cmd,
                                              stdout=subprocess.PIPE)
        if blackhole:
            procs['innodb_to_blackhole'] = subprocess.Popen(
                    INNODB_TO_BLACKHOLE,
                    shell=True,
                    stdin=procs['mysqldump'].stdout,
                    stdout=subprocess.PIPE)
            log.info(' '.join([INNODB_TO_BLACKHOLE, '|']))

            # Blackhole only supports indexes up to 1k long, which is shorter
            # than InnoDB. We are therefore removing indexes and
            # auto_inc columns.
            procs['remove_auto_inc_col_arg'] = subprocess.Popen(
                    REMOVE_AUTO_INC_COL_ARG,
                    shell=True,
                    stdin=procs['innodb_to_blackhole'].stdout,
                    stdout=subprocess.PIPE)
            log.info(' '.join([REMOVE_AUTO_INC_COL_ARG, '|']))

            procs['remove_auto_inc_start_value'] = subprocess.Popen(
                    REMOVE_AUTO_INC_START_VALUE,
                    shell=True,
                    stdin=procs['remove_auto_inc_col_arg'].stdout,
                    stdout=subprocess.PIPE)
            log.info(' '.join([REMOVE_AUTO_INC_START_VALUE, '|']))

            procs['remove_indexes'] = subprocess.Popen(
                    REMOVE_INDEXES,
                    shell=True,
                    stdin=procs['remove_auto_inc_start_value'].stdout,
                    stdout=subprocess.PIPE)
            log.info(' '.join([REMOVE_INDEXES, '|']))
            stdout = procs['remove_indexes'].stdout

        elif databases == [mysql_lib.METADATA_DB]:
            # If we are backing up the metadata db, we don't want to nuke
            # existing data, but need to copy existing data over for rbr
            # to work.
            procs['create_if_not_exists_sed'] = subprocess.Popen(
                    CREATE_IF_NOT_EXISTS_SED,
                    shell=True,
                    stdin=procs['mysqldump'].stdout,
                    stdout=subprocess.PIPE)
            log.info(' '.join([CREATE_IF_NOT_EXISTS_SED, '|']))
            stdout = procs['create_if_not_exists_sed'].stdout
        else:
            stdout = procs['mysqldump'].stdout

        log.info(' '.join(PIGZ + ['|']))
        procs['pigz'] = subprocess.Popen(PIGZ,
                                         stdin=stdout,
                                         stdout=subprocess.PIPE)
        key = safe_uploader.safe_upload(
                precursor_procs=procs,
                stdin=procs['pigz'].stdout,
                bucket=environment_specific.BACKUP_BUCKET_UPLOAD_MAP[host_utils.get_iam_role()],
                key=backup_file,
                verbose=True)
    except:
        safe_uploader.kill_precursor_procs(procs)
        raise

    log.info('mysqldump was successful')
    return key