Code Example #1
def upload_binlog(instance, binlog, dry_run):
    """ Upload a binlog file to s3

    Args:
    instance - a hostAddr object
    binlog - the full path to the binlog file
    dry_run - if set, do not actually upload a binlog
    """
    s3_upload_path = s3_binlog_path(instance, binlog)
    log.info('Local file {local_file} will be uploaded to {s3_upload_path}'
             ''.format(local_file=binlog, s3_upload_path=s3_upload_path))

    if dry_run:
        log.info('In dry_run mode, skipping compression and upload')
        return

    procs = dict()
    try:
        procs['lzop'] = subprocess.Popen(['lzop', binlog, '--to-stdout'],
                                         stdout=subprocess.PIPE)
        safe_uploader.safe_upload(precursor_procs=procs,
                                  stdin=procs['lzop'].stdout,
                                  bucket=environment_specific.S3_BINLOG_BUCKET,
                                  key=s3_upload_path)
    except:
        log.debug('In exception handling for failed binlog upload')
        safe_uploader.kill_precursor_procs(procs)
        raise
    log_binlog_upload(instance, binlog)
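
All three binlog variants in this listing follow the same calling pattern: build the S3 key, start lzop as a precursor process, stream its stdout into safe_uploader.safe_upload, and kill the precursor processes before re-raising if anything fails. A hedged invocation sketch follows; the host string and binlog path are invented for illustration and are not taken from the repository.

# Hypothetical usage sketch only: the host string and binlog path are made
# up; host_utils.HostAddr is assumed to accept a 'hostname:port' string and
# produce the hostAddr object the docstring expects.
instance = host_utils.HostAddr('mysqldb-001:3306')
upload_binlog(instance,
              '/raid0/mysql/3306/binlogs/mysql-bin.000123',
              dry_run=True)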
Code Example #2
def upload_binlog(instance, binlog, dry_run):
    """ Upload a binlog file to s3

    Args:
    instance - a hostAddr object
    binlog - the full path to the binlog file
    dry_run - if set, do not actually upload a binlog
    """
    s3_upload_path = s3_binlog_path(instance, binlog)
    bucket = environment_specific.BACKUP_BUCKET_UPLOAD_MAP[
        host_utils.get_iam_role()]

    if dry_run:
        log.info('In dry_run mode, skipping compression and upload')
        return

    procs = dict()
    procs['lzop'] = subprocess.Popen(['lzop', binlog, '--to-stdout'],
                                     stdout=subprocess.PIPE)
    safe_uploader.safe_upload(precursor_procs=procs,
                              stdin=procs['lzop'].stdout,
                              bucket=bucket,
                              key=s3_upload_path,
                              verbose=True)
    log_binlog_upload(instance, binlog)
Code Example #3
def upload_binlog(instance, binlog, dry_run):
    """ Upload a binlog file to s3

    Args:
    instance - a hostAddr object
    binlog - the full path to the binlog file
    dry_run - if set, do not actually upload a binlog
    """
    s3_upload_path = s3_binlog_path(instance, binlog)
    bucket = environment_specific.BACKUP_BUCKET_UPLOAD_MAP[host_utils.get_iam_role()]
    log.info('Local file {local_file} will be uploaded to s3://{buk}/{s3_upload_path}'
             ''.format(local_file=binlog,
                       buk=bucket,
                       s3_upload_path=s3_upload_path))

    if dry_run:
        log.info('In dry_run mode, skipping compression and upload')
        return

    procs = dict()
    try:
        procs['lzop'] = subprocess.Popen(['lzop', binlog, '--to-stdout'],
                                         stdout=subprocess.PIPE)
        safe_uploader.safe_upload(precursor_procs=procs,
                                  stdin=procs['lzop'].stdout,
                                  bucket=bucket,
                                  key=s3_upload_path)
    except:
        log.debug('In exception handling for failed binlog upload')
        safe_uploader.kill_precursor_procs(procs)
        raise
    log_binlog_upload(instance, binlog)
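
Examples #2 and #3 pick the destination bucket from environment_specific.BACKUP_BUCKET_UPLOAD_MAP, keyed by the host's IAM role. That map is site-specific configuration and is not part of these examples; the sketch below only illustrates its expected shape, with made-up role and bucket names.

# Illustration of shape only: role and bucket names below are invented.
# The real mapping is site-specific configuration in environment_specific.py,
# keyed by whatever host_utils.get_iam_role() returns on the host.
BACKUP_BUCKET_UPLOAD_MAP = {
    'mysql-prod-backup-role': 'example-prod-mysql-backups',
    'mysql-test-backup-role': 'example-test-mysql-backups',
}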
Code Example #4
File: backup.py Project: xiaolushare/mysql_utils
def xtrabackup_instance(instance, timestamp, initial_build):
    """ Take a compressed mysql backup

    Args:
    instance - A hostaddr instance
    timestamp - A timestamp which will be used to create the backup filename
    initial_build - Boolean, if this is being created right after the server
                    was built

    Returns:
    A string of the path to the finished backup
    """
    # Prevent issues with too many open files
    resource.setrlimit(resource.RLIMIT_NOFILE, (131072, 131072))
    backup_file = create_backup_file_name(instance, timestamp,
                                          initial_build,
                                          BACKUP_TYPE_XBSTREAM)

    tmp_log = os.path.join(environment_specific.RAID_MOUNT,
                           'log', 'xtrabackup_{ts}.log'.format(
                            ts=time.strftime('%Y-%m-%d-%H:%M:%S', timestamp)))
    tmp_log_handle = open(tmp_log, "w")
    procs = dict()
    try:
        cmd = create_xtrabackup_command(instance, timestamp, tmp_log)
        log.info(' '.join(cmd + [' 2> ', tmp_log, ' | ']))
        procs['xtrabackup'] = subprocess.Popen(cmd,
                                               stdout=subprocess.PIPE,
                                               stderr=tmp_log_handle)
        procs['pv'] = create_pv_proc(procs['xtrabackup'].stdout)
        log.info('Uploading backup to s3://{buk}/{loc}'
                 ''.format(buk=environment_specific.BACKUP_BUCKET_UPLOAD_MAP[host_utils.get_iam_role()],
                           loc=backup_file))
        safe_uploader.safe_upload(precursor_procs=procs,
                                  bucket=environment_specific.BACKUP_BUCKET_UPLOAD_MAP[host_utils.get_iam_role()],
                                  stdin=procs['pv'].stdout,
                                  key=backup_file,
                                  check_func=check_xtrabackup_log,
                                  check_arg=tmp_log)
        log.info('Xtrabackup was successful')
        return backup_file
    except:
        safe_uploader.kill_precursor_procs(procs)
        raise
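
Example #4 passes check_xtrabackup_log as safe_upload's check_func so the upload is only finalized if the backup itself succeeded. The repository's implementation is not shown in this listing; a minimal sketch, assuming it simply scans the log for xtrabackup's 'completed OK!' marker, could look like this.

# Hedged sketch, not the repository's implementation: xtrabackup finishes a
# clean run by logging 'completed OK!', so a check_func can refuse to
# finalize the upload when that marker is missing.
def check_xtrabackup_log(tmp_log):
    with open(tmp_log) as log_handle:
        if 'completed OK!' not in log_handle.read():
            raise Exception('xtrabackup failed, see {log}'.format(log=tmp_log))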
Code Example #5
File: backup.py Project: xiaolushare/mysql_utils
def logical_backup_instance(instance, timestamp, initial_build):
    """ Take a compressed mysqldump backup

    Args:
    instance - A hostaddr instance
    timestamp - A timestamp which will be used to create the backup filename
    initial_build - Boolean, if this is being created right after the server
                    was built

    Returns:
    A string of the path to the finished backup
    """
    backup_file = create_backup_file_name(instance, timestamp,
                                          initial_build,
                                          BACKUP_TYPE_LOGICAL)
    (dump_user,
     dump_pass) = mysql_lib.get_mysql_user_for_role(USER_ROLE_MYSQLDUMP)
    dump_cmd = MYSQLDUMP_CMD.format(dump_user=dump_user,
                                    dump_pass=dump_pass,
                                    host=instance.hostname,
                                    port=instance.port).split()

    procs = dict()
    try:
        log.info(' '.join(dump_cmd + ['|']))
        procs['mysqldump'] = subprocess.Popen(dump_cmd,
                                              stdout=subprocess.PIPE)
        procs['pv'] = create_pv_proc(procs['mysqldump'].stdout)
        log.info(' '.join(PIGZ + ['|']))
        procs['pigz'] = subprocess.Popen(PIGZ,
                                         stdin=procs['pv'].stdout,
                                         stdout=subprocess.PIPE)
        log.info('Uploading backup to s3://{buk}/{key}'
                 ''.format(buk=environment_specific.BACKUP_BUCKET_UPLOAD_MAP[host_utils.get_iam_role()],
                           key=backup_file))
        safe_uploader.safe_upload(precursor_procs=procs,
                                  stdin=procs['pigz'].stdout,
                                  bucket=environment_specific.BACKUP_BUCKET_UPLOAD_MAP[host_utils.get_iam_role()],
                                  key=backup_file)
        log.info('mysqldump was successful')
        return backup_file
    except:
        safe_uploader.kill_precursor_procs(procs)
        raise
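
Examples #4 and #5 insert pv and pigz into the pipeline through the create_pv_proc helper and the PIGZ constant, neither of which is shown above. The sketch below is an assumption about their shape: pv only reports throughput while passing bytes through, and PIGZ is presumably the argv list for parallel gzip reading from stdin.

# Hedged sketch of two helpers referenced above; the real definitions live
# elsewhere in the repository and may differ.
PIGZ = ['pigz', '-c']  # parallel gzip: compress stdin, write to stdout

def create_pv_proc(stdin):
    # pv passes the stream through unchanged and prints rate/bytes to stderr
    return subprocess.Popen(['pv', '--rate', '--bytes'],
                            stdin=stdin,
                            stdout=subprocess.PIPE)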
Code Example #6
File: backup.py Project: sdgdsffdsfff/mysql_utils
def xtrabackup_instance(instance, timestamp):
    """ Take a compressed mysql backup

    Args:
    instance - A hostaddr instance
    timestamp - A timestamp which will be used to create the backup filename

    Returns:
    A string of the path to the finished backup
    """
    # Prevent issues with too many open files
    resource.setrlimit(resource.RLIMIT_NOFILE, (131072, 131072))
    backup_file = BACKUP_FILE.format(hostname=instance.hostname,
                                     port=instance.port,
                                     timestamp=time.strftime(
                                         '%Y-%m-%d-%H:%M:%S', timestamp),
                                     backup_type=BACKUP_TYPE_XBSTREAM)

    tmp_log = os.path.join(
        environment_specific.RAID_MOUNT, 'log', ''.join([
            'xtrabackup_',
            time.strftime('%Y-%m-%d-%H:%M:%S', timestamp), '.log'
        ]))
    tmp_log_handle = open(tmp_log, "w")
    procs = dict()
    try:
        procs['xtrabackup'] = subprocess.Popen(create_xtrabackup_command(
            instance, timestamp, tmp_log),
                                               stdout=subprocess.PIPE,
                                               stderr=tmp_log_handle)
        log.info('Uploading backup to {buk}/{loc}'
                 ''.format(buk=environment_specific.S3_BUCKET,
                           loc=backup_file))
        safe_uploader.safe_upload(precursor_procs=procs,
                                  stdin=procs['xtrabackup'].stdout,
                                  bucket=environment_specific.S3_BUCKET,
                                  key=backup_file,
                                  check_func=check_xtrabackup_log,
                                  check_arg=tmp_log)
        log.info('Xtrabackup was successful')
        return backup_file
    except:
        safe_uploader.kill_precursor_procs(procs)
        raise
Code Example #7
File: backup.py Project: Gejove/mysql_utils
def xtrabackup_instance(instance, timestamp):
    """ Take a compressed mysql backup

    Args:
    instance - A hostaddr instance
    timestamp - A timestamp which will be used to create the backup filename

    Returns:
    A string of the path to the finished backup
    """
    # Prevent issues with too many open files
    resource.setrlimit(resource.RLIMIT_NOFILE, (131072, 131072))
    backup_file = BACKUP_FILE.format(hostname=instance.hostname,
                                     port=instance.port,
                                     timestamp=time.strftime('%Y-%m-%d-%H:%M:%S', timestamp),
                                     backup_type=BACKUP_TYPE_XBSTREAM)

    tmp_log = os.path.join(environment_specific.RAID_MOUNT,
                           'log',
                           ''.join(['xtrabackup_',
                                    time.strftime('%Y-%m-%d-%H:%M:%S', timestamp),
                                    '.log']))
    tmp_log_handle = open(tmp_log, "w")
    procs = dict()
    try:
        procs['xtrabackup'] = subprocess.Popen(create_xtrabackup_command(instance, timestamp, tmp_log),
                                               stdout=subprocess.PIPE,
                                               stderr=tmp_log_handle)
        log.info('Uploading backup to {buk}/{loc}'
                 ''.format(buk=environment_specific.S3_BUCKET,
                           loc=backup_file))
        safe_uploader.safe_upload(precursor_procs=procs,
                                  stdin=procs['xtrabackup'].stdout,
                                  bucket=environment_specific.S3_BUCKET,
                                  key=backup_file,
                                  check_func=check_xtrabackup_log,
                                  check_arg=tmp_log)
        log.info('Xtrabackup was successful')
        return backup_file
    except:
        safe_uploader.kill_precursor_procs(procs)
        raise
Code Example #8
File: backup.py Project: anhvanngouiowa/mysql_utils
def logical_backup_instance(instance, timestamp):
    """ Take a compressed mysqldump backup

    Args:
    instance - A hostaddr instance
    timestamp - A timestamp which will be used to create the backup filename

    Returns:
    A string of the path to the finished backup
    """
    dump_file = BACKUP_FILE.format(hostname=instance.hostname,
                                   port=instance.port,
                                   timestamp=time.strftime('%Y-%m-%d-%H:%M:%S',
                                                           timestamp),
                                   backup_type=BACKUP_TYPE_LOGICAL)
    (dump_user,
     dump_pass) = mysql_lib.get_mysql_user_for_role(USER_ROLE_MYSQLDUMP)
    dump_cmd = MYSQLDUMP_CMD.format(dump_user=dump_user,
                                    dump_pass=dump_pass,
                                    host=instance.hostname,
                                    port=instance.port)
    procs = dict()
    try:
        procs['mysqldump'] = subprocess.Popen(dump_cmd.split(),
                                              stdout=subprocess.PIPE)
        procs['pigz'] = subprocess.Popen(PIGZ,
                                         stdin=procs['mysqldump'].stdout,
                                         stdout=subprocess.PIPE)
        log.info('Uploading backup to {buk}/{key}'
                 ''.format(buk=environment_specific.S3_BUCKET,
                           key=dump_file))
        safe_uploader.safe_upload(precursor_procs=procs,
                                  stdin=procs['pigz'].stdout,
                                  bucket=environment_specific.S3_BUCKET,
                                  key=dump_file)
        log.info('mysqldump was successful')
        return dump_file
    except:
        safe_uploader.kill_precursor_procs(procs)
        raise
Code Example #9
File: backup.py Project: sdgdsffdsfff/mysql_utils
def logical_backup_instance(instance, timestamp):
    """ Take a compressed mysqldump backup

    Args:
    instance - A hostaddr instance
    timestamp - A timestamp which will be used to create the backup filename

    Returns:
    A string of the path to the finished backup
    """
    dump_file = BACKUP_FILE.format(hostname=instance.hostname,
                                   port=instance.port,
                                   timestamp=time.strftime(
                                       '%Y-%m-%d-%H:%M:%S', timestamp),
                                   backup_type=BACKUP_TYPE_LOGICAL)
    dump_user, dump_pass = mysql_lib.get_mysql_user_for_role(
        USER_ROLE_MYSQLDUMP)
    dump_cmd = MYSQLDUMP_CMD.format(dump_user=dump_user,
                                    dump_pass=dump_pass,
                                    host=instance.hostname,
                                    port=instance.port)
    procs = dict()
    try:
        procs['mysqldump'] = subprocess.Popen(dump_cmd.split(),
                                              stdout=subprocess.PIPE)
        procs['pigz'] = subprocess.Popen(PIGZ,
                                         stdin=procs['mysqldump'].stdout,
                                         stdout=subprocess.PIPE)
        log.info('Uploading backup to {buk}/{key}'
                 ''.format(buk=environment_specific.S3_BUCKET, key=dump_file))
        safe_uploader.safe_upload(precursor_procs=procs,
                                  stdin=procs['pigz'].stdout,
                                  bucket=environment_specific.S3_BUCKET,
                                  key=dump_file)
        log.info('mysqldump was successful')
        return dump_file
    except:
        safe_uploader.kill_precursor_procs(procs)
        raise
Code Example #10
    def mysql_backup_csv_table(self, db, table, tmp_dir_db, conn):
        """ Back up a single table of a single db

        Args:
        db - the db to be backed up
        table - the table to be backed up
        tmp_dir_db - temporary storage used for all tables in the db
        conn - a connection to the mysql instance
        """
        proc_id = multiprocessing.current_process().name
        (_, data_path, _) = environment_specific.get_csv_backup_paths(
                                self.datestamp, db, table,
                                self.instance.replica_type,
                                self.instance.get_zk_replica_set()[0])
        log.debug('{proc_id}: {db}.{table} dump to {path} started'
                  ''.format(proc_id=proc_id,
                            db=db,
                            table=table,
                            path=data_path))
        self.upload_schema(db, table, tmp_dir_db)
        fifo = os.path.join(tmp_dir_db, table)
        procs = dict()
        try:
            # giant try so we can try to clean things up in case of errors
            self.create_fifo(fifo)

            # Start creating processes
            procs['cat'] = subprocess.Popen(['cat', fifo],
                                            stdout=subprocess.PIPE)
            procs['nullescape'] = subprocess.Popen(['nullescape'],
                                                   stdin=procs['cat'].stdout,
                                                   stdout=subprocess.PIPE)
            procs['lzop'] = subprocess.Popen(['lzop'],
                                             stdin=procs['nullescape'].stdout,
                                             stdout=subprocess.PIPE)

            # Start dump query
            return_value = set()
            query_thread = threading.Thread(target=self.run_dump_query,
                                            args=(db, table, fifo,
                                                  conn, procs['cat'], return_value))
            query_thread.daemon = True
            query_thread.start()

            # And run the upload
            safe_uploader.safe_upload(precursor_procs=procs,
                                      stdin=procs['lzop'].stdout,
                                      bucket=self.upload_bucket,
                                      key=data_path,
                                      check_func=self.check_dump_success,
                                      check_arg=return_value)
            os.remove(fifo)
            log.debug('{proc_id}: {db}.{table} clean up complete'
                      ''.format(proc_id=proc_id,
                                db=db,
                                table=table))
        except:
            log.debug('{proc_id}: in exception handling for failed table upload'
                      ''.format(proc_id=proc_id))

            if os.path.exists(fifo):
                self.cleanup_fifo(fifo)

            safe_uploader.kill_precursor_procs(procs)
            raise
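
The CSV backup examples (#10 to #12) run the dump query in a thread that writes, presumably via SELECT ... INTO OUTFILE, into a named pipe; cat reads the pipe and feeds the nullescape/lzop/safe_upload chain, so the data never sits on disk uncompressed. create_fifo and cleanup_fifo are methods of the backup class and are not reproduced in this listing; minimal versions built on os.mkfifo might look like the following.

# Hedged sketch only: minimal named-pipe helpers assumed by the examples;
# the real methods may also set ownership or permissions on the pipe.
import os

def create_fifo(fifo_path):
    # named pipe the dump query writes into and 'cat' reads from
    os.mkfifo(fifo_path)

def cleanup_fifo(fifo_path):
    # remove a leftover pipe after a failed dump
    os.remove(fifo_path)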
Code Example #11
    def mysql_backup_one_partition(self, table_tuple, tmp_dir_db, conn):
        """ Back up a single partition of a single table

        Args:
            table_tuple - the table_tuple (db, partition name, partition number)
                          to be backed up
            tmp_dir_db - temporary storage used for all tables in the db
            conn - a connection to the mysql instance
        """
        proc_id = multiprocessing.current_process().name
        (_, data_path,
         _) = backup.get_csv_backup_paths(self.instance,
                                          *table_tuple[0].split('.'),
                                          date=self.datestamp,
                                          partition_number=table_tuple[2])
        log.debug('{proc_id}: {tbl} partition {p} dump to {path} started'
                  ''.format(proc_id=proc_id,
                            tbl=table_tuple[0],
                            p=table_tuple[2],
                            path=data_path))
        self.upload_schema(*table_tuple[0].split('.'), tmp_dir_db=tmp_dir_db)
        fifo = os.path.join(
            tmp_dir_db, '{tbl}{part}'.format(tbl=table_tuple[0].split('.')[1],
                                             part=table_tuple[2]))
        procs = dict()
        try:
            # giant try so we can try to clean things up in case of errors
            self.create_fifo(fifo)

            # Start creating processes
            procs['cat'] = subprocess.Popen(['cat', fifo],
                                            stdout=subprocess.PIPE)
            procs['nullescape'] = subprocess.Popen(['nullescape'],
                                                   stdin=procs['cat'].stdout,
                                                   stdout=subprocess.PIPE)
            procs['lzop'] = subprocess.Popen(['lzop'],
                                             stdin=procs['nullescape'].stdout,
                                             stdout=subprocess.PIPE)

            # Start dump query
            return_value = set()
            query_thread = threading.Thread(target=self.run_dump_query,
                                            args=(table_tuple, fifo, conn,
                                                  procs['cat'], return_value))
            query_thread.daemon = True
            query_thread.start()

            # And run the upload
            safe_uploader.safe_upload(precursor_procs=procs,
                                      stdin=procs['lzop'].stdout,
                                      bucket=self.upload_bucket,
                                      key=data_path,
                                      check_func=self.check_dump_success,
                                      check_arg=return_value)
            os.remove(fifo)
            log.debug('{proc_id}: {tbl} partition {p} clean up complete'
                      ''.format(proc_id=proc_id,
                                tbl=table_tuple[0],
                                p=table_tuple[2]))
        except:
            log.debug('{}: in exception handling for failed table '
                      'upload'.format(proc_id))

            if os.path.exists(fifo):
                self.cleanup_fifo(fifo)
            raise
Code Example #12
    def mysql_backup_csv_table(self, db, table, tmp_dir_db, conn):
        """ Back up a single table of a single db

        Args:
        db - the db to be backed up
        table - the table to be backed up
        tmp_dir_db - temporary storage used for all tables in the db
        conn - a connection to the mysql instance
        """
        proc_id = multiprocessing.current_process().name
        s3_upload_path = self.get_s3_backup_path(db, table)
        log.debug('{proc_id}: {db}.{table} dump to {path} started'
                  ''.format(proc_id=proc_id,
                            db=db,
                            table=table,
                            path=s3_upload_path))
        self.upload_schema(db, table, tmp_dir_db)
        fifo = os.path.join(tmp_dir_db, table)
        procs = dict()
        try:
            # giant try so we can try to clean things up in case of errors
            self.create_fifo(fifo)

            # Start creating processes
            procs['cat'] = subprocess.Popen(['cat', fifo],
                                            stdout=subprocess.PIPE)
            procs['nullescape'] = subprocess.Popen(['nullescape'],
                                                   stdin=procs['cat'].stdout,
                                                   stdout=subprocess.PIPE)
            procs['lzop'] = subprocess.Popen(['lzop'],
                                             stdin=procs['nullescape'].stdout,
                                             stdout=subprocess.PIPE)

            # Start dump query
            return_value = set()
            query_thread = threading.Thread(target=self.run_dump_query,
                                            args=(db, table, fifo, conn,
                                                  procs['cat'], return_value))
            query_thread.daemon = True
            query_thread.start()

            # And run the upload
            safe_uploader.safe_upload(
                precursor_procs=procs,
                stdin=procs['lzop'].stdout,
                bucket=environment_specific.S3_CSV_BUCKET,
                key=s3_upload_path,
                check_func=self.check_dump_success,
                check_arg=return_value)
            os.remove(fifo)
            log.debug('{proc_id}: {db}.{table} clean up complete'
                      ''.format(proc_id=proc_id, db=db, table=table))
        except:
            log.debug(
                '{proc_id}: in exception handling for failed table upload'
                ''.format(proc_id=proc_id))

            if os.path.exists(fifo):
                self.cleanup_fifo(fifo)

            safe_uploader.kill_precursor_procs(procs)

            raise
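
In the CSV examples the dump query runs in a daemon thread and reports its outcome through the shared return_value set, which check_dump_success inspects before safe_upload finalizes the object. The real method is not shown here; a minimal sketch, assuming the thread adds a marker to the set when the query completes, could be:

# Hedged sketch, not the repository's code: assumes run_dump_query adds a
# marker to return_value when the dump query completes, so an empty set
# means the dump never finished and the upload must not be finalized.
def check_dump_success(self, return_value):
    if not return_value:
        raise Exception('dump query did not report success')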
Code Example #13
def logical_backup_instance(instance, timestamp, blackhole=False,
                            initial_build=False, databases=None):
    """ Take a compressed mysqldump backup

    Args:
        instance - A hostaddr instance
        timestamp - A timestamp which will be used to create the backup filename
        blackhole - Boolean, if set will backup DBs as blackhole tables
                    with no indexes or data
        initial_build - Boolean, if this is being created right after the server
                      was built
        databases - List, if set backup only a subset of databases

    Returns:
        An S3 key of the backup.
    """
    zk = host_utils.MysqlZookeeper()
    try:
        replica_type = zk.get_replica_type_from_instance(instance)
    except:
        # instance is not in production
        replica_type = host_utils.REPLICA_ROLE_MASTER

    arg_repl_coordinate = ARG_MASTER_DATA \
        if replica_type == host_utils.REPLICA_ROLE_MASTER else ARG_SLAVE_DATA

    arg_no_data = ARG_NO_DATA if blackhole else ''
    if databases:
        backup_type = BACKUP_TYPE_PARTIAL_LOGICAL
        db_args = ARG_DATABASES.format(dbs=' '.join(databases))
    else:
        backup_type = BACKUP_TYPE_LOGICAL
        db_args = ARG_ALL_DATABASES

    arg_replace = ARG_REPLACE if databases == [mysql_lib.METADATA_DB] else ''
    dump_user, dump_pass = mysql_lib.get_mysql_user_for_role(USER_ROLE_MYSQLDUMP)

    dump_cmd = MYSQLDUMP_CMD.format(dump_user=dump_user,
                                    dump_pass=dump_pass,
                                    host=instance.hostname,
                                    port=instance.port,
                                    db_args=db_args,
                                    net_buffer_length=MAX_INSERT_LENGTH,
                                    arg_repl_coordinate=arg_repl_coordinate,
                                    arg_replace=arg_replace,
                                    arg_no_data=arg_no_data).split()

    backup_file = create_backup_file_name(instance, timestamp,
                                          initial_build,
                                          backup_type)
    procs = dict()
    log.info(' '.join(dump_cmd + ['|']))
    procs['mysqldump'] = subprocess.Popen(dump_cmd,
                                          stdout=subprocess.PIPE)
    if blackhole:
        procs['innodb_to_blackhole'] = subprocess.Popen(
                INNODB_TO_BLACKHOLE,
                shell=True,
                stdin=procs['mysqldump'].stdout,
                stdout=subprocess.PIPE)
        log.info(' '.join([INNODB_TO_BLACKHOLE, '|']))

        # Blackhole only supports indexes up to 1k long, which is shorter
        # than InnoDB. We are therefore removing indexes and
        # auto_inc columns.
        procs['remove_auto_inc_col_arg'] = subprocess.Popen(
                REMOVE_AUTO_INC_COL_ARG,
                shell=True,
                stdin=procs['innodb_to_blackhole'].stdout,
                stdout=subprocess.PIPE)
        log.info(' '.join([REMOVE_AUTO_INC_COL_ARG, '|']))

        procs['remove_auto_inc_start_value'] = subprocess.Popen(
                REMOVE_AUTO_INC_START_VALUE,
                shell=True,
                stdin=procs['remove_auto_inc_col_arg'].stdout,
                stdout=subprocess.PIPE)
        log.info(' '.join([REMOVE_AUTO_INC_START_VALUE, '|']))

        procs['remove_indexes'] = subprocess.Popen(
                REMOVE_INDEXES,
                shell=True,
                stdin=procs['remove_auto_inc_start_value'].stdout,
                stdout=subprocess.PIPE)
        log.info(' '.join([REMOVE_INDEXES, '|']))
        stdout = procs['remove_indexes'].stdout

    elif databases == [mysql_lib.METADATA_DB]:
        # If we are backing up the metadata db, we don't want to nuke
        # existing data, but need to copy existing data over for rbr
        # to work.
        procs['create_if_not_exists_sed'] = subprocess.Popen(
                CREATE_IF_NOT_EXISTS_SED,
                shell=True,
                stdin=procs['mysqldump'].stdout,
                stdout=subprocess.PIPE)
        log.info(' '.join([CREATE_IF_NOT_EXISTS_SED, '|']))
        stdout = procs['create_if_not_exists_sed'].stdout
    else:
        stdout = procs['mysqldump'].stdout

    log.info(' '.join(PIGZ + ['|']))
    procs['pigz'] = subprocess.Popen(PIGZ,
                                     stdin=stdout,
                                     stdout=subprocess.PIPE)
    key = safe_uploader.safe_upload(
            precursor_procs=procs,
            stdin=procs['pigz'].stdout,
            bucket=environment_specific.BACKUP_BUCKET_UPLOAD_MAP[host_utils.get_iam_role()],
            key=backup_file,
            verbose=True)

    log.info('mysqldump was successful')
    return key
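
Example #13 pipes the mysqldump stream through several filters (INNODB_TO_BLACKHOLE, REMOVE_AUTO_INC_COL_ARG, REMOVE_AUTO_INC_START_VALUE, REMOVE_INDEXES, CREATE_IF_NOT_EXISTS_SED) that are run with shell=True. Their definitions are repository constants not reproduced above; since each one rewrites the SQL text as it streams by, they are presumably sed one-liners along the lines below, shown purely to illustrate the shape rather than the actual values.

# Illustration of shape only: invented stand-ins for two of the shell filter
# constants used in example #13; the real values are defined in the
# repository's backup module and may differ.
INNODB_TO_BLACKHOLE = "sed 's/ENGINE=InnoDB/ENGINE=BLACKHOLE/'"
CREATE_IF_NOT_EXISTS_SED = "sed 's/CREATE TABLE/CREATE TABLE IF NOT EXISTS/'"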