Example #1
def get_s3_backup(instance, date, backup_type):
    """ Find xbstream file for an instance on s3 on a given day

    Args:
    instance - A hostaddr object for the desired instance
    date - Desired date of restore file
    backup_type - xbstream or mysqldump

    Returns:
    A list of s3 keys
    """
    backup_keys = list()
    prefixes = set()
    try:
        replica_set = instance.get_zk_replica_set()[0]
    except Exception:
        log.debug('Instance {} is not in zk'.format(instance))
        replica_set = None

    if replica_set:
        prefixes.add(BACKUP_SEARCH_PREFIX.format(
                         retention_policy=environment_specific.get_backup_retention_policy(instance),
                         backup_type=backup_type,
                         replica_set=replica_set,
                         hostname=instance.hostname,
                         port=instance.port,
                         timestamp=date))

    prefixes.add(BACKUP_SEARCH_INITIAL_PREFIX.format(
                     backup_type=backup_type,
                     hostname=instance.hostname,
                     port=instance.port,
                     timestamp=date))

    conn = boto.connect_s3()
    for bucket in environment_specific.BACKUP_BUCKET_DOWNLOAD_MAP[host_utils.get_iam_role()]:
        bucket_conn = conn.get_bucket(bucket, validate=False)
        for prefix in prefixes:
            log.info('Looking for backup with prefix '
                     's3://{bucket}/{prefix}'.format(bucket=bucket,
                                                     prefix=prefix))
            bucket_items = bucket_conn.list(prefix=prefix)
            for key in bucket_items:
                # skip objects too small to be a valid backup
                if key.size <= MINIMUM_VALID_BACKUP_SIZE_BYTES:
                    continue

                backup_keys.append(key)

    if not backup_keys:
        msg = ''.join([NO_BACKUP, str(instance)])
        raise Exception(msg)
    return backup_keys
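
A hypothetical call site for the function above (the HostAddr constructor, the date format, and the 'xbstream' literal are assumptions for illustration, not confirmed by this example):

instance = host_utils.HostAddr('testdb-1:3306')  # hypothetical host
for key in get_s3_backup(instance, '2016-01-01', 'xbstream'):
    # each entry is a boto Key; name and size come from the S3 listing
    log.info('candidate backup: s3://{b}/{k} ({s} bytes)'.format(
        b=key.bucket.name, k=key.name, s=key.size))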
Example #2
def logical_backup_instance(instance, timestamp, initial_build):
    """ Take a compressed mysqldump backup

    Args:
    instance - A hostaddr instance
    timestamp - A timestamp which will be used to create the backup filename
    initial_build - Boolean, if this is being created right after the server
                    was built

    Returns:
    A string of the path to the finished backup
    """
    backup_file = create_backup_file_name(instance, timestamp,
                                          initial_build,
                                          BACKUP_TYPE_LOGICAL)
    (dump_user,
     dump_pass) = mysql_lib.get_mysql_user_for_role(USER_ROLE_MYSQLDUMP)
    dump_cmd = MYSQLDUMP_CMD.format(dump_user=dump_user,
                                    dump_pass=dump_pass,
                                    host=instance.hostname,
                                    port=instance.port).split()

    procs = dict()
    try:
        log.info(' '.join(dump_cmd + ['|']))
        procs['mysqldump'] = subprocess.Popen(dump_cmd,
                                              stdout=subprocess.PIPE)
        procs['pv'] = create_pv_proc(procs['mysqldump'].stdout)
        log.info(' '.join(PIGZ + ['|']))
        procs['pigz'] = subprocess.Popen(PIGZ,
                                         stdin=procs['pv'].stdout,
                                         stdout=subprocess.PIPE)
        log.info('Uploading backup to s3://{buk}/{key}'
                 ''.format(buk=environment_specific.BACKUP_BUCKET_UPLOAD_MAP[host_utils.get_iam_role()],
                           key=backup_file))
        safe_uploader.safe_upload(precursor_procs=procs,
                                  stdin=procs['pigz'].stdout,
                                  bucket=environment_specific.BACKUP_BUCKET_UPLOAD_MAP[host_utils.get_iam_role()],
                                  key=backup_file)
        log.info('mysqldump was successful')
        return backup_file
    except:
        # tear down the whole upload pipeline on any failure, then re-raise
        safe_uploader.kill_precursor_procs(procs)
        raise
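
Examples 2 and 3 both pipe the dump through a create_pv_proc helper that is not shown here. A minimal sketch of such a helper, assuming the pv utility is installed (the exact flags are an assumption):

import subprocess

def create_pv_proc(stdin):
    # pv copies stdin to stdout unchanged while reporting throughput
    # (bytes transferred, elapsed time, rate) on stderr
    return subprocess.Popen(['pv', '-b', '-t', '-r'],
                            stdin=stdin,
                            stdout=subprocess.PIPE)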
Example #3
def xtrabackup_instance(instance, timestamp, initial_build):
    """ Take a compressed mysql backup

    Args:
    instance - A hostaddr instance
    timestamp - A timestamp which will be used to create the backup filename
    initial_build - Boolean, if this is being created right after the server
                    was built

    Returns:
    A string of the path to the finished backup
    """
    # Prevent issues with too many open files
    resource.setrlimit(resource.RLIMIT_NOFILE, (131072, 131072))
    backup_file = create_backup_file_name(instance, timestamp,
                                          initial_build,
                                          BACKUP_TYPE_XBSTREAM)

    tmp_log = os.path.join(environment_specific.RAID_MOUNT,
                           'log', 'xtrabackup_{ts}.log'.format(
                            ts=time.strftime('%Y-%m-%d-%H:%M:%S', timestamp)))
    tmp_log_handle = open(tmp_log, 'w')
    procs = dict()
    try:
        cmd = create_xtrabackup_command(instance, timestamp, tmp_log)
        log.info(' '.join(cmd + [' 2> ', tmp_log, ' | ']))
        procs['xtrabackup'] = subprocess.Popen(cmd,
                                               stdout=subprocess.PIPE,
                                               stderr=tmp_log_handle)
        procs['pv'] = create_pv_proc(procs['xtrabackup'].stdout)
        log.info('Uploading backup to s3://{buk}/{loc}'
                 ''.format(buk=environment_specific.BACKUP_BUCKET_UPLOAD_MAP[host_utils.get_iam_role()],
                           loc=backup_file))
        safe_uploader.safe_upload(precursor_procs=procs,
                                  bucket=environment_specific.BACKUP_BUCKET_UPLOAD_MAP[host_utils.get_iam_role()],
                                  stdin=procs['pv'].stdout,
                                  key=backup_file,
                                  check_func=check_xtrabackup_log,
                                  check_arg=tmp_log)
        log.info('Xtrabackup was successful')
        return backup_file
    except:
        # tear down the whole upload pipeline on any failure, then re-raise
        safe_uploader.kill_precursor_procs(procs)
        raise
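
The check_func/check_arg pair presumably lets safe_upload validate the backup before finalizing it. The real check_xtrabackup_log is not shown; a minimal sketch, relying on the fact that xtrabackup ends its log with 'completed OK!' on success:

def check_xtrabackup_log(tmp_log):
    # xtrabackup prints 'completed OK!' as the last log line on success
    with open(tmp_log) as log_handle:
        if 'completed OK!' not in log_handle.read():
            raise Exception('xtrabackup failed, see {log}'.format(log=tmp_log))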
Example #4
def logical_backup_instance(instance, timestamp, blackhole=False,
                            initial_build=False, databases=None):
    """ Take a compressed mysqldump backup

    Args:
        instance - A hostaddr instance
        timestamp - A timestamp which will be used to create the backup filename
        blackhole - Boolean, if set, back up DBs as blackhole tables
                    with no indexes or data
        initial_build - Boolean, if this is being created right after the server
                        was built
        databases - List, if set, back up only a subset of databases

    Returns:
        An S3 key of the backup.
    """
    zk = host_utils.MysqlZookeeper()
    try:
        replica_type = zk.get_replica_type_from_instance(instance)
    except Exception:
        # instance is not in production
        replica_type = host_utils.REPLICA_ROLE_MASTER

    arg_repl_coordinate = (ARG_MASTER_DATA
                           if replica_type == host_utils.REPLICA_ROLE_MASTER
                           else ARG_SLAVE_DATA)

    arg_no_data = ARG_NO_DATA if blackhole else ''
    if databases:
        backup_type = BACKUP_TYPE_PARTIAL_LOGICAL
        db_args = ARG_DATABASES.format(dbs=' '.join(databases))
    else:
        backup_type = BACKUP_TYPE_LOGICAL
        db_args = ARG_ALL_DATABASES

    arg_replace = ARG_REPLACE if databases == [mysql_lib.METADATA_DB] else ''
    dump_user, dump_pass = mysql_lib.get_mysql_user_for_role(USER_ROLE_MYSQLDUMP)

    dump_cmd = MYSQLDUMP_CMD.format(dump_user=dump_user,
                                    dump_pass=dump_pass,
                                    host=instance.hostname,
                                    port=instance.port,
                                    db_args=db_args,
                                    net_buffer_length=MAX_INSERT_LENGTH,
                                    arg_repl_coordinate=arg_repl_coordinate,
                                    arg_replace=arg_replace,
                                    arg_no_data=arg_no_data).split()

    backup_file = create_backup_file_name(instance, timestamp,
                                          initial_build,
                                          backup_type)
    procs = dict()
    log.info(' '.join(dump_cmd + ['|']))
    procs['mysqldump'] = subprocess.Popen(dump_cmd,
                                          stdout=subprocess.PIPE)
    if blackhole:
        procs['innodb_to_blackhole'] = subprocess.Popen(
                INNODB_TO_BLACKHOLE,
                shell=True,
                stdin=procs['mysqldump'].stdout,
                stdout=subprocess.PIPE)
        log.info(' '.join([INNODB_TO_BLACKHOLE, '|']))

        # Blackhole only supports index keys up to 1k bytes, which is
        # shorter than InnoDB's limit. We therefore remove indexes and
        # auto_inc columns.
        procs['remove_auto_inc_col_arg'] = subprocess.Popen(
                REMOVE_AUTO_INC_COL_ARG,
                shell=True,
                stdin=procs['innodb_to_blackhole'].stdout,
                stdout=subprocess.PIPE)
        log.info(' '.join([REMOVE_AUTO_INC_COL_ARG, '|']))

        procs['remove_auto_inc_start_value'] = subprocess.Popen(
                REMOVE_AUTO_INC_START_VALUE,
                shell=True,
                stdin=procs['remove_auto_inc_col_arg'].stdout,
                stdout=subprocess.PIPE)
        log.info(' '.join([REMOVE_AUTO_INC_START_VALUE, '|']))

        procs['remove_indexes'] = subprocess.Popen(
                REMOVE_INDEXES,
                shell=True,
                stdin=procs['remove_auto_inc_start_value'].stdout,
                stdout=subprocess.PIPE)
        log.info(' '.join([REMOVE_INDEXES, '|']))
        stdout = procs['remove_indexes'].stdout

    elif databases == [mysql_lib.METADATA_DB]:
        # If we are backing up the metadata db, we don't want to nuke
        # existing data, but need to copy existing data over for rbr
        # to work.
        procs['create_if_not_exists_sed'] = subprocess.Popen(
                CREATE_IF_NOT_EXISTS_SED,
                shell=True,
                stdin=procs['mysqldump'].stdout,
                stdout=subprocess.PIPE)
        log.info(' '.join([CREATE_IF_NOT_EXISTS_SED, '|']))
        stdout = procs['create_if_not_exists_sed'].stdout
    else:
        stdout = procs['mysqldump'].stdout

    log.info(' '.join(PIGZ + ['|']))
    procs['pigz'] = subprocess.Popen(PIGZ,
                                     stdin=stdout,
                                     stdout=subprocess.PIPE)
    key = safe_uploader.safe_upload(
            precursor_procs=procs,
            stdin=procs['pigz'].stdout,
            bucket=environment_specific.BACKUP_BUCKET_UPLOAD_MAP[host_utils.get_iam_role()],
            key=backup_file,
            verbose=True)

    log.info('mysqldump was successful')
    return key
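
A hypothetical call site for this extended version (the HostAddr construction is an assumption; the timestamp is a time.struct_time, matching the time.strftime usage in Example 3):

import time

instance = host_utils.HostAddr('testdb-1:3306')  # hypothetical host
# full logical backup of all databases
key = logical_backup_instance(instance, time.localtime())
# schema-only backup rendered as blackhole tables (no data, no indexes)
key = logical_backup_instance(instance, time.localtime(), blackhole=True)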