Example #1
def find_gtid_for_timestamp(instance, timestamp):
    """ Find the GTID for the supplied timestamp on the specified
        instance. 

    Args:
        instance: a HostAddr object
        timestamp: the timestamp to search for
    Returns:
        If the instance doesn't support GTID, return None.
        If no GTID was found in the binlogs for the supplied
        timestamp, return a blank string.
        Otherwise, return a GTID.
    """
    global_vars = mysql_lib.get_global_variables(instance)

    # we are not generating GTIDs / no GTID support
    if (global_vars['gtid_mode'] == 'OFF' or
            global_vars['gtid_deployment_step'] == 'ON'):
        log.warning('This replica set does not currently support GTID')
        return None

    # go in reverse order, because odds are that the log we want
    # is closer to the end than the beginning.
    master_logs = list(reversed(mysql_lib.get_master_logs(instance)))

    (username, password) = mysql_lib.get_mysql_user_for_role('replication')
    for binlog in master_logs:
        # if the timestamp we want is prior to the first entry in the
        # binlog, it can't possibly be in there.
        log_start = get_binlog_start(binlog['Log_name'], instance, username,
                                     password)
        if timestamp < log_start:
            log.debug('Skipping binlog {bl} because desired {ts} < '
                      '{ls}'.format(bl=binlog['Log_name'],
                                    ts=timestamp,
                                    ls=log_start))
            continue

        # The binlog that we end up checking, if we check one at all,
        # is the first one that could possibly contain our GTID, so
        # if it isn't in this one, we're not going to find anything.
        log.debug('Checking for matching GTID in {}'.format(
            binlog['Log_name']))
        gtid = check_one_binlog(timestamp, binlog['Log_name'], instance,
                                username, password)
        if gtid:
            return gtid
        else:
            break

    log.warning("No matching GTID was found for that timestamp.")
    return ''
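
For context, a minimal usage sketch of find_gtid_for_timestamp (the host, port, and timestamp below are placeholders, and the same module context with host_utils and log is assumed):

# Hypothetical usage sketch; the instance address and timestamp are illustrative.
instance = host_utils.HostAddr('db-example-001:3306')
gtid = find_gtid_for_timestamp(instance, '2016-01-15 12:00:00')
if gtid is None:
    log.info('GTID is not enabled on this replica set')
elif gtid == '':
    log.info('No GTID was found in the binlogs for that timestamp')
else:
    log.info('Found GTID: {g}'.format(g=gtid))
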
Example #2
def archive_mysql_binlogs(port, dry_run):
    """ Flush logs and upload all binary logs that don't exist to s3

    Arguments:
    port - Port of the MySQL instance on which to act
    dry_run - Display output but do not upload
    """
    binlog_rotator.rotate_binlogs_if_needed(port, dry_run)
    zk = host_utils.MysqlZookeeper()
    instance = host_utils.HostAddr(':'.join((host_utils.HOSTNAME,
                                             str(port))))

    if zk.get_replica_set_from_instance(instance)[0] is None:
        log.info('Instance is not in production, exiting')
        return

    lock_handle = None
    ensure_binlog_archiving_table_sanity(instance)
    try:
        log.info('Taking binlog archiver lock')
        lock_handle = host_utils.take_flock_lock(BINLOG_LOCK_FILE)
        log_bin_dir = host_utils.get_cnf_setting('log_bin', port)
        bin_logs = mysql_lib.get_master_logs(instance)
        logged_uploads = get_logged_binlog_uploads(instance)
        for binlog in bin_logs[:-1]:
            err_count = 0
            local_file = os.path.join(os.path.dirname(log_bin_dir),
                                      binlog['Log_name'])
            if already_uploaded(instance, local_file, logged_uploads):
                continue
            success = False
            while not success:
                try:
                    upload_binlog(instance, local_file, dry_run)
                    success = True
                except Exception:
                    if err_count > MAX_ERRORS:
                        log.error('Error count > MAX_ERRORS. '
                                  'Aborting :(')
                        raise

                    log.error('error: {e}'.format(e=traceback.format_exc()))
                    err_count = err_count + 1
                    time.sleep(err_count*2)
        log.info('Archiving complete')
    finally:
        if lock_handle:
            log.info('Releasing lock')
            host_utils.release_flock_lock(lock_handle)
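
The upload loop above retries each failed upload with a growing sleep until MAX_ERRORS is exceeded. A standalone sketch of that retry pattern, using hypothetical names, could look like this:

import time
import traceback

def retry_with_backoff(action, max_errors, logger):
    """ Sketch only: keep calling action() until it succeeds, sleeping
        longer after each failure, and re-raise once max_errors is
        exceeded. Mirrors the inline loop in the example above.
    """
    err_count = 0
    while True:
        try:
            return action()
        except Exception:
            if err_count > max_errors:
                logger.error('Error count > max_errors. Aborting :(')
                raise
            logger.error('error: {e}'.format(e=traceback.format_exc()))
            err_count = err_count + 1
            time.sleep(err_count * 2)

# e.g. retry_with_backoff(lambda: upload_binlog(instance, local_file, dry_run),
#                         MAX_ERRORS, log)
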
Example #3
def archive_mysql_binlogs(port, dry_run):
    """ Flush logs and upload all binary logs that don't exist to s3

    Arguments:
    port - Port of the MySQL instance on which to act
    dry_run - Display output but do not upload
    """
    lock_handle = None
    try:
        log.info('Taking binlog archiver lock')
        lock_handle = host_utils.take_flock_lock(BINLOG_LOCK_FILE)

        log_bin_dir = host_utils.get_cnf_setting('log_bin', port)
        instance = host_utils.HostAddr(':'.join(
            (host_utils.HOSTNAME, str(port))))
        s3_conn = boto.connect_s3()
        bucket = s3_conn.get_bucket(environment_specific.S3_BUCKET,
                                    validate=False)

        mysql_conn = mysql_lib.connect_mysql(instance)
        bin_logs = mysql_lib.get_master_logs(mysql_conn)
        prefix = os.path.join(BINLOG_S3_DIR, instance.hostname,
                              str(instance.port))
        uploaded_binlogs = bucket.get_all_keys(prefix=prefix)

        for binlog in bin_logs[:-1]:
            compressed_file = ''.join((binlog['Log_name'], '.gz'))
            local_file = os.path.join(os.path.dirname(log_bin_dir),
                                      binlog['Log_name'])
            local_file_gz = os.path.join(TMP_DIR, compressed_file)
            remote_path = os.path.join(BINLOG_S3_DIR, instance.hostname,
                                       str(instance.port), compressed_file)
            log.info(
                'Local file {local_file} will compress to {local_file_gz} '
                'and upload to {remote_path}'.format(
                    local_file=local_file,
                    local_file_gz=local_file_gz,
                    remote_path=remote_path))

            new_key = boto.s3.key.Key(bucket)
            new_key.key = remote_path
            if already_uploaded(remote_path, uploaded_binlogs):
                log.info('Binlog has already been uploaded')
                continue

            if dry_run:
                log.info('In dry_run mode, skipping compression and upload')
                continue

            log.info('Compressing file')
            # binlogs are binary; open in binary mode and let the context
            # managers close both files even if compression fails.
            with open(local_file, 'rb') as f_in:
                with gzip.open(local_file_gz, 'wb', compresslevel=2) as f_out:
                    f_out.writelines(f_in)

            log.info('Uploading file')
            new_key.set_contents_from_filename(local_file_gz)
            log.info('Deleting local compressed file')
            os.remove(local_file_gz)
        log.info('Archiving complete')
    finally:
        if lock_handle:
            log.info('Releasing lock')
            host_utils.release_flock_lock(lock_handle)
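
already_uploaded is defined elsewhere in this module. A plausible sketch, assuming it only compares the target S3 path against the key names returned by bucket.get_all_keys(), might be:

def already_uploaded(remote_path, uploaded_binlogs):
    """ Sketch under that assumption: return True if remote_path matches
        the name of a key already present in the bucket. The real helper
        may also compare sizes or checksums.
    """
    for key in uploaded_binlogs:
        if key.name == remote_path:
            return True
    return False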