Example no. 1
0
def mysql_backup(instance,
                 backup_type=backup.BACKUP_TYPE_XBSTREAM,
                 initial_build=False):
    """ Run a file based backup on a supplied local instance

    Args:
    instance - A hostaddr object
    backup_type - backup.BACKUP_TYPE_LOGICAL or backup.BACKUP_TYPE_XBSTREAM
    initial_build - Boolean, if this is being created right after the server
                    was built
    """
    log.info('Confirming sanity of replication (if applicable)')
    zk = host_utils.MysqlZookeeper()
    try:
        (_, replica_type) = zk.get_replica_set_from_instance(instance)
    # Was a bare "except:", which also swallows SystemExit and
    # KeyboardInterrupt; narrow it to Exception.
    except Exception:
        # instance is not in production
        replica_type = None

    # Only non-master replicas need a healthy replication stream checked
    # before we back them up.
    if replica_type and replica_type != host_utils.REPLICA_ROLE_MASTER:
        mysql_lib.assert_replication_sanity(instance)

    log.info('Logging initial status to mysqlops')
    start_timestamp = time.localtime()
    lock_handle = None
    # backup_id is falsy if the central log DB could not be written to;
    # the final-status update below is skipped in that case.
    backup_id = mysql_lib.start_backup_log(instance, backup_type,
                                           start_timestamp)

    # Take a lock to prevent multiple backups from running concurrently
    try:
        log.info('Taking backup lock')
        lock_handle = host_utils.take_flock_lock(backup.BACKUP_LOCK_FILE)

        # Actually run the backup
        log.info('Running backup')
        if backup_type == backup.BACKUP_TYPE_XBSTREAM:
            backup_file = backup.xtrabackup_instance(instance, start_timestamp,
                                                     initial_build)
        elif backup_type == backup.BACKUP_TYPE_LOGICAL:
            backup_file = backup.logical_backup_instance(
                instance, start_timestamp, initial_build)
        else:
            raise Exception('Unsupported backup type {backup_type}'
                            ''.format(backup_type=backup_type))
    finally:
        # Release the flock even if the backup raised.
        if lock_handle:
            log.info('Releasing lock')
            host_utils.release_flock_lock(lock_handle)

    # Update database with additional info now that backup is done.
    if backup_id:
        log.info("Updating database log entry with final backup info")
        mysql_lib.finalize_backup_log(backup_id, backup_file)
    else:
        log.info("The backup is complete, but we were not able to "
                 "write to the central log DB.")
Example no. 2
0
def mysql_backup(instance, backup_type=backup.BACKUP_TYPE_XBSTREAM):
    """ Run a file based backup on a supplied local instance

    Args:
    instance - A hostaddr object
    """
    log.info('Logging initial status to mysqlops')
    started_at = time.localtime()
    flock = None
    # Falsy if the central log DB could not be written to.
    log_row = mysql_lib.start_backup_log(instance, backup_type, started_at)

    # Serialize backups on this host behind a flock.
    try:
        log.info('Taking backup lock')
        flock = host_utils.take_flock_lock(backup.BACKUP_LOCK_FILE)

        log.info('Cleaning up old backups')
        # skip_lock because we already hold the backup flock.
        purge_mysql_backups.purge_mysql_backups(instance, skip_lock=True)

        log.info('Running backup')
        if backup_type == backup.BACKUP_TYPE_LOGICAL:
            produced_file = backup.logical_backup_instance(instance,
                                                           started_at)
        elif backup_type == backup.BACKUP_TYPE_XBSTREAM:
            produced_file = backup.xtrabackup_instance(instance, started_at)
        else:
            raise Exception(
                'Unsupported backup type {backup_type}'.format(
                    backup_type=backup_type))

        log.info('Uploading file to s3')
        backup.s3_upload(produced_file)

    finally:
        # Always drop the flock, even on failure.
        if flock:
            log.info('Releasing lock')
            host_utils.release_flock_lock(flock)

    # Record the final backup details now that the backup succeeded.
    if log_row:
        log.info("Updating database log entry with final backup info")
        mysql_lib.finalize_backup_log(log_row,
                                      produced_file,
                                      size=os.stat(produced_file).st_size)
    else:
        log.info("The backup is complete, but we were not able to "
                 "write to the central log DB.")

    # One more purge pass after the upload.
    log.info('Purging backups again')
    purge_mysql_backups.purge_mysql_backups(instance)
Example no. 3
0
def mysql_backup(instance, backup_type=backup.BACKUP_TYPE_XBSTREAM,
                 initial_build=False):
    """ Run a file based backup on a supplied local instance

    Args:
    instance - A hostaddr object
    backup_type - backup.BACKUP_TYPE_LOGICAL or backup.BACKUP_TYPE_XBSTREAM
    initial_build - Boolean, if this is being created right after the server
                    was built
    """
    log.info('Confirming sanity of replication (if applicable)')
    zk = host_utils.MysqlZookeeper()
    try:
        (_, replica_type) = zk.get_replica_set_from_instance(instance)
    # Was a bare "except:", which also swallows SystemExit and
    # KeyboardInterrupt; narrow it to Exception.
    except Exception:
        # instance is not in production
        replica_type = None

    # Only non-master replicas need their replication stream verified.
    if replica_type and replica_type != host_utils.REPLICA_ROLE_MASTER:
        mysql_lib.assert_replication_sanity(instance)

    log.info('Logging initial status to mysqlops')
    start_timestamp = time.localtime()
    lock_handle = None
    # backup_id is falsy if the central log DB could not be written to.
    backup_id = mysql_lib.start_backup_log(instance, backup_type,
                                           start_timestamp)

    # Take a lock to prevent multiple backups from running concurrently
    try:
        log.info('Taking backup lock')
        lock_handle = host_utils.take_flock_lock(backup.BACKUP_LOCK_FILE)

        # Actually run the backup
        log.info('Running backup')
        if backup_type == backup.BACKUP_TYPE_XBSTREAM:
            backup_file = backup.xtrabackup_instance(instance, start_timestamp,
                                                     initial_build)
        elif backup_type == backup.BACKUP_TYPE_LOGICAL:
            backup_file = backup.logical_backup_instance(instance,
                                                         start_timestamp,
                                                         initial_build)
        else:
            raise Exception('Unsupported backup type {backup_type}'
                            ''.format(backup_type=backup_type))
    finally:
        # Release the flock even if the backup raised.
        if lock_handle:
            log.info('Releasing lock')
            host_utils.release_flock_lock(lock_handle)

    # Update database with additional info now that backup is done.
    if backup_id:
        log.info("Updating database log entry with final backup info")
        mysql_lib.finalize_backup_log(backup_id, backup_file)
    else:
        log.info("The backup is complete, but we were not able to "
                 "write to the central log DB.")
Example no. 4
0
def mysql_backup(instance, backup_type=backup.BACKUP_TYPE_XBSTREAM):
    """ Run a file based backup on a supplied local instance

    Args:
    instance - A hostaddr object
    """
    log.info("Logging initial status to mysqlops")
    started_at = time.localtime()
    flock = None
    # Falsy if the central log DB could not be written to.
    log_row = mysql_lib.start_backup_log(instance, backup_type, started_at)

    # Serialize backups on this host behind a flock.
    try:
        log.info("Taking backup lock")
        flock = host_utils.take_flock_lock(backup.BACKUP_LOCK_FILE)

        log.info("Cleaning up old backups")
        # skip_lock because we already hold the backup flock.
        purge_mysql_backups.purge_mysql_backups(instance, skip_lock=True)

        log.info("Running backup")
        if backup_type == backup.BACKUP_TYPE_LOGICAL:
            produced_file = backup.logical_backup_instance(instance, started_at)
        elif backup_type == backup.BACKUP_TYPE_XBSTREAM:
            produced_file = backup.xtrabackup_instance(instance, started_at)
        else:
            raise Exception(
                "Unsupported backup type {backup_type}".format(
                    backup_type=backup_type))

        log.info("Uploading file to s3")
        backup.s3_upload(produced_file)

    finally:
        # Always drop the flock, even on failure.
        if flock:
            log.info("Releasing lock")
            host_utils.release_flock_lock(flock)

    # Record the final backup details now that the backup succeeded.
    if log_row:
        log.info("Updating database log entry with final backup info")
        mysql_lib.finalize_backup_log(
            log_row, produced_file, size=os.stat(produced_file).st_size)
    else:
        log.info("The backup is complete, but we were not able to "
                 "write to the central log DB.")

    # One more purge pass after the upload.
    log.info("Purging backups again")
    purge_mysql_backups.purge_mysql_backups(instance)
def xtrabackup_backup_instance(instance):
    """ Run a file based backup on a supplied local instance

    Reporting is best-effort: the backup proceeds even when the central
    mysqlops reporting DB cannot be reached.

    Args:
    instance - A hostaddr object
    """
    # Timestamp recorded in the reporting DB's 'started' column.
    starttime_sql = time.strftime('%Y-%m-%d %H:%M:%S')

    log.info('Logging initial status to mysqlops')
    # row_id stays None if the initial log entry could not be written;
    # the final-status UPDATE below is skipped in that case.
    row_id = None
    lock_handle = None
    try:
        reporting_conn = mysql_lib.get_mysqlops_connections()
        cursor = reporting_conn.cursor()

        # Parameterized INSERT; values are bound via the metadata dict.
        sql = ("INSERT INTO mysqlops.mysql_backups "
               "SET "
               "hostname = %(hostname)s, "
               "port = %(port)s, "
               "started = %(started)s, "
               "backup_type = 'xbstream' ")

        metadata = {'hostname': instance.hostname,
                    'port': instance.port,
                    'started': starttime_sql}

        cursor.execute(sql, metadata)
        row_id = cursor.lastrowid
        reporting_conn.commit()
    except Exception as e:
        # Reporting failures are non-fatal by design; warn and carry on.
        log.warning("Unable to write log entry to "
                    "mysqlopsdb001: {e}".format(e=e))
        log.warning("However, we will attempt to continue with the backup.")

    # Take a lock to prevent multiple backups from running concurrently
    try:
        log.info('Taking backup lock')
        lock_handle = host_utils.take_flock_lock(backup.BACKUP_LOCK_FILE)

        log.info('Cleaning up old backups')
        # skip_lock because we already hold the backup flock.
        purge_mysql_backups.purge_mysql_backups(instance, skip_lock=True)

        # Actually run the backup
        log.info('Running backup')
        backup_file = backup.xtrabackup_instance(instance)
        finished = time.strftime('%Y-%m-%d %H:%M:%S')

        # Upload file to s3
        log.info('Uploading file to s3')
        backup.s3_upload(backup_file)

        # Update database with additional info now that backup is done.
        if row_id is None:
            log.info("The backup is complete, but we were not able to "
                     "write to the central log DB.")
        else:
            log.info("Updating database log entry with final backup info")
            try:
                sql = ("UPDATE mysqlops.mysql_backups "
                       "SET "
                       "filename = %(filename)s, "
                       "finished = %(finished)s, "
                       "size = %(size)s "
                       "WHERE id = %(id)s")
                metadata = {'filename': backup_file,
                            'finished': finished,
                            'size': os.stat(backup_file).st_size,
                            'id': row_id}

                cursor.execute(sql, metadata)
                reporting_conn.commit()
                # NOTE(review): reporting_conn is only closed on this
                # success path; a failed UPDATE leaks the connection.
                reporting_conn.close()
            except Exception as e:
                # Final-status update is also best-effort.
                log.warning("Unable to update mysqlopsdb with "
                            "backup status: {e}".format(e=e))

        # Run purge again, mostly for the chmod.
        purge_mysql_backups.purge_mysql_backups(instance, skip_lock=True)
    finally:
        # Always release the flock, even if the backup raised.
        if lock_handle:
            log.info('Releasing lock')
            host_utils.release_flock_lock(lock_handle)
Example no. 6
0
def mysql_backup(instance,
                 backup_type=backup.BACKUP_TYPE_XBSTREAM,
                 initial_build=False,
                 lock_handle=None):
    """ Run a file based backup on a supplied local instance

    Args:
    instance - A hostaddr object
    backup_type - backup.BACKUP_TYPE_LOGICAL or backup.BACKUP_TYPE_XBSTREAM
    initial_build - Boolean, if this is being created right after the server
                    was built
    lock_handle - A lock handle, if we have one from the caller.
    """

    # Operators can drop a skip file to suppress scheduled xtrabackup runs.
    if backup_type == backup.BACKUP_TYPE_XBSTREAM and \
            os.path.isfile(backup.XTRABACKUP_SKIP_FILE):
        log.info('Found {}. Skipping xtrabackup '
                 'run.'.format(backup.XTRABACKUP_SKIP_FILE))
        return

    log.info('Confirming sanity of replication (if applicable)')
    zk = host_utils.MysqlZookeeper()
    try:
        replica_type = zk.get_replica_type_from_instance(instance)
    # Was a bare "except:", which also swallows SystemExit and
    # KeyboardInterrupt; narrow it to Exception.
    except Exception:
        # instance is not in production
        replica_type = None

    # Only non-master replicas need a healthy replication stream checked.
    if replica_type and replica_type != host_utils.REPLICA_ROLE_MASTER:
        mysql_lib.assert_replication_sanity(instance)

    log.info('Logging initial status to mysqlops')
    start_timestamp = time.localtime()
    # backup_id is falsy if the central log DB could not be written to.
    backup_id = mysql_lib.start_backup_log(instance, backup_type,
                                           start_timestamp)

    # Take a lock to prevent multiple backups from running concurrently
    # unless we already have a lock from the caller.  This means we
    # also don't have to release the lock at the end; either we
    # exit the script entirely, and it gets cleaned up or the caller
    # maintains it.
    if lock_handle is None:
        log.info('Taking backup lock')
        lock_handle = host_utils.bind_lock_socket(
            backup.STD_BACKUP_LOCK_SOCKET)
    else:
        log.info('Not acquiring backup lock, we already have one.')

    # Actually run the backup
    log.info('Running backup')
    if backup_type == backup.BACKUP_TYPE_XBSTREAM:
        backup_file = backup.xtrabackup_instance(instance, start_timestamp,
                                                 initial_build)
    elif backup_type == backup.BACKUP_TYPE_LOGICAL:
        # We don't need a backup-skip file here since this isn't
        # regularly scheduled.
        backup_file = backup.logical_backup_instance(instance, start_timestamp,
                                                     initial_build)
    else:
        raise Exception('Unsupported backup type {}'.format(backup_type))

    # Update database with additional info now that backup is done.
    if backup_id:
        log.info("Updating database log entry with final backup info")
        mysql_lib.finalize_backup_log(backup_id, backup_file)
    else:
        log.info("The backup is complete, but we were not able to "
                 "write to the central log DB.")
def xtrabackup_backup_instance(instance):
    """ Run a file based backup on a supplied local instance

    Reporting is best-effort: the backup proceeds even when the central
    mysqlops reporting DB cannot be reached.

    Args:
    instance - A hostaddr object
    """
    # Timestamp recorded in the reporting DB's 'started' column.
    starttime_sql = time.strftime('%Y-%m-%d %H:%M:%S')

    log.info('Logging initial status to mysqlops')
    # row_id stays None if the initial log entry could not be written;
    # the final-status UPDATE below is skipped in that case.
    row_id = None
    lock_handle = None
    try:
        reporting_conn = mysql_lib.get_mysqlops_connections()
        cursor = reporting_conn.cursor()

        # Parameterized INSERT; values are bound via the metadata dict.
        sql = ("INSERT INTO mysqlops.mysql_backups "
               "SET "
               "hostname = %(hostname)s, "
               "port = %(port)s, "
               "started = %(started)s, "
               "backup_type = 'xbstream' ")

        metadata = {
            'hostname': instance.hostname,
            'port': instance.port,
            'started': starttime_sql
        }

        cursor.execute(sql, metadata)
        row_id = cursor.lastrowid
        reporting_conn.commit()
    except Exception as e:
        # Reporting failures are non-fatal by design; warn and carry on.
        log.warning("Unable to write log entry to "
                    "mysqlopsdb001: {e}".format(e=e))
        log.warning("However, we will attempt to continue with the backup.")

    # Take a lock to prevent multiple backups from running concurrently
    try:
        log.info('Taking backup lock')
        lock_handle = host_utils.take_flock_lock(backup.BACKUP_LOCK_FILE)

        log.info('Cleaning up old backups')
        # skip_lock because we already hold the backup flock.
        purge_mysql_backups.purge_mysql_backups(instance, skip_lock=True)

        # Actually run the backup
        log.info('Running backup')
        backup_file = backup.xtrabackup_instance(instance)
        finished = time.strftime('%Y-%m-%d %H:%M:%S')

        # Upload file to s3
        log.info('Uploading file to s3')
        backup.s3_upload(backup_file)

        # Update database with additional info now that backup is done.
        if row_id is None:
            log.info("The backup is complete, but we were not able to "
                     "write to the central log DB.")
        else:
            log.info("Updating database log entry with final backup info")
            try:
                sql = ("UPDATE mysqlops.mysql_backups "
                       "SET "
                       "filename = %(filename)s, "
                       "finished = %(finished)s, "
                       "size = %(size)s "
                       "WHERE id = %(id)s")
                metadata = {
                    'filename': backup_file,
                    'finished': finished,
                    'size': os.stat(backup_file).st_size,
                    'id': row_id
                }

                cursor.execute(sql, metadata)
                reporting_conn.commit()
                # NOTE(review): reporting_conn is only closed on this
                # success path; a failed UPDATE leaks the connection.
                reporting_conn.close()
            except Exception as e:
                # Final-status update is also best-effort.
                log.warning("Unable to update mysqlopsdb with "
                            "backup status: {e}".format(e=e))

        # Run purge again, mostly for the chmod.
        purge_mysql_backups.purge_mysql_backups(instance, skip_lock=True)
    finally:
        # Always release the flock, even if the backup raised.
        if lock_handle:
            log.info('Releasing lock')
            host_utils.release_flock_lock(lock_handle)