Example #1
def upload_binlog(instance, binlog, dry_run):
    """ Upload a binlog file to s3

    Args:
    instance - a hostAddr object
    binlog - the full path to the binlog file
    dry_run - if set, do not actually upload a binlog
    """
    s3_upload_path = s3_binlog_path(instance, binlog)
    bucket = environment_specific.BACKUP_BUCKET_UPLOAD_MAP[host_utils.get_iam_role()]
    log.info('Local file {local_file} will be uploaded to s3://{buk}/{s3_upload_path}'
             ''.format(local_file=binlog,
                       buk=bucket,
                       s3_upload_path=s3_upload_path))

    if dry_run:
        log.info('In dry_run mode, skipping compression and upload')
        return

    procs = dict()
    try:
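        # Stream-compress the binlog with lzop to stdout so the compressed
        # data can be piped straight to the S3 uploader without being
        # staged on local disk.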
        procs['lzop'] = subprocess.Popen(['lzop', binlog, '--to-stdout'],
                                         stdout=subprocess.PIPE)
        safe_uploader.safe_upload(precursor_procs=procs,
                                  stdin=procs['lzop'].stdout,
                                  bucket=bucket,
                                  key=s3_upload_path)
    except:
        log.debug('In exception handling for failed binlog upload')
        safe_uploader.kill_precursor_procs(procs)
        raise
    log_binlog_upload(instance, binlog)
Example #2
def upload_binlog(instance, binlog, dry_run):
    """ Upload a binlog file to s3

    Args:
    instance - a hostAddr object
    binlog - the full path to the binlog file
    dry_run - if set, do not actually upload a binlog
    """
    s3_upload_path = s3_binlog_path(instance, binlog)
    log.info('Local file {local_file} will be uploaded to {s3_upload_path}'
             ''.format(local_file=binlog, s3_upload_path=s3_upload_path))

    if dry_run:
        log.info('In dry_run mode, skipping compression and upload')
        return

    procs = dict()
    try:
        procs['lzop'] = subprocess.Popen(['lzop', binlog, '--to-stdout'],
                                         stdout=subprocess.PIPE)
        safe_uploader.safe_upload(precursor_procs=procs,
                                  stdin=procs['lzop'].stdout,
                                  bucket=environment_specific.S3_BINLOG_BUCKET,
                                  key=s3_upload_path)
    except:
        log.debug('In exception handling for failed binlog upload')
        safe_uploader.kill_precursor_procs(procs)
        raise
    log_binlog_upload(instance, binlog)
Example #3
def xtrabackup_instance(instance, timestamp, initial_build):
    """ Take a compressed mysql backup

    Args:
    instance - A hostaddr instance
    timestamp - A timestamp which will be used to create the backup filename
    initial_build - Boolean, if this is being created right after the server
                    was built

    Returns:
    A string of the path to the finished backup
    """
    # Prevent issues with too many open files
    resource.setrlimit(resource.RLIMIT_NOFILE, (131072, 131072))
    backup_file = create_backup_file_name(instance, timestamp,
                                          initial_build,
                                          BACKUP_TYPE_XBSTREAM)

    tmp_log = os.path.join(environment_specific.RAID_MOUNT,
                           'log', 'xtrabackup_{ts}.log'.format(
                            ts=time.strftime('%Y-%m-%d-%H:%M:%S', timestamp)))
    tmp_log_handle = open(tmp_log, "w")
    procs = dict()
    try:
        cmd = create_xtrabackup_command(instance, timestamp, tmp_log)
        log.info(' '.join(cmd + [' 2> ', tmp_log, ' | ']))
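        # xtrabackup writes diagnostics to the temp log via stderr, while the
        # backup stream itself goes to stdout for pv and then the S3 uploader.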
        procs['xtrabackup'] = subprocess.Popen(cmd,
                                               stdout=subprocess.PIPE,
                                               stderr=tmp_log_handle)
        procs['pv'] = create_pv_proc(procs['xtrabackup'].stdout)
        log.info('Uploading backup to s3://{buk}/{loc}'
                 ''.format(buk=environment_specific.BACKUP_BUCKET_UPLOAD_MAP[host_utils.get_iam_role()],
                           loc=backup_file))
        safe_uploader.safe_upload(precursor_procs=procs,
                                  bucket=environment_specific.BACKUP_BUCKET_UPLOAD_MAP[host_utils.get_iam_role()],
                                  stdin=procs['pv'].stdout,
                                  key=backup_file,
                                  check_func=check_xtrabackup_log,
                                  check_arg=tmp_log)
        log.info('Xtrabackup was successful')
        return backup_file
    except:
        safe_uploader.kill_precursor_procs(procs)
        raise
Example #4
def logical_backup_instance(instance, timestamp, initial_build):
    """ Take a compressed mysqldump backup

    Args:
    instance - A hostaddr instance
    timestamp - A timestamp which will be used to create the backup filename
    initial_build - Boolean, if this is being created right after the server
                    was built

    Returns:
    A string of the path to the finished backup
    """
    backup_file = create_backup_file_name(instance, timestamp,
                                          initial_build,
                                          BACKUP_TYPE_LOGICAL)
    (dump_user,
     dump_pass) = mysql_lib.get_mysql_user_for_role(USER_ROLE_MYSQLDUMP)
    dump_cmd = MYSQLDUMP_CMD.format(dump_user=dump_user,
                                    dump_pass=dump_pass,
                                    host=instance.hostname,
                                    port=instance.port).split()

    procs = dict()
    try:
        log.info(' '.join(dump_cmd + ['|']))
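        # Pipeline: mysqldump -> pv (progress metering) -> pigz (compression),
        # with the compressed stream handed to the S3 uploader.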
        procs['mysqldump'] = subprocess.Popen(dump_cmd,
                                              stdout=subprocess.PIPE)
        procs['pv'] = create_pv_proc(procs['mysqldump'].stdout)
        log.info(' '.join(PIGZ + ['|']))
        procs['pigz'] = subprocess.Popen(PIGZ,
                                         stdin=procs['pv'].stdout,
                                         stdout=subprocess.PIPE)
        log.info('Uploading backup to s3://{buk}/{key}'
                 ''.format(buk=environment_specific.BACKUP_BUCKET_UPLOAD_MAP[host_utils.get_iam_role()],
                           key=backup_file))
        safe_uploader.safe_upload(precursor_procs=procs,
                                  stdin=procs['pigz'].stdout,
                                  bucket=environment_specific.BACKUP_BUCKET_UPLOAD_MAP[host_utils.get_iam_role()],
                                  key=backup_file)
        log.info('mysqldump was successful')
        return backup_file
    except:
        safe_uploader.kill_precursor_procs(procs)
        raise
Example #5
def xtrabackup_instance(instance, timestamp):
    """ Take a compressed mysql backup

    Args:
    instance - A hostaddr instance
    timestamp - A timestamp which will be used to create the backup filename

    Returns:
    A string of the path to the finished backup
    """
    # Prevent issues with too many open files
    resource.setrlimit(resource.RLIMIT_NOFILE, (131072, 131072))
    backup_file = BACKUP_FILE.format(hostname=instance.hostname,
                                     port=instance.port,
                                     timestamp=time.strftime(
                                         '%Y-%m-%d-%H:%M:%S', timestamp),
                                     backup_type=BACKUP_TYPE_XBSTREAM)

    tmp_log = os.path.join(
        environment_specific.RAID_MOUNT, 'log', ''.join([
            'xtrabackup_',
            time.strftime('%Y-%m-%d-%H:%M:%S', timestamp), '.log'
        ]))
    tmp_log_handle = open(tmp_log, "w")
    procs = dict()
    try:
        procs['xtrabackup'] = subprocess.Popen(create_xtrabackup_command(
            instance, timestamp, tmp_log),
                                               stdout=subprocess.PIPE,
                                               stderr=tmp_log_handle)
        log.info('Uploading backup to {buk}/{loc}'
                 ''.format(buk=environment_specific.S3_BUCKET,
                           loc=backup_file))
        safe_uploader.safe_upload(precursor_procs=procs,
                                  stdin=procs['xtrabackup'].stdout,
                                  bucket=environment_specific.S3_BUCKET,
                                  key=backup_file,
                                  check_func=check_xtrabackup_log,
                                  check_arg=tmp_log)
        log.info('Xtrabackup was successful')
    except:
        safe_uploader.kill_precursor_procs(procs)
        raise
Example #6
def xtrabackup_instance(instance, timestamp):
    """ Take a compressed mysql backup

    Args:
    instance - A hostaddr instance
    timestamp - A timestamp which will be used to create the backup filename

    Returns:
    A string of the path to the finished backup
    """
    # Prevent issues with too many open files
    resource.setrlimit(resource.RLIMIT_NOFILE, (131072, 131072))
    backup_file = BACKUP_FILE.format(hostname=instance.hostname,
                                     port=instance.port,
                                     timestamp=time.strftime('%Y-%m-%d-%H:%M:%S', timestamp),
                                     backup_type=BACKUP_TYPE_XBSTREAM)

    tmp_log = os.path.join(environment_specific.RAID_MOUNT,
                           'log',
                           ''.join(['xtrabackup_',
                                    time.strftime('%Y-%m-%d-%H:%M:%S', timestamp),
                                    '.log']))
    tmp_log_handle = open(tmp_log, "w")
    procs = dict()
    try:
        procs['xtrabackup'] = subprocess.Popen(create_xtrabackup_command(instance, timestamp, tmp_log),
                                               stdout=subprocess.PIPE,
                                               stderr=tmp_log_handle)
        log.info('Uploading backup to {buk}/{loc}'
                 ''.format(buk=environment_specific.S3_BUCKET,
                           loc=backup_file))
        safe_uploader.safe_upload(precursor_procs=procs,
                                  stdin=procs['xtrabackup'].stdout,
                                  bucket=environment_specific.S3_BUCKET,
                                  key=backup_file,
                                  check_func=check_xtrabackup_log,
                                  check_arg=tmp_log)
        log.info('Xtrabackup was successful')
    except:
        safe_uploader.kill_precursor_procs(procs)
        raise
Example #7
def logical_backup_instance(instance, timestamp):
    """ Take a compressed mysqldump backup

    Args:
    instance - A hostaddr instance
    timestamp - A timestamp which will be used to create the backup filename

    Returns:
    A string of the path to the finished backup
    """
    dump_file = BACKUP_FILE.format(hostname=instance.hostname,
                                   port=instance.port,
                                   timestamp=time.strftime('%Y-%m-%d-%H:%M:%S',
                                                           timestamp),
                                   backup_type=BACKUP_TYPE_LOGICAL)
    (dump_user,
     dump_pass) = mysql_lib.get_mysql_user_for_role(USER_ROLE_MYSQLDUMP)
    dump_cmd = MYSQLDUMP_CMD.format(dump_user=dump_user,
                                    dump_pass=dump_pass,
                                    host=instance.hostname,
                                    port=instance.port)
    procs = dict()
    try:
        procs['mysqldump'] = subprocess.Popen(dump_cmd.split(),
                                              stdout=subprocess.PIPE)
        procs['pigz'] = subprocess.Popen(PIGZ,
                                         stdin=procs['mysqldump'].stdout,
                                         stdout=subprocess.PIPE)
        log.info('Uploading backup to {buk}/{key}'
                 ''.format(buk=environment_specific.S3_BUCKET,
                           key=dump_file))
        safe_uploader.safe_upload(precursor_procs=procs,
                                  stdin=procs['pigz'].stdout,
                                  bucket=environment_specific.S3_BUCKET,
                                  key=dump_file)
        log.info('mysqldump was successful')
    except:
        safe_uploader.kill_precursor_procs(procs)
        raise
Example #8
def logical_backup_instance(instance, timestamp):
    """ Take a compressed mysqldump backup

    Args:
    instance - A hostaddr instance
    timestamp - A timestamp which will be used to create the backup filename

    Returns:
    A string of the path to the finished backup
    """
    dump_file = BACKUP_FILE.format(hostname=instance.hostname,
                                   port=instance.port,
                                   timestamp=time.strftime(
                                       '%Y-%m-%d-%H:%M:%S', timestamp),
                                   backup_type=BACKUP_TYPE_LOGICAL)
    dump_user, dump_pass = mysql_lib.get_mysql_user_for_role(
        USER_ROLE_MYSQLDUMP)
    dump_cmd = MYSQLDUMP_CMD.format(dump_user=dump_user,
                                    dump_pass=dump_pass,
                                    host=instance.hostname,
                                    port=instance.port)
    procs = dict()
    try:
        procs['mysqldump'] = subprocess.Popen(dump_cmd.split(),
                                              stdout=subprocess.PIPE)
        procs['pigz'] = subprocess.Popen(PIGZ,
                                         stdin=procs['mysqldump'].stdout,
                                         stdout=subprocess.PIPE)
        log.info('Uploading backup to {buk}/{key}'
                 ''.format(buk=environment_specific.S3_BUCKET, key=dump_file))
        safe_uploader.safe_upload(precursor_procs=procs,
                                  stdin=procs['pigz'].stdout,
                                  bucket=environment_specific.S3_BUCKET,
                                  key=dump_file)
        log.info('mysqldump was successful')
    except:
        safe_uploader.kill_precursor_procs(procs)
        raise
Example #9
    def mysql_backup_csv_table(self, db, table, tmp_dir_db, conn):
        """ Back up a single table of a single db

        Args:
        db - the db to be backed up
        table - the table to be backed up
        tmp_dir_db - temporary storage used for all tables in the db
        conn - a connection to the mysql instance
        """
        proc_id = multiprocessing.current_process().name
        (_, data_path, _) = environment_specific.get_csv_backup_paths(
                                self.datestamp, db, table,
                                self.instance.replica_type,
                                self.instance.get_zk_replica_set()[0])
        log.debug('{proc_id}: {db}.{table} dump to {path} started'
                  ''.format(proc_id=proc_id,
                            db=db,
                            table=table,
                            path=data_path))
        self.upload_schema(db, table, tmp_dir_db)
        fifo = os.path.join(tmp_dir_db, table)
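        # The dump query writes rows into a named pipe (FIFO); cat reads the
        # FIFO so the data can be null-escaped and lzop-compressed on its way
        # to the uploader.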
        procs = dict()
        try:
            # giant try so we can try to clean things up in case of errors
            self.create_fifo(fifo)

            # Start creating processes
            procs['cat'] = subprocess.Popen(['cat', fifo],
                                            stdout=subprocess.PIPE)
            procs['nullescape'] = subprocess.Popen(['nullescape'],
                                                   stdin=procs['cat'].stdout,
                                                   stdout=subprocess.PIPE)
            procs['lzop'] = subprocess.Popen(['lzop'],
                                             stdin=procs['nullescape'].stdout,
                                             stdout=subprocess.PIPE)

            # Start dump query
            return_value = set()
            query_thread = threading.Thread(target=self.run_dump_query,
                                            args=(db, table, fifo,
                                                  conn, procs['cat'], return_value))
            query_thread.daemon = True
            query_thread.start()

            # And run the upload
            safe_uploader.safe_upload(precursor_procs=procs,
                                      stdin=procs['lzop'].stdout,
                                      bucket=self.upload_bucket,
                                      key=data_path,
                                      check_func=self.check_dump_success,
                                      check_arg=return_value)
            os.remove(fifo)
            log.debug('{proc_id}: {db}.{table} clean up complete'
                      ''.format(proc_id=proc_id,
                                db=db,
                                table=table))
        except:
            log.debug('{proc_id}: in exception handling for failed table upload'
                      ''.format(proc_id=proc_id))

            if os.path.exists(fifo):
                self.cleanup_fifo(fifo)

            safe_uploader.kill_precursor_procs(procs)
            raise
Example #10
    def mysql_backup_csv_table(self, db, table, tmp_dir_db, conn):
        """ Back up a single table of a single db

        Args:
        db - the db to be backed up
        table - the table to be backed up
        tmp_dir_db - temporary storage used for all tables in the db
        conn - a connection to the mysql instance
        """
        proc_id = multiprocessing.current_process().name
        s3_upload_path = self.get_s3_backup_path(db, table)
        log.debug('{proc_id}: {db}.{table} dump to {path} started'
                  ''.format(proc_id=proc_id,
                            db=db,
                            table=table,
                            path=s3_upload_path))
        self.upload_schema(db, table, tmp_dir_db)
        fifo = os.path.join(tmp_dir_db, table)
        procs = dict()
        try:
            # giant try so we can try to clean things up in case of errors
            self.create_fifo(fifo)

            # Start creating processes
            procs['cat'] = subprocess.Popen(['cat', fifo],
                                            stdout=subprocess.PIPE)
            procs['nullescape'] = subprocess.Popen(['nullescape'],
                                                   stdin=procs['cat'].stdout,
                                                   stdout=subprocess.PIPE)
            procs['lzop'] = subprocess.Popen(['lzop'],
                                             stdin=procs['nullescape'].stdout,
                                             stdout=subprocess.PIPE)

            # Start dump query
            return_value = set()
            query_thread = threading.Thread(target=self.run_dump_query,
                                            args=(db, table, fifo, conn,
                                                  procs['cat'], return_value))
            query_thread.daemon = True
            query_thread.start()

            # And run the upload
            safe_uploader.safe_upload(
                precursor_procs=procs,
                stdin=procs['lzop'].stdout,
                bucket=environment_specific.S3_CSV_BUCKET,
                key=s3_upload_path,
                check_func=self.check_dump_success,
                check_arg=return_value)
            os.remove(fifo)
            log.debug('{proc_id}: {db}.{table} clean up complete'
                      ''.format(proc_id=proc_id, db=db, table=table))
        except:
            log.debug(
                '{proc_id}: in exception handling for failed table upload'
                ''.format(proc_id=proc_id))

            if os.path.exists(fifo):
                self.cleanup_fifo(fifo)

            safe_uploader.kill_precursor_procs(procs)

            raise
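
All of these examples share one pattern: chain subprocess.Popen stages together, hand the stdout of the final stage to safe_uploader.safe_upload as its stdin, and on any failure call safe_uploader.kill_precursor_procs before re-raising. The sketch below distills that pattern; the pipeline_upload helper, its argument names, and the import path for safe_uploader are illustrative assumptions, not code from the examples above.

import subprocess

import safe_uploader  # assumed importable as in the examples above


def pipeline_upload(commands, bucket, key, check_func=None, check_arg=None):
    """ Chain commands and stream the final stdout to S3 (illustrative sketch)

    Args:
    commands - a list of argv lists, e.g. [['mysqldump', ...], ['pigz']]
    bucket - the destination S3 bucket
    key - the destination S3 key
    check_func/check_arg - optional post-pipeline success check, as used by
                           the xtrabackup and CSV examples above
    """
    procs = dict()
    try:
        prev_stdout = None
        for argv in commands:
            # Each stage reads the previous stage's stdout and writes to a
            # pipe, so backup data is never staged on local disk.
            procs[argv[0]] = subprocess.Popen(argv,
                                              stdin=prev_stdout,
                                              stdout=subprocess.PIPE)
            prev_stdout = procs[argv[0]].stdout
        kwargs = dict(precursor_procs=procs,
                      stdin=prev_stdout,
                      bucket=bucket,
                      key=key)
        if check_func is not None:
            kwargs.update(check_func=check_func, check_arg=check_arg)
        safe_uploader.safe_upload(**kwargs)
    except:
        # Mirror the examples: reap any still-running pipeline stages,
        # then let the original exception propagate.
        safe_uploader.kill_precursor_procs(procs)
        raise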