Code example #1
import datetime
import json
import logging
import time

import medusa.utils  # module path assumed from the cassandra-medusa project layout

# Helpers such as throttle_backup, get_schema_and_tokenmap, do_backup, stagger,
# add_backup_start_to_index, print_backup_stats and update_monitoring are
# defined elsewhere in the original module (not shown here).

def start_backup(storage, node_backup, cassandra, differential_mode,
                 stagger_time, start, mode, enable_md5_checks_flag,
                 backup_name, config, monitoring):
    try:
        # Make sure Cassandra keeps priority by limiting the backup's resource usage
        throttle_backup()
    except Exception:
        logging.warning(
            "Throttling backup impossible. It's probable that ionice is not available."
        )

    logging.info('Saving tokenmap and schema')
    schema, tokenmap = get_schema_and_tokenmap(cassandra)

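    # Store the schema and the token map (as JSON) with the backup metadata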
    node_backup.schema = schema
    node_backup.tokenmap = json.dumps(tokenmap)
    if differential_mode:
        node_backup.differential = mode
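    # Record the start of this backup in the backup index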
    add_backup_start_to_index(storage, node_backup)

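    # If a stagger time was requested, wait for backups on previous nodes to finish
    # (or give up once the stagger window expires)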
    if stagger_time:
        stagger_end = start + stagger_time
        logging.info(
            'Staggering backup run, trying until {}'.format(stagger_end))
        while not stagger(config.storage.fqdn, storage, tokenmap):
            if datetime.datetime.now() < stagger_end:
                logging.info('Staggering this backup run...')
                time.sleep(60)
            else:
                raise IOError('Backups on previous nodes did not complete'
                              ' within our stagger time.')

    # Perform the actual backup
    actual_start = datetime.datetime.now()
    enable_md5 = enable_md5_checks_flag or medusa.utils.evaluate_boolean(
        config.checks.enable_md5_checks)
    num_files, node_backup_cache = do_backup(cassandra, node_backup, storage,
                                             differential_mode, enable_md5,
                                             config, backup_name)
    end = datetime.datetime.now()
    actual_backup_duration = end - actual_start

    print_backup_stats(actual_backup_duration, actual_start, end, node_backup,
                       node_backup_cache, num_files, start)
    update_monitoring(actual_backup_duration, backup_name, monitoring,
                      node_backup)
    return {
        "actual_backup_duration": actual_backup_duration,
        "actual_start_time": actual_start,
        "end_time": end,
        "node_backup": node_backup,
        "node_backup_cache": node_backup_cache,
        "num_files": num_files,
        "start_time": start,
        "backup_name": backup_name
    }
Code example #2
import datetime
import json
import logging
import time

import medusa.utils
# Class import paths below are assumed from the cassandra-medusa project layout.
from medusa.cassandra_utils import Cassandra
from medusa.monitoring import Monitoring
from medusa.storage import Storage

# Helpers such as throttle_backup, get_schema_and_tokenmap, do_backup, stagger,
# add_backup_start_to_index, print_backup_stats and update_monitoring are
# defined elsewhere in the original module (not shown here).

def main(config, backup_name_arg, stagger_time, mode):
    start = datetime.datetime.now()
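    # Default the backup name to an hourly timestamp (YYYYMMDDHH) when none is given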
    backup_name = backup_name_arg or start.strftime('%Y%m%d%H')
    monitoring = Monitoring(config=config.monitoring)

    try:
        storage = Storage(config=config.storage)
        cassandra = Cassandra(config)

        differential_mode = False
        if mode == "differential":
            differential_mode = True

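        # Get a handle on this node's backup in the configured storage backend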
        node_backup = storage.get_node_backup(
            fqdn=config.storage.fqdn,
            name=backup_name,
            differential_mode=differential_mode
        )

        if node_backup.exists():
            raise IOError('Error: Backup {} already exists'.format(backup_name))

        # Make sure Cassandra keeps priority by limiting the backup's resource usage
        try:
            throttle_backup()
        except Exception:
            logging.warning("Throttling backup impossible. It's probable that ionice is not available.")

        logging.info('Saving tokenmap and schema')
        schema, tokenmap = get_schema_and_tokenmap(cassandra)

        node_backup.schema = schema
        node_backup.tokenmap = json.dumps(tokenmap)
        if differential_mode:
            node_backup.differential = mode
        add_backup_start_to_index(storage, node_backup)

        if stagger_time:
            stagger_end = start + stagger_time
            logging.info('Staggering backup run, trying until {}'.format(stagger_end))
            while not stagger(config.storage.fqdn, storage, tokenmap):
                if datetime.datetime.now() < stagger_end:
                    logging.info('Staggering this backup run...')
                    time.sleep(60)
                else:
                    raise IOError('Backups on previous nodes did not complete'
                                  ' within our stagger time.')

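        # Perform the actual backup and time it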
        actual_start = datetime.datetime.now()

        num_files, node_backup_cache = do_backup(
            cassandra, node_backup, storage, differential_mode, config, backup_name)

        end = datetime.datetime.now()
        actual_backup_duration = end - actual_start

        print_backup_stats(actual_backup_duration, actual_start, end, node_backup, node_backup_cache, num_files, start)

        update_monitoring(actual_backup_duration, backup_name, monitoring, node_backup)
        return (actual_backup_duration, actual_start, end, node_backup, node_backup_cache, num_files, start)

    except Exception as e:
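        # Report the failure to monitoring, then hand off to the shared exception handler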
        tags = ['medusa-node-backup', 'backup-error', backup_name]
        monitoring.send(tags, 1)
        medusa.utils.handle_exception(
            e,
            "This error happened during the backup: {}".format(str(e)),
            config
        )