# Standard-library imports needed by this example; project helpers such as
# SwiftStore, FSStore, DirectoryMetadata, EpochFiles, LogTime,
# calculate_paths, choose_one, delete_pickles and sizeof_fmt are assumed to
# come from the backup tool's own modules.
import logging
import os
import sys

import yaml


def main(argv=None):
    if argv is None:
        argv = sys.argv
    if (len(argv) > 5) or (len(argv) < 4):
        print("Usage: " + argv[0] + " <config file> <domain> <v_node> [YYYY_MM_DD]")
        print("The config file uses the same format as for backups; the backup dir, snapshot name and swift credentials are used.")
        print("The domain is the domain to restore from swift; the v_node is the vertica node name to restore data for.")
        print("If the year/month/day is specified, the most recent backup from that day is downloaded instead of prompting.")
        return 1

    config_file = argv[1]
    domain = argv[2]
    v_node_name = argv[3]
    if len(argv) == 5:
        day = argv[4]
    else:
        day = None
    with open(config_file, 'r') as f:
        config = yaml.safe_load(f)

    # Setup logging
    logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO)
    log = logging.getLogger(__name__)

    with LogTime(log.info, "Restore download completed"):

        # Setup swift/paths
        base_dir, prefix_dir = calculate_paths(config, v_node_name)
        swift_store = SwiftStore(config['swift_key'],
                                 config['swift_region'],
                                 config['swift_tenant'],
                                 config['swift_url'],
                                 config['swift_user'],
                                 prefix_dir,
                                 domain=domain,
                                 vnode=v_node_name)
        fs_store = FSStore(base_dir, prefix_dir)

        # Get the metadata from the last restore (if any)
        current_metadata = DirectoryMetadata(fs_store)

        # Grab the swift metadata we want to restore
        if day is None:
            pickle = choose_one(swift_store.list_pickles(),
                                "Please choose a pickle to restore from")
        else:
            # The pickle list is sorted by date, so this finds the newest backup for the given day, or leaves pickle as None
            pickle = None
            for option in swift_store.list_pickles():
                if option.startswith(day):
                    pickle = option

        if pickle is None:
            log.error('No backups found in swift.')
            sys.exit(1)
        swift_metadata = DirectoryMetadata.load_pickle(swift_store, pickle)

        # Compare the files in the current restore and swift and download/delete as necessary
        with LogTime(log.debug, "Diff completed", seconds=True):
            to_download, to_del = swift_metadata.diff(current_metadata)

        size_downloaded = 0
        with LogTime(log.info, "Download Completed"):
            for relative_path in to_download:
                size_downloaded += swift_store.download(
                    relative_path, base_dir)
        log.info("\tDownloaded %s in %d items" %
                 (sizeof_fmt(size_downloaded), len(to_download)))

        with LogTime(log.info, "Deleted %d items" % len(to_del)):
            for relative_path in to_del:
                fs_store.delete(relative_path)

        EpochFiles(os.path.join(base_dir, prefix_dir), config['snapshot_name'],
                   swift_metadata.date).restore()

        # Save the swift metadata to the local fs, to indicate the restore is done
        swift_metadata.save(fs_store)

    delete_pickles(fs_store)
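
These examples call several project helpers that this page does not show. As a rough illustration only, here is what the LogTime context manager and sizeof_fmt might look like, inferred from the call sites above; the project's real implementations may differ. (A hypothetical invocation of the restore example, with an assumed script name: python restore_vertica.py backup.yaml my_domain v_mydb_node0001 2015_06_01)

import time


class LogTime(object):
    """Context manager that logs how long its block took to run.

    Sketch inferred from calls like LogTime(log.info, "Download Completed")
    and LogTime(log.debug, "Diff completed", seconds=True).
    """

    def __init__(self, log_method, message, seconds=False):
        self.log_method = log_method  # e.g. log.info or log.debug
        self.message = message
        self.seconds = seconds        # report seconds instead of minutes

    def __enter__(self):
        self.start = time.time()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        elapsed = time.time() - self.start
        if self.seconds:
            self.log_method("%s in %0.2f seconds" % (self.message, elapsed))
        else:
            self.log_method("%s in %0.2f minutes" % (self.message, elapsed / 60))


def sizeof_fmt(num, suffix='B'):
    """Render a byte count as a human-readable string, e.g. 1536 -> '1.5KB'."""
    for unit in ('', 'K', 'M', 'G', 'T'):
        if abs(num) < 1024.0:
            return "%3.1f%s%s" % (num, unit, suffix)
        num /= 1024.0
    return "%.1f%s%s" % (num, 'P', suffix)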
Example #2
import logging
import os
import sys
import time
from datetime import datetime

import yaml


def main(argv=None):
    if argv is None:
        argv = sys.argv
    if len(argv) != 2:
        print("Usage: " + argv[0] + " <config file>")
        return 1

    config_file = argv[1]
    with open(config_file, 'r') as f:
        config = yaml.safe_load(f)

    # Setup logging
    log_path = os.path.join(config['log_dir'], 'backup_' + datetime.today().strftime('%A') + '.log')
    logging.basicConfig(format='%(asctime)s %(message)s', filename=log_path, level=logging.INFO)
    log = logging.getLogger(__name__)

    # LogTime is not used here so the timing can be reported to nagios
    start = time.time()
    exit_status = 0
    epoch_files = None

    # Run the vbr backup command - the vbr run is quite fast, typically completing in less than a minute
    if config['run_vbr']:
        run_vbr(config)  # If this fails it will sys.exit with an appropriate nagios error code

    try:
        base_dir, prefix_dir = calculate_paths(config)
        swift_store = SwiftStore(config['swift_key'], config['swift_region'], config['swift_tenant'],
                                 config['swift_url'], config['swift_user'], prefix_dir, config['auth_version'])
        fs_store = FSStore(base_dir, prefix_dir)
        upload_time = datetime.today()

        epoch_files = EpochFiles(os.path.join(base_dir, prefix_dir), config['snapshot_name'], upload_time)
        epoch_files.archive()

        # Grab the local and swift metadata
        current_metadata = DirectoryMetadata(fs_store, upload_time)
        current_metadata.save(fs_store)
        swift_metadata = DirectoryMetadata(swift_store)

        # Compare the files in the current backup and swift and upload as necessary, then delete as necessary
        with LogTime(log.debug, "Diff operation completed", seconds=True):
            to_add, do_not_del = current_metadata.diff(swift_metadata)

        size_uploaded = 0
        with LogTime(log.info, "Uploaded Completed"):
            for relative_path in to_add:
                size_uploaded += swift_store.upload(relative_path, base_dir)
        log.info("\tUploaded %s in %d items" % (sizeof_fmt(size_uploaded), len(to_add)))

        with LogTime(log.info, "Determining items to delete, retaining %d backups" % config['retain']):
            # Grab the pickle names I want to combine, relying on these being in order by date, newest first
            pickles = swift_store.list_pickles()
            combine_pickles = pickles[:config['retain']]

            # Combine the metadata from all of these pickles.
            # It would be good to check that there is no overlap in filenames with different content.
            combined_metadata = DirectoryMetadata()
            for pickle in combine_pickles:
                pickle_metadata = DirectoryMetadata.load_pickle(swift_store, pickle)
                combined_metadata.metadata.update(pickle_metadata.metadata)

            # Do a diff with all that is in swift; anything in swift but not in the combined set can be deleted.
            should_be_empty, to_del = combined_metadata.diff(swift_metadata)
            if len(should_be_empty) != 0:
                exit_status = 1
                log.error(
                    "ERROR: Found files in the %d combined retained backups that were not in swift.\n%s"
                    % (config['retain'], should_be_empty)
                )

        with LogTime(log.info, "Deleted %d items" % len(to_del)):
            for relative_path in to_del:
                swift_store.delete(relative_path)

        # Upload today's metadata pickle; this is done last so that its presence indicates the backup is complete.
        current_metadata.save(swift_store)

        # Clean up old pickles
        delete_pickles(fs_store)
        delete_pickles(swift_store, config['retain'])

    except Exception:
        log.exception('Unhandled Exception in Backup upload')
        # Move the Epoch files back to their original names so a retry run does not encounter issues with them
        if epoch_files is not None:
            epoch_files.restore()
        exit_status = 1

    # Status message and exit
    stop = time.time()
    duration = (stop - start) / 60
    duration_msg = "Backup completed in %d minutes total. Thresholds, warn %d.|%d" % \
                   (duration, config['warning'], duration)
    log.info(duration_msg)

    nagios_exit(exit_status, duration_msg, duration, config['warning'])
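
Both backup examples finish with nagios_exit, which this page does not show either. A minimal sketch, assuming it follows the standard Nagios plugin conventions (exit code 0 = OK, 1 = WARNING, 2 = CRITICAL) and escalates when the run exceeds the configured warning threshold; the actual helper may behave differently.

import sys

# Standard Nagios plugin exit codes
OK, WARNING, CRITICAL, UNKNOWN = 0, 1, 2, 3


def nagios_exit(exit_status, message, duration, warning_threshold):
    """Print the status line (perfdata follows the '|') and exit.

    Sketch only: escalates OK to WARNING when the backup ran longer
    than the configured warning threshold, in minutes.
    """
    if exit_status == OK and duration > warning_threshold:
        exit_status = WARNING
    print(message)
    sys.exit(exit_status)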
Example #3
import logging
import os
import sys
import time
from datetime import datetime

import requests
import yaml


def main(argv=None):
    if argv is None:
        argv = sys.argv
    if len(argv) != 2:
        print("Usage: " + argv[0] + " <config file>")
        return 1

    requests.packages.urllib3.disable_warnings()
    config_file = argv[1]
    with open(config_file, 'r') as f:
        config = yaml.safe_load(f)

    # Setup logging
    log_path = os.path.join(
        config['log_dir'],
        'backup_' + datetime.today().strftime('%A') + '.log')
    logging.basicConfig(format='%(asctime)s %(message)s',
                        filename=log_path,
                        level=logging.INFO)
    log = logging.getLogger(__name__)

    # LogTime is not used here so the timing can be reported to nagios
    start = time.time()
    exit_status = 0
    epoch_files = None

    # Run the vbr backup command - the vbr run is quite fast, typically completing in less than a minute
    if config['run_vbr']:
        run_vbr(config, 'init')
        run_vbr(config, 'backup')

    try:
        catalog_dir = config['catalog_dir']
        base_dir = config['backup_dir']
        prefix_dir = ''
        swift_store = SwiftStore(config['swift_key'], config['swift_region'],
                                 config['swift_tenant'], config['swift_url'],
                                 config['swift_user'], prefix_dir)
        fs_store = FSStore(base_dir, prefix_dir)
        upload_time = datetime.today()

        epoch_files = EpochFiles(os.path.join(base_dir,
                                              prefix_dir), catalog_dir,
                                 config['snapshot_name'], upload_time)
        epoch_files.archive()

        # Grab the local and swift metadata
        current_metadata = DirectoryMetadata(fs_store, upload_time)
        current_metadata.save(fs_store)
        swift_metadata = DirectoryMetadata(swift_store)

        # Compare the files in the current backup and swift and upload as necessary, then delete as necessary
        with LogTime(log.debug, "Diff operation completed", seconds=True):
            to_add, do_not_del = current_metadata.diff(swift_metadata)

        size_uploaded = 0
        with LogTime(log.info, "Uploaded Completed"):
            for relative_path in to_add:
                size_uploaded += swift_store.upload(relative_path, base_dir)
        log.info("\tUploaded %s in %d items" %
                 (sizeof_fmt(size_uploaded), len(to_add)))

        with LogTime(
                log.info, "Determining items to delete, retaining %d backups" %
                config['retain']):
            # Grab the pickle names I want to combine, relying on these being in order by date, newest first
            pickles = swift_store.list_pickles()
            combine_pickles = pickles[:config['retain']]

            # Combine the metadata from all of these pickles.
            # It would be good to check that there is no overlap in filenames with different content.
            combined_metadata = DirectoryMetadata()
            for pickle in combine_pickles:
                pickle_metadata = DirectoryMetadata.load_pickle(
                    swift_store, pickle)
                combined_metadata.metadata.update(pickle_metadata.metadata)

            # Do a diff with all that is in swift; anything in swift but not in the combined set can be deleted.
            should_be_empty, to_del = combined_metadata.diff(swift_metadata)
            if len(should_be_empty) != 0:
                exit_status = 1
                log.error(
                    "ERROR: Found files in the %d combined retained backups that were not in swift.\n%s"
                    % (config['retain'], should_be_empty))

        with LogTime(log.info, "Deleted %d items" % len(to_del)):
            for relative_path in to_del:
                swift_store.delete(relative_path)

        # Upload today's metadata pickle; this is done last so that its presence indicates the backup is complete.
        current_metadata.save(swift_store)

        # Clean up old pickles
        delete_pickles(fs_store)
        delete_pickles(swift_store, config['retain'])

    except Exception:
        log.exception('Unhandled Exception in Backup upload')
        # Move the Epoch files back to their original names so a retry run does not encounter issues with them
        if epoch_files is not None:
            epoch_files.restore()
        exit_status = 1

    # Status message and exit
    stop = time.time()
    duration = (stop - start) / 60
    duration_msg = "Backup completed in %d minutes total. Thresholds, warn %d.|%d" % \
                   (duration, config['warning'], duration)
    log.info(duration_msg)

    nagios_exit(exit_status, duration_msg, duration, config['warning'])
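
Both backup examples also prune old metadata pickles with delete_pickles. A plausible sketch, assuming stores expose the list_pickles() and delete() methods used above, and that list_pickles() returns names ordered newest first, as the retention logic above already relies on.

def delete_pickles(store, keep=1):
    """Delete all but the newest `keep` metadata pickles from a store.

    Sketch inferred from delete_pickles(fs_store) and
    delete_pickles(swift_store, config['retain']) above; the real
    helper may differ.
    """
    for pickle_name in store.list_pickles()[keep:]:
        store.delete(pickle_name)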
Example #4
import logging
import os
import sys

import yaml


def main(argv=None):
    if argv is None:
        argv = sys.argv
    if (len(argv) > 6) or (len(argv) < 5):
        print("Usage: " + argv[0] + " <config file> <domain> <v_node> <hostname> [YYYY_MM_DD]")
        print("The config file uses the same format as for backups; the backup dir, snapshot name and swift credentials are used.")
        print("The domain is the domain to restore from swift; the v_node is the vertica node name to restore data for.")
        print("If the year/month/day is specified, the most recent backup from that day is downloaded instead of prompting.")
        return 1

    config_file = argv[1]
    domain = argv[2]
    v_node_name = argv[3]
    hostname = argv[4]
    if len(argv) == 6:
        day = argv[5]
    else:
        day = None
    with open(config_file, 'r') as f:
        config = yaml.safe_load(f)

    # Setup logging
    logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO)
    log = logging.getLogger(__name__)

    with LogTime(log.info, "Restore download completed"):

        # Setup swift/paths
        base_dir, prefix_dir = calculate_paths(config, v_node_name)
        swift_store = SwiftStore(config['swift_key'], config['swift_region'], config['swift_tenant'],
                                 config['swift_url'], config['swift_user'], prefix_dir, domain=domain,
                                 vnode=v_node_name, hostname=hostname)
        fs_store = FSStore(base_dir, prefix_dir)

        # Get the metadata from the last restore (if any)
        current_metadata = DirectoryMetadata(fs_store)

        # Grab the swift metadata we want to restore
        if day is None:
            pickle = choose_one(swift_store.list_pickles(), "Please choose a pickle to restore from")
        else:
            # The pickle list is sorted by date, so this finds the newest backup for the given day, or leaves pickle as None
            pickle = None
            for option in swift_store.list_pickles():
                if option.startswith(day):
                    pickle = option

        if pickle is None:
            log.error('No backups found in swift.')
            sys.exit(1)
        swift_metadata = DirectoryMetadata.load_pickle(swift_store, pickle)

        # Compare the files in the current restore and swift and download/delete as necessary
        with LogTime(log.debug, "Diff completed", seconds=True):
            to_download, to_del = swift_metadata.diff(current_metadata)

        size_downloaded = 0
        with LogTime(log.info, "Download Completed"):
            for relative_path in to_download:
                size_downloaded += swift_store.download(relative_path, base_dir)
        log.info("\tDownloaded %s in %d items" % (sizeof_fmt(size_downloaded), len(to_download)))

        with LogTime(log.info, "Deleted %d items" % len(to_del)):
            for relative_path in to_del:
                fs_store.delete(relative_path)

        EpochFiles(os.path.join(base_dir, prefix_dir), config['catalog_dir'], config['snapshot_name'], swift_metadata.date).restore()

        # Save the swift metadata to the local fs, to indicate the restore is done
        swift_metadata.save(fs_store)

    delete_pickles(fs_store)
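
When no day is given, the restore examples use choose_one to let the operator pick a backup interactively. A minimal sketch, inferred purely from the call site choose_one(swift_store.list_pickles(), "Please choose a pickle to restore from"); the real helper may differ.

def choose_one(options, prompt):
    """Present a numbered list of options and return the chosen one, or None."""
    if not options:
        return None
    for index, option in enumerate(options):
        print("%d: %s" % (index, option))
    while True:
        answer = input("%s [0-%d]: " % (prompt, len(options) - 1))  # raw_input() on Python 2
        try:
            return options[int(answer)]
        except (ValueError, IndexError):
            print("Please enter a number between 0 and %d" % (len(options) - 1))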