Example #1
    def __backup_snapshotted_files_if_needed(self):
        """
        Look over the existing snapshots and, if some files require backing up,
        start backup for them.
        """
        assert not in_main_thread()

        max_birth = datetime.utcnow() - FILE_COOL_DOWN_TO_BACKUP

        with db.RDB() as rdbw:
            dirs_with_ugroups = \
                HostQueries.HostFiles.get_base_directories_with_ugroups(rdbw)

            dirs_with_ugroups = list(dirs_with_ugroups)  # to close rdbw

        for base_dir, ugroup in dirs_with_ugroups:
            logger.debug('Checking for non-backed data in %r for %r',
                         base_dir, ugroup)

            with db.RDB() as rdbw:
                need_backup, files = \
                    inonempty(HostQueries.HostFiles
                                         .get_files_for_backup_older_than(
                                              max_birth, ugroup.uuid, rdbw))

                # files is an iterable over LocalPhysicalFileStateRel

                # eagerly evaluate to leave RDB context
                files = list(files)

            # Now we have a flag telling whether any files exist that
            # need to be backed up, as well as an iterator over the
            # names of the files to be backed up, together with their
            # sizes and last change times.
            # The extra data (size, last change time) is needed so that
            # we back up only this specific version of the file, and
            # skip it if it has changed since (the newer version will be
            # backed up in a different transaction).

            if not need_backup:
                logger.debug('No changes to be synced for %r', ugroup)
            else:
                logger.debug('On auto sync, creating new dataset and '
                             'launching backup for %r', ugroup)

                self.__backup_some_phys_files(base_dir, files, ugroup)
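All of the examples on this page exercise the same `inonempty` helper: it takes an iterable and returns a pair of (a flag telling whether the iterable yielded anything, an iterator over all of its items, including the one that was peeked at). The project's own implementation is not shown here, so the following is only a minimal sketch of the behavior implied by the call sites:

    from itertools import chain

    def inonempty(iterable):
        """Split an iterable into (non_empty, iterator)."""
        it = iter(iterable)
        try:
            first = next(it)          # peek at the first element
        except StopIteration:
            return False, iter(())    # empty: flag is False, iterator yields nothing
        # Non-empty: chain the peeked element back in front of the rest.
        return True, chain([first], it)

Note that the returned iterator is single-pass; that is why the call sites above eagerly list()-evaluate it before leaving the db.RDB() context.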
Example #2
    def __backup_snapshotted_files_if_needed(self):
        """
        Look over the existing snapshots and, if some files require backing up,
        start backup for them.
        """
        assert not in_main_thread()

        max_birth = datetime.utcnow() - FILE_COOL_DOWN_TO_BACKUP

        with db.RDB() as rdbw:
            dirs_with_ugroups = \
                HostQueries.HostFiles.get_base_directories_with_ugroups(rdbw)

            dirs_with_ugroups = list(dirs_with_ugroups)  # to close rdbw

        for base_dir, ugroup in dirs_with_ugroups:
            logger.debug('Checking for non-backed data in %r for %r', base_dir,
                         ugroup)

            with db.RDB() as rdbw:
                need_backup, files = \
                    inonempty(HostQueries.HostFiles
                                         .get_files_for_backup_older_than(
                                              max_birth, ugroup.uuid, rdbw))

                # files is an iterable over LocalPhysicalFileStateRel

                # eagerly evaluate to leave RDB context
                files = list(files)

            # Now we have a flag telling whether any files exist that
            # need to be backed up, as well as an iterator over the
            # names of the files to be backed up, together with their
            # sizes and last change times.
            # The extra data (size, last change time) is needed so that
            # we back up only this specific version of the file, and
            # skip it if it has changed since (the newer version will be
            # backed up in a different transaction).

            if not need_backup:
                logger.debug('No changes to be synced for %r', ugroup)
            else:
                logger.debug(
                    'On auto sync, creating new dataset and '
                    'launching backup for %r', ugroup)

                self.__backup_some_phys_files(base_dir, files, ugroup)
Example #3
File: cli.py Project: shvar/redfs
        def on_reactor_start(app):
            """
            @type app: UHostApp
            """

            @exceptions_logged(logger)
            @contract_epydoc
            def on_backup_completed(backup_succeeded):
                """
                @param backup_succeeded: whether the backup attempt
                    has succeeded overall.
                @type backup_succeeded: bool
                """
                if backup_succeeded:
                    print('Backup completed successfully!')
                else:
                    print('The node disallowed the backup.')

                if (stay_alive_on_success if backup_succeeded
                                          else stay_alive_on_failure):
                    print("Stayin' alive. Stayin' alive.")
                else:
                    app.terminate_host()

            with db.RDB() as rdbw:
                all_datasets = Queries.Datasets.get_just_datasets(my_uuid,
                                                                  rdbw)

            incomplete_datasets_exist, incomplete_datasets = \
                inonempty(ds for ds in all_datasets if not ds.completed)

            if not incomplete_datasets_exist:
                # No incomplete datasets to back up
                on_backup_completed(False)
            else:
                # Start the backup of the first dataset in the sequence.
                incomplete_dataset_to_start = incomplete_datasets.next()
                app.auto_start_backup = False
                app.start_backup(incomplete_dataset_to_start.uuid,
                                 on_backup_completed)
Example #4
File: cli.py Project: shvar/redfs
        def on_reactor_start(app):
            """
            @type app: UHostApp
            """
            @exceptions_logged(logger)
            @contract_epydoc
            def on_backup_completed(backup_succeeded):
                """
                @param backup_succeeded: whether the backup attempt
                    has succeeded overall.
                @type backup_succeeded: bool
                """
                if backup_succeeded:
                    print('Backup completed successfully!')
                else:
                    print('The node disallowed the backup.')

                if (stay_alive_on_success
                        if backup_succeeded else stay_alive_on_failure):
                    print("Stayin' alive. Stayin' alive.")
                else:
                    app.terminate_host()

            with db.RDB() as rdbw:
                all_datasets = Queries.Datasets.get_just_datasets(
                    my_uuid, rdbw)

            incomplete_datasets_exist, incomplete_datasets = \
                inonempty(ds for ds in all_datasets if not ds.completed)

            if not incomplete_datasets_exist:
                # No incomplete datasets to back up
                on_backup_completed(False)
            else:
                # Start the backup of the first dataset in the sequence.
                incomplete_dataset_to_start = incomplete_datasets.next()
                app.auto_start_backup = False
                app.start_backup(incomplete_dataset_to_start.uuid,
                                 on_backup_completed)
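A portability note for Examples #3 and #4: `incomplete_datasets.next()` is the Python 2 iterator protocol. On Python 2.6+ and Python 3 the same step is spelled with the `next()` built-in:

    # Python 3 (and 2.6+) spelling of the same step:
    incomplete_dataset_to_start = next(incomplete_datasets)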
Example #5
    def __restore_datasets_to_host(self, me, host, ds_uuids):
        """
        Launch RESTORE transaction(s) to restore several datasets
        (with dataset uuids in C{ds_uuids}) to the host C{host}.

        @param me: my node
        @type me: Node

        @type host: Host
        @type ds_uuids: col.Iterable
        """
        tr_manager = self.__app.tr_manager
        ds_uuids_present, ds_uuids_asked_to_restore = inonempty(ds_uuids)

        if not ds_uuids_present:
            logger.debug('Actually, nothing to restore to %r', host)
        else:
            sync_ds_uuids_for_this_host = \
                {state.ds_uuid
                    for state in tr_manager.get_tr_states(class_name='RESTORE',
                                                          dst_uuid=host.uuid)
                    if state.is_sync}

            if sync_ds_uuids_for_this_host:
                logger.debug('Restoring something to %r, while '
                             'the following RESTORE transactions are '
                             'already syncing to it: %r',
                             host, sync_ds_uuids_for_this_host)

            # Let's evaluate the sequence so that we can iterate over it
            # more than once, as well as count its length.
            ds_uuids_asked_to_restore = frozenset(ds_uuids_asked_to_restore)
            assert ds_uuids_asked_to_restore, repr(ds_uuids_asked_to_restore)

            ds_uuids_to_restore = \
                ds_uuids_asked_to_restore - sync_ds_uuids_for_this_host
            logger.verbose('While asked to restore %i dataset(s) (%r), '
                           'will in fact restore %i one(s) (%r)',
                           len(ds_uuids_asked_to_restore),
                           ds_uuids_asked_to_restore,
                           len(ds_uuids_to_restore),
                           ds_uuids_to_restore)

            # If we are syncing a sole dataset, let's sync it;
            # if there are multiple ones, let's merge them.
            if len(ds_uuids_to_restore) == 1:
                ds_uuids_will_restore = ds_uuids_to_restore
            else:
                with db.RDB() as rdbw:
                    ds_uuids_will_restore = \
                        TrustedQueries.TrustedDatasets.merge_sync_datasets(
                            host.uuid, ds_uuids_to_restore, rdbw)
                    # Evaluate eagerly to get it outside the RDB wrapper.
                    ds_uuids_will_restore = list(ds_uuids_will_restore)
                logger.debug('Merged DS UUIDs: %r', ds_uuids_will_restore)

            logger.debug('Will in fact restore these datasets: %r',
                         ds_uuids_will_restore)
            for ds_uuid in ds_uuids_will_restore:
                logger.debug('Restoring files from %s to %r', ds_uuid, host)

                r_tr = tr_manager.create_new_transaction(
                           name='RESTORE',
                           src=me,
                           dst=host,
                           parent=None,
                           # RESTORE-specific
                           ds_uuid=ds_uuid,
                           # None means "all files"
                           file_paths_for_basedirs=None,
                           wr_uuid=None)
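The `frozenset(ds_uuids_asked_to_restore)` step in this example is not cosmetic: `inonempty` hands back a single-pass iterator, so it has to be materialized before it can be counted with len() and subtracted from. A quick illustration of what would go wrong otherwise (with made-up UUID strings):

    ds_uuids = iter(['ds-1', 'ds-2'])  # single-pass, like the iterator from inonempty
    list(ds_uuids)                     # ['ds-1', 'ds-2']
    list(ds_uuids)                     # [] -- already exhausted

    # Materializing once makes the collection reusable:
    asked = frozenset(['ds-1', 'ds-2'])
    to_restore = asked - {'ds-2'}      # frozenset({'ds-1'}); len(asked) still works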