Code example #1 (score: 0)
File: cli.py — project: shvar/redfs
def select_files(arguments):
    """
    CLI handler: select files/directories to be backed up as a new dataset.

    (The previous docstring, "Generate digest for the username.", was
    copy-pasted from an unrelated command and did not describe this code.)

    Expects C{arguments} to contain, in order: the host UUID, the dataset
    (backup) name, and at least one path to back up; the deque is consumed
    via C{popleft()}.

    @param arguments: the deque of remaining command-line arguments.
    """
    if len(arguments) < 3:
        cli_error('The host UUID, the backup name, and at least one directory '
                  'with the files should be passed!')
    else:
        my_uuid, ds_name = (try_parse_uuid(arguments.popleft()),
                            arguments.popleft())

        proceed_with_host_uuid_cli(my_uuid)

        paths = __get_all_remaining_args(arguments)

        if not paths:
            cli_error('No paths passed!')
        else:
            host_app = UHostApp(my_uuid, uhost_settings.detect_edition(),
                                __create_chunk_storage())
            ugroup_uuid = host_app.host.user.base_group.uuid
            # Each path is backed up in full: include all files ('f+'),
            # exclude nothing ('f-'), and capture the path's current stat.
            path_map = {
                k: {
                    'f+': ['all'],
                    'f-': [],
                    'stat': safe_stat(k)
                }
                for k in paths
            }

            # sync=False: the selection is submitted asynchronously.
            host_app.select_paths_for_backup(ds_name=ds_name,
                                             ds_uuid=gen_uuid(),
                                             ugroup_uuid=ugroup_uuid,
                                             sync=False,
                                             paths_map=path_map)
Code example #2 (score: 0)
File: cli.py — project: shvar/redfs
def select_files(arguments):
    """
    CLI handler: select files/directories to be backed up as a new dataset.

    (The previous docstring, "Generate digest for the username.", was
    copy-pasted from an unrelated command and did not describe this code.)

    Expects C{arguments} to contain, in order: the host UUID, the dataset
    (backup) name, and at least one path to back up; the deque is consumed
    via C{popleft()}.

    @param arguments: the deque of remaining command-line arguments.
    """
    if len(arguments) < 3:
        cli_error('The host UUID, the backup name, and at least one directory '
                  'with the files should be passed!')
    else:
        my_uuid, ds_name = (try_parse_uuid(arguments.popleft()),
                            arguments.popleft())

        proceed_with_host_uuid_cli(my_uuid)

        paths = __get_all_remaining_args(arguments)

        if not paths:
            cli_error('No paths passed!')
        else:
            host_app = UHostApp(my_uuid,
                                uhost_settings.detect_edition(),
                                __create_chunk_storage())
            ugroup_uuid = host_app.host.user.base_group.uuid
            # Each path is backed up in full: include all files ('f+'),
            # exclude nothing ('f-'), and capture the path's current stat.
            path_map = {k: {'f+': ['all'], 'f-': [], 'stat': safe_stat(k)}
                            for k in paths}

            # sync=False: the selection is submitted asynchronously.
            host_app.select_paths_for_backup(ds_name=ds_name,
                                             ds_uuid=gen_uuid(),
                                             ugroup_uuid=ugroup_uuid,
                                             sync=False,
                                             paths_map=path_map)
Code example #3 (score: 0)
File: uhost_process.py — project: shvar/redfs
    def __backup_some_phys_files(self, base_dir, files, ugroup,
                                 __do_start_backup=True):
        r"""Given some files, create a new dataset and start backing them up.

        >>> # ugroup = UserGroup(
        >>> #     uuid=UserGroupUUID('00000000-bbbb-0000-0000-000000000001'),
        >>> #     name='AlphA',
        >>> #     private=True,
        >>> #     enc_key='\x01\xe6\x13\xdab)\xd2n\xd6\xafTH\x03h\x02\x12'
        >>> #             '\x17D\x1a\xeb\x8b6\xc0\x9b\xa6\x7f\xcc\x06N\xcf'
        >>> #             '\x8b\xcd'
        >>> # )

        >>> # __backup_some_phys_files(
        >>> #     base_dir=u'/home/john/FreeBrie',
        >>> #     files=[
        >>> #         LocalPhysicalFileStateRel(
        >>> #             rel_dir='',
        >>> #             rel_file=u'f1.mp3',
        >>> #             size=13829879,
        >>> #             time_changed=datetime(2012, 11, 5, 12,12,41,904430)),
        >>> #         LocalPhysicalFileStateRel(
        >>> #             rel_dir='',
        >>> #             rel_file=u'f2.avi',
        >>> #             size=3522710,
        >>> #             time_changed=datetime(2012, 11, 5, 12,12,41,988433)),
        >>> #         LocalPhysicalFileStateRel(
        >>> #             rel_dir=u'a/b',
        >>> #             rel_file=u'bbb',
        >>> #             size=4,
        >>> #             time_changed=datetime(2012, 10, 11, 15,33,42,19808)),
        >>> #         LocalPhysicalFileStateRel(
        >>> #             rel_dir=u'a/b/c',
        >>> #             rel_file=u'ccc',
        >>> #             size=4,
        >>> #             time_changed=datetime(2012, 10, 11, 15,33,41,979807))
        >>> #     ],
        >>> #     ugroup=ugroup)

        @todo: complete the unit test, which is half-done!

        @param base_dir: the directory being backed up.
        @type base_dir: basestring

        @param files: the iterable over the files which should be backed up.
            Contains C{LocalPhysicalFileStateRel} objects.
            The caller should ensure that C{files} is non-empty!
        @type files: col.Iterable

        @type ugroup: UserGroup

        @return: the created dataset (if succeeded).
        @rtype: DatasetOnPhysicalFiles, NoneType
        """
        logger.debug('__backup_some_phys_files(%r, %r)',
                     base_dir, ugroup)

        # Group files by rel_dir; then ignore base_dir,
        # keep only rel_dir, rel_file, size and time_changed.
        #
        # This is a lazy generator of generators: one inner generator of
        # RelVirtualFile objects per distinct rel_dir.  sorted_groupby
        # presumably sorts by the same key before grouping, as
        # itertools.groupby requires -- TODO confirm against its definition.
        #
        # NOTE: `lambda f=f:` deliberately binds the *current* f as a
        # default argument, avoiding the late-binding-closure pitfall.
        files_grouped_by_rel_dir = \
            ((RelVirtualFile(rel_dir=f.rel_dir,
                             filename=f.rel_file,
                             # If we can read real stat, read it;
                             # otherwise we'll emulate it with fake_stat
                             stat=coalesce(os_ex.safe_stat(  # real stat
                                               os.path.join(base_dir,
                                                            f.rel_path)),
                                           os_ex.fake_stat(  # deleted file
                                               st_mode=None,
                                               atime=f.time_changed,
                                               mtime=f.time_changed,
                                               ctime=f.time_changed,
                                               size=None)),
                             stat_getter=lambda f=f:
                                             os_ex.safe_stat(
                                                 os.path.join(base_dir,
                                                              f.rel_path)),
                             file_getter=lambda f=f:
                                             open(os.path.join(base_dir,
                                                               f.rel_path),
                                                  'rb'))
                 for f in per_rel_dir)
                     for rel_dir, per_rel_dir
                         in sorted_groupby(files, attrgetter('rel_dir')))

        # Example:
        # files_grouped_by_rel_dir = [
        #     [
        #         RelVirtualFile(...),
        #         RelVirtualFile(...),
        #         RelVirtualFile(...)
        #     ],
        #     [
        #         RelVirtualFile(...)
        #     ],
        #     [
        #         RelVirtualFile(...)
        #     ]
        # ]
        _path_map = {base_dir: {'ifiles': files_grouped_by_rel_dir,
                                'stat': os_ex.safe_stat(base_dir)}}

        # A brand-new dataset UUID; the dataset gets an empty name.
        ds_uuid = DatasetUUID.safe_cast_uuid(gen_uuid())
        ds = self.select_paths_for_backup(ds_name='',
                                          ds_uuid=ds_uuid,
                                          ugroup_uuid=ugroup.uuid,
                                          sync=True,
                                          paths_map=_path_map)
        # Only start the backup if the dataset was actually created.
        if ds is not None and __do_start_backup:
            self.start_backup(ds_uuid)

        return ds
Code example #4 (score: 0)
File: uhost_process.py — project: shvar/redfs
    def __store_fs_change_after_cooling(self,
                                        base_dir, base_dir_id, file_path):
        """
        Persist a file-system change once its "cooling" period has expired.

        Invoked when no further operation occurred for the same file during
        some period of time; the resulting state is buffered for a later,
        regularly-scheduled DB dump rather than written immediately.
        """
        assert not in_main_thread()

        logger.verbose('Cooled down the file %r, storing it', file_path)

        with self.__cooling_down_to_store_lock:
            pending_call = self.__cooling_down_to_store.get(file_path)

            # There is a tiny window between this callLater firing and the
            # entry being removed elsewhere (say, by __heat_fs_path()), so
            # pending_call may legitimately be None at this point.
            if pending_call is not None:
                try:
                    del self.__cooling_down_to_store[file_path]
                except KeyError:
                    # Funny, how comes?
                    logger.warning('Inside the lock, the key %r disappeared '
                                       'during storing',
                                   file_path)

        # TODO: strictly speaking, a None pending_call might mean the state
        # has moved away and should not reach the DB at all; this needs
        # further investigation, but for now we store it anyway.

        # Capture the current state of the path.
        latest_stat = os_ex.safe_stat(file_path)
        if latest_stat is None:
            # The path has been deleted.
            file_size, file_mtime = None, datetime.utcnow()
        else:
            # The path still exists.
            file_size = latest_stat.st_size
            file_mtime = os_ex.stat_get_mtime(latest_stat)

        # This path is ok to snapshot.
        rel_dir = relpath_nodot(os.path.dirname(file_path), base_dir)

        # TODO: ticket:141 - LocalPhysicalFileStateRel cannot represent
        # directories yet, so skip them for now; deleted paths
        # (latest_stat is None) are still recorded though.
        if latest_stat is None or not os_ex.stat_isdir(latest_stat):
            new_state = LocalPhysicalFileStateRel(
                            rel_dir=rel_dir,
                            rel_file=os.path.basename(file_path),
                            size=file_size,
                            time_changed=file_mtime,
                            isdir=False)

            # Not written to the DB right away: appended to a buffer that
            # is dumped on a regular schedule instead.
            with self.__file_states_ready_to_write_lock:
                # The state object is freshly built above, so later
                # alterations elsewhere cannot affect what we stored.
                self.__file_states_ready_to_write[file_path] = (base_dir_id,
                                                                new_state)
Code example #5 (score: 0)
    def __backup_some_phys_files(self,
                                 base_dir,
                                 files,
                                 ugroup,
                                 __do_start_backup=True):
        r"""Given some files, create a new dataset and start backing them up.

        >>> # ugroup = UserGroup(
        >>> #     uuid=UserGroupUUID('00000000-bbbb-0000-0000-000000000001'),
        >>> #     name='AlphA',
        >>> #     private=True,
        >>> #     enc_key='\x01\xe6\x13\xdab)\xd2n\xd6\xafTH\x03h\x02\x12'
        >>> #             '\x17D\x1a\xeb\x8b6\xc0\x9b\xa6\x7f\xcc\x06N\xcf'
        >>> #             '\x8b\xcd'
        >>> # )

        >>> # __backup_some_phys_files(
        >>> #     base_dir=u'/home/john/FreeBrie',
        >>> #     files=[
        >>> #         LocalPhysicalFileStateRel(
        >>> #             rel_dir='',
        >>> #             rel_file=u'f1.mp3',
        >>> #             size=13829879,
        >>> #             time_changed=datetime(2012, 11, 5, 12,12,41,904430)),
        >>> #         LocalPhysicalFileStateRel(
        >>> #             rel_dir='',
        >>> #             rel_file=u'f2.avi',
        >>> #             size=3522710,
        >>> #             time_changed=datetime(2012, 11, 5, 12,12,41,988433)),
        >>> #         LocalPhysicalFileStateRel(
        >>> #             rel_dir=u'a/b',
        >>> #             rel_file=u'bbb',
        >>> #             size=4,
        >>> #             time_changed=datetime(2012, 10, 11, 15,33,42,19808)),
        >>> #         LocalPhysicalFileStateRel(
        >>> #             rel_dir=u'a/b/c',
        >>> #             rel_file=u'ccc',
        >>> #             size=4,
        >>> #             time_changed=datetime(2012, 10, 11, 15,33,41,979807))
        >>> #     ],
        >>> #     ugroup=ugroup)

        @todo: complete the unit test, which is half-done!

        @param base_dir: the directory being backed up.
        @type base_dir: basestring

        @param files: the iterable over the files which should be backed up.
            Contains C{LocalPhysicalFileStateRel} objects.
            The caller should ensure that C{files} is non-empty!
        @type files: col.Iterable

        @type ugroup: UserGroup

        @return: the created dataset (if succeeded).
        @rtype: DatasetOnPhysicalFiles, NoneType
        """
        logger.debug('__backup_some_phys_files(%r, %r)', base_dir, ugroup)

        # Group files by rel_dir; then ignore base_dir,
        # keep only rel_dir, rel_file, size and time_changed.
        #
        # This is a lazy generator of generators: one inner generator of
        # RelVirtualFile objects per distinct rel_dir.  sorted_groupby
        # presumably sorts by the same key before grouping, as
        # itertools.groupby requires -- TODO confirm against its definition.
        #
        # NOTE: `lambda f=f:` deliberately binds the *current* f as a
        # default argument, avoiding the late-binding-closure pitfall.
        files_grouped_by_rel_dir = \
            ((RelVirtualFile(rel_dir=f.rel_dir,
                             filename=f.rel_file,
                             # If we can read real stat, read it;
                             # otherwise we'll emulate it with fake_stat
                             stat=coalesce(os_ex.safe_stat(  # real stat
                                               os.path.join(base_dir,
                                                            f.rel_path)),
                                           os_ex.fake_stat(  # deleted file
                                               st_mode=None,
                                               atime=f.time_changed,
                                               mtime=f.time_changed,
                                               ctime=f.time_changed,
                                               size=None)),
                             stat_getter=lambda f=f:
                                             os_ex.safe_stat(
                                                 os.path.join(base_dir,
                                                              f.rel_path)),
                             file_getter=lambda f=f:
                                             open(os.path.join(base_dir,
                                                               f.rel_path),
                                                  'rb'))
                 for f in per_rel_dir)
                     for rel_dir, per_rel_dir
                         in sorted_groupby(files, attrgetter('rel_dir')))

        # Example:
        # files_grouped_by_rel_dir = [
        #     [
        #         RelVirtualFile(...),
        #         RelVirtualFile(...),
        #         RelVirtualFile(...)
        #     ],
        #     [
        #         RelVirtualFile(...)
        #     ],
        #     [
        #         RelVirtualFile(...)
        #     ]
        # ]
        _path_map = {
            base_dir: {
                'ifiles': files_grouped_by_rel_dir,
                'stat': os_ex.safe_stat(base_dir)
            }
        }

        # A brand-new dataset UUID; the dataset gets an empty name.
        ds_uuid = DatasetUUID.safe_cast_uuid(gen_uuid())
        ds = self.select_paths_for_backup(ds_name='',
                                          ds_uuid=ds_uuid,
                                          ugroup_uuid=ugroup.uuid,
                                          sync=True,
                                          paths_map=_path_map)
        # Only start the backup if the dataset was actually created.
        if ds is not None and __do_start_backup:
            self.start_backup(ds_uuid)

        return ds
Code example #6 (score: 0)
    def __store_fs_change_after_cooling(self, base_dir, base_dir_id,
                                        file_path):
        """
        Buffer a file-system change for the DB once it has "cooled down".

        A change is cooled down when no more operations happened to the same
        file during some period of time; rather than hitting the DB directly,
        the state goes into a buffer that is dumped on a regular schedule.
        """
        assert not in_main_thread()

        logger.verbose('Cooled down the file %r, storing it', file_path)

        with self.__cooling_down_to_store_lock:
            scheduled_call = self.__cooling_down_to_store.get(file_path)

            # Between the moment this callLater fired and now, the entry may
            # already have been removed (e.g. by __heat_fs_path()); thus
            # scheduled_call can be None even though the timer went off.
            if scheduled_call is not None:
                try:
                    del self.__cooling_down_to_store[file_path]
                except KeyError:
                    # Funny, how comes?
                    logger.warning(
                        'Inside the lock, the key %r disappeared '
                        'during storing', file_path)

        # TODO: frankly, a None scheduled_call would suggest the state has
        # moved away and should not be stored in the DB; that still needs
        # investigation, so for now it is stored regardless.

        # Take a fresh snapshot of the path.
        current_stat = os_ex.safe_stat(file_path)
        if current_stat is None:
            # The path was deleted meanwhile.
            file_size = None
            file_mtime = datetime.utcnow()
        else:
            # The path is present on disk.
            file_size = current_stat.st_size
            file_mtime = os_ex.stat_get_mtime(current_stat)

        # This path is ok to snapshot.
        rel_dir = relpath_nodot(os.path.dirname(file_path), base_dir)

        # TODO: ticket:141 - LocalPhysicalFileStateRel must be able to
        # support directories; until then, directories are skipped here,
        # while deleted paths (current_stat is None) are still recorded.
        if current_stat is None or not os_ex.stat_isdir(current_stat):
            cooled_state = LocalPhysicalFileStateRel(
                rel_dir=rel_dir,
                rel_file=os.path.basename(file_path),
                size=file_size,
                time_changed=file_mtime,
                isdir=False)

            # ... and do you think we write it to the DB right now?
            # Sorry no, we'll just put it to a regularly-dumped buffer.
            with self.__file_states_ready_to_write_lock:
                # The state is built fresh above, so nothing can alter it
                # behind our back once stored.
                self.__file_states_ready_to_write[file_path] = (base_dir_id,
                                                                cooled_state)