Example #1
    def create_new_host_for_user(self, username):
        """
        Implements the @abstractmethod from
        C{AbstractUsersEnabledPeerBookMixin}.

        @rtype: Host
        """
        uuid_created = False

        while not uuid_created:
            host_uuid = HostUUID.safe_cast_uuid(gen_uuid())
            # Ensure it doesn't start with 0x00 byte
            uuid_created = (host_uuid.bytes[0] != '\x00')

        host = Host(uuid=host_uuid,
                    urls=[],
                    # Host-specific
                    name=str(host_uuid),
                    user=self.get_user_by_name(username))

        with self._rdbw_factory() as rdbw, self.__fdbw_factory() as fdbw:
            dw = DataWrapper(rdbw=rdbw, fdbw=fdbw, bdbw=None)
            DataQueries.Inhabitants.add_host(username=username,
                                             hostname=str(host_uuid),
                                             host_uuid=host_uuid,
                                             trusted_host_caps=None,
                                             dw=dw)

        return host
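
A note on the retry loop above: it regenerates the UUID until its first byte is non-zero (apparently a reserved value in this codebase). A minimal standalone sketch of the same pattern, using the stdlib uuid module in place of the project's gen_uuid()/HostUUID helpers:

import uuid

def gen_nonzero_uuid():
    # Retry until the first byte is non-zero; each retry is needed
    # with probability 1/256, so the loop almost always runs once.
    while True:
        u = uuid.uuid4()
        if u.bytes[0:1] != b'\x00':  # works on both Python 2 and 3
            return u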
Example #2
def add_trusted_host(arguments):
    if len(arguments) < 3:
        cli_error('You must pass at least the user name, password digest '
                      'and the host UUID!')
    else:
        username = str(arguments.popleft())
        digest = str(arguments.popleft())
        host_uuid = try_parse_uuid(arguments.popleft())

        node_map = proceed_with_node()

        if node_map is not None:
            # Finally, add user
            NodeApp.add_user(UserGroupUUID.safe_cast_uuid(gen_uuid()),
                             username=str(username),
                             digest=digest,
                             is_trusted=True)
            _for_storage = True
            _for_restore = False
            NodeApp.add_host(username=username,
                             hostname='Trusted: {}'.format(host_uuid),
                             host_uuid=host_uuid,
                             trusted_host_tuple=(_for_storage,
                                                 _for_restore))
            NodeApp.change_user(username, digest)
            print(u'Added Trusted Host {!r}'.format(username))
Example #3
    def create_new_host_for_user(self, username):
        """
        Implements the @abstractmethod from
        C{AbstractUsersEnabledPeerBookMixin}.

        @rtype: Host
        """
        uuid_created = False

        while not uuid_created:
            host_uuid = HostUUID.safe_cast_uuid(gen_uuid())
            # Ensure it doesn't start with 0x00 byte
            uuid_created = (host_uuid.bytes[0] != '\x00')

        host = Host(
            uuid=host_uuid,
            urls=[],
            # Host-specific
            name=str(host_uuid),
            user=self.get_user_by_name(username))

        with self._rdbw_factory() as rdbw, self.__fdbw_factory() as fdbw:
            dw = DataWrapper(rdbw=rdbw, fdbw=fdbw, bdbw=None)
            DataQueries.Inhabitants.add_host(username=username,
                                             hostname=str(host_uuid),
                                             host_uuid=host_uuid,
                                             trusted_host_caps=None,
                                             dw=dw)

        return host
Example #4
def __add_new_regular_user(username, arguments):
    if not arguments:
        cli_error('No digest string passed!')
    else:
        # Parse digest
        digest_str = arguments.popleft()
        try:
            digest = '{:040x}'.format(int(digest_str, 16))
        except ValueError:
            cli_error('The argument "{}" is not a valid hexadecimal digest!'
                          .format(digest_str))
        if len(digest) != 40:
            cli_error('The digest argument should be a hexadecimal number '
                          'up to 40 hexadigits long rather than {}!'
                          .format(digest))

        # Parse group UUID (optional)
        try:
            group_uuid = UUID(arguments[0])
            # Parsed successfully
            arguments.popleft()
        except (IndexError, ValueError):
            group_uuid = gen_uuid()

        # Finally, add user
        NodeApp.add_user(UserGroupUUID.safe_cast_uuid(group_uuid),
                         username=str(username),
                         digest=digest,
                         is_trusted=False)
        print(u'Added user "{}"'.format(username))
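
The '{:040x}' format both validates and canonicalizes the digest: int(digest_str, 16) rejects non-hex input, and the format lower-cases and zero-pads to 40 hex digits, the length of a SHA-1 hex digest (presumably what the password digest is here). A quick illustration with hypothetical inputs:

import hashlib

# A SHA-1 hex digest is exactly 40 hex digits, matching the
# len(digest) != 40 check above:
digest = hashlib.sha1(b'user:realm:password').hexdigest()
assert len(digest) == 40

# Arbitrary hex input is lower-cased and zero-padded the same way:
assert '{:040x}'.format(int('ABC', 16)) == '0' * 37 + 'abc'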
Example #5
def greet_user(arguments):
    node_map = proceed_with_node()

    if node_map is not None:
        if not arguments:
            cli_error('You must pass at least 1 argument to this command: '
                          'the name of the user.')
        else:
            username_str = arguments.popleft()
            with ds.FDB() as fdbw:
                FDBQueries.UserMessages.add_message(
                    username=str(username_str),
                    key='user_greeting {}'.format(gen_uuid()),
                    body={
                        'c': ('<p class="bold">{username}, welcome to our '
                              'system</p>'
                              '<p>This is a test message.</p>').format(
                                 username=username_str),
                        'en': ('<p class="bold">{username}, welcome to our '
                               'system</p>'
                               '<p>This is a test message.</p>').format(
                                  username=username_str),
                        'ru': (u'<p class="bold">Привет, {username}</p>'
                               u'<p>Это тестовое сообщение.</p>').format(
                                  username=username_str)
                    },
                    fdbw=fdbw)
            print(u'The greeting message has been sent to "{}"'
                      .format(username_str))
Example #6
    def create_new_host_for_user(self, username):
        """
        Implements the @abstractmethod from
        C{AbstractUsersEnabledPeerBookMixin}.

        @rtype: Host
        """
        super(TrustedPeerBookInMemory, self).create_new_host_for_user(username)
        uuid_created = False

        while not uuid_created:
            host_uuid = gen_uuid()
            # Ensure it doesn't start with 0x00 byte
            uuid_created = (host_uuid.bytes[0] != '\x00')

        host = HostAtNode(
            uuid=host_uuid,
            urls=[],
            # Host-specific
            name=str(host_uuid),
            user=self.get_user_by_name(username),
            # HostAtNode-specific
            last_seen=None)
        self[host_uuid] = host
        return host
Example #7
def add_group(arguments):
    node_map = proceed_with_node()

    if node_map is not None:
        if len(arguments) < 1:
            cli_error('You must pass at least 1 argument to this command: '
                          'the name of the group.')
        else:
            groupname_str = arguments.popleft()

            # Parse group UUID (optional)
            try:
                group_uuid = UUID(arguments[0])
                # Parsed successfully
                arguments.popleft()
            except (IndexError, ValueError):
                group_uuid = gen_uuid()

            user_group = UserGroup(uuid=UserGroupUUID.safe_cast_uuid(
                                            group_uuid),
                                   name=str(groupname_str),
                                   private=False,
                                   enc_key=gen_rand_key())
            NodeApp.add_ugroup(user_group)
            print(u'Added group "{}" with UUID {}'
                      .format(groupname_str, group_uuid))
Example #8
File: cli.py Project: shvar/redfs
def select_files(arguments):
    """
    Generate digest for the username.
    """
    if len(arguments) < 3:
        cli_error('The host UUID, the backup name, and at least one directory '
                      'with the files should be passed!')
    else:
        my_uuid, ds_name = (try_parse_uuid(arguments.popleft()),
                            arguments.popleft())

        proceed_with_host_uuid_cli(my_uuid)

        paths = __get_all_remaining_args(arguments)

        if not paths:
            cli_error('No paths passed!')
        else:
            host_app = UHostApp(my_uuid,
                                uhost_settings.detect_edition(),
                                __create_chunk_storage())
            ugroup_uuid = host_app.host.user.base_group.uuid
            path_map = {k: {'f+': ['all'], 'f-': [], 'stat': safe_stat(k)}
                            for k in paths}

            host_app.select_paths_for_backup(ds_name=ds_name,
                                             ds_uuid=gen_uuid(),
                                             ugroup_uuid=ugroup_uuid,
                                             sync=False,
                                             paths_map=path_map)
Example #9
File: cli.py Project: shvar/redfs
def select_files(arguments):
    """
    Generate digest for the username.
    """
    if len(arguments) < 3:
        cli_error('The host UUID, the backup name, and at least one directory '
                  'with the files should be passed!')
    else:
        my_uuid, ds_name = (try_parse_uuid(arguments.popleft()),
                            arguments.popleft())

        proceed_with_host_uuid_cli(my_uuid)

        paths = __get_all_remaining_args(arguments)

        if not paths:
            cli_error('No paths passed!')
        else:
            host_app = UHostApp(my_uuid, uhost_settings.detect_edition(),
                                __create_chunk_storage())
            ugroup_uuid = host_app.host.user.base_group.uuid
            path_map = {
                k: {
                    'f+': ['all'],
                    'f-': [],
                    'stat': safe_stat(k)
                }
                for k in paths
            }

            host_app.select_paths_for_backup(ds_name=ds_name,
                                             ds_uuid=gen_uuid(),
                                             ugroup_uuid=ugroup_uuid,
                                             sync=False,
                                             paths_map=path_map)
Example #10
    def _dummy_schedule_for_time(cls, dt):
        """
        Create a dummy schedule item, with only the datetime field sensible,
        for various sorting purposes.

        @param dt: Datetime for the schedule item, assumed naive
                   (but may be non-naive as well).
        @type dt: datetime
        """
        naive_dt = dt if is_naive_dt(dt) \
                      else dt.astimezone(pytz.utc).replace(tzinfo=None)

        return cls(host_uuid=gen_uuid(),
                   uuid=gen_uuid(),
                   name='',
                   period='once',
                   next_backup_datetime=naive_dt,
                   tz_info=pytz.utc,
                   paths={})
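
The naive_dt expression normalizes any input to naive UTC: a naive datetime is assumed to already be UTC, an aware one is converted and stripped of its tzinfo. A self-contained sketch of the same conversion (assuming is_naive_dt is essentially a tzinfo check):

from datetime import datetime
import pytz

def to_naive_utc(dt):
    # Naive datetimes are assumed to already be in UTC;
    # aware ones are converted to UTC, then tzinfo is dropped.
    if dt.tzinfo is None:
        return dt
    return dt.astimezone(pytz.utc).replace(tzinfo=None)

# E.g. 14:00 EET (UTC+2) becomes naive 12:00:
aware = pytz.timezone('Europe/Kiev').localize(datetime(2012, 11, 5, 14, 0))
assert to_naive_utc(aware) == datetime(2012, 11, 5, 12, 0)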
Example #12
            def test_from_uuid(self):
                u = gen_uuid()
                u_as_a = TestChildA._from_uuid(u)
                u_as_aa = TestChildAA._from_uuid(u)
                u_as_b = TestChildB._from_uuid(u)
                u_as_bb = TestChildBB._from_uuid(u)

                self.assertTrue(all(i.hex == u.hex
                                    for i in (u_as_a, u_as_aa,
                                              u_as_b, u_as_bb)))
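
The assertion checks that casting one UUID value into any of the subclasses preserves its value. A plausible sketch of what such a _from_uuid classmethod looks like (the project's actual implementation is not shown in these examples):

import uuid

class TypedUUID(uuid.UUID):
    @classmethod
    def _from_uuid(cls, u):
        # Re-wrap the existing value in the subclass rather than
        # generating a new one, so the hex stays identical.
        return cls(bytes=u.bytes)

class ChildA(TypedUUID):
    pass

u = uuid.uuid4()
assert ChildA._from_uuid(u).hex == u.hex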
Example #13
    def __poll_restore_requests_in_thread(self):
        """Perform another iteration of polling the restore requests."""
        assert not in_main_thread()

        poll_uuid = gen_uuid()
        logger.debug('Polling restore requests (%s)', poll_uuid)

        restore_request = True
        while restore_request is not None:
            with ds.FDB() as fdbw:
                restore_request = \
                    FDBQueries.RestoreRequests \
                              .atomic_start_oldest_restore_request(fdbw=fdbw)

            logger.debug('Poll (%s) returned %r', poll_uuid, restore_request)
            if restore_request is not None:
                # We do indeed have some restore request that needs processing.

                # Create new "virtual" dataset with all the data
                # to be restored.
                with db.RDB() as rdbw:
                    new_ds_uuid = \
                        Queries.Datasets.restore_files_to_dataset_clone(
                            restore_request.base_ds_uuid,
                            restore_request.paths,
                            restore_request.ts_start,
                            rdbw)

                # Now we know the new dataset to be restored.
                # Btw, write it into the docstore.
                # Doesn't need to be atomic, as only a single node
                # may be processing it at a time.
                with ds.FDB() as fdbw:
                    FDBQueries.RestoreRequests.set_ds_uuid(
                        _id=restore_request._id,
                        new_ds_uuid=new_ds_uuid,
                        fdbw=fdbw)

                # After creating the dataset, let's restore it to all hosts
                # which are alive.
                _syncer = self.__server_process.app.syncer
                _syncer.restore_dataset_to_lacking_hosts(
                    me=self.__server_process.me,
                    host=None,
                    ds_uuid=new_ds_uuid)

        logger.debug('Polling restore requests (%s) - done', poll_uuid)
Example #15
    def __create_some_dummy_chunks(self, how_many):
        """
        Create some dummy chunks (in FS and in DB).
        Yields the UUIDs of dummy chunks after creation of each one.

        @param how_many: How many dummy chunks to create.

        @rtype: col.Iterable
        """
        with self.__chunk_op_lock:
            logger.debug('Creating %i new dummy chunk(s)', how_many)

            # One by one, adding the dummy chunks
            for i in xrange(how_many):
                dummy_chunk_uuid = gen_uuid()
                HostQueries.HostChunks.create_dummy_chunk(dummy_chunk_uuid)
                fallocate(self.__get_chunk_file_path(dummy_chunk_uuid,
                                                     is_dummy=True),
                          0x100000)
                yield dummy_chunk_uuid
            logger.debug('Created %i dummy chunk(s)', how_many)
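
fallocate() here presumably preallocates the dummy chunk file (0x100000 bytes, i.e. 1 MiB) without writing any content. A minimal portable stand-in for such a helper:

def fallocate(path, size):
    # Create the file if missing and extend it to `size` bytes;
    # on most filesystems the extension is sparse, hence cheap.
    with open(path, 'ab') as f:
        f.truncate(size)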
Example #16
    def create_new_host_for_user(self, username):
        """
        Implements the @abstractmethod from
        C{AbstractUsersEnabledPeerBookMixin}.

        @rtype: Host
        """
        super(TrustedPeerBookInMemory, self).create_new_host_for_user(username)
        uuid_created = False

        while not uuid_created:
            host_uuid = gen_uuid()
            # Ensure it doesn't start with 0x00 byte
            uuid_created = (host_uuid.bytes[0] != '\x00')

        host = HostAtNode(uuid=host_uuid,
                          urls=[],
                          # Host-specific
                          name=str(host_uuid),
                          user=self.get_user_by_name(username),
                          # HostAtNode-specific
                          last_seen=None)
        self[host_uuid] = host
        return host
Example #17
    )
    # consists_of(_vfiles, RelVirtualFile)

    # Group RelVirtualFile's by rel_dir
    _files_grouped_by_rel_dir = (
        (rvf for rvf in per_rel_dir) for rel_dir, per_rel_dir in sorted_groupby(_vfiles, attrgetter("rel_dir"))
    )

    paths_map = {
        base_dir: {
            "ifiles": _files_grouped_by_rel_dir,
            "stat": fake_stat(isdir=True, atime=upload_time, mtime=upload_time, ctime=upload_time),
        }
    }

    ds_uuid = DatasetUUID.safe_cast_uuid(gen_uuid())
    dataset = DatasetOnVirtualFiles.from_paths(
        ds_name, ds_uuid, group_uuid, sync, paths_map, upload_time, cryptographer
    )

    assert dataset is not None

    thosts = list(TrustedQueries.HostAtNode.get_all_trusted_hosts(for_storage=True, rdbw=rdbw))

    logger.debug("Uploading dataset %r,... like, to %r", dataset, thosts)

    # Use group_uuid as host_uuid
    fake_host_uuid = None

    dummy_ds_uuid = Queries.Datasets.create_dataset_for_backup(fake_host_uuid, dataset, rdbw)
Example #18
             for rel_dir, per_rel_dir
                 in sorted_groupby(_vfiles, attrgetter('rel_dir')))

    paths_map = {
        base_dir: {
            'ifiles': _files_grouped_by_rel_dir,
            'stat': fake_stat(isdir=True,
                              atime=upload_time,
                              mtime=upload_time,
                              ctime=upload_time)
        }
    }

    ds_uuid = DatasetUUID.safe_cast_uuid(gen_uuid())
    dataset = DatasetOnVirtualFiles.from_paths(ds_name, ds_uuid, group_uuid,
                                               sync, paths_map, upload_time,
                                               cryptographer)

    assert dataset is not None

    thosts = list(
        TrustedQueries.HostAtNode.get_all_trusted_hosts(for_storage=True,
                                                        rdbw=rdbw))

    logger.debug('Uploading dataset %r,... like, to %r', dataset, thosts)

    # Use group_uuid as host_uuid
    fake_host_uuid = None
Example #19
def before_request():
    g.request_id = gen_uuid()
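
This is the usual Flask pattern of tagging every incoming request with a unique id for log correlation. A minimal runnable sketch of how such a hook gets wired up (the app itself is an assumption, not taken from the project):

import uuid

from flask import Flask, g

app = Flask(__name__)

@app.before_request
def before_request():
    # One fresh id per request; any handler or logger can read it back.
    g.request_id = uuid.uuid4()

@app.route('/')
def index():
    return 'handled request {}'.format(g.request_id)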
Example #20
    def __backup_some_phys_files(self,
                                 base_dir,
                                 files,
                                 ugroup,
                                 __do_start_backup=True):
        r"""Given some files, create a new dataset and start to backup them.

        >>> # ugroup = UserGroup(
        >>> #     uuid=UserGroupUUID('00000000-bbbb-0000-0000-000000000001'),
        >>> #     name='AlphA',
        >>> #     private=True,
        >>> #     enc_key='\x01\xe6\x13\xdab)\xd2n\xd6\xafTH\x03h\x02\x12'
        >>> #             '\x17D\x1a\xeb\x8b6\xc0\x9b\xa6\x7f\xcc\x06N\xcf'
        >>> #             '\x8b\xcd'
        >>> # )

        >>> # __backup_some_phys_files(
        >>> #     base_dir=u'/home/john/FreeBrie',
        >>> #     files=[
        >>> #         LocalPhysicalFileStateRel(
        >>> #             rel_dir='',
        >>> #             rel_file=u'f1.mp3',
        >>> #             size=13829879,
        >>> #             time_changed=datetime(2012, 11, 5, 12,12,41,904430)),
        >>> #         LocalPhysicalFileStateRel(
        >>> #             rel_dir='',
        >>> #             rel_file=u'f2.avi',
        >>> #             size=3522710,
        >>> #             time_changed=datetime(2012, 11, 5, 12,12,41,988433)),
        >>> #         LocalPhysicalFileStateRel(
        >>> #             rel_dir=u'a/b',
        >>> #             rel_file=u'bbb',
        >>> #             size=4,
        >>> #             time_changed=datetime(2012, 10, 11, 15, 33, 42, 19808)),
        >>> #         LocalPhysicalFileStateRel(
        >>> #             rel_dir=u'a/b/c',
        >>> #             rel_file=u'ccc',
        >>> #             size=4,
        >>> #             time_changed=datetime(2012, 10, 11, 15, 33, 41, 979807))
        >>> #     ],
        >>> #     ugroup=ugroup)

        @todo: complete the unit test, which is half-done!

        @param base_dir: the directory being backed up.
        @type base_dir: basestring

        @param files: the iterable over the files which should be backed up.
            Contains C{LocalPhysicalFileStateRel} objects.
            The caller should ensure that C{files} is non-empty!
        @type files: col.Iterable

        @type ugroup: UserGroup

        @return: the created dataset (if succeeded).
        @rtype: DatasetOnPhysicalFiles, NoneType
        """
        logger.debug('__backup_some_phys_files(%r, %r)', base_dir, ugroup)

        # Group files by rel_dir; then ignore base_dir,
        # keep only rel_dir, rel_file, size and time_changed
        files_grouped_by_rel_dir = \
            ((RelVirtualFile(rel_dir=f.rel_dir,
                             filename=f.rel_file,
                             # If we can read real stat, read it;
                             # otherwise we'll emulate it with fake_stat
                             stat=coalesce(os_ex.safe_stat(  # real stat
                                               os.path.join(base_dir,
                                                            f.rel_path)),
                                           os_ex.fake_stat(  # deleted file
                                               st_mode=None,
                                               atime=f.time_changed,
                                               mtime=f.time_changed,
                                               ctime=f.time_changed,
                                               size=None)),
                             stat_getter=lambda f=f:
                                             os_ex.safe_stat(
                                                 os.path.join(base_dir,
                                                              f.rel_path)),
                             file_getter=lambda f=f:
                                             open(os.path.join(base_dir,
                                                               f.rel_path),
                                                  'rb'))
                 for f in per_rel_dir)
                     for rel_dir, per_rel_dir
                         in sorted_groupby(files, attrgetter('rel_dir')))

        # Example:
        # files_grouped_by_rel_dir = [
        #     [
        #         RelVirtualFile(...),
        #         RelVirtualFile(...),
        #         RelVirtualFile(...)
        #     ],
        #     [
        #         RelVirtualFile(...)
        #     ],
        #     [
        #         RelVirtualFile(...)
        #     ]
        # ]
        _path_map = {
            base_dir: {
                'ifiles': files_grouped_by_rel_dir,
                'stat': os_ex.safe_stat(base_dir)
            }
        }

        ds_uuid = DatasetUUID.safe_cast_uuid(gen_uuid())
        ds = self.select_paths_for_backup(ds_name='',
                                          ds_uuid=ds_uuid,
                                          ugroup_uuid=ugroup.uuid,
                                          sync=True,
                                          paths_map=_path_map)
        if ds is not None and __do_start_backup:
            self.start_backup(ds_uuid)

        return ds
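
sorted_groupby() (also used in Examples #17, #18 and #25) is presumably a thin wrapper over itertools.groupby, which only groups adjacent equal keys and therefore needs a pre-sort by the same key:

from itertools import groupby

def sorted_groupby(iterable, key):
    # groupby() merges only consecutive items with equal keys,
    # so sort by the same key first.
    return groupby(sorted(iterable, key=key), key=key)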
Example #21
def create_directory(base_dir,
                     rel_path,
                     ds_name,
                     group_uuid,
                     sync,
                     cryptographer,
                     rdbw,
                     ospath=posixpath):
    """Create new directory in the cloud.

    @note: all the paths should be in POSIX format.

    @param base_dir: the base directory path (in the dataset) where to upload
        the file.
    @type base_dir: basestring

    @param rel_path: the name of directory which should be created.
    @type rel_path: basestring

    @param group_uuid: the UUID of the user group, for which the file
        should be bound.
    @type group_uuid: UserGroupUUID

    @param sync: whether the created dataset should be considered a
        "sync dataset".
    @type sync: bool

    @param rdbw: RelDB wrapper.
    @type rdbw: DatabaseWrapperSQLAlchemy

    @return: the UUID of newly created dataset.
    @rtype: DatasetUUID
    """
    upload_time = datetime.utcnow()

    # For each FileToUpload, create fake stat
    dir_fake_stat = fake_stat(isdir=True,
                              atime=upload_time,
                              mtime=upload_time,
                              ctime=upload_time,
                              size=None)

    # Turn the original FileToUpload's to RelVirtualFile's
    _vfile = RelVirtualFile(
        rel_dir=ospath.dirname(rel_path),
        filename=ospath.basename(rel_path),
        stat=dir_fake_stat,
        stat_getter=lambda dir_fake_stat=dir_fake_stat: dir_fake_stat)
    # isinstance(ftu, FileToUpload)

    # Group RelVirtualFile's by rel_dir
    # _files_grouped_by_rel_dir = \
    #     ((rvf for rvf in per_rel_dir)
    #          for rel_dir, per_rel_dir
    #          in sorted_groupby(_vfiles, attrgetter('rel_dir')))

    paths_map = {
        base_dir: {
            'ifiles': [[_vfile]],
            'stat': fake_stat(isdir=True,
                              atime=upload_time,
                              mtime=upload_time,
                              ctime=upload_time)
        }
    }

    ds_uuid = DatasetUUID.safe_cast_uuid(gen_uuid())
    dataset = DatasetOnVirtualFiles.from_paths(ds_name, ds_uuid, group_uuid,
                                               sync, paths_map, upload_time,
                                               cryptographer)

    assert dataset is not None

    # Use group_uuid as host_uuid
    fake_host_uuid = None

    dummy_ds_uuid = Queries.Datasets.create_dataset_for_backup(
        fake_host_uuid, dataset, rdbw)

    dataset.time_completed = datetime.utcnow()

    # That's all, folks!
    Queries.Datasets.update_dataset(fake_host_uuid, dataset, rdbw)

    return ds_uuid
Example #22
File: cli.py Project: shvar/redfs
        print('For username {0}, the digest is {1}'
                  .format(username,
                          crypto.generate_digest(
                              str(username),
                              password,
                              common_settings.HTTP_AUTH_REALM_NODE)))


@cli_command(
    ('-dd', '--delete-dataset'),
    'Delete a dataset from the node.\n'
        'Arg. 1: base host UUID.\n'
        'Arg. 2: dataset UUID.\n'
        'Example:\n'
        '  {bin} {cmd} 00000000-1111-0000-0000-000000000001 '
        + str(gen_uuid()))
def delete_dataset(arguments):
    """
    Delete the dataset from the Node.
    """
    global _LOG_ACTIONS

    if len(arguments) < 2:
        cli_error('At least the host UUID and the dataset UUID '
                      'should be passed!')
    else:
        my_uuid, ds_uuid = (try_parse_uuid(arguments.popleft()),
                            try_parse_uuid(arguments.popleft()))

        print('Trying to delete the dataset {}'.format(ds_uuid))
Example #23
    def setUp(self):
        self.u = gen_uuid()
        self.child_a = TestChildA(self.u.hex)
        self.child_aa = TestChildAA(self.u.hex)
        self.child_b = TestChildB(self.u.hex)
Example #24
File: cli.py Project: shvar/redfs
                  'passed as the arguments!')
    else:
        username = arguments.popleft()
        password = _get_password_from_arguments(arguments)
        print('For username {0}, the digest is {1}'.format(
            username,
            crypto.generate_digest(str(username), password,
                                   common_settings.HTTP_AUTH_REALM_NODE)))


@cli_command(
    ('-dd', '--delete-dataset'), 'Delete a dataset from the node.\n'
    'Arg. 1: base host UUID.\n'
    'Arg. 2: dataset UUID.\n'
    'Example:\n'
    '  {bin} {cmd} 00000000-1111-0000-0000-000000000001 ' + str(gen_uuid()))
def delete_dataset(arguments):
    """
    Delete the dataset from the Node.
    """
    global _LOG_ACTIONS

    if len(arguments) < 2:
        cli_error('At least the host UUID and the dataset UUID '
                  'should be passed!')
    else:
        my_uuid, ds_uuid = (try_parse_uuid(arguments.popleft()),
                            try_parse_uuid(arguments.popleft()))

        print('Trying to delete the dataset {}'.format(ds_uuid))
Example #25
    def __backup_some_phys_files(self, base_dir, files, ugroup,
                                 __do_start_backup=True):
        r"""Given some files, create a new dataset and start to backup them.

        >>> # ugroup = UserGroup(
        >>> #     uuid=UserGroupUUID('00000000-bbbb-0000-0000-000000000001'),
        >>> #     name='AlphA',
        >>> #     private=True,
        >>> #     enc_key='\x01\xe6\x13\xdab)\xd2n\xd6\xafTH\x03h\x02\x12'
        >>> #             '\x17D\x1a\xeb\x8b6\xc0\x9b\xa6\x7f\xcc\x06N\xcf'
        >>> #             '\x8b\xcd'
        >>> # )

        >>> # __backup_some_phys_files(
        >>> #     base_dir=u'/home/john/FreeBrie',
        >>> #     files=[
        >>> #         LocalPhysicalFileStateRel(
        >>> #             rel_dir='',
        >>> #             rel_file=u'f1.mp3',
        >>> #             size=13829879,
        >>> #             time_changed=datetime(2012, 11, 5, 12,12,41,904430)),
        >>> #         LocalPhysicalFileStateRel(
        >>> #             rel_dir='',
        >>> #             rel_file=u'f2.avi',
        >>> #             size=3522710,
        >>> #             time_changed=datetime(2012, 11, 5, 12,12,41,988433)),
        >>> #         LocalPhysicalFileStateRel(
        >>> #             rel_dir=u'a/b',
        >>> #             rel_file=u'bbb',
        >>> #             size=4,
        >>> #             time_changed=datetime(2012, 10, 11, 15, 33, 42, 19808)),
        >>> #         LocalPhysicalFileStateRel(
        >>> #             rel_dir=u'a/b/c',
        >>> #             rel_file=u'ccc',
        >>> #             size=4,
        >>> #             time_changed=datetime(2012, 10, 11, 15, 33, 41, 979807))
        >>> #     ],
        >>> #     ugroup=ugroup)

        @todo: complete the unit test, which is half-done!

        @param base_dir: the directory being backed up.
        @type base_dir: basestring

        @param files: the iterable over the files which should be backed up.
            Contains C{LocalPhysicalFileStateRel} objects.
            The caller should ensure that C{files} is non-empty!
        @type files: col.Iterable

        @type ugroup: UserGroup

        @return: the created dataset (if succeeded).
        @rtype: DatasetOnPhysicalFiles, NoneType
        """
        logger.debug('__backup_some_phys_files(%r, %r)',
                     base_dir, ugroup)

        # Group files by rel_dir; then ignore base_dir,
        # keep only rel_dir, rel_file, size and time_changed
        files_grouped_by_rel_dir = \
            ((RelVirtualFile(rel_dir=f.rel_dir,
                             filename=f.rel_file,
                             # If we can read real stat, read it;
                             # otherwise we'll emulate it with fake_stat
                             stat=coalesce(os_ex.safe_stat(  # real stat
                                               os.path.join(base_dir,
                                                            f.rel_path)),
                                           os_ex.fake_stat(  # deleted file
                                               st_mode=None,
                                               atime=f.time_changed,
                                               mtime=f.time_changed,
                                               ctime=f.time_changed,
                                               size=None)),
                             stat_getter=lambda f=f:
                                             os_ex.safe_stat(
                                                 os.path.join(base_dir,
                                                              f.rel_path)),
                             file_getter=lambda f=f:
                                             open(os.path.join(base_dir,
                                                               f.rel_path),
                                                  'rb'))
                 for f in per_rel_dir)
                     for rel_dir, per_rel_dir
                         in sorted_groupby(files, attrgetter('rel_dir')))

        # Example:
        # files_grouped_by_rel_dir = [
        #     [
        #         RelVirtualFile(...),
        #         RelVirtualFile(...),
        #         RelVirtualFile(...)
        #     ],
        #     [
        #         RelVirtualFile(...)
        #     ],
        #     [
        #         RelVirtualFile(...)
        #     ]
        # ]
        _path_map = {base_dir: {'ifiles': files_grouped_by_rel_dir,
                                'stat': os_ex.safe_stat(base_dir)}}

        ds_uuid = DatasetUUID.safe_cast_uuid(gen_uuid())
        ds = self.select_paths_for_backup(ds_name='',
                                          ds_uuid=ds_uuid,
                                          ugroup_uuid=ugroup.uuid,
                                          sync=True,
                                          paths_map=_path_map)
        if ds is not None and __do_start_backup:
            self.start_backup(ds_uuid)

        return ds
Example #26
def create_directory(base_dir, rel_path, ds_name, group_uuid, sync, cryptographer, rdbw, ospath=posixpath):
    """Create new directory in the cloud.

    @note: all the paths should be in POSIX format.

    @param base_dir: the base directory path (in the dataset) where to upload
        the file.
    @type base_dir: basestring

    @param rel_path: the name of directory which should be created.
    @type rel_path: basestring

    @param group_uuid: the UUID of the user group, for which the file
        should be bound.
    @type group_uuid: UserGroupUUID

    @param sync: whether the created dataset should be considered a
        "sync dataset".
    @type sync: bool

    @param rdbw: RelDB wrapper.
    @type rdbw: DatabaseWrapperSQLAlchemy

    @return: the UUID of newly created dataset.
    @rtype: DatasetUUID
    """
    upload_time = datetime.utcnow()

    # For each FileToUpload, create fake stat
    dir_fake_stat = fake_stat(isdir=True, atime=upload_time, mtime=upload_time, ctime=upload_time, size=None)

    # Turn the original FileToUpload's to RelVirtualFile's
    _vfile = RelVirtualFile(
        rel_dir=ospath.dirname(rel_path),
        filename=ospath.basename(rel_path),
        stat=dir_fake_stat,
        stat_getter=lambda dir_fake_stat=dir_fake_stat: dir_fake_stat,
    )
    # isinstance(ftu, FileToUpload)

    # Group RelVirtualFile's by rel_dir
    # _files_grouped_by_rel_dir = \
    #     ((rvf for rvf in per_rel_dir)
    #          for rel_dir, per_rel_dir
    #          in sorted_groupby(_vfiles, attrgetter('rel_dir')))

    paths_map = {
        base_dir: {
            "ifiles": [[_vfile]],
            "stat": fake_stat(isdir=True, atime=upload_time, mtime=upload_time, ctime=upload_time),
        }
    }

    ds_uuid = DatasetUUID.safe_cast_uuid(gen_uuid())
    dataset = DatasetOnVirtualFiles.from_paths(
        ds_name, ds_uuid, group_uuid, sync, paths_map, upload_time, cryptographer
    )

    assert dataset is not None

    # Use group_uuid as host_uuid
    fake_host_uuid = None

    dummy_ds_uuid = Queries.Datasets.create_dataset_for_backup(fake_host_uuid, dataset, rdbw)

    dataset.time_completed = datetime.utcnow()

    # That's all, folks!
    Queries.Datasets.update_dataset(fake_host_uuid, dataset, rdbw)

    return ds_uuid