Example #1
    def __test_web_upload_remove(op_upload, arguments):
        """Common code for CLI operations for web upload/web remove."""
        op_name = 'upload' if op_upload else 'remove'

        if len(arguments) < 2:
            cli_error(u'You must pass at least 2 arguments to this command: '
                      u'the UUID of the group, and at least '
                      u'one filename to {op}.'
                          .format(op=op_name))
        else:
            group_uuid = UserGroupUUID.safe_cast_uuid(
                            try_parse_uuid(arguments.popleft()))

            file_paths = []
            while arguments:
                file_paths.append(arguments.popleft())

            print(u'{} file paths for group {}:\n{}'.format(
                      op_name.capitalize(), group_uuid, '\n'.join(file_paths)))

            node_map = proceed_with_node()

            chunk_storage = ChunkStorageBigDB(bdbw_factory=ds.BDB)

            with db.RDB() as rdbw:
                __test_upload_remove_files(group_uuid, file_paths,
                                           chunk_storage, op_upload, rdbw)
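
A minimal usage sketch for the helper above (hedged: only the popleft()-based argument handling and the op_upload flag come from the snippet; the queue contents are placeholders). The first token is the group UUID, the remaining tokens are file paths.

from collections import deque

# Hypothetical argument queue; collections.deque provides the popleft() used above.
upload_args = deque(['11111111-2222-3333-4444-555555555555',  # group UUID (placeholder)
                     '/tmp/a.txt',
                     '/tmp/b.txt'])                            # files to upload
# __test_web_upload_remove(op_upload=True, arguments=upload_args)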
Example #2
def add_trusted_host(arguments):
    if len(arguments) < 3:
        cli_error('You must pass at least the user name, password digest '
                      'and the host UUID!')
    else:
        username = str(arguments.popleft())
        digest = str(arguments.popleft())
        host_uuid = try_parse_uuid(arguments.popleft())

        node_map = proceed_with_node()

        if node_map is not None:
            # Finally, add user
            NodeApp.add_user(UserGroupUUID.safe_cast_uuid(gen_uuid()),
                             username=str(username),
                             digest=digest,
                             is_trusted=True)
            _for_storage = True
            _for_restore = False
            NodeApp.add_host(username=username,
                             hostname='Trusted: {}'.format(host_uuid),
                             host_uuid=host_uuid,
                             trusted_host_tuple=(_for_storage,
                                                 _for_restore))
            NodeApp.change_user(username, digest)
            print(u'Added Trusted Host {!r}'.format(username))
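
A hedged usage sketch (placeholder values only): arguments is assumed to be a deque of CLI tokens in the order user name, password digest, host UUID.

from collections import deque

trusted_args = deque(['alice',
                      'da39a3ee5e6b4b0d3255bfef95601890afd80709',   # password digest (placeholder)
                      '11111111-2222-3333-4444-555555555555'])      # host UUID (placeholder)
# add_trusted_host(trusted_args)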
Example #3
def __add_new_regular_user(username, arguments):
    if not arguments:
        cli_error('No digest string passed!')
    else:
        # Parse digest
        digest_str = arguments.popleft()
        try:
            digest = '{:040x}'.format(int(digest_str, 16))
        except ValueError:
            cli_error('The argument "{}" is not a valid hexadecimal digest!'
                          .format(digest_str))
        if len(digest) != 40:
            cli_error('The digest argument should be a hexadecimal number '
                          'up to 40 hexadigits long rather than {}!'
                          .format(digest))

        # Parse group UUID (optional)
        try:
            group_uuid = UUID(arguments[0])
            # Parsed successfully
            arguments.popleft()
        except (IndexError, ValueError):
            group_uuid = gen_uuid()

        # Finally, add user
        NodeApp.add_user(UserGroupUUID.safe_cast_uuid(group_uuid),
                         username=str(username),
                         digest=digest,
                         is_trusted=False)
        print(u'Added user "{}"'.format(username))
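
A standalone illustration of the digest normalization above: the hexadecimal string is parsed and zero-padded to 40 hex digits (SHA-1 width), so shorter inputs pass the subsequent length check while longer ones fail it.

# Self-contained; demonstrates only the '{:040x}' formatting used above.
digest = '{:040x}'.format(int('abc123', 16))
assert len(digest) == 40
assert digest.endswith('abc123')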
Example #4
def add_user(arguments):
    node_map = proceed_with_node()

    if node_map is not None:
        if not arguments:
            cli_error('No user name passed')
        else:
            username_str = arguments.popleft()

            if arguments and arguments[0] == '--to-group':
                dummy = arguments.popleft()
                if not arguments:
                    cli_error('Attempt to add user to group, '
                                  'but no group specified!')
                else:
                    username = str(username_str)
                    group_uuid = try_parse_uuid(arguments.popleft())

                    # Let's add the user to group
                    NodeApp.add_user_to_group(
                        username=username,
                        group_uuid=UserGroupUUID.safe_cast_uuid(group_uuid))
                    print(u'Added user "{}" to the group "{}"'
                              .format(username, group_uuid))

            else:
                # Let's add the user to the system
                __add_new_regular_user(username_str, arguments)
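
A hedged sketch of the two call shapes handled above (all values are placeholders): a user name followed by a digest creates a regular user, while a user name followed by --to-group and a group UUID adds an existing user to that group.

from collections import deque

# Form 1: create a new regular user (digest parsing done by __add_new_regular_user).
# add_user(deque(['alice', 'da39a3ee5e6b4b0d3255bfef95601890afd80709']))

# Form 2: add an existing user to a group.
# add_user(deque(['alice', '--to-group', '11111111-2222-3333-4444-555555555555']))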
Example #5
def add_group(arguments):
    node_map = proceed_with_node()

    if node_map is not None:
        if len(arguments) < 1:
            cli_error('You must pass at least 1 argument to this command: '
                          'the name of the group.')
        else:
            groupname_str = arguments.popleft()

            # Parse group UUID (optional)
            try:
                group_uuid = UUID(arguments[0])
                # Parsed successfully
                arguments.popleft()
            except (IndexError, ValueError):
                group_uuid = gen_uuid()

            user_group = UserGroup(uuid=UserGroupUUID.safe_cast_uuid(
                                            group_uuid),
                                   name=str(groupname_str),
                                   private=False,
                                   enc_key=gen_rand_key())
            NodeApp.add_ugroup(user_group)
            print(u'Added group "{}" with UUID {}'
                      .format(groupname_str, group_uuid))
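
A hedged usage sketch: the group name is required and the group UUID is optional; if the second token is missing or does not parse as a UUID, a fresh UUID is generated.

from collections import deque

# add_group(deque(['backups']))                                           # UUID generated
# add_group(deque(['backups', '11111111-2222-3333-4444-555555555555']))   # UUID supplied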
Example #6
    def __restore_files(self):
        """Internal procedure which actually restores the files.

        @todo: the Fingerprint calculation should be turned into
            "file is read by blocks and then repacked into 16KiB segments";
            then recalculation of the fingerprint in case of FP mismatch
            won't be needed.
        """
        _message = self.message
        my_host = self.manager.app.host
        feature_set = self.manager.app.feature_set
        ds = _message.dataset
        wr_uuid = _message.wr_uuid
        ugroup = _message.ugroup

        restore_directory = self.__get_restore_directory()
        assert _message.sync == (wr_uuid is None) == (ds.uuid is not None), \
               (_message.sync, wr_uuid, ds)

        base_dir_id = None  # will be used later

        if restore_directory is None:
            logger.error('Do not know the restore directory')
        else:
            logger.debug('Going to restore dataset %r for %r to %r',
                         ds, ugroup, restore_directory)
            if not os.path.exists(restore_directory):
                os.makedirs(restore_directory)

            group_key = ugroup.enc_key if feature_set.per_group_encryption \
                                       else None
            cryptographer = Cryptographer(group_key=group_key,
                                          key_generator=None)

            is_whole_dataset_restored = _message.sync

            logger.debug('Restoring %s files from dataset: %r',
                         'all' if is_whole_dataset_restored else 'selected',
                         coalesce(ds, 'N/A'))

            # TODO: use the "delete time" from the LocalPhysicalFileState!
            _now = datetime.utcnow()

            # If we are syncing-in the whole dataset, we should write it
            # into the DB as a whole. The files/file_locals will be bound to it
            # so that after restore, we'll know on this Host that these states
            # are fully synced to the cloud already (in fact, they came
            # from the cloud).
            if _message.sync:
                # Let's hack into the files and substitute the base_dir.
                # TODO: do it better!
                for f in _message.files.iterkeys():
                    f.base_dir = restore_directory

                # Write the whole dataset to the DB
                _small_files = _message.files.keys()  # not iterkeys() for now!
                _dirs = {restore_directory: (_small_files, [])}

                # Given the information in the inbound message about
                # the whole dataset, store this dataset in the DB.
                dataset = DatasetWithDirectories(
                              name=ds.name,
                              sync=ds.sync,
                              directories=_dirs,
                              # TODO: transport real data
                              # from the node
                              uuid=DatasetUUID.safe_cast_uuid(ds.uuid),
                              ugroup_uuid=UserGroupUUID.safe_cast_uuid(
                                              ugroup.uuid),
                              time_started=ds.time_started,
                              time_completed=_now)

                with db.RDB() as rdbw:
                    # Do we already have the dataset?
                    _ds_in_progress = \
                        HostQueries.HostDatasets.get_my_ds_in_progress(
                            host_uuid=my_host.uuid,
                            ds_uuid=dataset.uuid,
                            rdbw=rdbw)

                    if _ds_in_progress is None:
                        # We don't have it, insert.
                        dummy_ds_uuid = \
                            HostQueries.HostDatasets.create_dataset_for_backup(
                                my_host.uuid, dataset, rdbw)
                        assert dummy_ds_uuid == dataset.uuid, \
                               (dummy_ds_uuid, dataset.uuid)

                    base_dir_id = \
                        HostQueries.HostFiles.add_or_get_base_directory(
                            restore_directory, ugroup.uuid, rdbw)

            error_in_any_file_occured = False

            #
            # Finally, loop over the files and restore each one
            #
            for file_, file_blocks in _message.files.iteritems():
                self.__restore_op_for_path(file_, file_blocks,
                                           is_whole_dataset_restored,
                                           base_dir_id,
                                           restore_directory,
                                           cryptographer,
                                           ds)

            # Loop over the files completed
            if is_whole_dataset_restored:
                logger.debug('Restoring %r completed, there were %s issues.',
                             ds,
                             'some' if error_in_any_file_occured else 'no')
                if not error_in_any_file_occured:
                    with db.RDB() as rdbw:
                        logger.debug('Updating %r at host %s...',
                                     ds, my_host.uuid)
                        ds_to_finish = \
                            Queries.Datasets.get_dataset_by_uuid(ds.uuid,
                                                                 my_host.uuid,
                                                                 rdbw)

                        ds_to_finish.time_completed = datetime.utcnow()
                        logger.debug('Updating %r as completed', dataset)

                        # Mark the current dataset as completed
                        # only after the response from the node is received.
                        Queries.Datasets.update_dataset(my_host.uuid,
                                                        ds_to_finish,
                                                        rdbw)

            # Everything seems ok to this moment
            with self.open_state(for_update=True) as state:
                state.ack_result_code = RestoreMessage.ResultCodes.OK
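
A standalone illustration of the invariant asserted at the top of the method (placeholder values): in sync mode there is no web-restore request UUID, and the dataset UUID must be present.

# Chained comparison: sync == (wr_uuid is None) and (wr_uuid is None) == (ds_uuid is not None).
sync, wr_uuid, ds_uuid = True, None, 'some-dataset-uuid'
assert sync == (wr_uuid is None) == (ds_uuid is not None)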