Code example #1
0
    def test_bootstrap_picks_up_user(self):
        """Bootstrap must create a USER_CREATED txlog for the user."""
        new_user = self.obj_factory.make_user()
        TransactionLog.bootstrap(new_user)

        store = get_filesync_store()
        txlog = store.find(
            TransactionLog,
            op_type=TransactionLog.OP_USER_CREATED).one()
        self.assertTxLogDetailsMatchesUserDetails(new_user, txlog)
Code example #2
0
    def test_txlog_for_new_storageuser(self):
        """Creating a StorageUser must leave a matching txlog entry."""
        factory = self.obj_factory
        new_id = factory.get_unique_integer()
        username = factory.get_unique_unicode()
        visible = factory.get_unique_unicode()

        user = StorageUser.new(self.store, new_id, username, visible)

        txlog = get_filesync_store().find(
            TransactionLog, owner_id=user.id).one()
        self.assertTxLogDetailsMatchesUserDetails(user, txlog)
Code example #3
0
    def test_bootstrap_picks_up_shares(self):
        """Bootstrap must record accepted shares as SHARE_ACCEPTED."""
        owner = self.obj_factory.make_user()
        folder = self.obj_factory.make_directory(owner)
        share = self.obj_factory.make_share(folder)
        self.store.commit()

        TransactionLog.bootstrap(owner)

        op = TransactionLog.OP_SHARE_ACCEPTED
        txlog = get_filesync_store().find(TransactionLog, op_type=op).one()
        expected = self._get_dict_with_txlog_attrs_from_share(
            share, folder, op)
        self.assert_txlog_correct(txlog, expected)
Code example #4
0
File: model.py  Project: CSRedRat/magicicada-server
 def _record_share_accepted_or_deleted(cls, share, op_type):
     """Add a txlog row describing *share* being accepted or deleted.

     The share details are serialized as JSON into extra_data; the row
     is attached to the store of the shared subtree's root node.
     """
     node = get_filesync_store().get(StorageObject, share.subtree)
     details = {
         'shared_to': share.shared_to,
         'share_id': str(share.id),
         'share_name': share.name,
         'access_level': share.access,
         'when_shared': get_epoch_secs(share.when_shared),
         'when_last_changed': get_epoch_secs(share.when_last_changed),
     }
     entry = cls(
         node.id, node.owner_id, node.volume_id, op_type, node.full_path,
         node.mimetype, generation=None,
         extra_data=json.dumps(details).decode('ascii'))
     return Store.of(node).add(entry)
Code example #5
0
File: model.py  Project: CSRedRat/magicicada-server
    def record_user_created(cls, user):
        """Add a TransactionLog row for a newly created user.

        The TransactionLog table is reused to store details of new users
        because the derived services need information about users as well
        as about their files.

        Such a row carries no node_id, volume_id, generation or path; its
        owner_id is the ID of the new user.
        """
        details = json.dumps(
            dict(name=user.username, visible_name=user.visible_name))
        entry = cls(
            None, user.id, None, cls.OP_USER_CREATED, None, None,
            extra_data=details.decode('ascii'))
        return get_filesync_store().add(entry)
Code example #6
0
    def test_over_quota(self):
        """When over quota, free space is reported as 0, never negative."""
        self.usr0.update(max_storage_bytes=2 ** 16)
        # Force an over-quota state that cannot happen through normal use.
        store = dbmanager.get_filesync_store()
        user_info = store.get(StorageUserInfo, 0)
        user_info.used_storage_bytes = 2 ** 17
        store.commit()

        @defer.inlineCallbacks
        def do_test(client):
            """Authenticate and check the reported free space."""
            yield client.dummy_authenticate("open sesame")
            result = yield client.get_free_space(request.ROOT)
            self.assertEqual(0, result.free_bytes)
            self.assertEqual(request.ROOT, result.share_id)

        return self.callback_test(do_test, add_default_callbacks=True)
Code example #7
0
 def setUp(self):
     """Prepare the fixture: object factory, store, and saved hook."""
     super(StorageDALTestCase, self).setUp()
     # Remember the original hook so it can be restored later.
     self.save_utils_set_public_uuid = utils.set_public_uuid
     self.obj_factory = DAOObjectFactory()
     self.store = get_filesync_store()
Code example #8
0
 def store(self):
     """Return a fresh store on every access.

     Deliberately not cached: caching the store could cause
     threading issues.
     """
     return get_filesync_store()
Code example #9
0
File: model.py  Project: CSRedRat/magicicada-server
    def bootstrap(cls, user):
        """Create TransactionLog rows for all of *user*'s existing state.

        Records the user itself, their live UDFs, their live public
        directories, their live files (via one bulk INSERT) and the
        shares they have that were accepted.

        Returns the total number of TransactionLog rows inserted.
        """
        store = get_filesync_store()
        cls.record_user_created(user)
        # Number of TransactionLog rows we inserted.
        rows = 1

        # One txlog row per live UDF owned by the user.
        for udf in store.find(UserVolume, owner_id=user.id,
                              status=STATUS_LIVE):
            cls.record_udf_created(udf)
            rows += 1

        # One txlog row per live public directory on a live volume.
        # If this becomes a problem it can be done as a single INSERT, but
        # we'd need to duplicate the get_public_file_url() in plpython.
        udf_join = Join(
            StorageObject,
            UserVolume, StorageObject.volume_id == UserVolume.id)
        conditions = [StorageObject.kind == StorageObject.DIRECTORY,
                      StorageObject.owner_id == user.id,
                      StorageObject.status == STATUS_LIVE,
                      StorageObject._publicfile_id != None,  # NOQA
                      UserVolume.status == STATUS_LIVE]
        dirs = store.using(udf_join).find(StorageObject, *conditions)
        for directory in dirs:
            cls.record_public_access_change(directory)
            rows += 1

        # Bulk-insert a PUT_CONTENT txlog row for every live non-directory
        # node on a live volume, building extra_data in the database.
        # XXX: If this takes too long it will get killed by the transaction
        # watcher. Need to check what's the limit we could have here.
        # Things to check:
        #  * If it still takes too long, we could find out the IDs of the
        #    people who have a lot of music/photos, run it just for them with
        #    the transaction watcher disabled and then run it for everybody
        #    else afterwards.
        query = """
            INSERT INTO txlog_transaction_log (
                node_id, owner_id, volume_id, op_type, path, generation,
                mimetype, extra_data)
            SELECT O.id, O.owner_id, O.volume_id, ?,
                   txlog_path_join(O.path, O.name), O.generation, O.mimetype,
                   txlog_get_extra_data_to_recreate_file_1(
                        kind, size, storage_key, publicfile_id,
                        public_uuid, content_hash,
                        extract(epoch from O.when_created at time zone 'UTC'),
                        extract(epoch
                                from O.when_last_modified at time zone 'UTC'),
                        UserDefinedFolder.path
                    ) as extra_data
            FROM Object as O
            JOIN UserDefinedFolder on UserDefinedFolder.id = O.volume_id
            LEFT JOIN ContentBlob on ContentBlob.hash = O.content_hash
            WHERE
                O.kind != 'Directory'
                AND O.owner_id = ?
                AND O.status = 'Live'
                AND UserDefinedFolder.status = 'Live'
            """
        params = (cls.OPERATIONS_MAP[cls.OP_PUT_CONTENT], user.id)
        rows += store.execute(query, params=params).rowcount

        # Cannot create TransactionLogs for Shares in a single INSERT like
        # above because TransactionLogs and Shares live in separate databases.
        share_join = LeftJoin(
            Share, StorageUser, Share.shared_to == StorageUser.id)
        conditions = [Share.shared_by == user.id,
                      Share.status == STATUS_LIVE,
                      Share.accepted == True]  # NOQA
        shares = get_filesync_store().using(share_join).find(
            Share, *conditions)
        for share in shares:
            cls.record_share_accepted(share)
            rows += 1

        return rows