    def test_resume_migration(self):

        class IncompleteDPC(migrate.DocumentProcessorController):

            def _processing_complete(self):
                self.document_iterator.discard_state()

        with tempdir() as tmp:
            filename = join(tmp, "file.txt")
            with replattr(migrate, "DocumentProcessorController", IncompleteDPC):
                # interrupted migration
                migrated1, skipped = MIGRATIONS[self.slug].migrate(filename)
                self.assertGreaterEqual(migrated1, self.test_size)
                self.assertFalse(skipped)

            # resumed migration: all docs already migrated, so BlobMeta records
            # exist, but should not cause errors on attempting to insert them
            migrated2, skipped = MIGRATIONS[self.slug].migrate(filename)
            self.assertEqual(migrated1, migrated2)
            self.assertFalse(skipped)

            mod.BlobMigrationState.objects.get(slug=self.slug)

        parent_ids = chain(
            (doc.id for doc in self.couch_docs),
            (doc.parent_id for doc in self.sql_docs),
        )

        # should have one blob per parent
        for parent_id in parent_ids:
            db = get_db_alias_for_partitioned_doc(parent_id)
            metas = list(BlobMeta.objects.using(db).filter(parent_id=parent_id))
            self.assertEqual(len(metas), 1, metas)
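Every example on this page uses the same helper: replattr temporarily replaces one or more attributes for the duration of a with block and restores the originals on exit, whether the target is a module-level function, a class attribute, or even an instance's __class__. As a rough mental model only, here is a minimal sketch of such a context manager; it is not the actual testil implementation, which among other things verifies that a replacement function's signature matches the original unless sigcheck=False is passed (the sketch merely accepts and ignores that flag).

from contextlib import contextmanager

@contextmanager
def replattr(*args, sigcheck=True):
    # Accept either replattr(obj, "name", value) or a series of
    # (obj, "name", value) tuples, matching both call styles seen here.
    if args and not isinstance(args[0], tuple):
        args = (args,)
    originals = []
    try:
        for obj, name, value in args:
            originals.append((obj, name, getattr(obj, name)))
            setattr(obj, name, value)
        yield
    finally:
        # Restore in reverse order so overlapping replacements unwind cleanly.
        for obj, name, original in reversed(originals):
            setattr(obj, name, original)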
Example #2
def test_get_completions():
    @async_test
    async def test(input_value, expected_result):
        server = object()
        with test_command():

            @command.command(String("value"))
            def count(editor, args):
                return result(value=args.value)

            res = await mod.get_completions(server, [input_value])
            eq(res, expected_result)

    yield test, "cm", result([item("cmd ", 0, is_completion=True)], "cm")
    yield test, "cmd", result([item("a", 4), item("b", 4)],
                              "cmd ",
                              placeholder="cmd a")
    yield test, "cmd ", result([item("a", 4), item("b", 4)],
                               "cmd ",
                               placeholder="cmd a")
    yield test, "cmd a", result([item("a", 4)], "cmd a")
    yield test, "c", result([
        item("cmd ", 0, is_completion=True),
        item("count ", 0, is_completion=True),
    ], "c")

    def err_completions(self, arg):
        raise Exception("boink!")

    with replattr(String, "get_completions", err_completions):
        yield test, "count error", result(["boink!"])
Example #3
def stdfake():
    class fake(object):
        stdout = StringIO()
        stderr = StringIO()

    with replattr((sys, "stdout", fake.stdout), (sys, "stderr", fake.stderr)):
        yield fake
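Note that stdfake yields rather than returns, so in its project it is presumably consumed through a fixture decorator. A hypothetical way to drive it directly, assuming only the code shown above:

from contextlib import contextmanager

with contextmanager(stdfake)() as fake:
    print("captured")  # written to fake.stdout while sys.stdout is replaced
    assert fake.stdout.getvalue() == "captured\n"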
Example #4
def test_copy_blob_masks_old_blob(self):
    content = BytesIO(b"fs content")
    meta = self.fsdb.put(content, meta=new_meta())
    content.seek(0)
    self.db.copy_blob(content, key=meta.key)
    self.assertEndsWith(self.fsdb.get_path(key=meta.key), "/" + meta.key)
    with replattr(self.fsdb, "get", blow_up, sigcheck=False):
        with self.assertRaises(Boom):
            self.fsdb.get(key=meta.key)
        with self.db.get(key=meta.key) as fh:
            self.assertEqual(fh.read(), b"fs content")
Example #5
async def get_completions(input_value, editor=None):
    async def no_history(server, command_name, argstr=""):
        return []

    if editor is None:
        editor = FakeEditor()
    srv = object()
    with replattr(
        (server, "Editor", lambda srv: editor),
        (server, "get_history", no_history),
    ):
        return await server.get_completions(srv, [input_value])
Example #6
def test_profile_decorator():
    output = StringIO()
    args = []

    @profile
    def func(arg):
        args.append(arg)

    with replattr(sys.stderr, "write", output.write):
        func(1)
    eq(args, [1])
    eq(output.getvalue(), Regex(r"test_decorators.py:\d+\(func\)"))
Example #7
def test_copy_blob_masks_old_blob(self):
    content = BytesIO(b"fs content")
    info = self.fsdb.put(content, "test")
    content.seek(0)
    self.db.copy_blob(content, info, DEFAULT_BUCKET)
    self.assertEndsWith(
        self.fsdb.get_path(info.name), "/" + self.db.get_path(info.name))
    with replattr(self.fsdb, "get", blow_up, sigcheck=False):
        with self.assertRaises(Boom):
            self.fsdb.get(info.name)
        with self.db.get(info.name) as fh:
            self.assertEqual(fh.read(), b"fs content")
Example #8
def test_copy_blob_masks_old_blob(self):
    content = BytesIO(b"fs content")
    info = self.fsdb.put(content, "test")
    content.seek(0)
    self.db.copy_blob(content, info, DEFAULT_BUCKET)
    self.assertEndsWith(self.fsdb.get_path(info.identifier),
                        "/" + self.db.get_path(info.identifier))
    with replattr(self.fsdb, "get", blow_up, sigcheck=False):
        with self.assertRaises(Boom):
            self.fsdb.get(info.identifier)
        with self.db.get(info.identifier) as fh:
            self.assertEqual(fh.read(), b"fs content")
Example #9
    def make_unmigrated(media_class, filename, data):
        media = media_class.get_by_data(data)
        if media._id:
            media.delete()
        media = media_class.get_by_data(data)
        assert not media._id, media.aux_media

        class OldAttachmentMedia(media_class):
            def put_attachment(self, *args, **kw):
                return super(BlobMixin, self).put_attachment(*args, **kw)

        with replattr(media, "__class__", OldAttachmentMedia):
            media.attach_data(data, filename)
        return media
Example #10
    def do_failed_migration(self, docs, modify_doc):
        self.docs_to_delete.extend(docs)
        test_types = {d.doc_type for d in docs}
        if test_types != self.doc_types:
            raise Exception(
                "bad test: must have at least one document per doc "
                "type (got: {})".format(test_types))

        # verify: attachments are in couch, not blob db
        for doc in docs:
            self.assertGreaterEqual(len(doc._attachments), 1)
            self.assertEqual(len(doc.external_blobs), 0)

        # hook doc_migrator_class to simulate concurrent modification
        modified = set()
        docs_by_id = {d._id: d for d in docs}
        migrator = mod.MIGRATIONS[self.slug]

        class ConcurrentModify(migrator.doc_migrator_class):
            def _do_migration(self, doc):
                if doc["_id"] not in modified and doc["_id"] in docs_by_id:
                    # do concurrent modification
                    modify_doc(docs_by_id[doc["_id"]])
                    modified.add(doc["_id"])
                return super(ConcurrentModify, self)._do_migration(doc)

        with replattr(migrator, "doc_migrator_class", ConcurrentModify):
            # do migration
            migrated, skipped = migrator.migrate(max_retry=0)
            self.assertGreaterEqual(skipped, len(docs))

        self.assertEqual(modified, {d._id for d in docs})

        # verify: migration state not set when docs are skipped
        with self.assertRaises(mod.BlobMigrationState.DoesNotExist):
            mod.BlobMigrationState.objects.get(slug=self.slug)

        for doc, (num_attachments, num_blobs) in docs.items():
            exp = type(doc).get(doc._id)
            if not num_attachments:
                raise Exception("bad test: modify function should leave "
                                "unmigrated attachments")
            # verify: attachments were not migrated
            print(exp)
            self.assertEqual(len(exp._attachments), num_attachments)
            self.assertEqual(len(exp.external_blobs), num_blobs)
Example #11
def test_command(*args, name="cmd", with_history=False):
    async def no_history(server, command_name, argstr=""):
        return []

    if not args:
        args = Choice("a b", name="value"),
    replaces = []
    if not with_history:
        replaces.append((server, "get_history", no_history))
    with replattr((command, "REGISTRY", {}), *replaces):

        @command.command(name=name, has_placeholder_item=False, *args)
        async def cmd(editor, args):
            if args.value == "error":
                return error("error")
            return result(value=args.value)

        yield
Example #12
async def do_command(input_value, editor=None):
    def reraise(message):
        if sys.exc_info()[1] is not None:
            raise sys.exc_info()[1]
        raise Error(message)

    def do_not_update_history(server, input_value, command):
        pass

    if editor is None:
        editor = FakeEditor()
    srv = object()
    with replattr(
        (server, "Editor", lambda srv: editor),
        (server, "error", reraise),
        (history, "update_history", do_not_update_history),
    ):
        return await server.do_command(srv, [input_value])
Example #13
def fake_history(cache=None):
    def async_do(proxy):
        path = str(proxy)
        server, params = proxy._resolve()
        server["calls"].append(path)

    async def get(proxy):
        path = str(proxy)
        server, params = proxy._resolve()
        server["calls"].append(path)
        return server.get(path, path)

    with replattr(
        (history, "async_do", async_do),
        (history, "cache", cache or {}),
        (jsproxy, "_get", get),
    ):
        yield
Example #14
    def test_migrate_with_concurrent_modification(self):
        # setup data
        saved = SavedBasicExport(configuration=_mk_config())
        saved.save()
        name = saved.get_attachment_name()
        new_payload = 'something new'
        old_payload = 'something old'
        super(BlobMixin, saved).put_attachment(old_payload, name)
        super(BlobMixin, saved).put_attachment(old_payload, "other")
        saved.save()

        # verify: attachments are in couch
        self.assertEqual(len(saved._attachments), 2)
        self.assertEqual(len(saved.external_blobs), 0)

        modified = []
        print_status = mod.print_status

        # setup concurrent modification
        def modify_doc_and_print_status(num, total):
            if not modified:
                # do concurrent modification
                doc = SavedBasicExport.get(saved._id)
                doc.set_payload(new_payload)
                doc.save()
                modified.append(True)
            print_status(num, total)

        # hook print_status() call to simulate concurrent modification
        with replattr(mod, "print_status", modify_doc_and_print_status):
            # do migration
            migrated, skipped = mod.MIGRATIONS[self.slug].migrate()
            self.assertGreaterEqual(skipped, 1)

        # verify: migration state not set when docs are skipped
        with self.assertRaises(mod.BlobMigrationState.DoesNotExist):
            mod.BlobMigrationState.objects.get(slug=self.slug)

        # verify: attachments were not migrated
        exp = SavedBasicExport.get(saved._id)
        self.assertEqual(len(exp._attachments), 1, exp._attachments)
        self.assertEqual(len(exp.external_blobs), 1, exp.external_blobs)
        self.assertEqual(exp.get_payload(), new_payload)
        self.assertEqual(exp.fetch_attachment("other"), old_payload)
Example #15
    def test_migrate_saved_exports(self):
        # setup data
        saved = SavedBasicExport(configuration=_mk_config())
        saved.save()
        payload = 'something small and simple'
        name = saved.get_attachment_name()
        super(BlobMixin, saved).put_attachment(payload, name)
        saved.save()

        # verify: attachment is in couch and migration not complete
        self.assertEqual(len(saved._attachments), 1)
        self.assertEqual(len(saved.external_blobs), 0)

        with tempdir() as tmp, replattr(SavedBasicExport, "migrating_blobs_from_couch", True):
            filename = join(tmp, "file.txt")

            # do migration
            migrated, skipped = mod.MIGRATIONS[self.slug].migrate(filename)
            self.assertGreaterEqual(migrated, 1)

            # verify: migration state recorded
            mod.BlobMigrationState.objects.get(slug=self.slug)

            # verify: migrated data was written to the file
            with open(filename) as fh:
                lines = list(fh)
            doc = {d["_id"]: d for d in (json.loads(x) for x in lines)}[saved._id]
            self.assertEqual(doc["_rev"], saved._rev)
            self.assertEqual(len(lines), migrated, lines)

        # verify: attachment was moved to blob db
        exp = SavedBasicExport.get(saved._id)
        self.assertNotEqual(exp._rev, saved._rev)
        self.assertEqual(len(exp.blobs), 1, repr(exp.blobs))
        self.assertFalse(exp._attachments, exp._attachments)
        self.assertEqual(len(exp.external_blobs), 1)
        self.assertEqual(exp.get_payload(), payload)
Example #16
def test_File():
    field = File('path')
    eq_(str(field), 'path')
    eq_(repr(field), "File('path')")

    with tempdir() as tmp:
        os.mkdir(join(tmp, "dir"))
        os.mkdir(join(tmp, "space dir"))
        for path in [
                "dir/a.txt",
                "dir/b.txt",
                "dir/B file",
                ".hidden",
                "file.txt",
                "file.doc",
                "space dir/file",
        ]:
            assert not isabs(path), path
            with open(join(tmp, path), "w"):
                pass

        test = make_consume_checker(field)
        yield test, "relative.txt", 0, ("relative.txt", 13)

        test = make_completions_checker(field)
        yield test, "", []

        project_path = join(tmp, "dir")
        editor = FakeEditor(join(tmp, "dir/file.txt"), project_path)
        field = await_coroutine(field.with_context(editor))

        test = make_completions_checker(field)
        yield test, ".../", ["a.txt", "B file", "b.txt"], 4
        with replattr(editor, "_project_path", project_path + "/"):
            yield test, ".../", ["a.txt", "B file", "b.txt"], 4
            yield test, "...//", ["a.txt", "B file", "b.txt"], 5
        with replattr((editor, "_project_path", None),
                      (editor, "_file_path", join(tmp, "space dir/file")),
                      sigcheck=False):
            yield test, "", ["file"], 0
            yield test, "../", ["dir/", "file.doc", "file.txt",
                                "space dir/"], 3
            # yield test, "..//", ["dir", "file.doc", "file.txt", "space dir"], 4
            yield test, "../f", ["file.doc", "file.txt"], 3
            yield test, "../dir/", ["a.txt", "B file", "b.txt"], 7

        test = make_arg_string_checker(field)
        yield test, "/str", "/str"
        yield test, "/a b", '"/a b"'
        yield test, os.path.expanduser("~/a b"), '"~/a b"'
        yield test, join(tmp, "dir/file"), "file"
        yield test, join(tmp, "dir/a b"), '"a b"'
        yield test, join(tmp, "file"), join(tmp, "file")
        yield test, "arg/", Error("not a file: path='arg/'")

        test = make_consume_checker(field)
        yield test, '', 0, (None, 1)
        yield test, 'a', 0, (join(tmp, 'dir/a'), 2)
        yield test, 'abc', 0, (join(tmp, 'dir/abc'), 4)
        yield test, 'abc ', 0, (join(tmp, 'dir/abc'), 4)
        yield test, 'file.txt', 0, (join(tmp, 'dir/file.txt'), 9)
        yield test, '../file.txt', 0, (join(tmp, 'dir/../file.txt'), 12)
        yield test, '/file.txt', 0, ('/file.txt', 10)
        yield test, '~/file.txt', 0, (os.path.expanduser('~/file.txt'), 11)
        yield test, '...', 0, (join(tmp, 'dir'), 4)
        yield test, '.../file.txt', 0, (join(tmp, 'dir/file.txt'), 13)
        yield test, '"ab c"', 0, (join(tmp, 'dir/ab c'), 6)
        yield test, "'ab c'", 0, (join(tmp, 'dir/ab c'), 6)
        yield test, "'ab c/'", 0, (join(tmp, 'dir/ab c/'), 7)

        # completions
        def expanduser(path):
            if path.startswith("~"):
                if len(path) == 1:
                    return tmp
                assert path.startswith("~/"), path
                return tmp + path[1:]
            return path

        @async_test
        async def test(input, output):
            if input.startswith("/"):
                input = tmp + "/"
            with replattr(os.path, "expanduser", expanduser):
                arg = await mod.Arg(field, input, 0, None)
                eq_(await field.get_completions(arg), output)

        yield test, "", ["a.txt", "B file", "b.txt"]
        yield test, "a", ["a.txt"]
        yield test, "a.txt", ["a.txt"]
        yield test, "b", ["B file", "b.txt"]
        yield test, "B", ["B file"]
        yield test, "..", ["../"]
        yield test, "../", ["dir/", "file.doc", "file.txt", "space dir/"]
        yield test, "../.", [".hidden"]
        yield test, "...", [".../"]
        yield test, ".../", ["a.txt", "B file", "b.txt"]
        yield test, "../dir", ["dir/"]
        yield test, "../dir/", ["a.txt", "B file", "b.txt"]
        yield test, "../sp", ["space dir/"]
        yield test, "../space\\ d", ["space dir/"]
        yield test, "../space\\ dir", ["space dir/"]
        yield test, "../space\\ dir/", ["file"]
        yield test, "val", []
        yield test, "/", ["dir/", "file.doc", "file.txt", "space dir/"]
        yield test, "~", ["~/"]
        yield test, "~/", ["dir/", "file.doc", "file.txt", "space dir/"]

        # delimiter completion
        @async_test
        async def test(input, output, start=0):
            arg = await mod.Arg(field, input, 0, None)
            words = await field.get_completions(arg)
            assert all(isinstance(w, CompleteWord) for w in words), \
                repr([w for w in words if not isinstance(w, CompleteWord)])
            eq_([w.complete() for w in words], output)
            eq_([w.start for w in words], [start] * len(words), words)

        yield test, "", ["a.txt ", "B\\ file ", "b.txt "]
        yield test, "x", []
        yield test, "..", ["../"]
        yield test, "../", ["dir/", "file.doc ", "file.txt ",
                            "space\\ dir/"], 3
        yield test, "../dir", ["dir/"], 3
        yield test, "../di", ["dir/"], 3
        yield test, "../sp", ["space\\ dir/"], 3
        yield test, "../space\\ d", ["space\\ dir/"], 3
        yield test, "../space\\ dir", ["space\\ dir/"], 3
        yield test, ".../", ["a.txt ", "B\\ file ", "b.txt "], 4
        yield test, "../space\\ dir/", ["file "], 14
        yield test, "~", ["~/"], None

        field = File('dir', directory=True)
        eq_(str(field), 'dir')
        eq_(repr(field), "File('dir', directory=True)")
        field = await_coroutine(field.with_context(editor))

        test = make_consume_checker(field)
        yield test, '', 0, (None, 1)
        yield test, 'a', 0, (join(tmp, 'dir/a'), 2)
        yield test, 'abc', 0, (join(tmp, 'dir/abc'), 4)
        yield test, 'abc ', 0, (join(tmp, 'dir/abc'), 4)
        yield test, 'abc/', 0, (join(tmp, 'dir/abc/'), 5)
        yield test, '...', 0, (join(tmp, 'dir'), 4)
        yield test, '.../abc/', 0, (join(tmp, 'dir/abc/'), 9)

        test = make_completions_checker(field)
        yield test, "", [], 0
        yield test, "a", [], 0
        yield test, "..", ["../"], 0
        yield test, "../", ["dir/", "space dir/"], 3

        test = make_arg_string_checker(field)
        yield test, "/a", "/a"
        yield test, "/a/", "/a/"
        yield test, "/dir/a", "/dir/a"
        yield test, "/dir/a/", "/dir/a/"

        field = File('dir', default="~/dir")
        check = make_completions_checker(field)

        def test(input, output, *args):
            if input.startswith("/"):
                input = tmp + "/"
            with replattr(os.path, "expanduser", expanduser):
                check(input, output, *args)

        yield test, "", [], 0

        test = make_placeholder_checker(field)
        yield test, "", 0, ("", "~/dir")
        yield test, " ", 0, ("~/dir", "")
Example #17
def setup_editor(srv=None):
    with replattr(
        (mod, "expanduser", lambda path: "/home/user"),
        (jsproxy, "_get", fake_get),
    ):
        yield mod.Editor(srv or {})