Code Example #1
    def test_reindexation(self):
        # Adding an addon.
        addon = amo.tests.addon_factory()
        self.refresh()

        # The search should return the addon.
        wanted = [addon]
        self.check_results(wanted)

        # Current indices with aliases.
        old_indices = self.get_indices_aliases()

        # This is to start a reindexation in the background.
        class ReindexThread(threading.Thread):
            def __init__(self):
                self.stdout = StringIO.StringIO()
                super(ReindexThread, self).__init__()

            def run(self):
                management.call_command('reindex', stdout=self.stdout)
        t = ReindexThread()
        t.start()

        # Wait for the reindex in the thread to flag the database.
        # The database transaction isn't shared with the thread, so force the
        # commit.
        while t.is_alive() and not is_reindexing_amo():
            connection._commit()
            connection.clean_savepoints()

        # We should still be able to search in the foreground while the reindex
        # is being done in the background. We should also be able to index new
        # documents, and they should not be lost.
        old_addons_count = len(wanted)
        while t.is_alive() and len(wanted) < old_addons_count + 3:
            wanted.append(amo.tests.addon_factory())
            connection._commit()
            connection.clean_savepoints()
            amo.search.get_es().refresh()
            self.check_results(wanted)

        if len(wanted) == old_addons_count:
            raise AssertionError('Could not index objects in foreground while '
                                 'reindexing in the background.')

        t.join()  # Wait for the thread to finish.
        t.stdout.seek(0)
        stdout = t.stdout.read()
        assert 'Reindexation done' in stdout, stdout

        # The reindexation is done, let's double check we have all our docs.
        connection._commit()
        connection.clean_savepoints()
        amo.search.get_es().refresh()
        self.check_results(wanted)

        # New indices have been created, and aliases now point to them.
        new_indices = self.get_indices_aliases()
        eq_(len(old_indices), len(new_indices), (old_indices, new_indices))
        assert new_indices != old_indices, stdout
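
Every example on this page follows the same shape: kick off the `reindex` management command in the background, then keep indexing and searching in the foreground to prove no documents are lost. Below is a minimal, self-contained Python 3 sketch of that shape, assuming only that `reindex` is a registered Django management command; the `CommandThread` class is hypothetical, not part of any of these projects.

import io
import threading

from django.core import management


class CommandThread(threading.Thread):
    """Run a management command in the background, capturing its output."""

    def __init__(self, command):
        super().__init__()
        self.command = command
        self.stdout = io.StringIO()

    def run(self):
        # call_command() accepts a stdout keyword, so the command's
        # output lands in our in-memory buffer instead of sys.stdout.
        management.call_command(self.command, stdout=self.stdout)


t = CommandThread('reindex')
t.start()
# ... foreground work: index new documents, run searches ...
t.join()
assert 'Reindexation done' in t.stdout.getvalue()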
Code Example #2
File: test_commands.py  Project: bqbn/addons-server
    def _test_reindexation(self):
        # Current indices with aliases.
        old_indices = self.get_indices_aliases()

        # This is to start a reindexation in the background.
        class ReindexThread(threading.Thread):
            def __init__(self):
                self.stdout = StringIO.StringIO()
                super(ReindexThread, self).__init__()

            def run(self):
                # We need to wait at least a second, to make sure the alias
                # name is going to be different, since we already create an
                # alias in setUpClass.
                time.sleep(1)
                management.call_command('reindex', stdout=self.stdout)
        t = ReindexThread()
        t.start()

        # Wait for the reindex in the thread to flag the database.
        # The database transaction isn't shared with the thread, so force the
        # commit.
        while t.is_alive() and not is_reindexing_amo():
            connection._commit()
            connection.clean_savepoints()

        # We should still be able to search in the foreground while the reindex
        # is being done in the background. We should also be able to index new
        # documents, and they should not be lost.
        old_addons_count = len(self.expected)
        while t.is_alive() and len(self.expected) < old_addons_count + 3:
            self.expected.append(addon_factory())
            connection._commit()
            connection.clean_savepoints()
            self.refresh()
            self.check_results(self.expected)

        if len(self.expected) == old_addons_count:
            raise AssertionError('Could not index objects in foreground while '
                                 'reindexing in the background.')

        t.join()  # Wait for the thread to finish.
        t.stdout.seek(0)
        stdout = t.stdout.read()
        assert 'Reindexation done' in stdout, stdout

        # The reindexation is done, let's double check we have all our docs.
        connection._commit()
        connection.clean_savepoints()
        self.refresh()
        self.check_results(self.expected)

        # New indices have been created, and aliases now point to them.
        new_indices = self.get_indices_aliases()
        assert len(new_indices)
        assert old_indices != new_indices, (stdout, old_indices, new_indices)

        self.check_settings(new_indices)
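
The `get_indices_aliases()` helper used by the first two examples is not shown on this page. A plausible implementation, assuming the elasticsearch-py client (whose `indices.get_alias()` returns a mapping of the form `{index: {'aliases': {alias: {...}}}}`), might look like this:

from elasticsearch import Elasticsearch


def get_indices_aliases(es=None):
    """Return sorted (index, alias) pairs for every aliased index."""
    es = es or Elasticsearch()
    pairs = []
    for index, data in es.indices.get_alias().items():
        for alias in data.get('aliases', {}):
            pairs.append((index, alias))
    return sorted(pairs)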
Code Example #3
    def test_reindexation(self):
        # adding a web app
        webapp2 = self._create_app('neat app 2')
        self.refresh()

        # this search should return both apps
        r = self.check_results({'sort': 'popularity'},
                               [webapp2.pk, self.webapp.pk])

        # adding 5 more apps
        webapps = [self._create_app('moarneatapp %d' % i)
                   for i in range(5)]
        self.refresh()

        # XXX is there a cleaner way ?
        # all I want is to have those webapp in the DB
        # so the reindex command sees them
        connection._commit()
        connection.clean_savepoints()

        # right now, the DB should be composed of
        # two indexes, and two aliases, let's check
        # we have two aliases
        aliases = call_es('_aliases').json()
        old_aliases = [(index, aliases['aliases'].keys()[0])
                       for index, aliases in aliases.items()
                       if len(aliases['aliases']) > 0 and
                       index.startswith('test')]
        old_aliases.sort()

        # now doing a reindexation in a background process
        args = [sys.executable, 'manage.py', 'reindex', '--prefix=test_',
                '--settings=%s' % self.settings]

        indexer = subprocess.Popen(args,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE,
                                   cwd=settings.ROOT)

        try:
            # we should be able to continue some searching in the foreground
            # and always get our documents
            #
            # we should also be able to index new documents, and
            # they should not be lost
            count = 1
            wanted = [app.pk for app in webapps] + [webapp2.pk, self.webapp.pk]

            # let's add more apps, and also do some searches
            while indexer.poll() is None and count < 8:
                r = self.client.get(urlparams(self.url, sort='popularity'),
                                    follow=True)
                eq_(r.status_code, 200, str(r.content))
                got = self.get_results(r)
                got.sort()
                self.assertEqual(len(got), len(wanted), (got, wanted))
                wanted.append(self._create_app('moar %d' % count).pk)
                self.refresh()
                connection._commit()
                connection.clean_savepoints()
                count += 1
                time.sleep(.1)

            if count < 3:
                raise AssertionError("Could not index enough objects for the "
                                     "test to be meaningful.")
        except Exception:
            indexer.terminate()
            raise

        stdout, stderr = indexer.communicate()
        self.assertTrue('Reindexation done' in stdout, stdout + '\n' + stderr)

        amo.search.get_es().refresh()
        # the reindexation is done, let's double check we have all our docs
        self.check_results({'sort': 'popularity'}, wanted)

        # let's check the aliases as well, we should have 2
        aliases = call_es('_aliases').json()
        new_aliases = [(index, aliases['aliases'].keys()[0])
                       for index, aliases in aliases.items()
                       if len(aliases['aliases']) > 0 and
                       index.startswith('test')]
        new_aliases.sort()
        self.assertEqual(len(new_aliases), 2)

        # and they should be new aliases
        self.assertNotEqual(new_aliases, old_aliases)
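
The `call_es()` helper in these last two examples is likewise project-specific; a minimal stand-in, assuming the test cluster listens on localhost:9200 and using the requests library, might be:

import requests


def call_es(path, base_url='http://localhost:9200'):
    """GET a raw Elasticsearch endpoint such as '_aliases'."""
    return requests.get('%s/%s' % (base_url, path))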
Code Example #4
    def test_reindexation(self):
        # adding a web app
        webapp2 = self._create_app('neat app 2')
        self.refresh()

        # this search should return both apps
        r = self.check_results({'sort': 'popularity'},
                               [webapp2.pk, self.webapp.pk])

        # adding 5 more apps
        webapps = [self._create_app('moarneatapp %d' % i) for i in range(5)]
        self.refresh()

        # XXX is there a cleaner way ?
        # all I want is to have those webapp in the DB
        # so the reindex command sees them
        connection._commit()
        connection.clean_savepoints()

        # right now, the DB should be composed of
        # two indexes, and two aliases, let's check
        # we have two aliases
        aliases = call_es('_aliases').json()
        old_aliases = [
            (index, aliases['aliases'].keys()[0])
            for index, aliases in aliases.items()
            if len(aliases['aliases']) > 0 and index.startswith('test')
        ]
        old_aliases.sort()

        # now doing a reindexation in a background process
        args = [
            sys.executable, 'manage.py', 'reindex', '--prefix=test_',
            '--settings=%s' % self.settings
        ]

        indexer = subprocess.Popen(args,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE,
                                   cwd=settings.ROOT)

        try:
            # we should be able to continue some searching in the foreground
            # and always get our documents
            #
            # we should also be able to index new documents, and
            # they should not be lost
            count = 1
            wanted = [app.pk for app in webapps] + [webapp2.pk, self.webapp.pk]

            # let's add more apps, and also do some searches
            while indexer.poll() is None and count < 8:
                r = self.client.get(urlparams(self.url, sort='popularity'),
                                    follow=True)
                eq_(r.status_code, 200, str(r.content))
                got = self.get_results(r)
                got.sort()
                self.assertEqual(len(got), len(wanted), (got, wanted))
                wanted.append(self._create_app('moar %d' % count).pk)
                self.refresh()
                connection._commit()
                connection.clean_savepoints()
                count += 1
                time.sleep(.1)

            if count < 3:
                raise AssertionError("Could not index enough objects for the "
                                     "test to be meaningful.")
        except Exception:
            indexer.terminate()
            raise

        stdout, stderr = indexer.communicate()
        self.assertTrue('Reindexation done' in stdout, stdout + '\n' + stderr)

        elasticutils.get_es().refresh()
        # the reindexation is done, let's double check we have all our docs
        self.check_results({'sort': 'popularity'}, wanted)

        # let's check the aliases as well, we should have 2
        aliases = call_es('_aliases').json()
        new_aliases = [
            (index, aliases['aliases'].keys()[0])
            for index, aliases in aliases.items()
            if len(aliases['aliases']) > 0 and index.startswith('test')
        ]
        new_aliases.sort()
        self.assertEqual(len(new_aliases), 2)

        # and they should be new aliases
        self.assertNotEqual(new_aliases, old_aliases)
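
Note that these examples are Python 2 code (see `StringIO.StringIO` and `dict.keys()[0]`): on Python 3, `dict.keys()` returns a view, so `aliases['aliases'].keys()[0]` raises a `TypeError`. A version of the alias-collecting expression that works on both, and that also renames the loop variable to avoid shadowing the outer `aliases`, would be:

old_aliases = sorted(
    (index, next(iter(data['aliases'])))
    for index, data in aliases.items()
    if data['aliases'] and index.startswith('test')
)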