Example #1
    def test_extend_differing_stage(self, shared_data):
        mirror = FakeStage('mirror')
        stage = FakeStage('stage')
        mirror_prj = ProjectIndexingInfo(stage=mirror, name='mirror_prj')
        stage_prj = ProjectIndexingInfo(stage=stage, name='stage_prj')
        with pytest.raises(ValueError, match="Project isn't from same index"):
            shared_data.extend([mirror_prj, stage_prj], 0)
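
These snippets come from devpi-web's search-indexing test suite; the FakeStage helper and the shared_data fixture they rely on are defined elsewhere in that suite. Reconstructed from how the tests use it, a minimal FakeStage might look roughly like this (a sketch of the assumed interface, not the project's actual fixture):

class FakeStage:
    # Hypothetical stand-in: the tests pass 'mirror' or 'stage' as the
    # index type, read .name as the index name, set .serial directly
    # (Example #5) and track per-project serials in .serials (Example #10).
    def __init__(self, index_type):
        self.name = index_type
        self.ixconfig = dict(type=index_type)  # assumption: how mirrors are detected
        self.serial = -1
        self.serials = {}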
Example #2
    def test_mirror_priority(self, shared_data):
        mirror = FakeStage('mirror')
        stage = FakeStage('stage')
        mirror_prj = ProjectIndexingInfo(stage=mirror, name='mirror_prj')
        stage_prj = ProjectIndexingInfo(stage=stage, name='stage_prj')
        result = []

        def handler(is_from_mirror, serial, indexname, names):
            (name, ) = names
            result.append(name)

        # Regardless of the serial or add order, the stage should come first
        cases = [((mirror_prj, 0), (stage_prj, 0)),
                 ((mirror_prj, 1), (stage_prj, 0)),
                 ((mirror_prj, 0), (stage_prj, 1)),
                 ((stage_prj, 0), (mirror_prj, 0)),
                 ((stage_prj, 1), (mirror_prj, 0)),
                 ((stage_prj, 0), (mirror_prj, 1))]
        for (prj1, serial1), (prj2, serial2) in cases:
            shared_data.add(prj1, serial1)
            shared_data.add(prj2, serial2)
            assert shared_data.queue.qsize() == 2
            shared_data.process_next(handler)
            shared_data.process_next(handler)
            assert shared_data.queue.qsize() == 0
            assert result == ['stage_prj', 'mirror_prj']
            result.clear()
Example #3
def devpiserver_mirror_initialnames(stage, projectnames):
    ix = get_indexer(stage.xom)
    threadlog.info("indexing '%s' mirror with %s projects", stage.name,
                   len(projectnames))
    ix.update_projects(
        ProjectIndexingInfo(stage=stage, name=name) for name in projectnames)
    threadlog.info("finished mirror indexing operation")
Example #4
    def handler(self, is_from_mirror, serial, indexname, names):
        log.debug(
            "Got %s projects from %s at serial %s for indexing",
            len(names), indexname, serial)
        ix = get_indexer(self.xom)
        counter = itertools.count()
        project_ix = ix.get_project_ix()
        main_keys = project_ix.schema.names()
        writer = project_ix.writer()
        searcher = project_ix.searcher()
        try:
            with self.xom.keyfs.transaction(write=False) as tx:
                stage = self.xom.model.getstage(indexname)
                if stage is not None:
                    for name in names:
                        data = preprocess_project(
                            ProjectIndexingInfo(stage=stage, name=name))
                        # because we use the current transaction, we also
                        # use the current serial for indexing
                        ix._update_project(
                            data, tx.at_serial, counter, main_keys, writer,
                            searcher=searcher)
            # _update_project presumably advances the counter once per
            # document, so the next value is the number of docs written
            count = next(counter)
        except Exception:
            writer.cancel()
            # let the queue handle retries
            raise
        else:
            log.debug("Committing %s new documents to search index.", count)
            writer.commit()
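
The try/except/else arrangement above is the usual way to keep a Whoosh index consistent: writer.cancel() discards the entire pending batch (and releases the writer lock) on any failure, while writer.commit() publishes all changes in one step. Stripped of the devpi specifics, the pattern looks like this (documents is a stand-in for whatever produces the field dicts):

writer = project_ix.writer()
try:
    for doc in documents:
        # update_document upserts by the schema's unique field(s)
        writer.update_document(**doc)
except Exception:
    writer.cancel()   # drop the partial batch
    raise             # let the caller (here: the queue) retry
else:
    writer.commit()   # make the whole batch visible atomically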
Example #5
    def test_queue_projects_max_names(self, shared_data):
        shared_data.QUEUE_MAX_NAMES = 3
        mirror = FakeStage('mirror')
        mirror.serial = 0
        prjs = []
        for i in range(10):
            prjs.append(ProjectIndexingInfo(stage=mirror, name='prj%d' % i))

        result = []

        def handler(is_from_mirror, serial, indexname, names):
            result.append(names)

        class FakeSearcher:
            def document_number(self, path):
                return None

        shared_data.queue_projects(prjs, 0, FakeSearcher())
        assert shared_data.queue.qsize() == 4

        shared_data.process_next(handler)
        assert shared_data.queue.qsize() == 3
        assert result == [['prj0', 'prj1', 'prj2']]
        shared_data.process_next(handler)
        assert shared_data.queue.qsize() == 2
        assert result == [['prj0', 'prj1', 'prj2'], ['prj3', 'prj4', 'prj5']]
        shared_data.process_next(handler)
        assert shared_data.queue.qsize() == 1
        assert result == [['prj0', 'prj1', 'prj2'], ['prj3', 'prj4', 'prj5'],
                          ['prj6', 'prj7', 'prj8']]
        shared_data.process_next(handler)
        assert shared_data.queue.qsize() == 0
        assert result == [['prj0', 'prj1', 'prj2'], ['prj3', 'prj4', 'prj5'],
                          ['prj6', 'prj7', 'prj8'], ['prj9']]
        assert shared_data.error_queue.qsize() == 0
Example #6
    def test_extend_max_names(self, shared_data):
        shared_data.QUEUE_MAX_NAMES = 3
        mirror = FakeStage('mirror')
        prjs = []
        for i in range(10):
            prjs.append(ProjectIndexingInfo(stage=mirror, name='prj%d' % i))

        result = []

        def handler(is_from_mirror, serial, indexname, names):
            result.append(names)

        shared_data.extend(prjs, 0)
        assert shared_data.queue.qsize() == 4

        shared_data.process_next(handler)
        assert shared_data.queue.qsize() == 3
        assert result == [['prj0', 'prj1', 'prj2']]
        shared_data.process_next(handler)
        assert shared_data.queue.qsize() == 2
        assert result == [['prj0', 'prj1', 'prj2'], ['prj3', 'prj4', 'prj5']]
        shared_data.process_next(handler)
        assert shared_data.queue.qsize() == 1
        assert result == [['prj0', 'prj1', 'prj2'], ['prj3', 'prj4', 'prj5'],
                          ['prj6', 'prj7', 'prj8']]
        shared_data.process_next(handler)
        assert shared_data.queue.qsize() == 0
        assert result == [['prj0', 'prj1', 'prj2'], ['prj3', 'prj4', 'prj5'],
                          ['prj6', 'prj7', 'prj8'], ['prj9']]
        assert shared_data.error_queue.qsize() == 0
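
Both Example #5 and this test pin down the batching rule: the project list is split into queue entries of at most QUEUE_MAX_NAMES names each, so ten projects with a limit of 3 yield batches of 3, 3, 3 and 1. The splitting itself amounts to the following (a minimal sketch, not the plugin's actual code):

def chunked(names, size):
    # yield successive batches of at most `size` names
    for i in range(0, len(names), size):
        yield names[i:i + size]

assert list(chunked(['prj%d' % i for i in range(10)], 3)) == [
    ['prj0', 'prj1', 'prj2'], ['prj3', 'prj4', 'prj5'],
    ['prj6', 'prj7', 'prj8'], ['prj9']]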
Example #7
def test_inheritance(xom):
    with xom.keyfs.transaction(write=True):
        user = xom.model.create_user("one", "one")
        prod = user.create_stage("prod")
        prod.set_versiondata({"name": "proj", "version": "1.0"})
        dev = user.create_stage("dev", bases=(prod.name, ))
        dev.set_versiondata({"name": "proj", "version": "1.1"})

    with xom.keyfs.transaction():
        stage = xom.model.getstage(dev.name)
        preprocess_project(ProjectIndexingInfo(stage=stage, name="proj"))
Example #8
    def test_serial_priority(self, index_type, shared_data):
        stage = FakeStage(index_type)
        prj = ProjectIndexingInfo(stage=stage, name='prj')
        result = []

        def handler(is_from_mirror, serial, indexname, names):
            result.append(serial)

        # Later serials come first
        shared_data.add(prj, 1)
        shared_data.add(prj, 100)
        shared_data.add(prj, 10)
        assert shared_data.queue.qsize() == 3
        shared_data.process_next(handler)
        shared_data.process_next(handler)
        shared_data.process_next(handler)
        assert shared_data.queue.qsize() == 0
        assert result == [100, 10, 1]
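
Taken together with Example #2, this fixes the queue's ordering contract: stage entries are processed before mirror entries, and within the same kind, higher serials come first. One way to get that from a standard PriorityQueue is a composite sort key; a sketch of the idea, not devpi-web's actual implementation:

from queue import PriorityQueue

queue = PriorityQueue()

def put(is_from_mirror, serial, indexname, names):
    # False sorts before True, so stage entries win; negating the
    # serial makes later serials come out of the queue first
    queue.put((is_from_mirror, -serial, indexname, names))

put(True, 1, 'mirror', ('mirror_prj',))
put(False, 0, 'stage', ('stage_prj',))
assert queue.get()[2] == 'stage'    # stage first, as in Example #2
assert queue.get()[2] == 'mirror'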
Example #9
def index_project(stage, name):
    if stage is None:
        return
    ix = get_indexer(stage.xom)
    ix.update_projects([ProjectIndexingInfo(stage=stage, name=name)])
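
A caller would resolve the stage inside a keyfs transaction first; getstage returns None for unknown index names (see Example #4), which is why index_project guards against it. Illustrative usage, with 'root/pypi' as devpi's default mirror index:

with xom.keyfs.transaction():
    index_project(xom.model.getstage('root/pypi'), 'pytest')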
Example #10
    def test_queue_projects_skip_existing(self, shared_data):
        """ For projects from mirrors the existing serial from the index
            is checked to skip reindexing projects which are already up to
            date.

            There was a bug where the serial in use was overwritten during
            that check, causing wrong entries in the queue.
        """
        class FakeSearcher:
            index = {}

            def document_number(self, path):
                if path in self.index:
                    return path

            def stored_fields(self, path):
                return {'serial': self.index[path]}

        searcher = FakeSearcher()

        result = []

        def handler(is_from_mirror, serial, indexname, names):
            if is_from_mirror and indexname == 'mirror':
                for project in names:
                    searcher.index['/%s/%s' % (indexname, project)] = serial
            result.append((is_from_mirror, serial, indexname, names))

        mirror = FakeStage('mirror')
        stage = FakeStage('stage')
        # add one project on the mirror at serial 0
        mirror.serials['mirror1'] = 0
        shared_data.queue_projects(
            [ProjectIndexingInfo(stage=mirror, name='mirror1')], 0, searcher)
        assert shared_data.queue.qsize() == 1
        while shared_data.queue.qsize():
            shared_data.process_next(handler)
        assert result == [(True, 0, 'mirror', ['mirror1'])]
        result.clear()
        # add another project on the mirror at serial 1 and re-add the first project
        mirror.serials['mirror2'] = 1
        shared_data.queue_projects([
            ProjectIndexingInfo(stage=mirror, name='mirror1'),
            ProjectIndexingInfo(stage=mirror, name='mirror2')
        ], 1, searcher)
        assert shared_data.queue.qsize() == 1
        while shared_data.queue.qsize():
            shared_data.process_next(handler)
        assert result == [(True, 1, 'mirror', ['mirror2'])]
        result.clear()
        # add a project on the stage at serial 2 and re-add mirror projects
        stage.serials['prj'] = 2
        shared_data.queue_projects([
            ProjectIndexingInfo(stage=mirror, name='mirror1'),
            ProjectIndexingInfo(stage=mirror, name='mirror2'),
            ProjectIndexingInfo(stage=stage, name='prj')
        ], 2, searcher)
        assert shared_data.queue.qsize() == 1
        while shared_data.queue.qsize():
            shared_data.process_next(handler)
        assert result == [(False, 2, 'stage', ('prj', ))]
        result.clear()
        # now re-add everything at a later serial
        shared_data.queue_projects([
            ProjectIndexingInfo(stage=mirror, name='mirror1'),
            ProjectIndexingInfo(stage=mirror, name='mirror2'),
            ProjectIndexingInfo(stage=stage, name='prj')
        ], 3, searcher)
        assert shared_data.queue.qsize() == 1
        while shared_data.queue.qsize():
            shared_data.process_next(handler)
        assert result == [(False, 3, 'stage', ('prj', ))]
        result.clear()
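
The skip decision being tested compares the serial stored in the search index against the project's last-change serial on the mirror, re-queueing only out-of-date projects. In the shape the FakeSearcher mimics (documents addressed by a '/<index>/<project>' path storing a 'serial' field), the check is roughly this (a sketch, not the plugin's actual code):

def needs_indexing(searcher, indexname, project, serial):
    path = '/%s/%s' % (indexname, project)
    docnum = searcher.document_number(path)
    if docnum is None:
        return True                      # never indexed before
    stored = searcher.stored_fields(docnum)
    return stored['serial'] < serial     # reindex only if out of date

The bug mentioned in the docstring was in this check: the serial read back from the index must not clobber the serial the batch is being queued at.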
Example #11
    def test_error_queued(self, shared_data):
        stage = FakeStage('stage')
        prj = ProjectIndexingInfo(stage=stage, name='prj')

        next_ts_result = []
        handler_result = []
        orig_next_ts = shared_data.next_ts

        def next_ts(delay):
            next_ts_result.append(delay)
            return orig_next_ts(delay)

        shared_data.next_ts = next_ts

        def handler(is_from_mirror, serial, indexname, names):
            (name, ) = names
            handler_result.append(name)
            raise ValueError

        # No waiting on empty queues
        shared_data.QUEUE_TIMEOUT = 0
        shared_data.add(prj, 0)
        assert shared_data.queue.qsize() == 1
        assert shared_data.error_queue.qsize() == 0
        assert next_ts_result == []
        assert handler_result == []
        # An exception puts the info into the error queue
        shared_data.process_next(handler)
        assert shared_data.queue.qsize() == 0
        assert shared_data.error_queue.qsize() == 1
        assert next_ts_result == [11]
        assert handler_result == ['prj']
        # Calling again doesn't change anything,
        # because there is a delay on errors
        shared_data.process_next(handler)
        assert shared_data.queue.qsize() == 0
        assert shared_data.error_queue.qsize() == 1
        assert next_ts_result == [11]
        assert handler_result == ['prj']
        # With the delay check bypassed, the handler is called again and
        # the info is re-queued with a longer delay
        shared_data.is_in_future = lambda ts: False
        shared_data.process_next(handler)
        assert shared_data.queue.qsize() == 0
        assert shared_data.error_queue.qsize() == 1
        assert next_ts_result == [
            11, 11 * shared_data.ERROR_QUEUE_DELAY_MULTIPLIER
        ]
        assert handler_result == ['prj', 'prj']
        while True:
            # The delay is increased until reaching a maximum
            shared_data.process_next(handler)
            delay = next_ts_result[-1]
            if delay >= shared_data.ERROR_QUEUE_MAX_DELAY:
                break
        # then it will stay there
        shared_data.process_next(handler)
        delay = next_ts_result[-1]
        assert delay == shared_data.ERROR_QUEUE_MAX_DELAY
        # The number of retries should stay reasonable. These assertions
        # need adjustment if ERROR_QUEUE_DELAY_MULTIPLIER or
        # ERROR_QUEUE_MAX_DELAY is changed.
        assert len(next_ts_result) == 17
        assert len(handler_result) == 17
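
What this test pins down is a capped exponential backoff for failed indexing attempts: the first failure schedules a retry after a base delay (11 here), each further failure multiplies the delay by ERROR_QUEUE_DELAY_MULTIPLIER, and once ERROR_QUEUE_MAX_DELAY is reached the delay stays constant. The schedule reduces to (a sketch; the constant names are taken from the test, the logic is an inference from its assertions):

def next_delay(previous, multiplier, max_delay):
    # capped exponential backoff: grow the delay on each retry,
    # but never beyond max_delay
    return min(previous * multiplier, max_delay)

Starting from 11 and applying next_delay repeatedly reproduces the sequence collected in next_ts_result; the asserted length of 17 is simply how many retries that takes with the plugin's actual multiplier and cap.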