示例#1
0
 def test_poison_no_roots(self):
     """Poisoning with an empty root list must schedule no job runs."""
     self._add_active_workflow_tokens()
     self._archive_tokens()
     analyzer = Analyzer.from_store(self._store, 'some_workflow', '123')
     analyzer.poison([])
     # Commit whatever the analyzer produced and replay the workflow.
     self._store.commit_tokens(updates=analyzer.get_tokens())
     self.assertEqual([], self._simulate())
示例#2
0
 def test_poison_no_roots(self):
     """An empty poison set should leave the workflow with nothing to run."""
     self._add_active_workflow_tokens()
     self._archive_tokens()
     wf_analyzer = Analyzer.from_store(self._store, 'some_workflow', '123')
     wf_analyzer.poison([])
     updated_tokens = wf_analyzer.get_tokens()
     self._store.commit_tokens(updates=updated_tokens)
     # No roots were poisoned, so the simulation must execute no jobs.
     self.assertEqual([], self._simulate())
示例#3
0
 def test_change_job_histories(self):
     """Clearing histories should empty the history of every job token."""
     self._add_active_workflow_tokens()
     self._archive_tokens()
     analyzer = Analyzer.from_store(self._store, 'some_workflow', '123')
     analyzer.clear_job_histories()
     job_tokens = analyzer.get_tokens()
     # Sanity check: the analyzer must actually have produced tokens.
     self.assertLess(0, len(job_tokens))
     for job_token in job_tokens:
         self.assertEqual([], pickle.loads(job_token.data).history)
示例#4
0
 def test_change_instance(self):
     """Changing the instance should stamp '321' into every token name."""
     self._add_active_workflow_tokens()
     self._archive_tokens()
     analyzer = Analyzer.from_store(self._store, 'some_workflow', '123')
     analyzer.change_instance('321')
     renamed_tokens = analyzer.get_tokens()
     # The analyzer must have produced at least one token to rename.
     self.assertLess(0, len(renamed_tokens))
     for renamed_token in renamed_tokens:
         parsed_name = Name.from_job_token_name(renamed_token.name)
         self.assertEqual('321', parsed_name.instance)
示例#5
0
 def test_change_job_histories(self):
     """After clear_job_histories(), no job token may retain any history."""
     self._add_active_workflow_tokens()
     self._archive_tokens()
     wf_analyzer = Analyzer.from_store(self._store, 'some_workflow', '123')
     wf_analyzer.clear_job_histories()
     cleared_tokens = wf_analyzer.get_tokens()
     self.assertLess(0, len(cleared_tokens))
     for cleared_token in cleared_tokens:
         # Each token's payload is a pickled job object.
         restored_job = pickle.loads(cleared_token.data)
         self.assertEqual([], restored_job.history)
示例#6
0
 def test_change_instance(self):
     """change_instance('321') must rewrite the instance in all token names."""
     self._add_active_workflow_tokens()
     self._archive_tokens()
     wf_analyzer = Analyzer.from_store(self._store, 'some_workflow', '123')
     wf_analyzer.change_instance('321')
     updated_tokens = wf_analyzer.get_tokens()
     self.assertLess(0, len(updated_tokens))
     for updated_token in updated_tokens:
         # Decode the structured token name and verify the instance field.
         self.assertEqual(
             '321', Name.from_job_token_name(updated_token.name).instance)
示例#7
0
    def test_poison_get_new_event_tokens(self):
        """Poison all top level jobs and get new event tokens."""
        self._add_active_workflow_tokens()
        archived_tokens = self._archive_tokens()
        analyzer = Analyzer.from_store(self._store, 'some_workflow', '123')
        analyzer._filter_event_tokens(archived_tokens)

        # All jobs at the top level of the tree are roots.
        num_top_level_jobs = 2 ** (AnalyzerTestCase._NUM_LEVELS - 1)
        roots = ['job_0_{0:d}'.format(i) for i in range(num_top_level_jobs)]

        analyzer.poison(roots)
        new_event_tokens = analyzer.get_new_event_tokens()
        # Each poisoned root contributes events; expect 2 ** _NUM_LEVELS.
        self.assertEqual(2 ** AnalyzerTestCase._NUM_LEVELS,
                         len(new_event_tokens))
示例#8
0
    def test_poison_get_new_event_tokens(self):
        """Poison all top level jobs and get new event tokens."""
        self._add_active_workflow_tokens()
        archived = self._archive_tokens()
        wf_analyzer = Analyzer.from_store(self._store, 'some_workflow', '123')
        wf_analyzer._filter_event_tokens(archived)

        top_level_count = 2**(AnalyzerTestCase._NUM_LEVELS - 1)
        roots = ['job_0_%d' % idx for idx in range(top_level_count)]

        wf_analyzer.poison(roots)
        event_tokens = wf_analyzer.get_new_event_tokens()
        # Poisoning every root should yield 2 ** _NUM_LEVELS event tokens.
        self.assertEqual(2**AnalyzerTestCase._NUM_LEVELS, len(event_tokens))
示例#9
0
    def test_poison_all(self):
        """Poison all top level jobs."""
        self._add_active_workflow_tokens()
        self._archive_tokens()
        analyzer = Analyzer.from_store(self._store, 'some_workflow', '123')

        # Every job at the top level of the tree is a root.
        num_top_level_jobs = 2 ** (AnalyzerTestCase._NUM_LEVELS - 1)
        roots = ['job_0_{0:d}'.format(i) for i in range(num_top_level_jobs)]

        analyzer.poison(roots)
        self._store.commit_tokens(updates=analyzer.get_tokens())
        executed_jobs = self._simulate()
        # We expect that every job has run: a full binary tree with
        # _NUM_LEVELS levels holds 2 ** _NUM_LEVELS - 1 nodes.
        self.assertEqual(2 ** AnalyzerTestCase._NUM_LEVELS - 1,
                         len(executed_jobs))
示例#10
0
    def test_poison_all(self):
        """Poison all top level jobs."""
        self._add_active_workflow_tokens()
        self._archive_tokens()
        wf_analyzer = Analyzer.from_store(self._store, 'some_workflow', '123')

        top_level_count = 2**(AnalyzerTestCase._NUM_LEVELS - 1)
        roots = ['job_0_%d' % idx for idx in range(top_level_count)]

        wf_analyzer.poison(roots)
        updated_tokens = wf_analyzer.get_tokens()
        self._store.commit_tokens(updates=updated_tokens)
        executed_jobs = self._simulate()
        # We expect that every job has run: the full tree contains
        # 2 ** _NUM_LEVELS - 1 jobs.
        self.assertEqual(2**AnalyzerTestCase._NUM_LEVELS - 1,
                         len(executed_jobs))
示例#11
0
    def test_poison_subset(self):
        """Poison every second top level job."""
        self._add_active_workflow_tokens()
        self._archive_tokens()
        analyzer = Analyzer.from_store(self._store, 'some_workflow', '123')

        # Select every second job at the top level as a root.
        roots = []
        for job_index in range(0, 2**(AnalyzerTestCase._NUM_LEVELS - 1), 2):
            roots.append('job_0_%d' % job_index)

        analyzer.poison(roots)
        tokens = analyzer.get_tokens()
        self._store.commit_tokens(updates=tokens)
        executed_jobs = self._simulate()
        # We expect that every second job at the top level and every job at
        # a lower level was run.  Floor division (//) keeps the expected
        # count an int; plain / is true division under Python 3 and would
        # yield a float.
        expected_num_executed_jobs = (
            2**(AnalyzerTestCase._NUM_LEVELS - 1) - 1 +
            2**(AnalyzerTestCase._NUM_LEVELS - 1) // 2)
        self.assertEqual(expected_num_executed_jobs, len(executed_jobs))
示例#12
0
    def test_poison_subset(self):
        """Poison every second top level job."""
        self._add_active_workflow_tokens()
        self._archive_tokens()
        analyzer = Analyzer.from_store(self._store, 'some_workflow', '123')

        # Select every second job at the top level as a root.
        roots = []
        for job_index in range(0, 2 ** (AnalyzerTestCase._NUM_LEVELS - 1), 2):
            roots.append('job_0_%d' % job_index)

        analyzer.poison(roots)
        tokens = analyzer.get_tokens()
        self._store.commit_tokens(updates=tokens)
        executed_jobs = self._simulate()
        # We expect that every second job at the top level and every job at
        # a lower level was run.  Floor division (//) keeps the expected
        # count an int; plain / is true division under Python 3 and would
        # yield a float.
        expected_num_executed_jobs = (
            2 ** (AnalyzerTestCase._NUM_LEVELS - 1) - 1 +
            2 ** (AnalyzerTestCase._NUM_LEVELS - 1) // 2)
        self.assertEqual(expected_num_executed_jobs, len(executed_jobs))
示例#13
0
 def test_poison_no_tokens(self):
     """Poisoning an empty root list on a fresh store must not raise."""
     analyzer = Analyzer.from_store(self._store, 'some_workflow', '123')
     analyzer.poison([])
示例#14
0
 def test_poison_no_tokens(self):
     """poison([]) with no workflow tokens present should complete cleanly."""
     wf_analyzer = Analyzer.from_store(self._store, 'some_workflow', '123')
     wf_analyzer.poison([])