Exemple #1
0
    def test_run_task_records_exception_during_execution_and_re_raises(self):
        """A metric that raises during comparison must still leave a failed
        result on the task (recording the error text and inputs) while the
        exception itself propagates to the caller."""
        error_text = 'No mercy. No respite.'

        def raise_error(*_, **__):
            # Stand-in for compare_trials that always blows up.
            raise ValueError(error_text)

        self.metric.compare_trials = raise_error
        task = CompareTrialTask(
            metric=self.metric,
            trial_results_1=[self.trial_result_1],
            trial_results_2=[self.trial_result_2],
            state=JobState.RUNNING,
            node_id='test',
            job_id=1
        )
        self.assertIsNone(task.result)
        with self.assertRaises(ValueError):
            task.run_task(self.path_manager)

        # The task is still marked finished, with a failure result that
        # captures the exception message and the compared inputs.
        self.assertTrue(task.is_finished)
        failure = task.result
        self.assertIsNotNone(failure)
        self.assertFalse(failure.success)
        self.assertIsNotNone(failure.message)
        self.assertIn(error_text, failure.message)
        self.assertEqual(self.metric, failure.metric)
        self.assertEqual([self.trial_result_1], failure.trial_results_1)
        self.assertEqual([self.trial_result_2], failure.trial_results_2)
Exemple #2
0
    def test_result_id_gets_id_without_dereferencing(self):
        """Reading ``result_id`` must not construct (dereference) the stored
        result document."""
        stored_result = InitMonitoredResult(
            metric=self.metric,
            trial_results_1=[self.trial_result_1],
            trial_results_2=[self.trial_result_2],
            success=True
        )
        stored_result.save()
        task = CompareTrialTask(
            metric=self.metric,
            trial_results_1=[self.trial_result_1],
            trial_results_2=[self.trial_result_2],
            state=JobState.DONE,
            result=stored_result
        )
        task.save()

        # Flag flips if the result document ever gets constructed.
        was_dereferenced = False

        def record_dereference(_):
            nonlocal was_dereferenced
            was_dereferenced = True
        InitMonitoredResult.side_effect = record_dereference

        # Reload from the database so the result field collapses to an id.
        del task
        task = next(Task.objects.all())

        # Touching only the id must not dereference the result.
        _ = task.result_id
        self.assertFalse(was_dereferenced)

        # Clean up
        InitMonitoredResult.side_effect = None
Exemple #3
0
    def test_stores_and_loads_unstarted(self):
        """An unstarted task round-trips through the database unchanged."""
        task = CompareTrialTask(
            metric=self.metric,
            trial_results_1=[self.trial_result_1],
            trial_results_2=[self.trial_result_2],
            state=JobState.UNSTARTED
        )
        task.save()

        # Read everything back and check the saved task compares equal.
        loaded = list(Task.objects.all())
        self.assertGreaterEqual(len(loaded), 1)
        self.assertEqual(loaded[0], task)
        loaded[0].delete()
Exemple #4
0
 def test_run_task_saves_result(self):
     """Running the task persists the comparison result the metric produced."""
     mock_result = mock.create_autospec(TrialComparisonResult)
     self.metric.compare_trials = lambda *args, **kwargs: mock_result
     task = CompareTrialTask(
         metric=self.metric,
         trial_results_1=[self.trial_result_1],
         trial_results_2=[self.trial_result_2],
         state=JobState.RUNNING,
         node_id='test',
         job_id=1
     )
     # save() must happen during run_task, not before it.
     self.assertFalse(mock_result.save.called)
     task.run_task(self.path_manager)
     self.assertTrue(mock_result.save.called)
Exemple #5
0
 def test_run_task_records_returned_metric_result(self):
     """Whatever the metric returns becomes the finished task's result."""
     mock_result = mock.create_autospec(TrialComparisonResult)
     self.metric.compare_trials = lambda *args, **kwargs: mock_result
     task = CompareTrialTask(
         metric=self.metric,
         trial_results_1=[self.trial_result_1],
         trial_results_2=[self.trial_result_2],
         state=JobState.RUNNING,
         node_id='test',
         job_id=1
     )
     self.assertIsNone(task.result)
     task.run_task(self.path_manager)
     self.assertTrue(task.is_finished)
     self.assertEqual(task.result, mock_result)
Exemple #6
0
 def test_result_id_is_none_if_result_is_none(self):
     """A task without a result has no result id either."""
     task = CompareTrialTask(
         metric=self.metric,
         trial_results_1=[self.trial_result_1],
         trial_results_2=[self.trial_result_2],
         state=JobState.UNSTARTED
     )
     self.assertIsNone(task.result_id)
Exemple #7
0
 def test_run_task_records_metric_returned_none(self):
     """When the metric yields no result, the task records a failure that
     still carries a message, the metric, and the compared trials."""
     self.metric.compare_trials = lambda *args, **kwargs: None
     task = CompareTrialTask(
         metric=self.metric,
         trial_results_1=[self.trial_result_1],
         trial_results_2=[self.trial_result_2],
         state=JobState.RUNNING,
         node_id='test',
         job_id=1
     )
     self.assertIsNone(task.result)
     task.run_task(self.path_manager)
     self.assertTrue(task.is_finished)
     failure = task.result
     self.assertIsNotNone(failure)
     self.assertFalse(failure.success)
     self.assertIsNotNone(failure.message)
     self.assertEqual(self.metric, failure.metric)
     self.assertEqual([self.trial_result_1], failure.trial_results_1)
     self.assertEqual([self.trial_result_2], failure.trial_results_2)
Exemple #8
0
 def test_run_task_records_unable_to_measure_trial_in_group_2(self):
     """If the metric rejects the second trial group, the task finishes with
     a failure result that records the inputs and a message."""
     self.metric.is_trial_appropriate_for_second = lambda _: False
     task = CompareTrialTask(
         metric=self.metric,
         trial_results_1=[self.trial_result_1],
         trial_results_2=[self.trial_result_2],
         state=JobState.RUNNING,
         node_id='test',
         job_id=1
     )
     self.assertIsNone(task.result)
     task.run_task(self.path_manager)
     self.assertTrue(task.is_finished)
     failure = task.result
     self.assertIsNotNone(failure)
     self.assertFalse(failure.success)
     self.assertIsNotNone(failure.message)
     self.assertEqual(self.metric, failure.metric)
     self.assertEqual([self.trial_result_1], failure.trial_results_1)
     self.assertEqual([self.trial_result_2], failure.trial_results_2)
Exemple #9
0
    def test_load_referenced_models_autoloads_models_that_are_just_ids(self, mock_autoload):
        """After a reload collapses references to ids, load_referenced_models
        autoloads every referenced model type: metric, result, and trials."""
        task = CompareTrialTask(
            metric=self.metric,
            trial_results_1=[self.trial_result_1],
            trial_results_2=[self.trial_result_2],
            state=JobState.DONE,
            result=self.metric_result
        )
        task.save()
        task_id = task.pk
        # Drop the in-memory object so references reset to plain ids.
        del task

        task = CompareTrialTask.objects.get({'_id': task_id})
        self.assertFalse(mock_autoload.called)
        task.load_referenced_models()
        self.assertTrue(mock_autoload.called)
        self.assertIn(mock.call(TrialComparisonMetric, [self.metric.pk]), mock_autoload.call_args_list)
        self.assertIn(mock.call(TrialComparisonResult, [self.metric_result.pk]), mock_autoload.call_args_list)

        # The trial-result id order is unspecified, so locate that call
        # manually and compare the ids as a set.
        trial_call_seen = False
        for recorded_call in mock_autoload.call_args_list:
            called_model, called_ids = recorded_call[0]
            if called_model is TrialResult:
                trial_call_seen = True
                self.assertEqual({self.trial_result_1.pk, self.trial_result_2.pk}, set(called_ids))
        self.assertTrue(trial_call_seen)
Exemple #10
0
 def test_result_id_is_result_primary_key(self):
     """result_id mirrors the primary key of the attached result object."""
     attached_result = TrialComparisonResult(
         _id=bson.ObjectId(),
         metric=self.metric,
         trial_results_1=[self.trial_result_1],
         trial_results_2=[self.trial_result_2],
         success=True
     )
     task = CompareTrialTask(
         metric=self.metric,
         trial_results_1=[self.trial_result_1],
         trial_results_2=[self.trial_result_2],
         state=JobState.DONE,
         result=attached_result
     )
     self.assertEqual(attached_result.pk, task.result_id)
Exemple #11
0
    def test_get_result_autoloads_model_type_before_dereferencing(self, mock_autoload):
        """get_result() must autoload the result's model type *before* it
        constructs (dereferences) the stored result document.

        Ordering is tracked with three flags: ``loaded`` flips when autoload
        is called for TrialComparisonResult, ``constructed`` flips when the
        result object is built, and ``loaded_first`` is only set if the
        autoload had already happened at construction time.
        """
        # Set up objects
        result = InitMonitoredResult(
            metric=self.metric,
            trial_results_1=[self.trial_result_1],
            trial_results_2=[self.trial_result_2],
            success=True
        )
        result.save()

        obj = CompareTrialTask(
            metric=self.metric,
            trial_results_1=[self.trial_result_1],
            trial_results_2=[self.trial_result_2],
            state=JobState.DONE,
            result=result
        )
        obj.save()

        # Set up mocks
        loaded = False
        constructed = False
        loaded_first = False

        # Record that autoload was asked for the result's model type.
        def autoload_side_effect(model, *_, **__):
            nonlocal loaded
            if model == TrialComparisonResult:
                loaded = True
        mock_autoload.side_effect = autoload_side_effect

        # Record construction of the result, and whether the autoload had
        # already happened by then (the property under test).
        def init_result_side_effect(_):
            nonlocal loaded, constructed, loaded_first
            constructed = True
            if loaded:
                loaded_first = True
        InitMonitoredResult.side_effect = init_result_side_effect

        # Delete and reload the object to reset the references to object ids
        del obj
        obj = next(Task.objects.all())

        # get the result
        obj.get_result()
        self.assertTrue(mock_autoload.called)
        self.assertEqual(mock.call(TrialComparisonResult, [result.pk]), mock_autoload.call_args)
        self.assertTrue(constructed)
        self.assertTrue(loaded)
        self.assertTrue(loaded_first)

        # Clean up
        InitMonitoredResult.side_effect = None
Exemple #12
0
    def test_load_referenced_models_does_nothing_to_models_that_are_already_objects(self, mock_autoload):
        """While the references are still live objects (no reload has reset
        them to ids), load_referenced_models must not autoload anything."""
        task = CompareTrialTask(
            metric=self.metric,
            trial_results_1=[self.trial_result_1],
            trial_results_2=[self.trial_result_2],
            state=JobState.DONE,
            result=self.metric_result
        )
        task.save()

        self.assertFalse(mock_autoload.called)
        task.load_referenced_models()
        self.assertFalse(mock_autoload.called)
Exemple #13
0
    def test_load_referenced_models_loads_metric_and_trial_models(self, mock_autoload):
        """After a reload, load_referenced_models autoloads the metric model
        and the trial-result models (no result is attached here)."""
        obj = CompareTrialTask(
            metric=self.metric,
            trial_results_1=[self.trial_result_1],
            trial_results_2=[self.trial_result_2],
            state=JobState.UNSTARTED
        )
        obj.save()

        # Delete and reload the object to reset the references to object ids
        del obj
        obj = next(Task.objects.all())

        # Autoload the model types.
        # BUGFIX: was obj.load_referenced_modules() — no such method; the
        # sibling tests in this suite call load_referenced_models().
        self.assertFalse(mock_autoload.called)
        obj.load_referenced_models()
        self.assertTrue(mock_autoload.called)
        self.assertEqual(mock.call(TrialComparisonMetric, [self.metric.pk]), mock_autoload.call_args_list[0])
        # Order is uncertain for the ids in the second argument, assert separately
        self.assertEqual(TrialResult, mock_autoload.call_args_list[1][0][0])
        self.assertEqual({self.trial_result_1.pk, self.trial_result_2.pk}, set(mock_autoload.call_args_list[1][0][1]))
Exemple #14
0
    def test_saving_throws_exeption_if_required_fields_are_missing(self):
        """Saving raises ValidationError when any required field (metric,
        trial_results_1, trial_results_2, state) is omitted.

        NOTE(review): 'exeption' in the method name is a typo for
        'exception'; kept unchanged so test discovery and reporting stay
        stable.
        """
        complete_kwargs = {
            'metric': self.metric,
            'trial_results_1': [self.trial_result_1],
            'trial_results_2': [self.trial_result_2],
            'state': JobState.UNSTARTED,
        }
        # Drop each required field in turn and expect validation to fail.
        for missing_field in complete_kwargs:
            with self.subTest(missing=missing_field):
                kwargs = {key: value for key, value in complete_kwargs.items()
                          if key != missing_field}
                obj = CompareTrialTask(**kwargs)
                with self.assertRaises(ValidationError):
                    obj.save()
Exemple #15
0
def get_trial_comparison_task(
        trial_results_1: typing.Union[typing.Iterable[TrialResult],
                                      typing.Iterable[bson.ObjectId]],
        trial_results_2: typing.Union[typing.Iterable[TrialResult],
                                      typing.Iterable[bson.ObjectId]],
        comparison_metric: typing.Union[TrialComparisonMetric, bson.ObjectId],
        num_cpus: int = 1,
        num_gpus: int = 0,
        memory_requirements: str = '3GB',
        expected_duration: str = '1:00:00') -> CompareTrialTask:
    """
    Get a task to compare two trial results.
    Returns an existing task for the same inputs if one is stored, otherwise
    a new (unsaved) task in the UNSTARTED state.
    Most of the parameters are resources requirements passed to the job system.
    :param trial_results_1: The trial results (or their ids) to compare
    :param trial_results_2: The reference trial results (or their ids) to compare against
    :param comparison_metric: The comparison metric (or its id) to use
    :param num_cpus: The number of CPUs required for the job. Default 1.
    :param num_gpus: The number of GPUs required for the job. Default 0.
    :param memory_requirements: The memory required for this job. Default 3 GB.
    :param expected_duration: The expected time this job will take. Default 1 hour.
    :return: A CompareTrialTask
    :raises ValueError: if any trial result id or the metric id does not exist
    """
    # Normalise both groups to lists of ids.
    trial_results_1 = [
        trial_result.identifier
        if isinstance(trial_result, TrialResult) else trial_result
        for trial_result in trial_results_1
    ]
    trial_results_2 = [
        trial_result.identifier
        if isinstance(trial_result, TrialResult) else trial_result
        for trial_result in trial_results_2
    ]
    # BUGFIX: validate against the number of *distinct* ids. A '$in' query
    # counts each matching document once, so comparing to the raw list
    # length would spuriously reject inputs containing duplicate valid ids.
    if TrialResult.objects.raw({
            '_id': {
                '$in': trial_results_1
            }
    }).count() < len(set(trial_results_1)):
        raise ValueError(
            f'trial_results_1 \"{trial_results_1}\" contains invalid trial result id'
        )
    if TrialResult.objects.raw({
            '_id': {
                '$in': trial_results_2
            }
    }).count() < len(set(trial_results_2)):
        raise ValueError(
            f'trial_results_2 \"{trial_results_2}\" contains invalid trial result id'
        )
    if isinstance(comparison_metric, TrialComparisonMetric):
        comparison_metric = comparison_metric.identifier
    elif TrialComparisonMetric.objects.raw({
            '_id': comparison_metric
    }).count() < 1:
        raise ValueError(
            f"metric \"{comparison_metric}\" is not a valid Metric id")

    try:
        # Reuse an identical stored task if one already exists.
        return CompareTrialTask.objects.get({
            'trial_results_1': trial_results_1,
            'trial_results_2': trial_results_2,
            'metric': comparison_metric
        })
    except CompareTrialTask.DoesNotExist:
        return CompareTrialTask(trial_results_1=trial_results_1,
                                trial_results_2=trial_results_2,
                                metric=comparison_metric,
                                state=JobState.UNSTARTED,
                                num_cpus=int(num_cpus),
                                num_gpus=int(num_gpus),
                                memory_requirements=str(memory_requirements),
                                expected_duration=str(expected_duration))