    def test_result_id_gets_id_without_dereferencing(self):
        result = InitMonitoredResult(metric=self.metric,
                                     trial_results=[self.trial_result],
                                     success=True)
        result.save()
        obj = MeasureTrialTask(metric=self.metric,
                               trial_results=[self.trial_result],
                               state=JobState.DONE,
                               result=result)
        obj.save()

        # Set up mocks
        dereferenced = False

        def init_side_effect(_):
            nonlocal dereferenced
            dereferenced = True

        InitMonitoredResult.side_effect = init_side_effect

        # Delete and reload the object to reset the references to object ids
        del obj
        obj = next(Task.objects.all())

        # Read the id alone; this should not dereference the result
        _ = obj.result_id
        self.assertFalse(dereferenced)

        # Clean up
        InitMonitoredResult.side_effect = None
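
The test above pins down a useful property: reading result_id returns the stored id without loading the referenced document. A minimal sketch of that pattern, assuming a model that keeps the raw reference id around (the class and attribute names here are illustrative, not the project's actual implementation):

from bson import ObjectId


class LazyResultTask:
    """Toy model: keep the raw reference id so reading the id is free."""

    def __init__(self, result_ref: ObjectId):
        # Only the raw ObjectId is stored; nothing is fetched here
        self._result_ref = result_ref

    @property
    def result_id(self) -> ObjectId:
        # Cheap accessor: return the stored id with no database round trip,
        # which is the behaviour the test above asserts
        return self._result_ref

    def get_result(self):
        # Expensive path: dereference the document only when asked.
        # MetricResult stands in for the pymodm model from these examples.
        return MetricResult.objects.get({'_id': self._result_ref})
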
Example #2
    def setUp(self):
        # Remove the collections at the start of the test, so that we're sure they're empty
        Task._mongometa.collection.drop()
        TrialResult._mongometa.collection.drop()
        MetricResult._mongometa.collection.drop()

        # Create run system tasks and trial results
        self.run_system_tasks = {}
        self.trial_result_groups = []
        for image_collection in self.image_collections:
            for system in self.systems:
                trial_result_group = []
                for repeat in range(3):
                    trial_result = mock_types.MockTrialResult(
                        system=system,
                        image_source=image_collection,
                        success=True)
                    trial_result.save()
                    trial_result_group.append(trial_result)

                    task = RunSystemTask(system=system,
                                         image_source=image_collection,
                                         state=JobState.DONE,
                                         result=trial_result)
                    task.save()
                    self.run_system_tasks[
                        trial_result.identifier] = task.identifier
                self.trial_result_groups.append(trial_result_group)

        self.measure_trial_tasks = {}
        self.metric_results = {}
        for group_id, trial_result_group in enumerate(
                self.trial_result_groups):
            self.measure_trial_tasks[group_id] = []
            self.metric_results[group_id] = []
            for metric in self.metrics:
                metric_result = mock_types.MockMetricResult(
                    metric=metric,
                    trial_results=trial_result_group,
                    success=True)
                metric_result.save()
                self.metric_results[group_id].append(metric_result.identifier)

                task = MeasureTrialTask(metric=metric,
                                        trial_results=trial_result_group,
                                        state=JobState.DONE,
                                        result=metric_result)
                task.save()
                self.measure_trial_tasks[group_id].append(task.identifier)

            for metric in self.unfinished_metrics:
                task = MeasureTrialTask(metric=metric,
                                        trial_results=trial_result_group,
                                        state=JobState.UNSTARTED)
                task.save()
                self.measure_trial_tasks[group_id].append(task.identifier)

    def test_run_task_saves_result(self):
        metric_result = mock.create_autospec(mock_types.MockMetricResult)
        self.metric.measure_results = lambda _: metric_result
        subject = MeasureTrialTask(metric=self.metric,
                                   trial_results=[self.trial_result],
                                   state=JobState.RUNNING,
                                   node_id='test',
                                   job_id=1)
        self.assertFalse(metric_result.save.called)
        subject.run_task(self.path_manager)
        self.assertTrue(metric_result.save.called)

    def test_stores_and_loads_unstarted(self):
        obj = MeasureTrialTask(metric=self.metric,
                               trial_results=[self.trial_result],
                               state=JobState.UNSTARTED)
        obj.save()

        # Load all the entities
        all_entities = list(Task.objects.all())
        self.assertGreaterEqual(len(all_entities), 1)
        self.assertEqual(all_entities[0], obj)
        all_entities[0].delete()

    def test_run_task_records_returned_metric_result(self):
        metric_result = mock.create_autospec(mock_types.MockMetricResult)
        self.metric.measure_results = lambda _: metric_result
        subject = MeasureTrialTask(metric=self.metric,
                                   trial_results=[self.trial_result],
                                   state=JobState.RUNNING,
                                   node_id='test',
                                   job_id=1)
        self.assertIsNone(subject.result)
        subject.run_task(self.path_manager)
        self.assertTrue(subject.is_finished)
        self.assertEqual(subject.result, metric_result)

    def test_run_task_records_metric_returned_none(self):
        self.metric.measure_results = lambda _: None
        subject = MeasureTrialTask(metric=self.metric,
                                   trial_results=[self.trial_result],
                                   state=JobState.RUNNING,
                                   node_id='test',
                                   job_id=1)
        self.assertIsNone(subject.result)
        subject.run_task(self.path_manager)
        self.assertTrue(subject.is_finished)
        self.assertIsNotNone(subject.result)
        self.assertFalse(subject.result.success)
        self.assertIsNotNone(subject.result.message)
        self.assertEqual(self.metric, subject.result.metric)
        self.assertEqual([self.trial_result], subject.result.trial_results)

    def test_result_id_is_result_primary_key(self):
        result = mock_types.MockMetricResult()
        result.pk = bson.ObjectId()
        obj = MeasureTrialTask(metric=self.metric,
                               trial_results=[self.trial_result],
                               state=JobState.DONE,
                               result=result)
        self.assertEqual(result.pk, obj.result_id)
Example #8
    def setUp(self):
        # Remove the collections at the start of the test, so that we're sure they're empty
        Task._mongometa.collection.drop()
        MetricResult._mongometa.collection.drop()

        self.metric_results = []
        self.measure_trial_tasks = {}
        for metric in self.metrics:
            for trial_result in self.trials:
                metric_result = mock_types.MockMetricResult(
                    metric=metric, trial_results=[trial_result], success=True)
                metric_result.save()
                self.metric_results.append(metric_result)

                task = MeasureTrialTask(metric=metric,
                                        trial_results=[trial_result],
                                        state=JobState.DONE,
                                        result=metric_result)
                task.save()
                self.measure_trial_tasks[
                    metric_result.identifier] = task.identifier
    def test_run_task_records_exception_during_execution_and_re_raises(self):
        message = 'No mercy. No respite.'

        def bad_measure_results(*_, **__):
            raise ValueError(message)

        self.metric.measure_results = bad_measure_results
        subject = MeasureTrialTask(metric=self.metric,
                                   trial_results=[self.trial_result],
                                   state=JobState.RUNNING,
                                   node_id='test',
                                   job_id=1)
        self.assertIsNone(subject.result)
        with self.assertRaises(ValueError):
            subject.run_task(self.path_manager)
        self.assertTrue(subject.is_finished)
        self.assertIsNotNone(subject.result)
        self.assertFalse(subject.result.success)
        self.assertIsNotNone(subject.result.message)
        self.assertIn(message, subject.result.message)
        self.assertEqual(self.metric, subject.result.metric)
        self.assertEqual([self.trial_result], subject.result.trial_results)
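
Together with test_run_task_records_metric_returned_none above, this test implies a record-then-re-raise pattern inside run_task. A hedged method sketch of that control flow (constructing MetricResult directly and the mark_job_complete helper are assumptions inferred from the assertions, not the project's confirmed code):

import traceback


def run_task(self, path_manager):
    """Hedged sketch of the control flow the tests above imply."""
    try:
        result = self.metric.measure_results(self.trial_results)
    except Exception as exc:
        # Record a failed result carrying the exception text, mark the
        # task finished, then let the exception propagate to the caller
        self.result = MetricResult(
            metric=self.metric,
            trial_results=self.trial_results,
            success=False,
            message=f'Error measuring results: {exc}\n{traceback.format_exc()}')
        self.mark_job_complete()  # hypothetical state-transition helper
        raise
    if result is None:
        # A metric that returns None also yields a failed result
        result = MetricResult(
            metric=self.metric,
            trial_results=self.trial_results,
            success=False,
            message='Metric returned None instead of a MetricResult')
    result.save()
    self.result = result
    self.mark_job_complete()
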
Example #10
def get_measure_trial_task(
        trial_results: typing.Union[typing.Iterable[TrialResult],
                                    typing.Iterable[bson.ObjectId]],
        metric: typing.Union[Metric, bson.ObjectId],
        num_cpus: int = 1,
        num_gpus: int = 0,
        memory_requirements: str = '3GB',
        expected_duration: str = '1:00:00') -> MeasureTrialTask:
    """
    Get a task to benchmark a trial result.
    Most of the parameters are resources requirements passed to the job system.
    :param trial_results: The ids of the trial results to benchmark
    :param metric: The id of the benchmark to use
    :param num_cpus: The number of CPUs required for the job. Default 1.
    :param num_gpus: The number of GPUs required for the job. Default 0.
    :param memory_requirements: The memory required for this job. Default 3 GB.
    :param expected_duration: The expected time this job will take. Default 1 hour.
    :return: A BenchmarkTrialTask
    """
    trial_results = [
        trial_result.identifier
        if isinstance(trial_result, TrialResult) else trial_result
        for trial_result in trial_results
    ]
    if TrialResult.objects.raw({
            '_id': {
                '$in': trial_results
            }
    }).count() < len(trial_results):
        raise ValueError(
            f'trial_results "{trial_results}" contains invalid trial result id'
        )
    if isinstance(metric, Metric):
        metric = metric.identifier
    elif Metric.objects.raw({'_id': metric}).count() < 1:
        raise ValueError(f"metric \"{metric}\" is not a valid Metric id")
    try:
        return MeasureTrialTask.objects.get({
            'trial_results': {
                '$all': trial_results
            },
            'metric': metric
        })
    except MeasureTrialTask.DoesNotExist:
        return MeasureTrialTask(trial_results=trial_results,
                                metric=metric,
                                state=JobState.UNSTARTED,
                                num_cpus=int(num_cpus),
                                num_gpus=int(num_gpus),
                                memory_requirements=str(memory_requirements),
                                expected_duration=str(expected_duration))
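
A short usage sketch for the helper above. The placeholders some_metric and trial_results stand for saved documents; note that when no matching task exists the helper returns a new, unsaved MeasureTrialTask, so the caller is responsible for saving it:

# Fetch an existing measurement task for this metric/trial combination,
# or build a new unstarted one with the given resource requirements
task = get_measure_trial_task(
    trial_results=trial_results,  # TrialResult objects or ObjectIds
    metric=some_metric,           # a Metric instance or its ObjectId
    num_cpus=2,
    memory_requirements='4GB',
    expected_duration='2:00:00')
if task.pk is None:
    # Newly constructed tasks have no primary key until saved
    task.save()
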
    def test_load_referenced_models_does_nothing_to_models_that_are_already_objects(
            self, mock_autoload):
        # Set up objects
        obj = MeasureTrialTask(metric=self.metric,
                               trial_results=[self.trial_result],
                               state=JobState.DONE,
                               result=self.metric_result)
        obj.save()

        self.assertFalse(mock_autoload.called)
        obj.load_referenced_models()
        self.assertFalse(mock_autoload.called)
    def test_get_result_autoloads_model_type_before_dereferencing(
            self, mock_autoload):
        # Set up objects
        result = InitMonitoredResult(metric=self.metric,
                                     trial_results=[self.trial_result],
                                     success=True)
        result.save()

        obj = MeasureTrialTask(metric=self.metric,
                               trial_results=[self.trial_result],
                               state=JobState.DONE,
                               result=result)
        obj.save()

        # Set up mocks
        loaded = False
        constructed = False
        loaded_first = False

        def autoload_side_effect(model, *_, **__):
            nonlocal loaded
            if model == MetricResult:
                loaded = True

        mock_autoload.side_effect = autoload_side_effect

        def init_result_side_effect(_):
            nonlocal loaded, constructed, loaded_first
            constructed = True
            if loaded:
                loaded_first = True

        InitMonitoredResult.side_effect = init_result_side_effect

        # Delete and reload the object to reset the references to object ids
        del obj
        obj = next(Task.objects.all())

        # Get the result
        obj.get_result()
        self.assertTrue(mock_autoload.called)
        self.assertEqual(mock.call(MetricResult, [result.pk]),
                         mock_autoload.call_args)
        self.assertTrue(constructed)
        self.assertTrue(loaded)
        self.assertTrue(loaded_first)

        # Clean up
        InitMonitoredResult.side_effect = None
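
The assertions above fix the ordering: the patched autoload hook is called with (MetricResult, [result.pk]) before the result object is constructed. A minimal sketch of a getter with that ordering, where autoload_modules is a stand-in name for whatever function the test patches:

def get_result(self):
    """Hedged sketch: autoload the result's model type, then dereference."""
    if self.result_id is None:
        return None
    # Import the module defining the stored result's concrete subclass
    # first, so pymodm can construct it when the reference is followed
    autoload_modules(MetricResult, [self.result_id])  # assumed hook name
    return self.result  # attribute access dereferences now, safely
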
Example #13
    def setUp(self):
        # Remove the collections at the start of the test, so that we're sure they're empty
        Task._mongometa.collection.drop()
        MetricResult._mongometa.collection.drop()
        mock_types.MockMetric._mongometa.collection.drop()

        self.metrics = [mock_types.MockMetric() for _ in range(2)]
        for metric in self.metrics:
            metric.save()

        self.measure_trial_tasks = {}
        self.metric_results = {}
        for metric in self.metrics:
            self.measure_trial_tasks[metric.identifier] = []
            self.metric_results[metric.identifier] = []

            for trial_result in self.trials:
                metric_result = mock_types.MockMetricResult(
                    metric=metric, trial_results=[trial_result], success=True)
                metric_result.save()
                self.metric_results[metric.identifier].append(
                    metric_result.identifier)

                task = MeasureTrialTask(metric=metric,
                                        trial_results=[trial_result],
                                        state=JobState.DONE,
                                        result=metric_result)
                task.save()
                self.measure_trial_tasks[metric.identifier].append(
                    task.identifier)

            for trial_result in self.unfinished_trials:
                task = MeasureTrialTask(metric=metric,
                                        trial_results=[trial_result],
                                        state=JobState.UNSTARTED)
                task.save()
                self.measure_trial_tasks[metric.identifier].append(
                    task.identifier)
    def test_load_referenced_models_loads_metric_and_trial_models(
            self, mock_autoload):
        obj = MeasureTrialTask(metric=self.metric,
                               trial_results=[self.trial_result],
                               state=JobState.UNSTARTED)
        obj.save()

        # Delete and reload the object to reset the references to object ids
        del obj
        obj = next(Task.objects.all())

        # Autoload the model types
        self.assertFalse(mock_autoload.called)
        obj.load_referenced_models()
        self.assertTrue(mock_autoload.called)
        self.assertIn(mock.call(Metric, [self.metric.pk]),
                      mock_autoload.call_args_list)
        self.assertIn(mock.call(TrialResult, [self.trial_result.pk]),
                      mock_autoload.call_args_list)
    def test_load_referenced_models_autoloads_models_that_are_just_ids(
            self, mock_autoload):
        # Set up objects
        obj = MeasureTrialTask(metric=self.metric,
                               trial_results=[self.trial_result],
                               state=JobState.DONE,
                               result=self.metric_result)
        obj.save()
        obj_id = obj.pk
        del obj  # Clear existing references, which should reset the references to ids

        obj = MeasureTrialTask.objects.get({'_id': obj_id})
        self.assertFalse(mock_autoload.called)
        obj.load_referenced_models()
        self.assertTrue(mock_autoload.called)
        self.assertIn(mock.call(Metric, [self.metric.pk]),
                      mock_autoload.call_args_list)
        self.assertIn(mock.call(TrialResult, [self.trial_result.pk]),
                      mock_autoload.call_args_list)
        self.assertIn(mock.call(MetricResult, [self.metric_result.pk]),
                      mock_autoload.call_args_list)
Example #16
    def test_removes_incomplete_measure_results(self):
        # Make tasks of each type, some unstarted, some running, some complete
        unstarted = []
        running = []
        complete = []
        for system in self.systems:
            for image_collection in self.image_collections:
                trial_result = mock_types.MockTrialResult(
                    system=system, image_source=image_collection, success=True)
                trial_result.save()

                for metric in self.metrics:
                    task = MeasureTrialTask(metric=metric,
                                            trial_results=[trial_result],
                                            state=JobState.UNSTARTED)
                    task.save()
                    unstarted.append(task)

                    task = MeasureTrialTask(metric=metric,
                                            trial_results=[trial_result],
                                            state=JobState.RUNNING,
                                            job_id=10,
                                            node_id='this')
                    task.save()
                    running.append(task)

                    metric_result = mock_types.MockMetricResult(
                        metric=metric,
                        trial_results=[trial_result],
                        success=True)
                    metric_result.save()
                    task = MeasureTrialTask(metric=metric,
                                            trial_results=[trial_result],
                                            state=JobState.DONE,
                                            result=metric_result)
                    task.save()
                    complete.append(task)

        self.assertEqual(
            len(complete) + len(running) + len(unstarted),
            Task.objects.all().count())
        invalidate.invalidate_incomplete_tasks()
        self.assertEqual(len(complete), Task.objects.all().count())
        for unstarted_task in unstarted:
            self.assertEqual(
                0,
                Task.objects.raw({
                    '_id': unstarted_task.pk
                }).count())
        for running_task in running:
            self.assertEqual(
                0,
                Task.objects.raw({
                    '_id': running_task.pk
                }).count())
        for complete_task in complete:
            self.assertEqual(
                1,
                Task.objects.raw({
                    '_id': complete_task.pk
                }).count())

        # Clean up after ourselves
        Task.objects.all().delete()
        MetricResult.objects.all().delete()
        TrialResult.objects.all().delete()
    def test_saving_throws_exception_if_required_fields_are_missing(self):
        obj = MeasureTrialTask(
            # metric=self.metric,
            trial_results=[self.trial_result],
            state=JobState.UNSTARTED)
        with self.assertRaises(ValidationError):
            obj.save()

        obj = MeasureTrialTask(
            metric=self.metric,
            # trial_results=[self.trial_result],
            state=JobState.UNSTARTED)
        with self.assertRaises(ValidationError):
            obj.save()

        obj = MeasureTrialTask(metric=self.metric,
                               trial_results=[self.trial_result]
                               # state=JobState.UNSTARTED
                               )
        with self.assertRaises(ValidationError):
            obj.save()

    def test_result_id_is_none_if_result_is_none(self):
        obj = MeasureTrialTask(metric=self.metric,
                               trial_results=[self.trial_result],
                               state=JobState.UNSTARTED)
        self.assertIsNone(obj.result_id)
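
The tests throughout these examples reference fixtures such as self.metric, self.trial_result, and self.path_manager without showing where they come from. A minimal sketch of the kind of test scaffolding that would provide them, assuming the mock types used above; the database helper, the PathManager arguments, and the MockSystem/MockImageSource names are assumptions, not the project's confirmed fixtures:

import unittest


class TestMeasureTrialTask(unittest.TestCase):

    @classmethod
    def setUpClass(cls):
        # Connect to a throwaway test database once for the whole class
        # (dbconn and PathManager are placeholders for project helpers)
        dbconn.connect_to_test_db()
        cls.path_manager = PathManager(['~'], '~')

    def setUp(self):
        # A fresh metric and trial result for every test
        self.metric = mock_types.MockMetric()
        self.metric.save()
        self.system = mock_types.MockSystem()              # assumed mock type
        self.system.save()
        self.image_source = mock_types.MockImageSource()   # assumed mock type
        self.image_source.save()
        self.trial_result = mock_types.MockTrialResult(
            system=self.system,
            image_source=self.image_source,
            success=True)
        self.trial_result.save()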