Example #1
 def test_train_eval(self, *args):
     """Callback for train once and eval once."""
     args[0].return_value = 10
     summary_dir = os.path.join(BASE_SUMMARY_DIR, 'train_eval')
     make_directory(summary_dir)
     args[1].return_value = os.path.join(
         summary_dir,
         f'train_out.events.summary.{int(time.time())}.ubuntu_lineage')
     train_callback = TrainLineage(summary_dir)
     train_callback.begin(RunContext(self.run_context))
     train_callback.end(RunContext(self.run_context))
     args[1].return_value = os.path.join(
         summary_dir,
         f'eval_out.events.summary.{int(time.time()) + 1}.ubuntu_lineage'
     )
     eval_callback = EvalLineage(summary_dir)
     eval_run_context = self.run_context
     eval_run_context['metrics'] = {'accuracy': 0.78}
     eval_run_context['valid_dataset'] = self.run_context['train_dataset']
     eval_run_context['step_num'] = 32
     eval_callback.end(RunContext(eval_run_context))
     res = get_summary_lineage(summary_dir)
     assert res.get('hyper_parameters', {}).get('loss_function') \
         == 'SoftmaxCrossEntropyWithLogits'
     assert res.get('algorithm', {}).get('network') == 'ResNet'
     if os.path.exists(summary_dir):
         shutil.rmtree(summary_dir)
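
For orientation beyond the test harness: the same callbacks are meant to be attached to a real training and evaluation run. The sketch below is a minimal illustration only; it assumes the MindInsight import path shown, a prebuilt MindSpore Model instance named model, and hypothetical train_ds/valid_ds datasets.

# Minimal usage sketch; `model`, `train_ds` and `valid_ds` are assumed to exist.
from mindinsight.lineagemgr import TrainLineage, EvalLineage

summary_dir = './summary'
train_cb = TrainLineage(summary_dir)   # records hyper-parameters, network name, etc.
eval_cb = EvalLineage(summary_dir)     # records evaluation metrics

model.train(2, train_ds, callbacks=[train_cb])
model.eval(valid_ds, callbacks=[eval_cb])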
Example #2
    def test_multiple_trains(self, *args):
        """
        Invoke the TrainLineage and EvalLineage callbacks multiple times.

        TrainLineage and EvalLineage write to different files under the same
        directory; EvalLineage log files end with '_lineage'.
        """
        args[0].return_value = 10
        for i in range(2):
            summary_record = SummaryRecord(SUMMARY_DIR_2,
                                           create_time=int(time.time()) + i)
            eval_record = SummaryRecord(SUMMARY_DIR_2,
                                        create_time=int(time.time() + 10) + i)
            args[1].return_value = os.path.join(
                SUMMARY_DIR_2,
                f'train_out.events.summary.{int(time.time()) + 2 * i}.ubuntu_lineage'
            )
            train_callback = TrainLineage(summary_record, True)
            train_callback.begin(RunContext(self.run_context))
            train_callback.end(RunContext(self.run_context))

            args[1].return_value = os.path.join(
                SUMMARY_DIR_2,
                f'eval_out.events.summary.{int(time.time()) + 2 * i + 1}.ubuntu_lineage'
            )
            eval_callback = EvalLineage(eval_record, True)
            eval_run_context = self.run_context
            eval_run_context['metrics'] = {'accuracy': 0.78 + i + 1}
            eval_run_context['valid_dataset'] = self.run_context[
                'train_dataset']
            eval_run_context['step_num'] = 32
            eval_callback.end(RunContext(eval_run_context))
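        # 2 iterations x 4 files each (train and eval summary records plus
        # their '_lineage' companions) = 8, matching the assert below.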
        files = os.listdir(SUMMARY_DIR_2)
        assert len(files) == 8
Example #3
 def test_eval_end(self):
     """Test the end function in EvalLineage."""
     eval_callback = EvalLineage(self.summary_record, True)
     eval_run_context = self.run_context
     eval_run_context['metrics'] = {'accuracy': 0.78}
     eval_run_context['valid_dataset'] = self.run_context['train_dataset']
     eval_run_context['step_num'] = 32
     eval_callback.end(RunContext(eval_run_context))
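
Since every example wraps a plain dict in RunContext, it helps to see the shape of that dict. The sketch below is a hedged reconstruction: only 'train_dataset', 'metrics', 'valid_dataset' and 'step_num' are confirmed by the tests themselves, and the remaining keys are illustrative guesses. Note it copies the dict rather than aliasing the shared fixture, as the tests do.

# Hypothetical minimal fixture; keys other than the four confirmed ones are assumptions.
run_context = {
    'train_network': None,   # assumed: the network object under training
    'loss_fn': None,         # assumed: e.g. a SoftmaxCrossEntropyWithLogits instance
    'optimizer': None,       # assumed
    'train_dataset': None,   # confirmed: referenced by the tests above
}
eval_run_context = dict(run_context)   # copy instead of mutating the shared fixture
eval_run_context['metrics'] = {'accuracy': 0.78}
eval_run_context['valid_dataset'] = eval_run_context['train_dataset']
eval_run_context['step_num'] = 32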
Example #4
 def test_raise_exception_init(self):
     """Test exception when error happened during the initialization process."""
     if os.path.exists(SUMMARY_DIR_3):
         shutil.rmtree(SUMMARY_DIR_3)
     summary_record = SummaryRecord(SUMMARY_DIR_3)
     train_callback = TrainLineage('fake_summary_record', False)
     eval_callback = EvalLineage('fake_summary_record', False)
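     # With raise_exception=False, the bogus 'fake_summary_record' is
     # presumably logged as an error rather than raised, so no '_lineage'
     # file gets written (checked by the asserts below).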
     train_callback.begin(RunContext(self.run_context))
     eval_callback.end(RunContext(self.run_context))
     files = os.listdir(SUMMARY_DIR_3)
     full_file_name = summary_record.full_file_name
     assert len(files) == 1
     assert os.path.isfile(full_file_name + "_lineage") is False
Example #5
 def test_raise_exception(self):
     """Test exception when raise_exception is set True."""
     summary_record = SummaryRecord(SUMMARY_DIR_3)
     full_file_name = summary_record.full_file_name
     assert os.path.isfile(full_file_name) is True
     assert os.path.isfile(full_file_name + "_lineage") is False
     train_callback = TrainLineage(summary_record, True)
     eval_callback = EvalLineage(summary_record, False)
     # A plain dict (not a RunContext) should be rejected. Use separate
     # blocks so both calls are actually exercised; in a single block the
     # second call would never run once the first raises.
     with self.assertRaises(LineageParamRunContextError):
         train_callback.begin(self.run_context)
     with self.assertRaises(LineageParamRunContextError):
         eval_callback.end(self.run_context)
     files = os.listdir(SUMMARY_DIR_3)
     assert len(files) == 1
     assert os.path.isfile(full_file_name + "_lineage") is False
Example #6
    def test_train_multi_eval(self, *args):
        """Callback for train once and eval twice."""
        args[0].return_value = 10
        summary_dir = os.path.join(BASE_SUMMARY_DIR, 'train_multi_eval')
        make_directory(summary_dir)
        args[1].return_value = os.path.join(
            summary_dir, 'train_out.events.summary.1590107366.ubuntu_lineage')
        train_callback = TrainLineage(summary_dir, True)
        train_callback.begin(RunContext(self.run_context))
        train_callback.end(RunContext(self.run_context))

        args[1].return_value = os.path.join(
            summary_dir, 'eval_out.events.summary.1590107367.ubuntu_lineage')
        eval_callback = EvalLineage(summary_dir, True)
        eval_run_context = self.run_context
        eval_run_context['valid_dataset'] = self.run_context['train_dataset']
        eval_run_context['metrics'] = {'accuracy': 0.79}
        eval_callback.end(RunContext(eval_run_context))
        res = get_summary_lineage(summary_dir)
        assert res.get('metric', {}).get('accuracy') == 0.79

        args[1].return_value = os.path.join(
            summary_dir, 'eval_out.events.summary.1590107368.ubuntu_lineage')
        eval_callback = EvalLineage(summary_dir, True)
        eval_run_context = self.run_context
        eval_run_context['valid_dataset'] = self.run_context['train_dataset']
        eval_run_context['metrics'] = {'accuracy': 0.80}
        eval_callback.end(RunContext(eval_run_context))
        res = get_summary_lineage(summary_dir)
        assert res.get('metric', {}).get('accuracy') == 0.80
        if os.path.exists(summary_dir):
            shutil.rmtree(summary_dir)
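
As Example #9 further below shows, get_summary_lineage also accepts a list of field names to restrict the result. An equivalent filtered query for the metric asserted above would look like this (illustrative, not part of the test):

# Filtered query: only 'metric' (plus the echoed 'summary_dir') comes back.
res = get_summary_lineage(summary_dir, ['metric'])
assert res.get('metric', {}).get('accuracy') == 0.80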
Example #7
 def test_raise_exception_create_file(self):
     """Test exception when error happened after creating file."""
     if os.path.exists(SUMMARY_DIR_3):
         shutil.rmtree(SUMMARY_DIR_3)
     summary_record = SummaryRecord(SUMMARY_DIR_3)
     eval_callback = EvalLineage(summary_record, False)
     full_file_name = summary_record.full_file_name + "_lineage"
     eval_run_context = self.run_context
     eval_run_context['metrics'] = {'accuracy': 0.78}
     eval_run_context['step_num'] = 32
     eval_run_context['valid_dataset'] = self.run_context['train_dataset']
     with open(full_file_name, 'ab'):
         with mock.patch('builtins.open') as mock_handler:
             mock_handler.return_value.__enter__.return_value.write.side_effect = IOError
             eval_callback.end(RunContext(eval_run_context))
     assert os.path.isfile(full_file_name) is True
     assert os.path.getsize(full_file_name) == 0
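
The mocking pattern above is worth isolating: patching builtins.open swaps in a MagicMock whose context-managed file object fails on write. A self-contained sketch follows (hypothetical filename; nothing touches disk while the patch is active):

from unittest import mock

with mock.patch('builtins.open') as mock_open:
    mock_open.return_value.__enter__.return_value.write.side_effect = IOError
    try:
        with open('whatever.txt', 'w') as f:
            f.write('data')   # raises the injected IOError
    except IOError:
        print('write failed as injected')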
Example #8
 def test_raise_exception_record_trainlineage(self, *args):
     """Test exception when error happened after recording training infos."""
     if os.path.exists(SUMMARY_DIR_3):
         shutil.rmtree(SUMMARY_DIR_3)
     args[1].side_effect = MindInsightException(
         error=LineageErrors.PARAM_RUN_CONTEXT_ERROR,
         message="RunContext error.")
     summary_record = SummaryRecord(SUMMARY_DIR_3)
     train_callback = TrainLineage(summary_record, True)
     train_callback.begin(RunContext(self.run_context))
     full_file_name = train_callback.lineage_summary.lineage_log_path
     file_size1 = os.path.getsize(full_file_name)
     train_callback.end(RunContext(self.run_context))
     file_size2 = os.path.getsize(full_file_name)
     assert file_size2 > file_size1
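     # With the mocked validation error and raise_exception=False, the eval
     # end() below should write nothing, so the file size stays the same.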
     eval_callback = EvalLineage(summary_record, False)
     eval_callback.end(RunContext(self.run_context))
     file_size3 = os.path.getsize(full_file_name)
     assert file_size3 == file_size2
Example #9
 def test_eval_only(self):
     """Test record evaluation event only."""
     summary_dir = os.path.join(BASE_SUMMARY_DIR, 'eval_only_dir')
     summary_record = SummaryRecord(summary_dir)
     eval_run_context = self.run_context
     eval_run_context['metrics'] = {'accuracy': 0.58}
     eval_run_context['valid_dataset'] = self.run_context['train_dataset']
     eval_run_context['step_num'] = 32
     eval_only_callback = EvalLineage(summary_record)
     eval_only_callback.end(RunContext(eval_run_context))
     res = get_summary_lineage(summary_dir, ['metric', 'dataset_graph'])
     expect_res = {
         'summary_dir': summary_dir,
         'dataset_graph': {},
         'metric': {
             'accuracy': 0.58
         }
     }
     assert res == expect_res
     shutil.rmtree(summary_dir)
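
Taken together, the fields these examples assert against suggest the following shape for a get_summary_lineage result. This is a hedged reconstruction from the assertions in Examples #1, #6 and #9 only; the real return value carries more keys.

# Shape inferred solely from the assertions above; values are the ones the tests expect.
res = {
    'summary_dir': summary_dir,   # echoed back even in filtered queries (Example #9)
    'hyper_parameters': {'loss_function': 'SoftmaxCrossEntropyWithLogits'},
    'algorithm': {'network': 'ResNet'},
    'metric': {'accuracy': 0.58},
    'dataset_graph': {},
}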