Example #1
 def test_train_begin(self):
     """Test the begin function in TrainLineage."""
     train_callback = TrainLineage(self.summary_record, True)
     train_callback.begin(RunContext(self.run_context))
     assert train_callback.initial_learning_rate == 0.12
     lineage_log_path = train_callback.lineage_summary.lineage_log_path
     assert os.path.isfile(lineage_log_path) is True
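These snippets come from the lineage callback tests and omit their module-level setup. A minimal sketch of the imports and constants the examples appear to rely on is shown below; the import paths and directory values are assumptions and may vary between MindInsight/MindSpore versions.

    import os
    import shutil
    import time

    import numpy as np
    from mindspore.train.callback import RunContext
    from mindspore.train.summary import SummaryRecord

    # Assumed import locations for the lineage callbacks and query helpers.
    from mindinsight.lineagemgr import get_summary_lineage, filter_summary_lineage
    from mindinsight.lineagemgr.collection.model.model_lineage import (
        TrainLineage, EvalLineage)

    # Placeholder summary directories referenced throughout the examples.
    BASE_SUMMARY_DIR = '/tmp/lineage_summaries'
    SUMMARY_DIR = os.path.join(BASE_SUMMARY_DIR, 'summary')
    SUMMARY_DIR_2 = os.path.join(BASE_SUMMARY_DIR, 'summary_2')
    SUMMARY_DIR_3 = os.path.join(BASE_SUMMARY_DIR, 'summary_3')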
Example #2
 def test_train_begin_with_user_defined_info(self):
     """Test TrainLineage with nested user defined info."""
     user_defined_info = {"info": {"version": "v1"}}
     train_callback = TrainLineage(self.summary_record, False,
                                   user_defined_info)
     train_callback.begin(RunContext(self.run_context))
     assert train_callback.initial_learning_rate == 0.12
     lineage_log_path = train_callback.lineage_summary.lineage_log_path
     assert os.path.isfile(lineage_log_path) is True
Example #3
 def test_train_lineage_with_log_dir(self):
     """Test TrainLineage with log_dir."""
     summary_dir = os.path.join(BASE_SUMMARY_DIR, 'log_dir')
     train_callback = TrainLineage(summary_record=summary_dir)
     train_callback.begin(RunContext(self.run_context))
     assert summary_dir == train_callback.lineage_log_dir
     lineage_log_path = train_callback.lineage_summary.lineage_log_path
     assert os.path.isfile(lineage_log_path) is True
     if os.path.exists(summary_dir):
         shutil.rmtree(summary_dir)
Example #4
 def test_raise_exception_init(self):
     """Test exception when error happened during the initialization process."""
     if os.path.exists(SUMMARY_DIR_3):
         shutil.rmtree(SUMMARY_DIR_3)
     summary_record = SummaryRecord(SUMMARY_DIR_3)
     train_callback = TrainLineage('fake_summary_record', False)
     eval_callback = EvalLineage('fake_summary_record', False)
     train_callback.begin(RunContext(self.run_context))
     eval_callback.end(RunContext(self.run_context))
     file_num = os.listdir(SUMMARY_DIR_3)
     full_file_name = summary_record.full_file_name
     assert len(file_num) == 1
     assert os.path.isfile(full_file_name + "_lineage") is False
Example #5
 def test_raise_exception(self):
     """Test exception when raise_exception is set True."""
     summary_record = SummaryRecord(SUMMARY_DIR_3)
     full_file_name = summary_record.full_file_name
     assert os.path.isfile(full_file_name) is True
     assert os.path.isfile(full_file_name + "_lineage") is False
     train_callback = TrainLineage(summary_record, True)
     eval_callback = EvalLineage(summary_record, False)
     with self.assertRaises(LineageParamRunContextError):
         train_callback.begin(self.run_context)
         eval_callback.end(self.run_context)
     file_num = os.listdir(SUMMARY_DIR_3)
     assert len(file_num) == 1
     assert os.path.isfile(full_file_name + "_lineage") is False
Example #6
 def test_train_begin_with_user_defined_key_in_lineage(self):
     """Test TrainLineage with nested user defined info."""
     expected_res = {"info": "info1", "version": "v1"}
     user_defined_info = {
         "info": "info1",
         "version": "v1",
         "network": "LeNet"
     }
     train_callback = TrainLineage(self.summary_record, False,
                                   user_defined_info)
     train_callback.begin(RunContext(self.run_context))
     assert train_callback.initial_learning_rate == 0.12
     lineage_log_path = train_callback.lineage_summary.lineage_log_path
     assert os.path.isfile(lineage_log_path) is True
     res = filter_summary_lineage(os.path.dirname(lineage_log_path))
     assert expected_res == res['object'][0]['model_lineage'][
         'user_defined']
Example #7
 def test_train_with_customized_network(self, *args):
     """Test train with customized network."""
     args[0].return_value = 64
     train_callback = TrainLineage(self.summary_record, True)
     run_context_customized = self.run_context
     del run_context_customized['optimizer']
     del run_context_customized['net_outputs']
     del run_context_customized['loss_fn']
     net = WithLossCell(self.net, self.loss_fn)
     net_cap = net
     net_cap._cells = {'_backbone': self.net, '_loss_fn': self.loss_fn}
     net = TrainOneStep(net, self.optimizer)
     net._cells = {
         'optimizer': self.optimizer,
         'network': net_cap,
         'backbone': self.net
     }
     run_context_customized['train_network'] = net
     train_callback.begin(RunContext(run_context_customized))
     train_callback.end(RunContext(run_context_customized))
     res = get_summary_lineage(SUMMARY_DIR)
     assert res.get('hyper_parameters', {}).get('loss_function') \
            == 'SoftmaxCrossEntropyWithLogits'
     assert res.get('algorithm', {}).get('network') == 'ResNet'
     assert res.get('hyper_parameters', {}).get('optimizer') == 'Momentum'
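Examples such as #7 above and #8-#10 below accept *args and set args[0].return_value or args[1].return_value: in the original test module these methods are decorated with unittest.mock.patch, and the mocks are passed as positional arguments in bottom-up decorator order. A minimal sketch of that pattern, with hypothetical patch targets standing in for the MindInsight internals the real tests stub out:

    from unittest import mock

    # Decorators apply bottom-up: the mock from the decorator closest to the
    # function arrives first (args[0]), the next one is args[1], and so on.
    @mock.patch('pkg.module.get_lineage_file_path')   # hypothetical target -> args[1]
    @mock.patch('pkg.module.get_dataset_batch_size')  # hypothetical target -> args[0]
    def test_train_with_customized_network(self, *args):
        args[0].return_value = 64            # stubbed batch size
        args[1].return_value = '/tmp/fake'   # stubbed lineage file path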
Example #8
    def test_train_multi_eval(self, *args):
        """Callback for train once and eval twice."""
        args[0].return_value = 10
        summary_dir = os.path.join(BASE_SUMMARY_DIR, 'train_multi_eval')
        make_directory(summary_dir)
        args[1].return_value = os.path.join(
            summary_dir, 'train_out.events.summary.1590107366.ubuntu_lineage')
        train_callback = TrainLineage(summary_dir, True)
        train_callback.begin(RunContext(self.run_context))
        train_callback.end(RunContext(self.run_context))

        args[1].return_value = os.path.join(
            summary_dir, 'eval_out.events.summary.1590107367.ubuntu_lineage')
        eval_callback = EvalLineage(summary_dir, True)
        eval_run_context = self.run_context
        eval_run_context['valid_dataset'] = self.run_context['train_dataset']
        eval_run_context['metrics'] = {'accuracy': 0.79}
        eval_callback.end(RunContext(eval_run_context))
        res = get_summary_lineage(summary_dir)
        assert res.get('metric', {}).get('accuracy') == 0.79

        args[1].return_value = os.path.join(
            summary_dir, 'eval_out.events.summary.1590107368.ubuntu_lineage')
        eval_callback = EvalLineage(summary_dir, True)
        eval_run_context = self.run_context
        eval_run_context['valid_dataset'] = self.run_context['train_dataset']
        eval_run_context['metrics'] = {'accuracy': 0.80}
        eval_callback.end(RunContext(eval_run_context))
        res = get_summary_lineage(summary_dir)
        assert res.get('metric', {}).get('accuracy') == 0.80
        if os.path.exists(summary_dir):
            shutil.rmtree(summary_dir)
Example #9
 def test_train_eval(self, *args):
     """Callback for train once and eval once."""
     args[0].return_value = 10
     summary_dir = os.path.join(BASE_SUMMARY_DIR, 'train_eval')
     make_directory(summary_dir)
     args[1].return_value = os.path.join(
         summary_dir,
         f'train_out.events.summary.{str(int(time.time()))}.ubuntu_lineage')
     train_callback = TrainLineage(summary_dir)
     train_callback.begin(RunContext(self.run_context))
     train_callback.end(RunContext(self.run_context))
     args[1].return_value = os.path.join(
         summary_dir,
         f'eval_out.events.summary.{str(int(time.time())+1)}.ubuntu_lineage'
     )
     eval_callback = EvalLineage(summary_dir)
     eval_run_context = self.run_context
     eval_run_context['metrics'] = {'accuracy': 0.78}
     eval_run_context['valid_dataset'] = self.run_context['train_dataset']
     eval_run_context['step_num'] = 32
     eval_callback.end(RunContext(eval_run_context))
     res = get_summary_lineage(summary_dir)
     assert res.get('hyper_parameters', {}).get('loss_function') \
         == 'SoftmaxCrossEntropyWithLogits'
     assert res.get('algorithm', {}).get('network') == 'ResNet'
     if os.path.exists(summary_dir):
         shutil.rmtree(summary_dir)
Example #10
    def test_multiple_trains(self, *args):
        """
        Call TrainLineage and EvalLineage multiple times.

        Write TrainLineage and EvalLineage records to different files under the same directory.
        EvalLineage log files end with '_lineage'.
        """
        args[0].return_value = 10
        for i in range(2):
            summary_record = SummaryRecord(SUMMARY_DIR_2,
                                           create_time=int(time.time()) + i)
            eval_record = SummaryRecord(SUMMARY_DIR_2,
                                        create_time=int(time.time() + 10) + i)
            args[1].return_value = os.path.join(
                SUMMARY_DIR_2,
                f'train_out.events.summary.{str(int(time.time()) + 2*i)}.ubuntu_lineage'
            )
            train_callback = TrainLineage(summary_record, True)
            train_callback.begin(RunContext(self.run_context))
            train_callback.end(RunContext(self.run_context))

            args[1].return_value = os.path.join(
                SUMMARY_DIR_2,
                f'eval_out.events.summary.{str(int(time.time()) + 2*i + 1)}.ubuntu_lineage'
            )
            eval_callback = EvalLineage(eval_record, True)
            eval_run_context = self.run_context
            eval_run_context['metrics'] = {'accuracy': 0.78 + i + 1}
            eval_run_context['valid_dataset'] = self.run_context[
                'train_dataset']
            eval_run_context['step_num'] = 32
            eval_callback.end(RunContext(eval_run_context))
        file_num = os.listdir(SUMMARY_DIR_2)
        assert len(file_num) == 8
Example #11
 def test_training_end(self, *args):
     """Test the end function in TrainLineage."""
     args[0].return_value = 64
     train_callback = TrainLineage(self.summary_record, True)
     train_callback.initial_learning_rate = 0.12
     train_callback.end(RunContext(self.run_context))
     res = get_summary_lineage(SUMMARY_DIR)
     assert res.get('hyper_parameters', {}).get('epoch') == 10
     run_context = self.run_context
     run_context['epoch_num'] = 14
     train_callback.end(RunContext(run_context))
     res = get_summary_lineage(SUMMARY_DIR)
     assert res.get('hyper_parameters', {}).get('epoch') == 14
Example #12
    def setup_class(cls):
        """Setup method."""
        cls.optimizer = Momentum(Tensor(0.12))
        cls.loss_fn = SoftmaxCrossEntropyWithLogits()
        cls.net = ResNet()

        cls.run_context = dict()
        cls.run_context['train_network'] = cls.net
        cls.run_context['loss_fn'] = cls.loss_fn
        cls.run_context['net_outputs'] = Tensor(np.array([0.03]))
        cls.run_context['optimizer'] = cls.optimizer
        cls.run_context['train_dataset'] = MindDataset(dataset_size=32)
        cls.run_context['epoch_num'] = 10
        cls.run_context['cur_step_num'] = 320
        cls.run_context['parallel_mode'] = "stand_alone"
        cls.run_context['device_number'] = 2
        cls.run_context['batch_num'] = 32
        cls.summary_record = SummaryRecord(SUMMARY_DIR)
        callback = [
            ModelCheckpoint(directory=SUMMARY_DIR),
            SummaryStep(cls.summary_record),
            TrainLineage(cls.summary_record)
        ]
        cls.run_context['list_callback'] = _ListCallback(callback)
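The fixture above fills run_context with the fields MindSpore normally gathers into its callback parameters during training (Momentum, ResNet, MindDataset and the other classes are lightweight test stand-ins here). In a real run the framework hands each callback a RunContext wrapping those parameters; a minimal sketch of how a callback reads them, assuming only the public Callback/RunContext API:

    from mindspore.train.callback import Callback

    class PrintLineageFields(Callback):
        """Illustrative callback reading a few of the fields TrainLineage consumes."""

        def begin(self, run_context):
            cb_params = run_context.original_args()
            # .get() works for the plain dict used in these tests as well as for
            # MindSpore's dict-like internal callback-parameter object.
            print(cb_params.get('epoch_num'), cb_params.get('batch_num'),
                  cb_params.get('optimizer'))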
Example #13
 def test_raise_exception_record_trainlineage(self, *args):
     """Test exception when error happened after recording training infos."""
     if os.path.exists(SUMMARY_DIR_3):
         shutil.rmtree(SUMMARY_DIR_3)
     args[1].side_effect = MindInsightException(
         error=LineageErrors.PARAM_RUN_CONTEXT_ERROR,
         message="RunContext error.")
     summary_record = SummaryRecord(SUMMARY_DIR_3)
     train_callback = TrainLineage(summary_record, True)
     train_callback.begin(RunContext(self.run_context))
     full_file_name = train_callback.lineage_summary.lineage_log_path
     file_size1 = os.path.getsize(full_file_name)
     train_callback.end(RunContext(self.run_context))
     file_size2 = os.path.getsize(full_file_name)
     assert file_size2 > file_size1
     eval_callback = EvalLineage(summary_record, False)
     eval_callback.end(RunContext(self.run_context))
     file_size3 = os.path.getsize(full_file_name)
     assert file_size3 == file_size2
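Outside of tests, the same callbacks are attached through MindSpore's high-level Model API so that training and evaluation lineage is written alongside the other summary data. A minimal usage sketch, assuming the standard Model.train/Model.eval signatures; net, loss_fn, optimizer and the datasets are placeholders defined elsewhere:

    from mindspore import Model
    from mindspore.train.summary import SummaryRecord

    summary_record = SummaryRecord('./summary_dir')  # hypothetical output directory
    model = Model(net, loss_fn=loss_fn, optimizer=optimizer,  # placeholder objects
                  metrics={'accuracy'})

    # TrainLineage records hyper-parameters, dataset and network info at train
    # begin/end; EvalLineage records metrics when evaluation ends.
    model.train(10, train_dataset, callbacks=[TrainLineage(summary_record)])
    model.eval(valid_dataset, callbacks=[EvalLineage(summary_record)])

    summary_record.close()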