Example 1
    def test_train_multi_eval(self, *args):
        """Run one training phase followed by two evaluation phases.

        Each evaluation writes its own lineage file and must surface its
        own accuracy metric from ``get_summary_lineage``.
        """
        args[0].return_value = 10
        summary_dir = os.path.join(BASE_SUMMARY_DIR, 'train_multi_eval')
        make_directory(summary_dir)
        args[1].return_value = os.path.join(
            summary_dir, 'train_out.events.summary.1590107366.ubuntu_lineage')
        trainer = TrainLineage(summary_dir, True)
        trainer.begin(RunContext(self.run_context))
        trainer.end(RunContext(self.run_context))

        # Two evaluation rounds: (lineage file name, expected accuracy).
        eval_rounds = (
            ('eval_out.events.summary.1590107367.ubuntu_lineage', 0.79),
            ('eval_out.events.summary.1590107368.ubuntu_lineage', 0.80),
        )
        for file_name, accuracy in eval_rounds:
            args[1].return_value = os.path.join(summary_dir, file_name)
            evaluator = EvalLineage(summary_dir, True)
            context = self.run_context
            context['valid_dataset'] = self.run_context['train_dataset']
            context['metrics'] = {'accuracy': accuracy}
            evaluator.end(RunContext(context))
            lineage = get_summary_lineage(summary_dir)
            assert lineage.get('metric', {}).get('accuracy') == accuracy

        if os.path.exists(summary_dir):
            shutil.rmtree(summary_dir)
Example 2
 def test_raise_exception_non_lineage_file(self):
     """Test exception when lineage summary file cannot be found.

     A summary file whose suffix does not match the lineage pattern
     must make ``get_summary_lineage`` raise LineageFileNotFoundError.
     """
     log_dir = os.path.join(BASE_SUMMARY_DIR, 'run4')
     # Start from a clean directory so only the non-lineage file exists.
     if os.path.exists(log_dir):
         shutil.rmtree(log_dir)
     record = SummaryRecord(log_dir, file_suffix='_MS_lineage')
     file_path = record.full_file_name
     assert file_path.endswith('_lineage')
     assert os.path.isfile(file_path)
     with self.assertRaisesRegex(LineageFileNotFoundError,
                                 'no summary log file'):
         get_summary_lineage(log_dir)
Example 3
 def test_training_end(self, *args):
     """Test the end function in TrainLineage.

     Calls ``end`` twice with different epoch counts and checks that the
     recorded hyper-parameter tracks the latest run context.
     """
     args[0].return_value = 64
     callback = TrainLineage(SUMMARY_DIR, True, self.user_defined_info)
     callback.initial_learning_rate = 0.12
     callback.end(RunContext(self.run_context))
     lineage = get_summary_lineage(SUMMARY_DIR)
     assert lineage.get('hyper_parameters', {}).get('epoch') == 10
     context = self.run_context
     context['epoch_num'] = 14
     callback.end(RunContext(context))
     lineage = get_summary_lineage(SUMMARY_DIR)
     assert lineage.get('hyper_parameters', {}).get('epoch') == 14
Example 4
 def test_train_with_customized_network(self, *args):
     """Test train with customized network.

     Builds a hand-wired train network (loss wrapper + one-step trainer)
     instead of relying on optimizer/loss_fn entries in the run context,
     then checks that the lineage collector still extracts the loss
     function, network and optimizer names from the network object.
     """
     args[0].return_value = 64
     train_callback = TrainLineage(self.summary_record, True)
     # NOTE: this aliases (does not copy) self.run_context, so the
     # deletions below mutate the shared fixture dict.
     run_context_customized = self.run_context
     # Remove the entries the lineage code would normally read, forcing
     # it to introspect the train_network instead.
     del run_context_customized['optimizer']
     del run_context_customized['net_outputs']
     del run_context_customized['loss_fn']
     net = WithLossCell(self.net, self.loss_fn)
     net_cap = net
     # Overwrite the private _cells mapping to mimic the internal cell
     # layout the lineage extractor walks — presumably mirroring what
     # WithLossCell builds itself; verify against the real class.
     net_cap._cells = {'_backbone': self.net, '_loss_fn': self.loss_fn}
     net = TrainOneStep(net, self.optimizer)
     net._cells = {
         'optimizer': self.optimizer,
         'network': net_cap,
         'backbone': self.net
     }
     run_context_customized['train_network'] = net
     train_callback.begin(RunContext(run_context_customized))
     train_callback.end(RunContext(run_context_customized))
     res = get_summary_lineage(SUMMARY_DIR)
     assert res.get('hyper_parameters', {}).get('loss_function') \
            == 'SoftmaxCrossEntropyWithLogits'
     assert res.get('algorithm', {}).get('network') == 'ResNet'
     assert res.get('hyper_parameters', {}).get('optimizer') == 'Momentum'
Example 5
 def test_train_eval(self, *args):
     """Callback for train once and eval once.

     Writes a train lineage file and then an eval lineage file (with a
     later timestamp) and checks the merged lineage query result.
     """
     args[0].return_value = 10
     summary_dir = os.path.join(BASE_SUMMARY_DIR, 'train_eval')
     make_directory(summary_dir)
     train_file = 'train_out.events.summary.{}.ubuntu_lineage'.format(
         str(int(time.time())))
     args[1].return_value = os.path.join(summary_dir, train_file)
     trainer = TrainLineage(summary_dir)
     trainer.begin(RunContext(self.run_context))
     trainer.end(RunContext(self.run_context))

     # +1 second guarantees the eval file sorts after the train file.
     eval_file = 'eval_out.events.summary.{}.ubuntu_lineage'.format(
         str(int(time.time()) + 1))
     args[1].return_value = os.path.join(summary_dir, eval_file)
     evaluator = EvalLineage(summary_dir)
     context = self.run_context
     context['metrics'] = {'accuracy': 0.78}
     context['valid_dataset'] = self.run_context['train_dataset']
     context['step_num'] = 32
     evaluator.end(RunContext(context))
     lineage = get_summary_lineage(summary_dir)
     assert lineage.get('hyper_parameters', {}).get('loss_function') \
         == 'SoftmaxCrossEntropyWithLogits'
     assert lineage.get('algorithm', {}).get('network') == 'ResNet'
     if os.path.exists(summary_dir):
         shutil.rmtree(summary_dir)
Example 6
 def test_get_summary_lineage_failed3(self, mock_summary, mock_valid,
                                      mock_parser, mock_file_handler):
     """Test get_summary_lineage failed.

     When the summary parser yields nothing, the query must return an
     empty dict even though a summary file exists and is non-empty.
     """
     mock_summary.return_value = ['/path/to/summary/file']
     mock_valid.return_value = '/path/to/summary_dir'
     mock_parser.return_value = None
     # BUGFIX: the original rebound the injected `mock_file_handler`
     # parameter to a fresh MagicMock, so the patched FileHandler was
     # never configured. Configure the instance the patch returns.
     mock_file_handler.return_value.size = 1
     result = get_summary_lineage('/path/to/summary_dir')
     assert {} == result
Example 7
    def test_get_summary_lineage(self):
        """Test the interface of get_summary_lineage.

        Covers the full query, key-filtered queries, an empty lineage
        file, and an empty keys list.
        """
        run_dir = os.path.join(BASE_SUMMARY_DIR, 'run1')
        hyper_parameters = {
            'optimizer': 'Momentum',
            'learning_rate': 0.12,
            'loss_function': 'SoftmaxCrossEntropyWithLogits',
            'epoch': 14,
            'parallel_mode': 'stand_alone',
            'device_num': 2,
            'batch_size': 32
        }
        expect_partial_res1 = {
            'summary_dir': run_dir,
            'hyper_parameters': hyper_parameters
        }
        expect_partial_res2 = {
            'summary_dir': run_dir,
            'metric': {'accuracy': 0.78},
            'algorithm': {'network': 'ResNet'}
        }
        assert_equal_lineages(LINEAGE_INFO_RUN1,
                              get_summary_lineage(SUMMARY_DIR),
                              self.assertDictEqual)
        assert_equal_lineages(
            expect_partial_res1,
            get_summary_lineage(SUMMARY_DIR, ['hyper_parameters']),
            self.assertDictEqual)
        assert_equal_lineages(
            expect_partial_res2,
            get_summary_lineage(SUMMARY_DIR, ['metric', 'algorithm']),
            self.assertDictEqual)

        # The lineage summary file is empty.
        assert {} == get_summary_lineage(self.dir_with_empty_lineage)

        # Keys is an empty list: only the summary dir comes back.
        assert {'summary_dir': SUMMARY_DIR} == \
            get_summary_lineage(SUMMARY_DIR, [])
Example 8
    def test_get_summary_lineage_failed3(self, mock_summary, mock_querier,
                                         mock_valid):
        """Test get_summary_lineage failed.

        A parse failure yields an empty result; a querier construction
        failure is wrapped in LineageQuerySummaryDataError.
        """
        mock_valid.return_value = '/path/to/summary_dir'
        mock_summary.return_value = '/path/to/summary/file'
        mock_querier.return_value.get_summary_lineage.side_effect = \
            LineageSummaryParseException()
        assert get_summary_lineage('/path/to/summary_dir') == {}

        mock_querier.side_effect = LineageQuerierParamException(['keys'],
                                                                'key')
        self.assertRaisesRegex(LineageQuerySummaryDataError,
                               'Get summary lineage failed',
                               get_summary_lineage, '/path/to/summary_dir')
Example 9
    def test_get_summary_lineage_success(self, isdir_mock, parser_mock,
                                         qurier_mock):
        """Test the function of get_summary_lineage.

        With a querier returning one lineage record, a key-filtered
        query must return exactly that record's content.
        """
        isdir_mock.return_value = True
        parser_mock.return_value = MagicMock()

        querier = MagicMock()
        querier.get_summary_lineage.return_value = [
            {'algorithm': {'network': 'ResNet'}},
        ]
        qurier_mock.return_value = querier
        result = get_summary_lineage('/path/to/summary_dir',
                                     keys=['algorithm'])
        self.assertEqual(result, {'algorithm': {'network': 'ResNet'}})
Example 10
def get_dataset_graph():
    """
    Get dataset graph.

    Resolves the ``train_id`` query parameter against the summary base
    dir, queries the lineage store for the ``dataset_graph`` key, and
    rewrites the returned summary dir as a base-relative path.

    Returns:
        str, the dataset graph information.

    Raises:
        MindInsightException: If method fails to be called.
        ParamValueError: If summary_dir is invalid.

    Examples:
        >>> GET http://xxxx/v1/mindinsight/datasets/dataset_graph?train_id=xxx
    """

    summary_base_dir = str(settings.SUMMARY_BASE_DIR)
    summary_dir = get_train_id(request)
    if summary_dir.startswith('/'):
        # NOTE(review): validate_path's return value is discarded here
        # but used in the './' branch — confirm whether the absolute
        # path should also be replaced by the validated/normalized one.
        validate_path(summary_dir)
    elif summary_dir.startswith('./'):
        summary_dir = os.path.join(summary_base_dir, summary_dir[2:])
        summary_dir = validate_path(summary_dir)
    else:
        raise ParamValueError("Summary dir should be absolute path or "
                              "relative path that relate to summary base dir.")
    try:
        dataset_graph = get_summary_lineage(summary_dir=summary_dir,
                                            keys=['dataset_graph'])
    except MindInsightException as exception:
        # Re-raise with a fixed HTTP 400 status for the API layer.
        raise MindInsightException(exception.error,
                                   exception.message,
                                   http_code=400)

    if dataset_graph:
        summary_dir_result = dataset_graph.get('summary_dir')
        base_dir_len = len(summary_base_dir)
        if summary_base_dir == summary_dir_result:
            relative_dir = './'
        else:
            # NOTE(review): the comparison above uses summary_dir_result
            # but the slice below uses summary_dir; this assumes both
            # live under summary_base_dir — confirm for absolute paths
            # outside the base dir, where the slice would be wrong.
            relative_dir = os.path.join(os.curdir,
                                        summary_dir[base_dir_len + 1:])
        dataset_graph['summary_dir'] = relative_dir

    return jsonify(dataset_graph)
Example 11
 def test_eval_only(self):
     """Test record evaluation event only.

     Without any training event, the lineage query must return the
     metric and an empty dataset graph.
     """
     summary_dir = os.path.join(BASE_SUMMARY_DIR, 'eval_only_dir')
     record = SummaryRecord(summary_dir)
     context = self.run_context
     context['metrics'] = {'accuracy': 0.58}
     context['valid_dataset'] = self.run_context['train_dataset']
     context['step_num'] = 32
     EvalLineage(record).end(RunContext(context))
     result = get_summary_lineage(summary_dir, ['metric', 'dataset_graph'])
     assert result == {
         'summary_dir': summary_dir,
         'dataset_graph': {},
         'metric': {'accuracy': 0.58},
     }
     shutil.rmtree(summary_dir)
Example 12
    def test_get_summary_lineage(self):
        """Test the interface of get_summary_lineage.

        Covers the full query, key-filtered queries, an empty lineage
        file, and an empty keys list.
        """
        run_dir = os.path.join(BASE_SUMMARY_DIR, 'run1')
        # Shared sub-dicts: the full result and the filtered results
        # must agree on these sections.
        hyper_parameters = {
            'optimizer': 'Momentum',
            'learning_rate': 0.11999999731779099,
            'loss_function': 'SoftmaxCrossEntropyWithLogits',
            'epoch': 14,
            'parallel_mode': 'stand_alone',
            'device_num': 2,
            'batch_size': 32
        }
        metric = {'accuracy': 0.78}
        algorithm = {'network': 'ResNet'}
        expect_total_res = {
            'summary_dir': run_dir,
            'metric': metric,
            'hyper_parameters': hyper_parameters,
            'algorithm': algorithm,
            'train_dataset': {'train_dataset_size': 731},
            'valid_dataset': {'valid_dataset_size': 10240},
            'model': {
                'path': '{"ckpt": "'
                        + BASE_SUMMARY_DIR + '/run1/CKPtest_model.ckpt"}',
                'size': 64
            },
            'dataset_graph': DATASET_GRAPH
        }
        assert expect_total_res == get_summary_lineage(SUMMARY_DIR)
        assert {'summary_dir': run_dir,
                'hyper_parameters': hyper_parameters} == \
            get_summary_lineage(SUMMARY_DIR, ['hyper_parameters'])
        assert {'summary_dir': run_dir,
                'metric': metric,
                'algorithm': algorithm} == \
            get_summary_lineage(SUMMARY_DIR, ['metric', 'algorithm'])

        # The lineage summary file is empty.
        assert {} == get_summary_lineage(self.dir_with_empty_lineage)

        # Keys is an empty list: only the summary dir comes back.
        assert {'summary_dir': SUMMARY_DIR} == \
            get_summary_lineage(SUMMARY_DIR, [])