Code example #1
    def test_parse_fail_summary_logs_2(self, *args):
        """Test the function of parsing fail summary logs."""
        args[0].return_value = create_lineage_info(
            event_data.EVENT_TRAIN_DICT_0,
            event_data.EVENT_EVAL_DICT_0,
            event_data.EVENT_DATASET_DICT_0,
        )

        summary_path = ['/path/to/summary0/log0']
        querier = Querier(summary_path)
        querier._parse_failed_paths.append('/path/to/summary1/log1')

        args[0].return_value = create_lineage_info(None, None, None)
        expected_result = [{
            'summary_dir':
            '/path/to/summary0',
            **event_data.EVENT_TRAIN_DICT_0['train_lineage'], 'metric':
            event_data.METRIC_0,
            'valid_dataset':
            event_data.EVENT_EVAL_DICT_0['evaluation_lineage']
            ['valid_dataset'],
            'dataset_graph':
            event_data.DATASET_DICT_0
        }]
        result = querier.get_summary_lineage()
        self.assertListEqual(expected_result, result)
        self.assertListEqual(['/path/to/summary1/log1'],
                             querier._parse_failed_paths)
Code example #2
File: test_querier.py  Project: wenkai128/mindinsight
    def setUp(self, mock_file_handler, *args):
        """Initialization before test case execution."""
        args[0].return_value = create_lineage_info(
            event_data.EVENT_TRAIN_DICT_0, event_data.EVENT_EVAL_DICT_0,
            event_data.EVENT_DATASET_DICT_0)
        args[1].return_value = []
        args[3].return_value = ['path']
        mock_file_handler = MagicMock()
        mock_file_handler.size = 1

        args[2].return_value = [{'relative_path': './', 'update_time': 1}]
        single_summary_path = '/path/to/summary0'
        lineage_objects = LineageOrganizer(
            summary_base_dir=single_summary_path).super_lineage_objs
        self.single_querier = Querier(lineage_objects)

        lineage_infos = get_lineage_infos()
        args[0].side_effect = lineage_infos
        summary_base_dir = '/path/to'
        relative_dirs = []
        for i in range(7):
            relative_dirs.append(
                dict(relative_path=f'./summary{i}',
                     update_time=time.time() - i))
        args[2].return_value = relative_dirs
        lineage_objects = LineageOrganizer(
            summary_base_dir=summary_base_dir).super_lineage_objs
        self.multi_querier = Querier(lineage_objects)
Code example #3
    def test_parse_fail_summary_logs_1(self, *args):
        """Test the function of parsing fail summary logs."""
        lineage_infos = get_lineage_infos()
        args[0].side_effect = lineage_infos

        summary_path = ['/path/to/summary0/log0']
        querier = Querier(summary_path)
        querier._parse_failed_paths.append('/path/to/summary1/log1')
        expected_result = [{
            'summary_dir':
            '/path/to/summary0',
            **event_data.EVENT_TRAIN_DICT_0['train_lineage'], 'metric':
            event_data.METRIC_0,
            'valid_dataset':
            event_data.EVENT_EVAL_DICT_0['evaluation_lineage']
            ['valid_dataset'],
            'dataset_graph':
            event_data.DATASET_DICT_0
        }, {
            'summary_dir':
            '/path/to/summary1',
            **event_data.EVENT_TRAIN_DICT_1['train_lineage'], 'metric':
            event_data.METRIC_1,
            'valid_dataset':
            event_data.EVENT_EVAL_DICT_1['evaluation_lineage']
            ['valid_dataset'],
            'dataset_graph':
            event_data.DATASET_DICT_0
        }]
        result = querier.get_summary_lineage()
        self.assertListEqual(expected_result, result)
        self.assertListEqual([], querier._parse_failed_paths)
Code example #4
File: test_querier.py  Project: wenkai128/mindinsight
    def test_init_fail(self):
        """Test the function of init with exception."""
        obj_dict = 'a'
        with self.assertRaises(LineageParamTypeError):
            Querier(obj_dict)

        obj_dict = None
        with self.assertRaises(LineageQuerierParamException):
            Querier(obj_dict)
Code example #5
    def test_init_fail(self, *args):
        """Test the function of init with exception."""
        summary_path = {'xxx': 1}
        with self.assertRaises(LineageParamTypeError):
            Querier(summary_path)

        summary_path = None
        with self.assertRaises(LineageQuerierParamException):
            Querier(summary_path)

        args[0].side_effect = LineageSummaryAnalyzeException
        summary_path = '/path/to/summary0/log0'
        with self.assertRaises(LineageSummaryParseException):
            Querier(summary_path)
Code example #6
def general_get_summary_lineage(data_manager=None,
                                summary_dir=None,
                                keys=None):
    """
    Get summary lineage from data_manager or parsing from summaries.

    One of data_manager or summary_dir needs to be specified. Supports getting
    the super_lineage_obj from data_manager or parsing summaries under summary_dir.

    Args:
        data_manager (DataManager): Data manager defined as
            mindinsight.datavisual.data_transform.data_manager.DataManager.
        summary_dir (str): The summary directory. It contains summary logs for
            one training.
        keys (list[str]): The filter keys of lineage information. The acceptable
            keys are `metric`, `user_defined`, `hyper_parameters`, `algorithm`,
            `train_dataset`, `model`, `valid_dataset` and `dataset_graph`.
            If it is `None`, all information will be returned. Default: None.

    Returns:
        dict, the lineage information for one training.

    Raises:
        LineageParamSummaryPathError: If summary path is invalid.
        LineageQuerySummaryDataError: If querying summary data fails.
        LineageFileNotFoundError: If the summary log file is not found.

    """
    default_result = {}
    if data_manager is None and summary_dir is None:
        raise LineageParamTypeError(
            "One of data_manager or summary_dir needs to be specified.")
    if data_manager is not None and summary_dir is None:
        raise LineageParamTypeError(
            "If data_manager is specified, the summary_dir needs to be "
            "specified as relative path.")

    if keys is not None:
        validate_filter_key(keys)

    if data_manager is None:
        normalize_summary_dir(summary_dir)
        super_lineage_obj = LineageParser(summary_dir).super_lineage_obj
    else:
        validate_train_id(summary_dir)
        super_lineage_obj = LineageOrganizer(
            data_manager=data_manager).get_super_lineage_obj(summary_dir)

    if super_lineage_obj is None:
        return default_result

    try:
        result = Querier({
            summary_dir: super_lineage_obj
        }).get_summary_lineage(summary_dir, keys)
    except (LineageQuerierParamException, LineageParamTypeError) as error:
        log.error(str(error))
        log.exception(error)
        raise LineageQuerySummaryDataError("Get summary lineage failed.")
    return result[0]
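
A minimal usage sketch for general_get_summary_lineage based on the code above. The import path and the summary directory are placeholders (assumptions, not taken from the snippet); only keyword arguments shown in the signature are used.

from mindinsight.lineagemgr.model import general_get_summary_lineage  # import path assumed

# Parse the summary logs under one training directory and keep only two of
# the acceptable filter keys.
lineage = general_get_summary_lineage(
    summary_dir='/path/to/summary0',      # hypothetical summary directory
    keys=['metric', 'hyper_parameters'])  # keys must come from the documented list
print(lineage.get('metric', {}))
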
Code example #7
def general_filter_summary_lineage(data_manager=None,
                                   summary_base_dir=None,
                                   search_condition=None,
                                   added=False):
    """
    Filter summary lineage from data_manager or parsing from summaries.

    One of data_manager or summary_base_dir needs to be specified. Supports getting
    the super_lineage_obj from data_manager or parsing summaries under summary_base_dir.

    Args:
        data_manager (DataManager): Data manager defined as
            mindinsight.datavisual.data_transform.data_manager.DataManager.
        summary_base_dir (str): The summary base directory. It contains summary
            directories generated by training.
        search_condition (dict): The search condition.
    """
    if data_manager is None and summary_base_dir is None:
        raise LineageParamTypeError(
            "One of data_manager or summary_base_dir needs to be specified.")

    if data_manager is None:
        summary_base_dir = normalize_summary_dir(summary_base_dir)
    else:
        summary_base_dir = data_manager.summary_base_dir

    search_condition = {} if search_condition is None else search_condition

    try:
        validate_condition(search_condition)
        validate_search_model_condition(SearchModelConditionParameter,
                                        search_condition)
    except MindInsightException as error:
        log.error(str(error))
        log.exception(error)
        raise LineageSearchConditionParamError(str(error.message))

    try:
        search_condition = _convert_relative_path_to_abspath(
            summary_base_dir, search_condition)
    except (LineageParamValueError, LineageDirNotExistError) as error:
        log.error(str(error))
        log.exception(error)
        raise LineageParamSummaryPathError(str(error.message))

    try:
        lineage_objects = LineageOrganizer(data_manager,
                                           summary_base_dir).super_lineage_objs
        result = Querier(lineage_objects).filter_summary_lineage(
            condition=search_condition, added=added)
    except LineageSummaryParseException:
        result = {'object': [], 'count': 0}
    except (LineageQuerierParamException, LineageParamTypeError) as error:
        log.error(str(error))
        log.exception(error)
        raise LineageQuerySummaryDataError("Filter summary lineage failed.")

    return result
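
A minimal usage sketch for general_filter_summary_lineage based on the code above. The import path and the base directory are assumptions; the search_condition only uses fields and comparison operators documented as supported by the lineage filter API (learning_rate, sorted_name, limit, offset).

from mindinsight.lineagemgr.model import general_filter_summary_lineage  # import path assumed

search_condition = {
    'learning_rate': {'gt': 0.01, 'lt': 0.5},  # range filter on a supported field
    'sorted_name': 'summary_dir',              # sort results by summary directory
    'limit': 10,                               # records per page
    'offset': 0,                               # page number
}
result = general_filter_summary_lineage(
    summary_base_dir='/path/to',               # hypothetical summary base directory
    search_condition=search_condition)
print(result['count'], len(result['object']))
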
Code example #8
    def setUp(self, *args):
        """Initialization before test case execution."""
        args[0].return_value = create_lineage_info(
            event_data.EVENT_TRAIN_DICT_0, event_data.EVENT_EVAL_DICT_0,
            event_data.EVENT_DATASET_DICT_0)

        single_summary_path = '/path/to/summary0/log0'
        self.single_querier = Querier(single_summary_path)

        lineage_infos = get_lineage_infos()
        args[0].side_effect = lineage_infos
        summary_paths = [
            '/path/to/summary0/log0', '/path/to/summary1/log1',
            '/path/to/summary2/log2', '/path/to/summary3/log3',
            '/path/to/summary4/log4', '/path/to/summary5/log5',
            '/path/to/summary6/log6'
        ]
        self.multi_querier = Querier(summary_paths)
Code example #9
def get_summary_lineage(summary_dir, keys=None):
    """
    Get the lineage information according to summary directory and keys.

    The function queries the lineage information of a single training process
    corresponding to the given summary directory. Users can filter the returned
    information with `keys`.

    Args:
        summary_dir (str): The summary directory. It contains summary logs for
            one training.
        keys (list[str]): The filter keys of lineage information. The acceptable
            keys are `metric`, `hyper_parameters`, `algorithm`, `train_dataset`,
            `model`, `valid_dataset` and `dataset_graph`. If it is `None`, all
            information will be returned. Default: None.

    Returns:
        dict, the lineage information for one training.

    Raises:
        LineageParamSummaryPathError: If summary path is invalid.
        LineageQuerySummaryDataError: If querying summary data fails.
        LineageFileNotFoundError: If the summary log file is not found.

    Examples:
        >>> summary_dir = "/path/to/summary"
        >>> summary_lineage_info = get_summary_lineage(summary_dir)
        >>> hyper_parameters = get_summary_lineage(summary_dir, keys=["hyper_parameters"])
    """
    try:
        summary_dir = validate_path(summary_dir)
    except MindInsightException as error:
        log.error(str(error))
        log.exception(error)
        raise LineageParamSummaryPathError(str(error.message))

    if keys is not None:
        validate_filter_key(keys)

    summary_path = SummaryPathParser.get_latest_lineage_summary(summary_dir)
    if summary_path is None:
        log.error('There is no summary log file under summary_dir.')
        raise LineageFileNotFoundError(
            'There is no summary log file under summary_dir.')

    try:
        result = Querier(summary_path).get_summary_lineage(summary_dir,
                                                           filter_keys=keys)
    except LineageSummaryParseException:
        return {}
    except (LineageQuerierParamException, LineageParamTypeError) as error:
        log.error(str(error))
        log.exception(error)
        raise LineageQuerySummaryDataError("Get summary lineage failed.")

    return result[0]
Code example #10
File: test_querier.py  Project: rock4you/mindinsight
    def setUp(self, mock_file_handler, *args):
        """Initialization before test case execution."""
        args[0].return_value = create_lineage_info(
            event_data.EVENT_TRAIN_DICT_0, event_data.EVENT_EVAL_DICT_0,
            event_data.EVENT_DATASET_DICT_0)
        args[1].return_value = []
        args[2].return_value = ['path']
        mock_file_handler = MagicMock()
        mock_file_handler.size = 1

        summary_dir = '/path/test/'

        lineage_infos = get_lineage_infos()
        args[0].side_effect = lineage_infos
        lineage_objects = {}
        for i in range(7):
            train_id = f'./summary{i}'
            summary_dir = os.path.join(summary_dir, train_id)
            update_time = time.time() - i
            lineage_parser = LineageParser(train_id, summary_dir, update_time)
            lineage_objects.update(
                {train_id: lineage_parser.super_lineage_obj})

        self.multi_querier = Querier(lineage_objects)
Code example #11
def filter_summary_lineage(summary_base_dir, search_condition=None):
    """
    Filter the lineage information under summary base directory according to search condition.

    Users can filter and sort all lineage information according to the search
    condition. The supported filter fields include `summary_dir`, `network`,
    etc. The filter conditions include `eq`, `lt`, `gt`, `le`, `ge` and `in`,
    and these fields and conditions can be combined. To sort the results by a
    field, specify both `sorted_name` and `sorted_type`.

    Users can use `lineage_type` to decide what kind of lineage information to
    query. If the `lineage_type` is `dataset`, the query result is only the
    lineage information related to data augmentation. If the `lineage_type` is
    `model` or `None`, the query result is all lineage information.

    Users can paginate the query result with `offset` and `limit`. The `offset`
    is the page number and the `limit` is the number of records per page.

    Args:
        summary_base_dir (str): The summary base directory. It contains summary
            directories generated by training.
        search_condition (dict): The search condition. When filtering and
            sorting, in addition to the following supported fields, fields
            prefixed with `metric_` are also supported. The fields prefixed with
            `metric_` are related to the `metrics` parameter in the training
            script. For example, if the key of `metrics` parameter is
            `accuracy`, the field should be `metric_accuracy`. Default: None.

            - summary_dir (dict): The filter condition of summary directory.

            - loss_function (dict): The filter condition of loss function.

            - train_dataset_path (dict): The filter condition of train dataset path.

            - train_dataset_count (dict): The filter condition of train dataset count.

            - test_dataset_path (dict): The filter condition of test dataset path.

            - test_dataset_count (dict): The filter condition of test dataset count.

            - network (dict): The filter condition of network.

            - optimizer (dict): The filter condition of optimizer.

            - learning_rate (dict): The filter condition of learning rate.

            - epoch (dict): The filter condition of epoch.

            - batch_size (dict): The filter condition of batch size.

            - loss (dict): The filter condition of loss.

            - model_size (dict): The filter condition of model size.

            - dataset_mark (dict): The filter condition of dataset mark.

            - offset (int): Page number, the value range is [0, 100000].

            - limit (int): The number of records per page, the value range is [1, 100].

            - sorted_name (str): Specify which field to sort by.

            - sorted_type (str): Specify sort order. It can be `ascending` or
              `descending`.

            - lineage_type (str): It decides what kind of lineage information to
              query. It can be `dataset` or `model`. If it is `dataset`,
              the query result is only the lineage information related to data
              augmentation. If it is `model` or `None`, the query result is all
              lineage information.

    Returns:
        dict, all lineage information under summary base directory according to
        search condition.

    Raises:
        LineageSearchConditionParamError: If search_condition param is invalid.
        LineageParamSummaryPathError: If summary path is invalid.
        LineageFileNotFoundError: If the summary log file is not found.
        LineageQuerySummaryDataError: If querying summary log file data fails.

    Examples:
        >>> summary_base_dir = "/path/to/summary_base"
        >>> search_condition = {
        >>>     'summary_dir': {
        >>>         'in': [
        >>>             os.path.join(summary_base_dir, 'summary_1'),
        >>>             os.path.join(summary_base_dir, 'summary_2'),
        >>>             os.path.join(summary_base_dir, 'summary_3')
        >>>         ]
        >>>     },
        >>>     'loss': {
        >>>         'gt': 2.0
        >>>     },
        >>>     'batch_size': {
        >>>         'ge': 128,
        >>>         'le': 256
        >>>     },
        >>>     'metric_accuracy': {
        >>>         'lt': 0.1
        >>>     },
        >>>     'sorted_name': 'summary_dir',
        >>>     'sorted_type': 'descending',
        >>>     'limit': 3,
        >>>     'offset': 0,
        >>>     'lineage_type': 'model'
        >>> }
        >>> summary_lineage = filter_summary_lineage(summary_base_dir)
        >>> summary_lineage_filter = filter_summary_lineage(summary_base_dir, search_condition)
    """
    try:
        summary_base_dir = validate_path(summary_base_dir)
    except (LineageParamValueError, LineageDirNotExistError) as error:
        log.error(str(error))
        log.exception(error)
        raise LineageParamSummaryPathError(str(error.message))

    search_condition = {} if search_condition is None else search_condition

    try:
        validate_condition(search_condition)
        validate_search_model_condition(SearchModelConditionParameter,
                                        search_condition)
    except MindInsightException as error:
        log.error(str(error))
        log.exception(error)
        raise LineageSearchConditionParamError(str(error.message))

    try:
        search_condition = _convert_relative_path_to_abspath(
            summary_base_dir, search_condition)
    except (LineageParamValueError, LineageDirNotExistError) as error:
        log.error(str(error))
        log.exception(error)
        raise LineageParamSummaryPathError(str(error.message))

    summary_path = SummaryPathParser.get_latest_lineage_summaries(
        summary_base_dir)
    if not summary_path:
        log.error('There is no summary log file under summary_base_dir.')
        raise LineageFileNotFoundError(
            'There is no summary log file under summary_base_dir.')

    try:
        result = Querier(summary_path).filter_summary_lineage(
            condition=search_condition)
    except LineageSummaryParseException:
        result = {'object': [], 'count': 0}
    except (LineageQuerierParamException, LineageParamTypeError) as error:
        log.error(str(error))
        log.exception(error)
        raise LineageQuerySummaryDataError("Filter summary lineage failed.")

    return result
Code example #12
File: test_querier.py  Project: wenkai128/mindinsight
class TestQuerier(TestCase):
    """Test the class of `Querier`."""
    @mock.patch(
        'mindinsight.lineagemgr.lineage_parser.SummaryPathParser.get_lineage_summaries'
    )
    @mock.patch(
        'mindinsight.lineagemgr.lineage_parser.SummaryWatcher.list_summary_directories'
    )
    @mock.patch(
        'mindinsight.lineagemgr.lineage_parser.LineageSummaryAnalyzer.get_user_defined_info'
    )
    @mock.patch(
        'mindinsight.lineagemgr.lineage_parser.LineageSummaryAnalyzer.get_summary_infos'
    )
    @mock.patch('mindinsight.lineagemgr.lineage_parser.FileHandler')
    def setUp(self, mock_file_handler, *args):
        """Initialization before test case execution."""
        args[0].return_value = create_lineage_info(
            event_data.EVENT_TRAIN_DICT_0, event_data.EVENT_EVAL_DICT_0,
            event_data.EVENT_DATASET_DICT_0)
        args[1].return_value = []
        args[3].return_value = ['path']
        mock_file_handler = MagicMock()
        mock_file_handler.size = 1

        args[2].return_value = [{'relative_path': './', 'update_time': 1}]
        single_summary_path = '/path/to/summary0'
        lineage_objects = LineageOrganizer(
            summary_base_dir=single_summary_path).super_lineage_objs
        self.single_querier = Querier(lineage_objects)

        lineage_infos = get_lineage_infos()
        args[0].side_effect = lineage_infos
        summary_base_dir = '/path/to'
        relative_dirs = []
        for i in range(7):
            relative_dirs.append(
                dict(relative_path=f'./summary{i}',
                     update_time=time.time() - i))
        args[2].return_value = relative_dirs
        lineage_objects = LineageOrganizer(
            summary_base_dir=summary_base_dir).super_lineage_objs
        self.multi_querier = Querier(lineage_objects)

    def _deal_float_for_list(self, list1, list2):
        index = 0
        for _ in list1:
            deal_float_for_dict(list1[index], list2[index])
            index += 1

    def _assert_list_equal(self, list1, list2):
        self._deal_float_for_list(list1, list2)
        self.assertListEqual(list1, list2)

    def _assert_lineages_equal(self, lineages1, lineages2):
        self._deal_float_for_list(lineages1['object'], lineages2['object'])
        self.assertDictEqual(lineages1, lineages2)

    def test_get_summary_lineage_success_1(self):
        """Test the success of get_summary_lineage."""
        expected_result = [LINEAGE_INFO_0]
        result = self.single_querier.get_summary_lineage()
        self._assert_list_equal(expected_result, result)

    def test_get_summary_lineage_success_2(self):
        """Test the success of get_summary_lineage."""
        expected_result = [LINEAGE_INFO_0]
        result = self.single_querier.get_summary_lineage()
        self._assert_list_equal(expected_result, result)

    def test_get_summary_lineage_success_3(self):
        """Test the success of get_summary_lineage."""
        expected_result = [{
            'summary_dir':
            '/path/to/summary0',
            'model':
            event_data.EVENT_TRAIN_DICT_0['train_lineage']['model'],
            'algorithm':
            event_data.EVENT_TRAIN_DICT_0['train_lineage']['algorithm']
        }]
        result = self.single_querier.get_summary_lineage(
            filter_keys=['model', 'algorithm'])
        self._assert_list_equal(expected_result, result)

    def test_get_summary_lineage_success_4(self):
        """Test the success of get_summary_lineage."""
        expected_result = [
            LINEAGE_INFO_0, LINEAGE_INFO_1, {
                'summary_dir':
                '/path/to/summary2',
                **event_data.EVENT_TRAIN_DICT_2['train_lineage'], 'metric':
                event_data.METRIC_2,
                'valid_dataset':
                event_data.EVENT_EVAL_DICT_2['evaluation_lineage']
                ['valid_dataset'],
                'dataset_graph':
                event_data.DATASET_DICT_0
            }, {
                'summary_dir':
                '/path/to/summary3',
                **event_data.EVENT_TRAIN_DICT_3['train_lineage'], 'metric':
                event_data.METRIC_3,
                'valid_dataset':
                event_data.EVENT_EVAL_DICT_3['evaluation_lineage']
                ['valid_dataset'],
                'dataset_graph':
                event_data.DATASET_DICT_0
            }, {
                'summary_dir':
                '/path/to/summary4',
                **event_data.EVENT_TRAIN_DICT_4['train_lineage'], 'metric':
                event_data.METRIC_4,
                'valid_dataset':
                event_data.EVENT_EVAL_DICT_4['evaluation_lineage']
                ['valid_dataset'],
                'dataset_graph':
                event_data.DATASET_DICT_0
            }, {
                'summary_dir': '/path/to/summary5',
                **event_data.EVENT_TRAIN_DICT_5['train_lineage'], 'metric': {},
                'valid_dataset': {},
                'dataset_graph': event_data.DATASET_DICT_0
            }, {
                'summary_dir':
                '/path/to/summary6',
                'hyper_parameters': {},
                'algorithm': {},
                'model': {},
                'train_dataset': {},
                'metric':
                event_data.METRIC_5,
                'valid_dataset':
                event_data.EVENT_EVAL_DICT_5['evaluation_lineage']
                ['valid_dataset'],
                'dataset_graph':
                event_data.DATASET_DICT_0
            }
        ]
        result = self.multi_querier.get_summary_lineage()
        self._assert_list_equal(expected_result, result)

    def test_get_summary_lineage_success_5(self):
        """Test the success of get_summary_lineage."""
        expected_result = [LINEAGE_INFO_1]
        result = self.multi_querier.get_summary_lineage(
            summary_dir='/path/to/summary1')
        self._assert_list_equal(expected_result, result)

    def test_get_summary_lineage_success_6(self):
        """Test the success of get_summary_lineage."""
        expected_result = [{
            'summary_dir':
            '/path/to/summary0',
            'hyper_parameters':
            event_data.EVENT_TRAIN_DICT_0['train_lineage']['hyper_parameters'],
            'train_dataset':
            event_data.EVENT_TRAIN_DICT_0['train_lineage']['train_dataset'],
            'metric':
            event_data.METRIC_0,
            'valid_dataset':
            event_data.EVENT_EVAL_DICT_0['evaluation_lineage']['valid_dataset']
        }]
        filter_keys = [
            'metric', 'hyper_parameters', 'train_dataset', 'valid_dataset'
        ]
        result = self.multi_querier.get_summary_lineage(
            summary_dir='/path/to/summary0', filter_keys=filter_keys)
        self._assert_list_equal(expected_result, result)

    def test_get_summary_lineage_fail(self):
        """Test the function of get_summary_lineage with exception."""
        filter_keys = ['xxx']
        self.assertRaises(LineageQuerierParamException,
                          self.multi_querier.get_summary_lineage,
                          filter_keys=filter_keys)

        self.assertRaises(LineageQuerierParamException,
                          self.multi_querier.get_summary_lineage,
                          summary_dir='xxx')

    def test_filter_summary_lineage_success_1(self):
        """Test the success of filter_summary_lineage."""
        condition = {
            'optimizer': {
                'in': [
                    'ApplyMomentum0', 'ApplyMomentum1', 'ApplyMomentum2',
                    'ApplyMomentum4'
                ]
            },
            'learning_rate': {
                'lt': 0.5,
                'gt': 0.2
            },
            'sorted_name': 'summary_dir'
        }
        expected_result = {
            'customized': event_data.CUSTOMIZED_0,
            'object': [LINEAGE_FILTRATION_1, LINEAGE_FILTRATION_2],
            'count': 2,
        }
        result = self.multi_querier.filter_summary_lineage(condition=condition)
        self._assert_lineages_equal(expected_result, result)

    def test_filter_summary_lineage_success_2(self):
        """Test the success of filter_summary_lineage."""
        condition = {
            'batch_size': {
                'le': 50,
                'ge': 35
            },
            'model_size': {
                'lt': 400716934,
                'gt': 400716931
            },
            'sorted_name': 'batch_size',
            'sorted_type': 'descending'
        }
        expected_result = {
            'customized': event_data.CUSTOMIZED_0,
            'object': [LINEAGE_FILTRATION_2, LINEAGE_FILTRATION_3],
            'count': 2,
        }
        result = self.multi_querier.filter_summary_lineage(condition=condition)
        self._assert_lineages_equal(expected_result, result)

    def test_filter_summary_lineage_success_3(self):
        """Test the success of filter_summary_lineage."""
        condition = {'limit': 2, 'offset': 1}
        expected_result = {
            'customized': event_data.CUSTOMIZED_0,
            'object': [LINEAGE_FILTRATION_2, LINEAGE_FILTRATION_3],
            'count': 7,
        }
        result = self.multi_querier.filter_summary_lineage(condition=condition)
        self._assert_lineages_equal(expected_result, result)

    def test_filter_summary_lineage_success_4(self):
        """Test the success of filter_summary_lineage."""
        expected_result = {
            'customized':
            event_data.CUSTOMIZED_2,
            'object': [
                LINEAGE_FILTRATION_0, LINEAGE_FILTRATION_1,
                LINEAGE_FILTRATION_2, LINEAGE_FILTRATION_3,
                LINEAGE_FILTRATION_4, LINEAGE_FILTRATION_5,
                LINEAGE_FILTRATION_6
            ],
            'count':
            7,
        }
        result = self.multi_querier.filter_summary_lineage()
        self._assert_lineages_equal(expected_result, result)

    def test_filter_summary_lineage_success_5(self):
        """Test the success of filter_summary_lineage."""
        condition = {'optimizer': {'eq': 'ApplyMomentum4'}}
        expected_result = {
            'customized': event_data.CUSTOMIZED_0,
            'object': [LINEAGE_FILTRATION_4],
            'count': 1,
        }
        result = self.multi_querier.filter_summary_lineage(condition=condition)
        self._assert_lineages_equal(expected_result, result)

    def test_filter_summary_lineage_success_6(self):
        """Test the success of filter_summary_lineage."""
        condition = {
            'sorted_name': 'metric/accuracy',
            'sorted_type': 'ascending'
        }
        expected_result = {
            'customized':
            event_data.CUSTOMIZED_2,
            'object': [
                LINEAGE_FILTRATION_0, LINEAGE_FILTRATION_5,
                LINEAGE_FILTRATION_1, LINEAGE_FILTRATION_2,
                LINEAGE_FILTRATION_3, LINEAGE_FILTRATION_4,
                LINEAGE_FILTRATION_6
            ],
            'count':
            7,
        }
        result = self.multi_querier.filter_summary_lineage(condition=condition)
        self._assert_lineages_equal(expected_result, result)

    def test_filter_summary_lineage_success_7(self):
        """Test the success of filter_summary_lineage."""
        condition = {
            'sorted_name': 'metric/accuracy',
            'sorted_type': 'descending'
        }
        expected_result = {
            'customized':
            event_data.CUSTOMIZED_2,
            'object': [
                LINEAGE_FILTRATION_6, LINEAGE_FILTRATION_4,
                LINEAGE_FILTRATION_3, LINEAGE_FILTRATION_2,
                LINEAGE_FILTRATION_1, LINEAGE_FILTRATION_0,
                LINEAGE_FILTRATION_5
            ],
            'count':
            7,
        }
        result = self.multi_querier.filter_summary_lineage(condition=condition)
        self._assert_lineages_equal(expected_result, result)

    def test_filter_summary_lineage_success_8(self):
        """Test the success of filter_summary_lineage."""
        condition = {'metric/accuracy': {'lt': 1.6000006, 'gt': 1.4000004}}
        expected_result = {
            'customized': event_data.CUSTOMIZED_0,
            'object': [LINEAGE_FILTRATION_4],
            'count': 1,
        }
        result = self.multi_querier.filter_summary_lineage(condition=condition)
        self._assert_lineages_equal(expected_result, result)

    def test_filter_summary_lineage_success_9(self):
        """Test the success of filter_summary_lineage."""
        condition = {'limit': 3, 'offset': 3}
        expected_result = {
            'customized': {},
            'object': [],
            'count': 7,
        }
        result = self.multi_querier.filter_summary_lineage(condition=condition)
        self._assert_lineages_equal(expected_result, result)

    def test_filter_summary_lineage_fail(self):
        """Test the function of filter_summary_lineage with exception."""
        condition = {'xxx': {'lt': 1.6000006, 'gt': 1.4000004}}
        self.assertRaises(LineageQuerierParamException,
                          self.multi_querier.filter_summary_lineage,
                          condition=condition)

        condition = {'accuracy': {'xxx': 1}}
        self.assertRaises(LineageQuerierParamException,
                          self.multi_querier.filter_summary_lineage,
                          condition=condition)

        condition = {'sorted_name': 'xxx'}
        self.assertRaises(LineageQuerierParamException,
                          self.multi_querier.filter_summary_lineage,
                          condition=condition)

    def test_init_fail(self):
        """Test the function of init with exception."""
        obj_dict = 'a'
        with self.assertRaises(LineageParamTypeError):
            Querier(obj_dict)

        obj_dict = None
        with self.assertRaises(LineageQuerierParamException):
            Querier(obj_dict)
Code example #13
File: test_querier.py  Project: rock4you/mindinsight
class TestQuerier(TestCase):
    """Test the class of `Querier`."""
    _MOCK_DATA_MANAGER = MagicMock()

    @mock.patch(
        'mindinsight.lineagemgr.lineage_parser.SummaryPathParser.get_lineage_summaries'
    )
    @mock.patch(
        'mindinsight.lineagemgr.lineage_parser.LineageSummaryAnalyzer.get_user_defined_info'
    )
    @mock.patch(
        'mindinsight.lineagemgr.lineage_parser.LineageSummaryAnalyzer.get_summary_infos'
    )
    @mock.patch('mindinsight.lineagemgr.lineage_parser.FileHandler')
    def setUp(self, mock_file_handler, *args):
        """Initialization before test case execution."""
        args[0].return_value = create_lineage_info(
            event_data.EVENT_TRAIN_DICT_0, event_data.EVENT_EVAL_DICT_0,
            event_data.EVENT_DATASET_DICT_0)
        args[1].return_value = []
        args[2].return_value = ['path']
        mock_file_handler = MagicMock()
        mock_file_handler.size = 1

        summary_dir = '/path/test/'

        lineage_infos = get_lineage_infos()
        args[0].side_effect = lineage_infos
        lineage_objects = {}
        for i in range(7):
            train_id = f'./summary{i}'
            summary_dir = os.path.join(summary_dir, train_id)
            update_time = time.time() - i
            lineage_parser = LineageParser(train_id, summary_dir, update_time)
            lineage_objects.update(
                {train_id: lineage_parser.super_lineage_obj})

        self.multi_querier = Querier(lineage_objects)

    def test_filter_summary_lineage_success_1(self):
        """Test the success of filter_summary_lineage."""
        condition = {
            'optimizer': {
                'in': [
                    'ApplyMomentum0', 'ApplyMomentum1', 'ApplyMomentum2',
                    'ApplyMomentum4'
                ]
            },
            'learning_rate': {
                'lt': 0.5,
                'gt': 0.2
            },
            'sorted_name': 'summary_dir'
        }
        expected_result = {
            'customized': event_data.CUSTOMIZED_0,
            'object': [LINEAGE_FILTRATION_1, LINEAGE_FILTRATION_2],
            'count': 2,
        }
        result = self.multi_querier.filter_summary_lineage(condition=condition)
        assert_equal_lineages(expected_result, result, self.assertDictEqual)

    def test_filter_summary_lineage_success_2(self):
        """Test the success of filter_summary_lineage."""
        condition = {
            'batch_size': {
                'le': 50,
                'ge': 35
            },
            'model_size': {
                'lt': 400716934,
                'gt': 400716931
            },
            'sorted_name': 'batch_size',
            'sorted_type': 'descending'
        }
        expected_result = {
            'customized': event_data.CUSTOMIZED_0,
            'object': [LINEAGE_FILTRATION_2, LINEAGE_FILTRATION_3],
            'count': 2,
        }
        result = self.multi_querier.filter_summary_lineage(condition=condition)
        assert_equal_lineages(expected_result, result, self.assertDictEqual)

    def test_filter_summary_lineage_success_3(self):
        """Test the success of filter_summary_lineage."""
        condition = {'limit': 2, 'offset': 1}
        expected_result = {
            'customized': event_data.CUSTOMIZED_0,
            'object': [LINEAGE_FILTRATION_2, LINEAGE_FILTRATION_3],
            'count': 7,
        }
        result = self.multi_querier.filter_summary_lineage(condition=condition)
        assert_equal_lineages(expected_result, result, self.assertDictEqual)

    def test_filter_summary_lineage_success_4(self):
        """Test the success of filter_summary_lineage."""
        expected_result = {
            'customized':
            event_data.CUSTOMIZED_2,
            'object': [
                LINEAGE_FILTRATION_0, LINEAGE_FILTRATION_1,
                LINEAGE_FILTRATION_2, LINEAGE_FILTRATION_3,
                LINEAGE_FILTRATION_4, LINEAGE_FILTRATION_5,
                LINEAGE_FILTRATION_6
            ],
            'count':
            7,
        }
        result = self.multi_querier.filter_summary_lineage()
        assert_equal_lineages(expected_result, result, self.assertDictEqual)

    def test_filter_summary_lineage_success_5(self):
        """Test the success of filter_summary_lineage."""
        condition = {'optimizer': {'eq': 'ApplyMomentum4'}}
        expected_result = {
            'customized': event_data.CUSTOMIZED_0,
            'object': [LINEAGE_FILTRATION_4],
            'count': 1,
        }
        result = self.multi_querier.filter_summary_lineage(condition=condition)
        assert_equal_lineages(expected_result, result, self.assertDictEqual)

    def test_filter_summary_lineage_success_6(self):
        """Test the success of filter_summary_lineage."""
        condition = {
            'sorted_name': 'metric/accuracy',
            'sorted_type': 'ascending'
        }
        expected_result = {
            'customized':
            event_data.CUSTOMIZED_2,
            'object': [
                LINEAGE_FILTRATION_0, LINEAGE_FILTRATION_5,
                LINEAGE_FILTRATION_1, LINEAGE_FILTRATION_2,
                LINEAGE_FILTRATION_3, LINEAGE_FILTRATION_4,
                LINEAGE_FILTRATION_6
            ],
            'count':
            7,
        }
        result = self.multi_querier.filter_summary_lineage(condition=condition)
        assert_equal_lineages(expected_result, result, self.assertDictEqual)

    def test_filter_summary_lineage_success_7(self):
        """Test the success of filter_summary_lineage."""
        condition = {
            'sorted_name': 'metric/accuracy',
            'sorted_type': 'descending'
        }
        expected_result = {
            'customized':
            event_data.CUSTOMIZED_2,
            'object': [
                LINEAGE_FILTRATION_6, LINEAGE_FILTRATION_4,
                LINEAGE_FILTRATION_3, LINEAGE_FILTRATION_2,
                LINEAGE_FILTRATION_1, LINEAGE_FILTRATION_0,
                LINEAGE_FILTRATION_5
            ],
            'count':
            7,
        }
        result = self.multi_querier.filter_summary_lineage(condition=condition)
        assert_equal_lineages(expected_result, result, self.assertDictEqual)

    def test_filter_summary_lineage_success_8(self):
        """Test the success of filter_summary_lineage."""
        condition = {'metric/accuracy': {'lt': 1.6000006, 'gt': 1.4000004}}
        expected_result = {
            'customized': event_data.CUSTOMIZED_0,
            'object': [LINEAGE_FILTRATION_4],
            'count': 1,
        }
        result = self.multi_querier.filter_summary_lineage(condition=condition)
        assert_equal_lineages(expected_result, result, self.assertDictEqual)

    def test_filter_summary_lineage_success_9(self):
        """Test the success of filter_summary_lineage."""
        condition = {'limit': 3, 'offset': 3}
        expected_result = {
            'customized': {},
            'object': [],
            'count': 7,
        }
        result = self.multi_querier.filter_summary_lineage(condition=condition)
        assert_equal_lineages(expected_result, result, self.assertDictEqual)

    def test_filter_summary_lineage_fail(self):
        """Test the function of filter_summary_lineage with exception."""
        condition = {'xxx': {'lt': 1.6000006, 'gt': 1.4000004}}
        self.assertRaises(LineageQuerierParamException,
                          self.multi_querier.filter_summary_lineage,
                          condition=condition)

        condition = {'accuracy': {'xxx': 1}}
        self.assertRaises(LineageQuerierParamException,
                          self.multi_querier.filter_summary_lineage,
                          condition=condition)

        condition = {'sorted_name': 'xxx'}
        self.assertRaises(LineageQuerierParamException,
                          self.multi_querier.filter_summary_lineage,
                          condition=condition)

    def test_init_fail(self):
        """Test the function of init with exception."""
        obj_dict = 'a'
        with self.assertRaises(LineageParamTypeError):
            Querier(obj_dict)

        obj_dict = None
        with self.assertRaises(LineageQuerierParamException):
            Querier(obj_dict)
Code example #14
class TestQuerier(TestCase):
    """Test the class of `Querier`."""
    @mock.patch(
        'mindinsight.lineagemgr.querier.querier.LineageSummaryAnalyzer.get_summary_infos'
    )
    def setUp(self, *args):
        """Initialization before test case execution."""
        args[0].return_value = create_lineage_info(
            event_data.EVENT_TRAIN_DICT_0, event_data.EVENT_EVAL_DICT_0,
            event_data.EVENT_DATASET_DICT_0)

        single_summary_path = '/path/to/summary0/log0'
        self.single_querier = Querier(single_summary_path)

        lineage_infos = get_lineage_infos()
        args[0].side_effect = lineage_infos
        summary_paths = [
            '/path/to/summary0/log0', '/path/to/summary1/log1',
            '/path/to/summary2/log2', '/path/to/summary3/log3',
            '/path/to/summary4/log4', '/path/to/summary5/log5',
            '/path/to/summary6/log6'
        ]
        self.multi_querier = Querier(summary_paths)

    def test_get_summary_lineage_success_1(self):
        """Test the success of get_summary_lineage."""
        expected_result = [{
            'summary_dir':
            '/path/to/summary0',
            **event_data.EVENT_TRAIN_DICT_0['train_lineage'], 'metric':
            event_data.METRIC_0,
            'valid_dataset':
            event_data.EVENT_EVAL_DICT_0['evaluation_lineage']
            ['valid_dataset'],
            'dataset_graph':
            event_data.DATASET_DICT_0
        }]
        result = self.single_querier.get_summary_lineage()
        self.assertListEqual(expected_result, result)

    def test_get_summary_lineage_success_2(self):
        """Test the success of get_summary_lineage."""
        expected_result = [{
            'summary_dir':
            '/path/to/summary0',
            **event_data.EVENT_TRAIN_DICT_0['train_lineage'], 'metric':
            event_data.METRIC_0,
            'valid_dataset':
            event_data.EVENT_EVAL_DICT_0['evaluation_lineage']
            ['valid_dataset'],
            'dataset_graph':
            event_data.DATASET_DICT_0
        }]
        result = self.single_querier.get_summary_lineage(
            summary_dir='/path/to/summary0')
        self.assertListEqual(expected_result, result)

    def test_get_summary_lineage_success_3(self):
        """Test the success of get_summary_lineage."""
        expected_result = [{
            'summary_dir':
            '/path/to/summary0',
            'model':
            event_data.EVENT_TRAIN_DICT_0['train_lineage']['model'],
            'algorithm':
            event_data.EVENT_TRAIN_DICT_0['train_lineage']['algorithm']
        }]
        result = self.single_querier.get_summary_lineage(
            filter_keys=['model', 'algorithm'])
        self.assertListEqual(expected_result, result)

    def test_get_summary_lineage_success_4(self):
        """Test the success of get_summary_lineage."""
        expected_result = [{
            'summary_dir':
            '/path/to/summary0',
            **event_data.EVENT_TRAIN_DICT_0['train_lineage'], 'metric':
            event_data.METRIC_0,
            'valid_dataset':
            event_data.EVENT_EVAL_DICT_0['evaluation_lineage']
            ['valid_dataset'],
            'dataset_graph':
            event_data.DATASET_DICT_0
        }, {
            'summary_dir':
            '/path/to/summary1',
            **event_data.EVENT_TRAIN_DICT_1['train_lineage'], 'metric':
            event_data.METRIC_1,
            'valid_dataset':
            event_data.EVENT_EVAL_DICT_1['evaluation_lineage']
            ['valid_dataset'],
            'dataset_graph':
            event_data.DATASET_DICT_0
        }, {
            'summary_dir':
            '/path/to/summary2',
            **event_data.EVENT_TRAIN_DICT_2['train_lineage'], 'metric':
            event_data.METRIC_2,
            'valid_dataset':
            event_data.EVENT_EVAL_DICT_2['evaluation_lineage']
            ['valid_dataset'],
            'dataset_graph':
            event_data.DATASET_DICT_0
        }, {
            'summary_dir':
            '/path/to/summary3',
            **event_data.EVENT_TRAIN_DICT_3['train_lineage'], 'metric':
            event_data.METRIC_3,
            'valid_dataset':
            event_data.EVENT_EVAL_DICT_3['evaluation_lineage']
            ['valid_dataset'],
            'dataset_graph':
            event_data.DATASET_DICT_0
        }, {
            'summary_dir':
            '/path/to/summary4',
            **event_data.EVENT_TRAIN_DICT_4['train_lineage'], 'metric':
            event_data.METRIC_4,
            'valid_dataset':
            event_data.EVENT_EVAL_DICT_4['evaluation_lineage']
            ['valid_dataset'],
            'dataset_graph':
            event_data.DATASET_DICT_0
        }, {
            'summary_dir': '/path/to/summary5',
            **event_data.EVENT_TRAIN_DICT_5['train_lineage'], 'metric': {},
            'valid_dataset': {},
            'dataset_graph': event_data.DATASET_DICT_0
        }, {
            'summary_dir':
            '/path/to/summary6',
            'hyper_parameters': {},
            'algorithm': {},
            'model': {},
            'train_dataset': {},
            'metric':
            event_data.METRIC_5,
            'valid_dataset':
            event_data.EVENT_EVAL_DICT_5['evaluation_lineage']
            ['valid_dataset'],
            'dataset_graph':
            event_data.DATASET_DICT_0
        }]
        result = self.multi_querier.get_summary_lineage()
        self.assertListEqual(expected_result, result)

    def test_get_summary_lineage_success_5(self):
        """Test the success of get_summary_lineage."""
        expected_result = [{
            'summary_dir':
            '/path/to/summary1',
            **event_data.EVENT_TRAIN_DICT_1['train_lineage'], 'metric':
            event_data.METRIC_1,
            'valid_dataset':
            event_data.EVENT_EVAL_DICT_1['evaluation_lineage']
            ['valid_dataset'],
            'dataset_graph':
            event_data.DATASET_DICT_0
        }]
        result = self.multi_querier.get_summary_lineage(
            summary_dir='/path/to/summary1')
        self.assertListEqual(expected_result, result)

    def test_get_summary_lineage_success_6(self):
        """Test the success of get_summary_lineage."""
        expected_result = [{
            'summary_dir':
            '/path/to/summary0',
            'hyper_parameters':
            event_data.EVENT_TRAIN_DICT_0['train_lineage']['hyper_parameters'],
            'train_dataset':
            event_data.EVENT_TRAIN_DICT_0['train_lineage']['train_dataset'],
            'metric':
            event_data.METRIC_0,
            'valid_dataset':
            event_data.EVENT_EVAL_DICT_0['evaluation_lineage']['valid_dataset']
        }]
        filter_keys = [
            'metric', 'hyper_parameters', 'train_dataset', 'valid_dataset'
        ]
        result = self.multi_querier.get_summary_lineage(
            summary_dir='/path/to/summary0', filter_keys=filter_keys)
        self.assertListEqual(expected_result, result)

    def test_get_summary_lineage_fail(self):
        """Test the function of get_summary_lineage with exception."""
        filter_keys = ['xxx']
        self.assertRaises(LineageQuerierParamException,
                          self.multi_querier.get_summary_lineage,
                          filter_keys=filter_keys)

        self.assertRaises(LineageQuerierParamException,
                          self.multi_querier.get_summary_lineage,
                          summary_dir='xxx')

    def test_filter_summary_lineage_success_1(self):
        """Test the success of filter_summary_lineage."""
        condition = {
            'optimizer': {
                'in': [
                    'ApplyMomentum0', 'ApplyMomentum1', 'ApplyMomentum2',
                    'ApplyMomentum4'
                ]
            },
            'learning_rate': {
                'lt': 0.5,
                'gt': 0.2
            },
            'sorted_name': 'summary_dir'
        }
        expected_result = {
            'object': [
                create_filtration_result(
                    '/path/to/summary1',
                    event_data.EVENT_TRAIN_DICT_1,
                    event_data.EVENT_EVAL_DICT_1,
                    event_data.METRIC_1,
                    event_data.DATASET_DICT_0,
                ),
                create_filtration_result('/path/to/summary2',
                                         event_data.EVENT_TRAIN_DICT_2,
                                         event_data.EVENT_EVAL_DICT_2,
                                         event_data.METRIC_2,
                                         event_data.DATASET_DICT_0)
            ],
            'count':
            2,
        }
        result = self.multi_querier.filter_summary_lineage(condition=condition)
        self.assertDictEqual(expected_result, result)

    def test_filter_summary_lineage_success_2(self):
        """Test the success of filter_summary_lineage."""
        condition = {
            'batch_size': {
                'le': 50,
                'ge': 35
            },
            'model_size': {
                'lt': 400716934,
                'gt': 400716931
            },
            'sorted_name': 'batch_size',
            'sorted_type': 'descending'
        }
        expected_result = {
            'object': [
                create_filtration_result('/path/to/summary2',
                                         event_data.EVENT_TRAIN_DICT_2,
                                         event_data.EVENT_EVAL_DICT_2,
                                         event_data.METRIC_2,
                                         event_data.DATASET_DICT_0),
                create_filtration_result('/path/to/summary3',
                                         event_data.EVENT_TRAIN_DICT_3,
                                         event_data.EVENT_EVAL_DICT_3,
                                         event_data.METRIC_3,
                                         event_data.DATASET_DICT_0)
            ],
            'count':
            2,
        }
        result = self.multi_querier.filter_summary_lineage(condition=condition)
        self.assertDictEqual(expected_result, result)

    def test_filter_summary_lineage_success_3(self):
        """Test the success of filter_summary_lineage."""
        condition = {'limit': 2, 'offset': 1}
        expected_result = {
            'object': [
                create_filtration_result('/path/to/summary2',
                                         event_data.EVENT_TRAIN_DICT_2,
                                         event_data.EVENT_EVAL_DICT_2,
                                         event_data.METRIC_2,
                                         event_data.DATASET_DICT_0),
                create_filtration_result('/path/to/summary3',
                                         event_data.EVENT_TRAIN_DICT_3,
                                         event_data.EVENT_EVAL_DICT_3,
                                         event_data.METRIC_3,
                                         event_data.DATASET_DICT_0)
            ],
            'count':
            7,
        }
        result = self.multi_querier.filter_summary_lineage(condition=condition)
        self.assertDictEqual(expected_result, result)

    def test_filter_summary_lineage_success_4(self):
        """Test the success of filter_summary_lineage."""
        expected_result = {
            'object': [
                create_filtration_result('/path/to/summary0',
                                         event_data.EVENT_TRAIN_DICT_0,
                                         event_data.EVENT_EVAL_DICT_0,
                                         event_data.METRIC_0,
                                         event_data.DATASET_DICT_0),
                create_filtration_result('/path/to/summary1',
                                         event_data.EVENT_TRAIN_DICT_1,
                                         event_data.EVENT_EVAL_DICT_1,
                                         event_data.METRIC_1,
                                         event_data.DATASET_DICT_0),
                create_filtration_result('/path/to/summary2',
                                         event_data.EVENT_TRAIN_DICT_2,
                                         event_data.EVENT_EVAL_DICT_2,
                                         event_data.METRIC_2,
                                         event_data.DATASET_DICT_0),
                create_filtration_result('/path/to/summary3',
                                         event_data.EVENT_TRAIN_DICT_3,
                                         event_data.EVENT_EVAL_DICT_3,
                                         event_data.METRIC_3,
                                         event_data.DATASET_DICT_0),
                create_filtration_result('/path/to/summary4',
                                         event_data.EVENT_TRAIN_DICT_4,
                                         event_data.EVENT_EVAL_DICT_4,
                                         event_data.METRIC_4,
                                         event_data.DATASET_DICT_0),
                {
                    "summary_dir": '/path/to/summary5',
                    "loss_function":
                        event_data.EVENT_TRAIN_DICT_5['train_lineage']
                        ['hyper_parameters']['loss_function'],
                    "train_dataset_path": None,
                    "train_dataset_count":
                        event_data.EVENT_TRAIN_DICT_5['train_lineage']
                        ['train_dataset']['train_dataset_size'],
                    "test_dataset_path": None,
                    "test_dataset_count": None,
                    "network":
                        event_data.EVENT_TRAIN_DICT_5['train_lineage']
                        ['algorithm']['network'],
                    "optimizer":
                        event_data.EVENT_TRAIN_DICT_5['train_lineage']
                        ['hyper_parameters']['optimizer'],
                    "learning_rate":
                        event_data.EVENT_TRAIN_DICT_5['train_lineage']
                        ['hyper_parameters']['learning_rate'],
                    "epoch":
                        event_data.EVENT_TRAIN_DICT_5['train_lineage']
                        ['hyper_parameters']['epoch'],
                    "batch_size":
                        event_data.EVENT_TRAIN_DICT_5['train_lineage']
                        ['hyper_parameters']['batch_size'],
                    "loss":
                        event_data.EVENT_TRAIN_DICT_5['train_lineage']
                        ['algorithm']['loss'],
                    "model_size":
                        event_data.EVENT_TRAIN_DICT_5['train_lineage']
                        ['model']['size'],
                    "metric": {},
                    "dataset_graph": event_data.DATASET_DICT_0,
                    "dataset_mark": '2'
                },
                {
                    "summary_dir": '/path/to/summary6',
                    "loss_function": None,
                    "train_dataset_path": None,
                    "train_dataset_count": None,
                    "test_dataset_path":
                        event_data.EVENT_EVAL_DICT_5['evaluation_lineage']
                        ['valid_dataset']['valid_dataset_path'],
                    "test_dataset_count":
                        event_data.EVENT_EVAL_DICT_5['evaluation_lineage']
                        ['valid_dataset']['valid_dataset_size'],
                    "network": None,
                    "optimizer": None,
                    "learning_rate": None,
                    "epoch": None,
                    "batch_size": None,
                    "loss": None,
                    "model_size": None,
                    "metric": event_data.METRIC_5,
                    "dataset_graph": event_data.DATASET_DICT_0,
                    "dataset_mark": '2'
                }
            ],
            'count': 7,
        }
        result = self.multi_querier.filter_summary_lineage()
        self.assertDictEqual(expected_result, result)

    def test_filter_summary_lineage_success_5(self):
        """Test the success of filter_summary_lineage."""
        condition = {'optimizer': {'eq': 'ApplyMomentum4'}}
        expected_result = {
            'object': [
                create_filtration_result('/path/to/summary4',
                                         event_data.EVENT_TRAIN_DICT_4,
                                         event_data.EVENT_EVAL_DICT_4,
                                         event_data.METRIC_4,
                                         event_data.DATASET_DICT_0),
            ],
            'count': 1,
        }
        result = self.multi_querier.filter_summary_lineage(condition=condition)
        self.assertDictEqual(expected_result, result)

    def test_filter_summary_lineage_success_6(self):
        """Test the success of filter_summary_lineage."""
        condition = {
            'sorted_name': 'metric_accuracy',
            'sorted_type': 'ascending'
        }
        expected_result = {
            'object': [
                create_filtration_result('/path/to/summary0',
                                         event_data.EVENT_TRAIN_DICT_0,
                                         event_data.EVENT_EVAL_DICT_0,
                                         event_data.METRIC_0,
                                         event_data.DATASET_DICT_0),
                {
                    "summary_dir": '/path/to/summary5',
                    "loss_function":
                        event_data.EVENT_TRAIN_DICT_5['train_lineage']
                        ['hyper_parameters']['loss_function'],
                    "train_dataset_path": None,
                    "train_dataset_count":
                        event_data.EVENT_TRAIN_DICT_5['train_lineage']
                        ['train_dataset']['train_dataset_size'],
                    "test_dataset_path": None,
                    "test_dataset_count": None,
                    "network":
                        event_data.EVENT_TRAIN_DICT_5['train_lineage']
                        ['algorithm']['network'],
                    "optimizer":
                        event_data.EVENT_TRAIN_DICT_5['train_lineage']
                        ['hyper_parameters']['optimizer'],
                    "learning_rate":
                        event_data.EVENT_TRAIN_DICT_5['train_lineage']
                        ['hyper_parameters']['learning_rate'],
                    "epoch":
                        event_data.EVENT_TRAIN_DICT_5['train_lineage']
                        ['hyper_parameters']['epoch'],
                    "batch_size":
                        event_data.EVENT_TRAIN_DICT_5['train_lineage']
                        ['hyper_parameters']['batch_size'],
                    "loss":
                        event_data.EVENT_TRAIN_DICT_5['train_lineage']
                        ['algorithm']['loss'],
                    "model_size":
                        event_data.EVENT_TRAIN_DICT_5['train_lineage']
                        ['model']['size'],
                    "metric": {},
                    "dataset_graph": event_data.DATASET_DICT_0,
                    "dataset_mark": '2'
                },
                create_filtration_result('/path/to/summary1',
                                         event_data.EVENT_TRAIN_DICT_1,
                                         event_data.EVENT_EVAL_DICT_1,
                                         event_data.METRIC_1,
                                         event_data.DATASET_DICT_0),
                create_filtration_result('/path/to/summary2',
                                         event_data.EVENT_TRAIN_DICT_2,
                                         event_data.EVENT_EVAL_DICT_2,
                                         event_data.METRIC_2,
                                         event_data.DATASET_DICT_0),
                create_filtration_result('/path/to/summary3',
                                         event_data.EVENT_TRAIN_DICT_3,
                                         event_data.EVENT_EVAL_DICT_3,
                                         event_data.METRIC_3,
                                         event_data.DATASET_DICT_0),
                create_filtration_result('/path/to/summary4',
                                         event_data.EVENT_TRAIN_DICT_4,
                                         event_data.EVENT_EVAL_DICT_4,
                                         event_data.METRIC_4,
                                         event_data.DATASET_DICT_0),
                {
                    "summary_dir": '/path/to/summary6',
                    "loss_function": None,
                    "train_dataset_path": None,
                    "train_dataset_count": None,
                    "test_dataset_path":
                        event_data.EVENT_EVAL_DICT_5['evaluation_lineage']
                        ['valid_dataset']['valid_dataset_path'],
                    "test_dataset_count":
                        event_data.EVENT_EVAL_DICT_5['evaluation_lineage']
                        ['valid_dataset']['valid_dataset_size'],
                    "network": None,
                    "optimizer": None,
                    "learning_rate": None,
                    "epoch": None,
                    "batch_size": None,
                    "loss": None,
                    "model_size": None,
                    "metric": event_data.METRIC_5,
                    "dataset_graph": event_data.DATASET_DICT_0,
                    "dataset_mark": '2'
                }
            ],
            'count': 7,
        }
        result = self.multi_querier.filter_summary_lineage(condition=condition)
        self.assertDictEqual(expected_result, result)

    def test_filter_summary_lineage_success_7(self):
        """Test the success of filter_summary_lineage."""
        condition = {
            'sorted_name': 'metric_accuracy',
            'sorted_type': 'descending'
        }
        expected_result = {
            'object': [
                {
                    "summary_dir": '/path/to/summary6',
                    "loss_function": None,
                    "train_dataset_path": None,
                    "train_dataset_count": None,
                    "test_dataset_path":
                        event_data.EVENT_EVAL_DICT_5['evaluation_lineage']
                        ['valid_dataset']['valid_dataset_path'],
                    "test_dataset_count":
                        event_data.EVENT_EVAL_DICT_5['evaluation_lineage']
                        ['valid_dataset']['valid_dataset_size'],
                    "network": None,
                    "optimizer": None,
                    "learning_rate": None,
                    "epoch": None,
                    "batch_size": None,
                    "loss": None,
                    "model_size": None,
                    "metric": event_data.METRIC_5,
                    "dataset_graph": event_data.DATASET_DICT_0,
                    "dataset_mark": '2'
                },
                create_filtration_result('/path/to/summary4',
                                         event_data.EVENT_TRAIN_DICT_4,
                                         event_data.EVENT_EVAL_DICT_4,
                                         event_data.METRIC_4,
                                         event_data.DATASET_DICT_0),
                create_filtration_result('/path/to/summary3',
                                         event_data.EVENT_TRAIN_DICT_3,
                                         event_data.EVENT_EVAL_DICT_3,
                                         event_data.METRIC_3,
                                         event_data.DATASET_DICT_0),
                create_filtration_result('/path/to/summary2',
                                         event_data.EVENT_TRAIN_DICT_2,
                                         event_data.EVENT_EVAL_DICT_2,
                                         event_data.METRIC_2,
                                         event_data.DATASET_DICT_0),
                create_filtration_result('/path/to/summary1',
                                         event_data.EVENT_TRAIN_DICT_1,
                                         event_data.EVENT_EVAL_DICT_1,
                                         event_data.METRIC_1,
                                         event_data.DATASET_DICT_0),
                create_filtration_result('/path/to/summary0',
                                         event_data.EVENT_TRAIN_DICT_0,
                                         event_data.EVENT_EVAL_DICT_0,
                                         event_data.METRIC_0,
                                         event_data.DATASET_DICT_0),
                {
                    "summary_dir": '/path/to/summary5',
                    "loss_function":
                        event_data.EVENT_TRAIN_DICT_5['train_lineage']
                        ['hyper_parameters']['loss_function'],
                    "train_dataset_path": None,
                    "train_dataset_count":
                        event_data.EVENT_TRAIN_DICT_5['train_lineage']
                        ['train_dataset']['train_dataset_size'],
                    "test_dataset_path": None,
                    "test_dataset_count": None,
                    "network":
                        event_data.EVENT_TRAIN_DICT_5['train_lineage']
                        ['algorithm']['network'],
                    "optimizer":
                        event_data.EVENT_TRAIN_DICT_5['train_lineage']
                        ['hyper_parameters']['optimizer'],
                    "learning_rate":
                        event_data.EVENT_TRAIN_DICT_5['train_lineage']
                        ['hyper_parameters']['learning_rate'],
                    "epoch":
                        event_data.EVENT_TRAIN_DICT_5['train_lineage']
                        ['hyper_parameters']['epoch'],
                    "batch_size":
                        event_data.EVENT_TRAIN_DICT_5['train_lineage']
                        ['hyper_parameters']['batch_size'],
                    "loss":
                        event_data.EVENT_TRAIN_DICT_5['train_lineage']
                        ['algorithm']['loss'],
                    "model_size":
                        event_data.EVENT_TRAIN_DICT_5['train_lineage']
                        ['model']['size'],
                    "metric": {},
                    "dataset_graph": event_data.DATASET_DICT_0,
                    "dataset_mark": '2'
                }
            ],
            'count': 7,
        }
        result = self.multi_querier.filter_summary_lineage(condition=condition)
        self.assertDictEqual(expected_result, result)

    def test_filter_summary_lineage_success_8(self):
        """Test the success of filter_summary_lineage."""
        condition = {'metric_accuracy': {'lt': 1.0000006, 'gt': 1.0000004}}
        expected_result = {
            'object': [
                create_filtration_result('/path/to/summary4',
                                         event_data.EVENT_TRAIN_DICT_4,
                                         event_data.EVENT_EVAL_DICT_4,
                                         event_data.METRIC_4,
                                         event_data.DATASET_DICT_0),
            ],
            'count': 1,
        }
        result = self.multi_querier.filter_summary_lineage(condition=condition)
        self.assertDictEqual(expected_result, result)

    def test_filter_summary_lineage_success_9(self):
        """Test the success of filter_summary_lineage."""
        condition = {'limit': 3, 'offset': 3}
        expected_result = {
            'object': [],
            'count': 7,
        }
        result = self.multi_querier.filter_summary_lineage(condition=condition)
        self.assertDictEqual(expected_result, result)

    def test_filter_summary_lineage_fail(self):
        """Test the function of filter_summary_lineage with exception."""
        condition = {'xxx': {'lt': 1.0000006, 'gt': 1.0000004}}
        self.assertRaises(LineageQuerierParamException,
                          self.multi_querier.filter_summary_lineage,
                          condition=condition)

        condition = {'accuracy': {'xxx': 1}}
        self.assertRaises(LineageQuerierParamException,
                          self.multi_querier.filter_summary_lineage,
                          condition=condition)

        condition = {'sorted_name': 'xxx'}
        self.assertRaises(LineageQuerierParamException,
                          self.multi_querier.filter_summary_lineage,
                          condition=condition)

    @mock.patch(
        'mindinsight.lineagemgr.querier.querier.LineageSummaryAnalyzer.get_summary_infos'
    )
    def test_init_fail(self, *args):
        """Test the function of init with exception."""
        summary_path = {'xxx': 1}
        with self.assertRaises(LineageParamTypeError):
            Querier(summary_path)

        summary_path = None
        with self.assertRaises(LineageQuerierParamException):
            Querier(summary_path)

        args[0].side_effect = LineageSummaryAnalyzeException
        summary_path = '/path/to/summary0/log0'
        with self.assertRaises(LineageSummaryParseException):
            Querier(summary_path)

    @mock.patch(
        'mindinsight.lineagemgr.querier.querier.LineageSummaryAnalyzer.get_summary_infos'
    )
    def test_parse_fail_summary_logs_1(self, *args):
        """Test the function of parsing fail summary logs."""
        lineage_infos = get_lineage_infos()
        args[0].side_effect = lineage_infos

        summary_path = ['/path/to/summary0/log0']
        querier = Querier(summary_path)
        querier._parse_failed_paths.append('/path/to/summary1/log1')
        expected_result = [{
            'summary_dir': '/path/to/summary0',
            **event_data.EVENT_TRAIN_DICT_0['train_lineage'],
            'metric': event_data.METRIC_0,
            'valid_dataset':
                event_data.EVENT_EVAL_DICT_0['evaluation_lineage']
                ['valid_dataset'],
            'dataset_graph': event_data.DATASET_DICT_0
        }, {
            'summary_dir': '/path/to/summary1',
            **event_data.EVENT_TRAIN_DICT_1['train_lineage'],
            'metric': event_data.METRIC_1,
            'valid_dataset':
                event_data.EVENT_EVAL_DICT_1['evaluation_lineage']
                ['valid_dataset'],
            'dataset_graph': event_data.DATASET_DICT_0
        }]
        result = querier.get_summary_lineage()
        self.assertListEqual(expected_result, result)
        self.assertListEqual([], querier._parse_failed_paths)

    @mock.patch(
        'mindinsight.lineagemgr.querier.querier.LineageSummaryAnalyzer.get_summary_infos'
    )
    def test_parse_fail_summary_logs_2(self, *args):
        """Test the function of parsing fail summary logs."""
        args[0].return_value = create_lineage_info(
            event_data.EVENT_TRAIN_DICT_0,
            event_data.EVENT_EVAL_DICT_0,
            event_data.EVENT_DATASET_DICT_0,
        )

        summary_path = ['/path/to/summary0/log0']
        querier = Querier(summary_path)
        querier._parse_failed_paths.append('/path/to/summary1/log1')

        args[0].return_value = create_lineage_info(None, None, None)
        expected_result = [{
            'summary_dir': '/path/to/summary0',
            **event_data.EVENT_TRAIN_DICT_0['train_lineage'],
            'metric': event_data.METRIC_0,
            'valid_dataset':
                event_data.EVENT_EVAL_DICT_0['evaluation_lineage']
                ['valid_dataset'],
            'dataset_graph': event_data.DATASET_DICT_0
        }]
        result = querier.get_summary_lineage()
        self.assertListEqual(expected_result, result)
        self.assertListEqual(['/path/to/summary1/log1'],
                             querier._parse_failed_paths)