Example #1
def read_file_to_graph_def(graph_def: [tf_v1.GraphDef, tf_v1.MetaGraphDef],
                           graph_file_name: str = "",
                           is_binary: bool = True):
    """
    Reads file to protobuf
    :param graph_def: GraphDef or MetaGraphDef object to store the network
    :param graph_file_name: path to the file with the graph
    :param is_binary: flag to switch between binary and text protobuf formats of the graph file
    :return: GraphDef or MetaGraphDef containing the network with cleared device info.
    """
    try:
        if is_binary:
            with open(graph_file_name, "rb") as f:
                graph_def.ParseFromString(f.read())
        else:
            with open(graph_file_name, "r") as f:
                text_format.Merge(f.read(), graph_def)
        nodes_to_clear_device = graph_def.node if isinstance(
            graph_def, tf_v1.GraphDef) else graph_def.graph_def.node
        for node in nodes_to_clear_device:
            node.device = ""
    except Exception as e:
        raise FrameworkError(
            'TensorFlow cannot read the model file: "{}" is incorrect TensorFlow model file. '
            '\nThe file should contain one of the following TensorFlow graphs:'
            '\n1. frozen graph in text or binary format'
            '\n2. inference graph for freezing with checkpoint (--input_checkpoint) in text or binary format'
            '\n3. meta graph'
            '\n\nMake sure that --input_model_is_text is provided for a model in text format. '
            'By default, a model is interpreted in binary format. Framework error details: {}. '
            + refer_to_faq_msg(43), graph_file_name, str(e)) from e
    return graph_def
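
# A minimal usage sketch (added for illustration, not part of the original source). It assumes
# the tensorflow.compat.v1 alias used by the function above; "frozen_model.pb" is a hypothetical path.
import tensorflow.compat.v1 as tf_v1

graph_def = tf_v1.GraphDef()
graph_def = read_file_to_graph_def(graph_def, "frozen_model.pb", is_binary=True)
print("Loaded {} nodes with device info cleared".format(len(graph_def.node)))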
Example #2
    def load(self, graph: Graph):
        argv = graph.graph['cmd_params']
        try:
            model_nodes, model_params, model_name, iteration_number = load_symbol_def(
                argv.input_model, argv.input_symbol, argv.input,
                argv.nd_prefix_name, argv.pretrained_model_name,
                argv.legacy_mxnet_model)
        except (ValueError, mxnet.base.MXNetError) as e:
            raise FrameworkError(
                'The following error happened while loading the MXNet model {}: {}. '
                + refer_to_faq_msg(53), argv.input_model, str(e)) from e

        if argv.nd_prefix_name and argv.pretrained_model_name and argv.save_params_from_nd:
            save_params_file(model_name, model_params._arg_params,
                             model_params._aux_params, iteration_number)

        update_extractors_with_extensions(mxnet_op_extractors)
        symbol2nx(graph, model_nodes, model_params, argv.input)
        graph.check_empty_graph(
            'symbol2nx. It may happen due to problems with the loaded model')

        graph.graph['layout'] = 'NCHW'
        graph.graph['fw'] = 'mxnet'
        graph.graph[
            'feature_dim'] = 1 if graph.graph['layout'] == 'NCHW' else 3

        extract_node_attrs(graph, mxnet_op_extractor)
        send_op_names_info('mxnet', graph)
        send_shapes_info('mxnet', graph)
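
# A hedged driver sketch (illustration only): the loader class name, the Graph import path,
# and the argv fields are assumptions inferred from the method body above; paths are placeholders.
import argparse
from mo.graph.graph import Graph  # assumed import path

graph = Graph()
graph.graph['cmd_params'] = argparse.Namespace(
    input_model='model-0000.params', input_symbol='model-symbol.json',
    input=None, nd_prefix_name=None, pretrained_model_name=None,
    legacy_mxnet_model=False, save_params_from_nd=False)
MxNetLoader().load(graph)  # hypothetical class owning the load() method above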
Example #3
def load_onnx_model(file_name: str):
    try:
        onnx_model = onnx.load(file_name)
    except Exception as e:
        raise FrameworkError(
            'Cannot read the model file: "{}" is incorrect ONNX model file. Details: {}',
            file_name, str(e)) from e

    return onnx_model
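
# Usage sketch (illustration only); "model.onnx" is a hypothetical path.
onnx_model = load_onnx_model("model.onnx")
print(onnx_model.graph.name, len(onnx_model.graph.node))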
Example #4
class TestMainErrors(unittest.TestCase):
    @patch('argparse.ArgumentParser.parse_args',
           return_value=argparse.Namespace())
    @patch('mo.main.driver', side_effect=FrameworkError('FW ERROR MESSAGE'))
    @ngraph_needed
    def test_FrameworkError(self, mock_driver, mock_argparse):
        with self.assertLogs() as logger:
            main(argparse.ArgumentParser(), None, 'framework_string')
            self.assertEqual(logger.output, ['ERROR:root:FW ERROR MESSAGE'])
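
# The mock parameter order above follows unittest.mock semantics: stacked @patch decorators are
# applied bottom-up, so the patch closest to the function supplies the first mock argument.
# A minimal standalone demo of that rule (illustration only):
from unittest.mock import patch

@patch('os.getcwd')   # outermost patch -> last mock argument
@patch('os.listdir')  # innermost patch -> first mock argument
def demo(mock_listdir, mock_getcwd):
    print(mock_listdir is not mock_getcwd)  # True: each patch injects its own mock

demo()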
Example #5
def apply_transform(graph: Graph, replacer_cls, **kwargs):
    """
    Safely executes the transform if it should run and validates the graph after the transform execution
    """
    replacer = replacer_cls()
    replacement_id = 'REPLACEMENT_ID'
    if hasattr(replacer, 'replacement_id'):
        replacement_id = replacer.replacement_id

    if hasattr(replacer, 'enabled') and not replacer.enabled:
        log.info("Skip replacer {} (enabled = False)".format(replacer_cls))
        return

    if hasattr(replacer, 'graph_condition') and \
            not all([condition(graph) for condition in replacer.graph_condition]):
        log.info("Skip replacer {} (graph_condition not satisfied)".format(
            replacer_cls))
        return

    log.debug("Run replacer {}".format(replacer_cls))

    try:
        if hasattr(replacer,
                   'run_not_recursively') and replacer.run_not_recursively:
            replacer.find_and_replace_pattern(graph)
        else:
            for_graph_and_each_sub_graph_recursively(
                graph, replacer.find_and_replace_pattern)

        if hasattr(replacer, 'force_clean_up') and replacer.force_clean_up:
            for_graph_and_each_sub_graph_recursively(graph,
                                                     lambda G: G.clean_up())

        if hasattr(replacer,
                   'force_shape_inference') and replacer.force_shape_inference:
            shape_inference(graph)

        if hasattr(replacer,
                   'run_not_recursively') and replacer.run_not_recursively:
            graph.check_empty_graph(replacer_cls)
            graph.check_shapes_consistency()
        else:
            for_graph_and_each_sub_graph_recursively(
                graph, lambda _: graph.check_empty_graph(replacer_cls))
            for_graph_and_each_sub_graph_recursively(
                graph, lambda _: graph.check_shapes_consistency())

    except Error as err:
        raise Error(
            'Exception occurred while running replacer "{}" ({}): {}'.format(
                replacement_id,
                replacer_cls,
                str(err).replace('[REPLACEMENT_ID]', replacement_id),
            )) from err
    except FrameworkError as err:
        raise FrameworkError('{}'.format(str(err))) from err
    except Exception as err:
        raise Exception(
            'Exception occurred while running replacer "{}" ({}): {}'.format(
                replacement_id,
                replacer_cls,
                str(err).replace('[REPLACEMENT_ID]', replacement_id),
            )) from err
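
# An illustrative replacer that apply_transform could drive: the attribute names mirror the ones
# probed above, while the class itself and its no-op pattern are placeholders.
class NoopReplacer:
    replacement_id = 'NoopReplacer'
    enabled = True
    graph_condition = [lambda graph: True]
    run_not_recursively = True
    force_clean_up = False
    force_shape_inference = False

    def find_and_replace_pattern(self, graph):
        pass  # a real replacer would match a subgraph pattern here and rewrite it

# apply_transform(graph, NoopReplacer) would then run the replacer and validate the graph.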
Example #6
def load_caffe_proto_model(caffe_pb2, proto_path: str, model_path: [str, None] = None):
    # 1. python protobuf is used
    if api_implementation._implementation_type == 'python':
        message = 'Please expect that Model Optimizer conversion might be slow. ' \
                  'You are currently using the Python protobuf library implementation. \n'
        try:
            from google.protobuf.pyext import cpp_message
            # Check os windows and env variable PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION
            if os.name == 'nt' and os.environ.get('PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION', default='') != 'cpp':
                # 2. cpp implementation is available but not used
                message += 'However, the cpp implementation is available; you can boost ' \
                           'model conversion by setting the PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION env variable to cpp. \n' \
                           'Run: set PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=cpp \n'
        except ImportError:
            # 3. cpp implementation is not available
            message += 'However, you can use the C++ protobuf implementation that is supplied with the OpenVINO toolkit ' \
                       'or build the protobuf library from sources. \n' \
                       'Navigate to the "install_prerequisites" folder and run: ' \
                       'python -m easy_install protobuf-3.5.1-py($your_python_version)-win-amd64.egg \n' \
                       'set PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=cpp'
        print(message + '\n\n' + refer_to_faq_msg(80))

    # Read proto layers
    try:
        proto = caffe_pb2.NetParameter()
        with open(proto_path, "r") as file:
            text_format.Merge(str(file.read()), proto)
    except Exception as e:
        log.error('Exception message: {}\n\n'.format(e) +
                  '    Possible reasons:\n' +
                  '      1. {} does not exist\n'.format(proto_path) +
                  '      2. {} does not have a valid structure, for example, it was downloaded as html\n'.format(
                      proto_path) +
                  '      3. {} contains custom layers or attributes that are not supported\n'.format(proto_path) +
                  '         in Model Optimizer by default.\n\n' +
                  '    If you have made sure that {} has a valid structure and still see this issue, then\n'.format(
                      proto_path) +
                  '    you need to generate a python parser for caffe.proto that was used when the model\n' +
                  '    was created.\n' +
                  '    Run "python3 generate_caffe_pb2.py --input_proto ${PATH_TO_CAFFE}/src/caffe/proto/caffe.proto"' +
                  refer_to_faq_msg(1) + '\n\n', extra={'framework_error': True})
        raise FrameworkError('Model Optimizer is not able to parse {}'.format(proto_path)) from e

    # Read model layer if exists
    model = None
    try:
        if model_path:
            model = caffe_pb2.NetParameter()
            with open(model_path, "rb") as infile:
                mapped_file = mmap.mmap(infile.fileno(), 0, access=mmap.ACCESS_READ)
                model.MergeFromString(mapped_file)
    except Exception as e:
        third_point = ''
        if api_implementation._implementation_type == 'python':
            third_point = '      3. Python protobuf implementation was used. Some models cannot be converted ' + \
                          'in this configuration. Please use a Python version with the cpp implementation of the ' + \
                          'protobuf library, or build it yourself\n' + refer_to_faq_msg(103)
        log.error('Exception message: {}\n\n'.format(e) +
                  '    Possible reasons:\n' +
                  '      1. {} does not exist\n'.format(model_path) +
                  '      2. {} does not have a valid structure\n'.format(model_path) + third_point,
                  extra={'framework_error': True})
        raise FrameworkError('Model Optimizer is not able to parse {}'.format(model_path)) from e

    return proto, model
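
# Usage sketch (illustration only): caffe_pb2 is assumed to be a module generated from caffe.proto,
# and both paths are hypothetical.
import caffe_pb2  # assumed generated module

proto, model = load_caffe_proto_model(caffe_pb2, 'deploy.prototxt', 'weights.caffemodel')
print('Parsed {} with {} layers'.format(proto.name, len(proto.layer)))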
Example #7
def load_tf_graph_def(graph_file_name: str = "",
                      is_binary: bool = True,
                      checkpoint: str = "",
                      model_dir: str = "",
                      saved_model_tags: list = [],
                      meta_graph_file: str = "",
                      user_output_node_names_list: list = []):
    # As a provisional solution, use native TF methods to load a model protobuf
    graph_def = tf_v1.GraphDef()
    if isinstance(graph_file_name, str) and (re.match(r'.*\.(ckpt|meta)$',
                                                      graph_file_name)):
        print(
            '[ WARNING ] The value for the --input_model command line parameter ends with ".ckpt" or ".meta" '
            'extension.\n'
            'It means that the model is not frozen.\n'
            'To load a non-frozen model into the Model Optimizer, run:'
            '\n\n1. For "*.ckpt" file:'
            '\n- if inference graph is in binary format'
            '\npython3 mo_tf.py --input_model "path/to/inference_graph.pb" --input_checkpoint "path/to/*.ckpt"'
            '\n- if inference graph is in text format'
            '\npython3 mo_tf.py --input_model "path/to/inference_graph.pbtxt" --input_model_is_text '
            '--input_checkpoint "path/to/*.ckpt"'
            '\n\n2. For "*.meta" file:'
            '\npython3 mo_tf.py --input_meta_graph "path/to/*.meta"')
    variables_values = {}
    try:
        if graph_file_name and not meta_graph_file and not checkpoint:
            # frozen graph
            return read_file_to_graph_def(
                graph_def, graph_file_name,
                is_binary), variables_values, 'tf', None
        if graph_file_name and not meta_graph_file and checkpoint:
            # inference graph and checkpoint
            graph_def = read_file_to_graph_def(graph_def, graph_file_name,
                                               is_binary)
            outputs = get_output_node_names_list(graph_def,
                                                 user_output_node_names_list)
            if os.path.isfile(checkpoint):
                graph_def = freeze_checkpoint(graph_def=graph_def,
                                              checkpoint=checkpoint,
                                              output_node_names=outputs)
            elif os.path.isdir(checkpoint):
                graph_def, variables_values = freeze_checkpoints(
                    graph_def=graph_def,
                    checkpoint_dir=checkpoint,
                    output_node_names=outputs)
            # we are sure that the checkpoint is an existing file or directory due to the cli_parser configuration
            return graph_def, variables_values, 'tf', None
        if not graph_file_name and meta_graph_file:
            meta_graph_file = deducing_metagraph_path(meta_graph_file)
            input_meta_graph_def = read_file_to_graph_def(
                tf_v1.MetaGraphDef(), meta_graph_file, is_binary)
            # Since version 2.2 TF can fail with an internal error while loading a graph from a .meta file.
            # It happens because some operations may have an _output_shapes attribute inconsistent with the value
            # calculated for the GraphDef. To avoid this problem we must delete the `_output_shapes` attributes from operations
            for node in input_meta_graph_def.graph_def.node:
                if '_output_shapes' in node.attr:
                    del node.attr['_output_shapes']
            # pylint: disable=no-member
            with tf_v1.Session() as sess:
                restorer = tf_v1.train.import_meta_graph(input_meta_graph_def)
                restorer.restore(sess, re.sub(r'\.meta$', '', meta_graph_file))
                outputs = get_output_node_names_list(
                    input_meta_graph_def.graph_def,
                    user_output_node_names_list)
                graph_def = tf_v1.graph_util.convert_variables_to_constants(
                    sess, input_meta_graph_def.graph_def, outputs)
                return graph_def, variables_values, 'tf', None
        if model_dir:
            # saved model directory
            try:
                env_setup = get_environment_setup("tf")
                # enable eager execution temporarily while TensorFlow 2 model is being loaded
                tf_v1.enable_eager_execution()

                try:
                    # Code to extract Keras model.
                    # tf.keras.models.load_model throws TypeError, KeyError or IndexError
                    # for the TF 1.x SavedModel format when TF 1.x is installed
                    imported = tf.keras.models.load_model(model_dir,
                                                          compile=False)
                except Exception:
                    imported = tf.saved_model.load(model_dir, saved_model_tags)  # pylint: disable=E1120

                # getting a signature by key throws KeyError for the TF 1.x SavedModel format when TF 2.x is installed
                concrete_func = imported.signatures[
                    tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
                # the aggressive_inlining parameter is needed to freeze a table of embeddings for the Keras Embedding
                # operation; a model with an Embedding operation cannot be properly converted to IR without it
                if "tensorflow" in env_setup and env_setup[
                        "tensorflow"] >= LooseVersion("2.2.0"):
                    frozen_func = convert_variables_to_constants_v2(
                        concrete_func,
                        lower_control_flow=False,
                        aggressive_inlining=True)  # pylint: disable=E1123
                else:
                    frozen_func = convert_variables_to_constants_v2(
                        concrete_func, lower_control_flow=False)  # pylint: disable=E1123
                graph_def = frozen_func.graph.as_graph_def(add_shapes=True)
                # disable eager execution since next steps are executed with a graph in non-eager mode
                tf_v1.disable_eager_execution()

                input_names = []
                if hasattr(imported, 'inputs'):
                    # Extract tensor names order from Keras model
                    input_names = [tensor.name for tensor in imported.inputs]

                # After model freezing, output tensor names change and receive a "Func/PartitionedCall" prefix,
                # so output_names from saved_model cannot be used. Here tensor names from the frozen graph are used,
                # as TF adds indexed Identity nodes during freezing to each output, so this indexing is used for
                # order alignment.
                output_names = [tensor.name for tensor in frozen_func.outputs]

                inputs_outputs_order = (input_names, output_names)

                return graph_def, variables_values, 'tf2', inputs_outputs_order
            except Exception:
                # disable eager execution since TensorFlow 1 model is handled
                tf_v1.disable_eager_execution()
                # code to extract GraphDef for TF 1.0 SavedModel format
                tags = saved_model_tags if saved_model_tags is not None else [
                    tf_v1.saved_model.tag_constants.SERVING
                ]
                with tf_v1.Session() as sess:
                    meta_graph_def = tf_v1.saved_model.loader.load(
                        sess, tags, model_dir)
                    outputs = get_output_node_names_list(
                        meta_graph_def.graph_def, user_output_node_names_list)
                    graph_def = tf_v1.graph_util.convert_variables_to_constants(
                        sess, meta_graph_def.graph_def, outputs)
                    return graph_def, variables_values, 'tf', None
    except Exception as e:
        raise FrameworkError('Cannot load input model: {}', e) from e
    raise Error("Unknown configuration of input model parameters")
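
# Usage sketch for the frozen-graph branch of this revision, which returns a 4-tuple;
# "frozen_inference_graph.pb" is a hypothetical path.
graph_def, variables_values, framework, inputs_outputs_order = load_tf_graph_def(
    graph_file_name='frozen_inference_graph.pb', is_binary=True)
print(framework, len(graph_def.node))  # 'tf' and the node count; inputs_outputs_order is None here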
Example #8
def load_tf_graph_def(graph_file_name: str = "",
                      is_binary: bool = True,
                      checkpoint: str = "",
                      model_dir: str = "",
                      saved_model_tags: list = [],
                      meta_graph_file: str = "",
                      user_output_node_names_list: list = []):
    # As a provisional solution, use native TF methods to load a model protobuf
    graph_def = tf_v1.GraphDef()
    if isinstance(graph_file_name, str) and (re.match(r'.*\.(ckpt|meta)$',
                                                      graph_file_name)):
        print(
            '[ WARNING ] The value for the --input_model command line parameter ends with ".ckpt" or ".meta" '
            'extension.\n'
            'It means that the model is not frozen.\n'
            'To load a non-frozen model into the Model Optimizer, run:'
            '\n\n1. For "*.ckpt" file:'
            '\n- if inference graph is in binary format'
            '\npython3 mo_tf.py --input_model "path/to/inference_graph.pb" --input_checkpoint "path/to/*.ckpt"'
            '\n- if inference graph is in text format'
            '\npython3 mo_tf.py --input_model "path/to/inference_graph.pbtxt" --input_model_is_text '
            '--input_checkpoint "path/to/*.ckpt"'
            '\n\n2. For "*.meta" file:'
            '\npython3 mo_tf.py --input_meta_graph "path/to/*.meta"')
    variables_values = {}
    try:
        if graph_file_name and not meta_graph_file and not checkpoint:
            # frozen graph
            return read_file_to_graph_def(graph_def, graph_file_name,
                                          is_binary), variables_values, 'tf'
        if graph_file_name and not meta_graph_file and checkpoint:
            # inference graph and checkpoint
            graph_def = read_file_to_graph_def(graph_def, graph_file_name,
                                               is_binary)
            outputs = get_output_node_names_list(graph_def,
                                                 user_output_node_names_list)
            if os.path.isfile(checkpoint):
                graph_def = freeze_checkpoint(graph_def=graph_def,
                                              checkpoint=checkpoint,
                                              output_node_names=outputs)
            elif os.path.isdir(checkpoint):
                graph_def, variables_values = freeze_checkpoints(
                    graph_def=graph_def,
                    checkpoint_dir=checkpoint,
                    output_node_names=outputs)
            # we are sure that the checkpoint is an existing file or directory due to the cli_parser configuration
            return graph_def, variables_values, 'tf'
        if not graph_file_name and meta_graph_file:
            meta_graph_file = deducing_metagraph_path(meta_graph_file)
            input_meta_graph_def = read_file_to_graph_def(
                tf_v1.MetaGraphDef(), meta_graph_file, is_binary)
            # pylint: disable=no-member
            with tf_v1.Session() as sess:
                restorer = tf_v1.train.import_meta_graph(input_meta_graph_def)
                restorer.restore(sess, re.sub(r'\.meta$', '', meta_graph_file))
                outputs = get_output_node_names_list(
                    input_meta_graph_def.graph_def,
                    user_output_node_names_list)
                graph_def = tf_v1.graph_util.convert_variables_to_constants(
                    sess, input_meta_graph_def.graph_def, outputs)
                return graph_def, variables_values, 'tf'
        if model_dir:
            # saved model directory
            try:
                env_setup = get_environment_setup("tf")
                # enable eager execution temporarily while TensorFlow 2 model is being loaded
                tf_v1.enable_eager_execution()
                # code to extract GraphDef for the TF 2.0 SavedModel format
                # tf.saved_model.load throws TypeError for the TF 1.x SavedModel format when TF 1.x is installed
                imported = tf.saved_model.load(model_dir, saved_model_tags)  # pylint: disable=E1120
                # getting a signature by key throws KeyError for the TF 1.x SavedModel format when TF 2.x is installed
                concrete_func = imported.signatures[
                    tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
                # the aggressive_inlining parameter is needed to freeze a table of embeddings for the Keras Embedding
                # operation; a model with an Embedding operation cannot be properly converted to IR without it
                if "tensorflow" in env_setup and env_setup[
                        "tensorflow"] >= LooseVersion("2.2.0"):
                    frozen_func = convert_variables_to_constants_v2(
                        concrete_func,
                        lower_control_flow=False,
                        aggressive_inlining=True)  # pylint: disable=E1123
                else:
                    frozen_func = convert_variables_to_constants_v2(
                        concrete_func, lower_control_flow=False)  # pylint: disable=E1123
                graph_def = frozen_func.graph.as_graph_def(add_shapes=True)
                # disable eager execution since next steps are executed with a graph in non-eager mode
                tf_v1.disable_eager_execution()
                return graph_def, variables_values, 'tf2'
            except (TypeError, KeyError):
                # disable eager execution since TensorFlow 1 model is handled
                tf_v1.disable_eager_execution()
                # code to extract GraphDef for TF 1.0 SavedModel format
                tags = saved_model_tags if saved_model_tags is not None else [
                    tf_v1.saved_model.tag_constants.SERVING
                ]
                with tf_v1.Session() as sess:
                    meta_graph_def = tf_v1.saved_model.loader.load(
                        sess, tags, model_dir)
                    outputs = get_output_node_names_list(
                        meta_graph_def.graph_def, user_output_node_names_list)
                    graph_def = tf_v1.graph_util.convert_variables_to_constants(
                        sess, meta_graph_def.graph_def, outputs)
                    return graph_def, variables_values, 'tf'
            except Exception as e:
                raise FrameworkError('SavedModel format load failure: {}',
                                     e) from e
    except Exception as e:
        raise FrameworkError('Cannot load input model: {}', e) from e
    raise Error("Unknown configuration of input model parameters")
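
# This earlier revision returns a 3-tuple (no inputs_outputs_order) and lacks the Keras loading
# path of Example #7. A matching usage sketch with a hypothetical path:
graph_def, variables_values, framework = load_tf_graph_def(
    graph_file_name='frozen_inference_graph.pb', is_binary=True)
print(framework, len(graph_def.node))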