Example #1
    def load(self, graph: Graph):
        argv = graph.graph['cmd_params']
        try:
            model_nodes, model_params, model_name, iteration_number = load_symbol_def(
                argv.input_model, argv.input_symbol, argv.input,
                argv.nd_prefix_name, argv.pretrained_model_name,
                argv.legacy_mxnet_model)
        except (ValueError, mxnet.base.MXNetError) as e:
            raise FrameworkError(
                'The following error happened while loading mxnet model {}: {}. '
                + refer_to_faq_msg(53), argv.input_model, str(e)) from e

        if argv.nd_prefix_name and argv.pretrained_model_name and argv.save_params_from_nd:
            save_params_file(model_name, model_params._arg_params,
                             model_params._aux_params, iteration_number)

        update_extractors_with_extensions(mxnet_op_extractors)
        symbol2nx(graph, model_nodes, model_params, argv.input)
        graph.check_empty_graph(
            'symbol2nx. It may happen due to problems with loaded model')

        graph.graph['layout'] = 'NCHW'
        graph.graph['fw'] = 'mxnet'
        graph.graph['feature_dim'] = 1 if graph.graph['layout'] == 'NCHW' else 3

        extract_node_attrs(graph, mxnet_op_extractor)
        send_op_names_info('mxnet', graph)
        send_shapes_info('mxnet', graph)
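
All of these loaders share the same entry protocol: the command-line parameters are stashed in the
graph's attribute dictionary before load() is called. A minimal driving sketch, assuming a
networkx-backed Graph whose .graph dict is writable; the loader class name and file paths below are
hypothetical, and the Namespace fields mirror the attributes this loader reads:

from argparse import Namespace

graph = Graph()  # the Model Optimizer Graph class imported by the loaders
graph.graph['cmd_params'] = Namespace(
    input_model='model-0000.params',      # hypothetical paths
    input_symbol='model-symbol.json',
    input=None,
    nd_prefix_name=None,
    pretrained_model_name=None,
    legacy_mxnet_model=False,
    save_params_from_nd=False)
MXNetLoader().load(graph)                 # hypothetical loader class name
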
Example #2
    def load(self, graph: Graph):
        argv = graph.graph['cmd_params']
        try:
            load_kaldi_model(graph, argv.input_model)
        except Exception as e:
            raise Error('Model Optimizer is not able to parse Kaldi model {}. '.format(argv.input_model) +
                        refer_to_faq_msg(91)) from e
        graph.check_empty_graph('load_kaldi_nnet_model')
        graph.graph['layout'] = 'NCHW'
        graph.graph['fw'] = 'kaldi'

        update_extractors_with_extensions(kaldi_type_extractors)
        extract_node_attrs(graph, lambda node: kaldi_extractor(node))

        send_op_names_info('kaldi', graph)
        send_shapes_info('kaldi', graph)
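
Several loaders above call graph.check_empty_graph(...) right after parsing. A plausible
reconstruction of that guard, assuming Graph extends networkx.MultiDiGraph and Error is the same
exception class the loaders raise; the exact message format is an assumption:

    def check_empty_graph(self, description_of_created_graph: str):
        # hypothetical reconstruction: fail fast if parsing produced no usable nodes
        if len(self.nodes()) <= 1:
            raise Error('Graph contains no nodes after executing {}. It may happen due to '
                        'problems with the loaded model'.format(description_of_created_graph))
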
Example #3
def extract_if(cls, if_node: Node):
    If.update_node_stat(if_node, {})

    # check that the required then/else branch functions exist in the graph library
    main_graph = if_node.graph
    then_graph_proto = get_graph_proto(main_graph, 'then_branch', if_node)
    else_graph_proto = get_graph_proto(main_graph, 'else_branch', if_node)

    then_graph = create_internal_graph(main_graph)
    if_node['then_graph'] = then_graph

    else_graph = create_internal_graph(main_graph)
    if_node['else_graph'] = else_graph

    # create Parameter nodes for the then/else graphs
    for input_index, (body_graph, body_graph_proto) in enumerate(
            zip((then_graph, else_graph),
                (then_graph_proto, else_graph_proto))):

        body_parameters, body_parameter_names = convert_graph_inputs_to_parameters(
            body_graph, body_graph_proto)

        # update the If body graph with the body function graph
        body_results = []
        update_body_graph(body_graph, body_graph_proto, body_parameter_names,
                          body_results)

        body_graph.stage = 'front'

        # connect external input ports with body parameter nodes, skipping input 0 which carries the condition
        for idx in range(0, len(body_parameters)):
            If.connect_body_input(if_node, not input_index, idx + 1,
                                  body_parameters[idx])

        # connect body outputs with If operation output ports
        for idx in range(len(body_results)):
            If.connect_body_output(if_node, not input_index, idx,
                                   body_results[idx])

        # parse body node attributes in the same way as for the main graph
        extract_node_attrs(
            body_graph, lambda node: tf_op_extractor(
                node, check_for_duplicates(tf_op_extractors)))

    return cls.enabled
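
The branch selector passed to connect_body_input/connect_body_output above is `not input_index`,
which reads oddly but is just a boolean flag: enumerate() yields 0 for the then graph and 1 for
the else graph, so the flag is True for the then branch and False for the else branch. A
standalone illustration:

for input_index, branch in enumerate(('then_graph', 'else_graph')):
    print(branch, '->', not input_index)
# then_graph -> True
# else_graph -> False
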
Example #4
    def load(self, graph: Graph):
        argv = graph.graph['cmd_params']
        caffe_pb2 = loader.import_caffe_pb2(argv.caffe_parser_path)

        proto, model = loader.load_caffe_proto_model(caffe_pb2,
                                                     argv.input_proto,
                                                     argv.input_model)

        update_extractors_with_extensions(
            caffe_type_extractors, argv.disable_omitting_optional if hasattr(
                argv, 'disable_omitting_optional') else False,
            argv.disable_flattening_optional_params if hasattr(
                argv, 'disable_flattening_optional_params') else False)

        try:
            original_shapes = loader.caffe_pb_to_nx(graph, proto, model)
        except ValueError as e:
            raise Error(
                'Invalid prototxt file: value error {}. ' +
                refer_to_faq_msg(11), str(e)) from e
        graph.check_empty_graph('load_caffe_proto_model')

        graph.__setattr__('proto_path', argv.input_proto)
        graph.__setattr__('caffemodel_path', argv.input_model)
        graph.__setattr__('name',
                          getattr(proto, 'name', None) or argv.model_name)
        graph.graph['layout'] = 'NCHW'
        graph.graph['fw'] = 'caffe'
        graph.graph['original_shapes'] = original_shapes
        graph.graph['caffe_pb2'] = caffe_pb2

        custom_layers_map = custom_layers_mapping.load_layers_xml(argv.k)
        custom_layers_mapping.update_extractors(
            caffe_type_extractors, custom_layers_map,
            argv.disable_omitting_optional if hasattr(
                argv, 'disable_omitting_optional') else False,
            argv.enable_flattening_nested_params if hasattr(
                argv, 'enable_flattening_nested_params') else False)
        extract_node_attrs(
            graph, lambda node: caffe_extractor(
                node, check_for_duplicates(caffe_type_extractors)))
        send_op_names_info('caffe', graph)
        send_shapes_info('caffe', graph)
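
The repeated `argv.x if hasattr(argv, 'x') else False` ternaries above are a defaulting pattern;
getattr with a default expresses the same semantics more compactly. A sketch of the equivalent
call (same behavior, using only names from the example):

update_extractors_with_extensions(
    caffe_type_extractors,
    getattr(argv, 'disable_omitting_optional', False),
    getattr(argv, 'disable_flattening_optional_params', False))
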
Example #5
    def load(self, graph: Graph):
        argv = graph.graph['cmd_params']
        graph.graph['fw'] = 'pytorch'
        graph.graph['layout'] = 'NCHW'

        update_extractors_with_extensions(pytorch_op_extractors)

        # Create dummy inputs of the requested shapes and types
        if argv.input:
            placeholder_shapes = argv.placeholder_shapes
            placeholder_data_types = argv.placeholder_data_types
        else:
            placeholder_shapes = {'input': argv.placeholder_shapes}
            placeholder_data_types = {'input': np.float32}

        inputs = {}
        for name, shape in placeholder_shapes.items():
            dtype = placeholder_data_types[name]
            inp = np.random.randint(0, 255, shape).astype(dtype)
            inp = OpenVINOTensor(torch.tensor(inp))

            inputs[name] = inp
            inp.graph = graph
            inp.node_name = name
            graph.add_node(name, kind='op', op='Parameter', name=name, shape=shape)

        for name, value in argv.freeze_placeholder_with_value.items():
            inputs[name] = value

        model = argv.input_model

        register_model_hook(model, argv.is_dynamic)

        with torch.no_grad():
            if argv.input:
                model(**inputs)
            else:
                model(inputs['input'])

        extract_node_attrs(graph, lambda node: pytorch_op_extractor(node, check_for_duplicates(pytorch_op_extractors)))
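
This loader works by tracing: it fabricates random inputs of the requested shapes and runs the
model once, relying on register_model_hook to record every operation into the graph. A minimal,
self-contained sketch of the forward-hook idea (hypothetical and simplified; the real hook builds
graph nodes instead of printing):

import torch

def register_tracing_hooks(model: torch.nn.Module):
    # log every module call observed during one forward pass
    def hook(module, inputs, output):
        shapes = [tuple(i.shape) for i in inputs if isinstance(i, torch.Tensor)]
        print(type(module).__name__, shapes)
    for module in model.modules():
        module.register_forward_hook(hook)

model = torch.nn.Sequential(torch.nn.Conv2d(3, 8, 3), torch.nn.ReLU())
register_tracing_hooks(model)
with torch.no_grad():
    model(torch.rand(1, 3, 32, 32))
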
Example #6
    def load(self, graph: Graph):
        argv = graph.graph['cmd_params']
        model_proto = load_onnx_model(argv.input_model)
        model_graph = model_proto.graph  # pylint: disable=no-member
        # print(model_graph)
        # assert len(model_graph) == 1, "An ONNX model contains more than 1 graph: unsupported"
        log.debug("Number of nodes in graph_def: {}".format(len(model_graph.node)))
        log.debug("Number of all input ports (not true inputs) in graph_def: {}".format(len(model_graph.input)))
        log.debug("Number of initializers in graph_def: {}".format(len(model_graph.initializer)))
        log.debug(
            "Number of real inputs in graph_def: {}".format(len(model_graph.input) - len(model_graph.initializer)))
        update_extractors_with_extensions(onnx_op_extractors)

        try:
            protobuf2nx(graph, model_proto)
        except Exception as e:
            raise Error(
                'Cannot pre-process ONNX graph after reading from model file "{}". ' \
                'File is corrupt or has unsupported format. Details: {}. ' +
                refer_to_faq_msg(44),
                argv.input_model,
                str(e)
            ) from e
        log.debug("Number of nodes in NX graph: {}".format(graph.number_of_nodes()))

        graph.__setattr__('name',
                          argv.model_name if argv.model_name else model_proto.graph.name)  # pylint: disable=no-member
        graph.graph['layout'] = 'NCHW'
        graph.graph['fw'] = 'onnx'
        graph.graph['feature_dim'] = 1
        if hasattr(model_proto, 'opset_import'):
            graph.graph['fw_opset_version'] = model_proto.opset_import[0].version   # pylint: disable=no-member
        else:
            graph.graph['fw_opset_version'] = None

        graph.check_empty_graph('protobuf2nx. It may happen due to problems with loaded model')
        extract_node_attrs(graph, lambda node: onnx_op_extractor(node, check_for_duplicates(onnx_op_extractors)))
        send_op_names_info('onnx', graph)
        send_shapes_info('onnx', graph)
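
The fw_opset_version lookup above takes the first entry of opset_import. A quick standalone check
of the same field, assuming the onnx package is installed; the model path is hypothetical:

import onnx

model_proto = onnx.load('model.onnx')  # hypothetical path
if len(model_proto.opset_import) > 0:
    print('fw_opset_version:', model_proto.opset_import[0].version)
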
Example #7
    def load(self, graph: Graph):
        argv = graph.graph['cmd_params']
        if argv.tensorflow_custom_layer_libraries:
            libraries = argv.tensorflow_custom_layer_libraries.split(',')
            for library in libraries:
                log.info('Loading library "{}" with custom operations'.format(
                    library))
                tf_v1.load_op_library(library)

        graph_def, variables_values, framework, inputs_outputs_order = load_tf_graph_def(
            graph_file_name=argv.input_model,
            is_binary=not argv.input_model_is_text,
            checkpoint=argv.input_checkpoint,
            user_output_node_names_list=argv.output,
            model_dir=argv.saved_model_dir,
            meta_graph_file=argv.input_meta_graph,
            saved_model_tags=argv.saved_model_tags)

        if inputs_outputs_order is not None and isinstance(
                inputs_outputs_order, tuple):
            graph.inputs_order = inputs_outputs_order[0]
            graph.outputs_order = inputs_outputs_order[1]

        send_framework_info(framework)

        try:
            tf_v1.import_graph_def(graph_def, name='')
        except Exception:
            log.warning(
                "TensorFlow post-processing of loaded model was unsuccessful. "
                "This is an optional step that Model Optimizer performs for any input model but it is not usually "
                "required for all models. "
                "It likely means that the original model is ill-formed. "
                "Model Optimizer will continue converting this model.")

        log.debug("Number of nodes in graph_def: {}".format(len(
            graph_def.node)))  # pylint: disable=no-member

        if argv.tensorboard_logdir:
            tensorboard_util.dump_for_tensorboard(graph_def,
                                                  argv.tensorboard_logdir)

        update_extractors_with_extensions(tf_op_extractors)

        try:
            protobuf2nx(graph, graph_def)
        except Exception as e:
            raise Error(
                'Cannot pre-process TensorFlow graph after reading from model file "{}". ' \
                'File is corrupt or has unsupported format. Details: {}. ' +
                refer_to_faq_msg(44),
                argv.model_name,
                str(e)
            ) from e

        graph.__setattr__('name', argv.model_name)
        # 'layout' parameter change may cause an issue in EltwiseInputReshape replacer
        # and convert_nhwc_to_nchw(graph)
        graph.graph['layout'] = 'NCHW' if argv.disable_nhwc_to_nchw else 'NHWC'
        graph.graph['fw'] = 'tf'

        graph.graph['variables_values'] = variables_values
        del variables_values

        used_tensors = restore_edges(graph, get_tf_edges)

        # Tensor name information for a node is stored on its outgoing edges.
        # Output nodes have no outgoing edges, so fake outputs are required: for each output
        # an Identity node is added, and the tensor name for the output is kept on the
        # (output, fake output) edge. After the transformation that adds Result nodes,
        # the fake outputs are deleted from the graph.
        add_outputs_identity(
            graph, graph.nodes - used_tensors,
            lambda g, output, fake_node_name: g.add_edges_from(
                [create_tf_edge(output, fake_node_name, 0)]))

        remove_control_dependency_inputs(graph)

        graph.check_empty_graph(
            'protobuf2nx. It may happen due to problems with loaded model')
        extract_node_attrs(
            graph, lambda node: tf_op_extractor(
                node, check_for_duplicates(tf_op_extractors)))

        # try to detect layout from the nodes of the graph. If there are no convolution nodes in N(D)HWC layout then we
        # consider that the graph is in NCHW layout and no layout conversion should be performed
        if not argv.disable_nhwc_to_nchw and not graph_or_sub_graph_has_nhwc_ops(
                graph):
            if not argv.silent:
                log.debug('"disable_nhwc_to_nchw" was automatically enabled.')
            for_graph_and_each_sub_graph_recursively(
                graph, update_cmd_params_and_layout)

        send_op_names_info(framework, graph)
        send_shapes_info(framework, graph)
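
The import_graph_def call above is only a validation pass over the already-loaded graph_def. A
standalone sketch of loading a frozen graph and running the same check, assuming the TF 1.x compat
API used by the loader; the model path is hypothetical:

import tensorflow.compat.v1 as tf_v1

graph_def = tf_v1.GraphDef()
with open('frozen_model.pb', 'rb') as f:  # hypothetical path
    graph_def.ParseFromString(f.read())
tf_v1.import_graph_def(graph_def, name='')  # raises if the graph is ill-formed
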
Example #8
    def extract(cls, loop_node):
        Loop.update_node_stat(loop_node, {})

        # check that required body and condition functions exist in the graph library
        main_graph = loop_node.graph
        body_graph_proto = get_graph_proto(main_graph, 'body', loop_node)
        cond_graph_proto = get_graph_proto(main_graph, 'cond', loop_node)

        body_graph = create_internal_graph(main_graph)
        loop_node['body'] = body_graph
        # create Parameter nodes for the body graph
        body_parameters, body_parameter_names = convert_graph_inputs_to_parameters(
            body_graph, body_graph_proto)

        # update the loop body graph with the body function graph
        body_results = []
        update_body_graph(body_graph, body_graph_proto, body_parameter_names,
                          body_results)

        # update the loop body graph with the condition function graph
        update_body_graph(body_graph, cond_graph_proto, body_parameter_names,
                          body_results)

        # add the 'internal_layer_id' attribute, which is mandatory for loop body nodes
        for idx, body_node in enumerate(body_graph.get_op_nodes()):
            body_node['internal_layer_id'] = idx

        body_graph.stage = 'front'

        # Currently,
        # Loop Inputs Order:
        #   0    - current iteration
        #   1    - trip count
        #   2..  - "loop carried" dependencies variables
        #
        # Body Inputs Order:
        #   0    - current iteration
        #   1    - trip count
        #   2..  - "loop carried" dependencies variables
        #
        # Body Outputs Order:
        #   0      - current iteration
        #   1      - trip count
        #   2..    - "loop carried" dependencies variables
        #
        # Loop Outputs Order:
        #   0    - current iteration
        #   1    - trip count
        #   2..  - "loop carried" dependencies variables
        #
        # so inputs must be reordered and execution condition must be created in the front transformation
        # to be aligned with the specification

        # connect external input ports with body parameter nodes except current iteration
        # since it must be disconnected from external port
        for idx in range(1, len(body_parameters)):
            Loop.connect_body_input(loop_node, idx, body_parameters[idx])

        # mark current iteration input Parameter node and execution condition Result node
        Loop.mark_current_iteration_parameter_node(loop_node,
                                                   body_parameters[0])
        Loop.mark_execution_condition_result_node(loop_node, body_results[-1])

        # connect back edges in the body except current iteration
        for idx in range(1, len(body_parameters)):
            Loop.add_back_edge(loop_node, body_parameters[idx],
                               body_results[idx])

        # connect body outputs with Loop operation output ports except the execution condition result
        for idx in range(len(body_results) - 1):
            Loop.connect_body_output(loop_node, idx, body_results[idx])

        # parse body node attributes in the same way as for the main graph
        extract_node_attrs(
            body_graph, lambda node: tf_op_extractor(
                node, check_for_duplicates(tf_op_extractors)))
        return cls.enabled
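
Conceptually, each back edge added above feeds a body result of iteration i into the matching body
parameter of iteration i+1, while connect_body_input/connect_body_output wire iteration 0 and the
final iteration to the Loop's external ports. A plain-Python model of that dataflow (not the Loop
API):

def run_loop(trip_count, initial_state, body):
    state = initial_state               # external input port -> body parameter (iteration 0)
    for iteration in range(trip_count):
        state = body(iteration, state)  # back edge: body result -> body parameter of next iteration
    return state                        # final body result -> Loop output port

print(run_loop(3, 1, lambda i, s: s + i))  # 1 + 0 + 1 + 2 = 4
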
Example #9
    def extract(cls, loop_node):
        Loop.update_node_stat(loop_node, {})

        body_graph_proto = onnx_attr(loop_node, 'body', 'g', None)
        main_graph = loop_node.graph

        # create a Graph object for the body and take graph attributes from the main graph
        body_graph = Graph()
        main_graph_attrs_copy = {}
        for attr_key, attr_value in main_graph.graph.items():
            if attr_key not in ['tensor_mapping', 'parent_node']:
                main_graph_attrs_copy[attr_key] = copy.deepcopy(attr_value)
        body_graph.graph.update(main_graph_attrs_copy)
        loop_node['body'] = body_graph
        # save the parent node so nested loops know which node contains the body (and which graph is one level up)
        body_graph.graph['parent_node'] = loop_node

        # maps a tensor name to the node that produced it and the node's output port: str -> (node_id, node_port)
        data_nodes_map = {}
        body_graph.graph['tensor_mapping'] = data_nodes_map  # save mapping for possible Loop inside the Loop

        body_parameters = add_initializers_and_inputs_to_graph(body_graph, body_graph_proto, data_nodes_map)

        external_edges = []  # per graph level: ((src_node, src_out_port), dest_body_parameter_node, tensor_name)
        # additional edge information is saved for each graph level; the first entry is the deepest
        additional_params = []  # (src_node, src_out_port) -> parameter_node (for manually added Parameters)
        # Go through all nodes in the original model order because data nodes are defined on-the-fly and order matters
        for pb_node in body_graph_proto.node:
            # create an NX node
            id = body_graph.unique_id(node_id(pb_node))
            body_graph.add_node(id, pb=pb_node, kind='op')
            if hasattr(body_graph, 'op_names_statistic') and hasattr(pb_node, 'op_type'):
                body_graph.op_names_statistic[pb_node.op_type] += 1

            # add incoming edges based on data_nodes_map
            for dst_port, inp in enumerate(pb_node.input):
                # should add edge src_internal_id --> dst_id
                if inp not in data_nodes_map:
                    if inp == '':
                        # input is omitted; most likely it corresponds to an optional input for an operator
                        continue
                    else:
                        is_finished = create_cross_body_edge(body_graph, external_edges, additional_params,
                                                             inp, id, dst_port)
                        if not is_finished:
                            raise Error(
                                'Reference to "{}" is not satisfied. A node refers to a non-existing data tensor. '
                                'The ONNX model is not consistent. Protobuf fragment: {}', inp, pb_node)
                else:
                    src_id, src_port = data_nodes_map[inp]
                    create_edge_with_attrs(body_graph, inp, src_id, src_port, id, dst_port)

            # add outgoing edges to data_nodes_map
            for src_port, out in enumerate(pb_node.output):
                if out in data_nodes_map:
                    log.debug("Detected reuse of blob {}.".format(out))
                data_nodes_map[out] = (id, src_port)

        body_results = []
        for output in body_graph_proto.output:
            tensor_name = str(output.name)
            node_name, output_port = data_nodes_map[tensor_name]
            assert body_graph.has_node(node_name), 'The body graph does not contain output with name "{}"'.format(
                node_name)
            body_results.append(Node(body_graph, add_opoutput(body_graph, node_name, output_port, False)))

        # add the 'internal_layer_id' attribute, which is mandatory for loop body nodes
        for idx, body_node in enumerate(body_graph.get_op_nodes()):
            body_node['internal_layer_id'] = idx

        loop_carried_dependencies_count = len(body_graph_proto.input) - 2
        scan_outputs_count = len(body_graph_proto.output) - 1 - loop_carried_dependencies_count

        # Loop inputs:
        #   0 - trip count
        #   1 - execution condition
        #   2 .. - loop carried dependencies

        # Loop outputs:
        #   0 .. loop_carried_dependencies_count - 1 - loop carried dependencies
        #   loop_carried_dependencies_count .. - scan outputs

        # Body inputs:
        #   0 - iteration number
        #   1 - execution condition
        #   2 .. - loop carried dependencies

        # Body outputs:
        #   0 - execution condition
        #   1 .. loop_carried_dependencies_count - loop carried dependencies
        #   loop_carried_dependencies_count + 1 .. - scan outputs

        # some of the inputs/outputs may not be connected; the normalization transformation will take care of it
        # connect Loop body nodes to external input edges
        next_loop_input_port_idx = sorted(loop_node.in_edges().keys())[-1] + 1
        cur_graph = body_graph
        for external_edges_subg in external_edges:
            if 'parent_node' not in cur_graph.graph:
                continue
            cur_loop_node = cur_graph.graph['parent_node']
            parent_graph = cur_loop_node.graph
            for (src_node, src_port), body_node, tensor_name in external_edges_subg:
                create_edge_with_attrs(parent_graph, tensor_name, src_node, src_port,
                                       cur_loop_node.id, next_loop_input_port_idx)

                Loop.connect_body_input(cur_loop_node, next_loop_input_port_idx, body_node)
                next_loop_input_port_idx += 1
            cur_graph = parent_graph

        # mark current iteration input Parameter node
        Loop.mark_current_iteration_parameter_node(loop_node, body_parameters[0])

        # connect initial value for "execution condition" input of the loop
        Loop.connect_body_input(loop_node, 1, body_parameters[1])
        # add back edge with "execution condition"
        Loop.add_back_edge(loop_node, body_parameters[1], body_results[0])
        # mark "execution condition" Result node
        Loop.mark_execution_condition_result_node(loop_node, body_results[0])

        # connect initial value for "loop carried" dependencies variables
        for idx in range(loop_carried_dependencies_count):
            Loop.connect_body_input(loop_node, idx + 2, body_parameters[idx + 2])
        # add back edge for "loop carried" dependencies variables
        for idx in range(loop_carried_dependencies_count):
            Loop.add_back_edge(loop_node, body_parameters[idx + 2], body_results[idx + 1])
        # connect final value for "loop carried" dependencies variables
        for idx in range(loop_carried_dependencies_count):
            Loop.connect_body_output(loop_node, idx, body_results[idx + 1])

        # connect "scan outputs" and mark axis for concatenation
        for idx in range(loop_carried_dependencies_count, loop_carried_dependencies_count + scan_outputs_count):
            Loop.connect_body_output(loop_node, idx, body_results[idx + 1], axis=0)

        # parse body node attributes in the same way as for the main graph
        extract_node_attrs(body_graph, lambda node: onnx_op_extractor(node, check_for_duplicates(onnx_op_extractors)))
        return cls.enabled
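
The counts computed above follow directly from the ONNX Loop body signature documented in the
comments: body inputs are (iteration_number, condition, *carried) and body outputs are
(condition, *carried, *scan_outputs). A small check of that arithmetic:

def split_body_io(num_body_inputs, num_body_outputs):
    loop_carried = num_body_inputs - 2                   # drop iteration number and condition
    scan_outputs = num_body_outputs - 1 - loop_carried   # drop condition and carried outputs
    return loop_carried, scan_outputs

assert split_body_io(4, 5) == (2, 2)  # 2 loop-carried dependencies, 2 scan outputs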