Code example #1
def parse_custom_replacement_config_file(file_name: str):
    """
    Reads custom replacement configuration file file_name.
    :param file_name: name of the file to read from.
    :return: A list of CustomReplacementDescriptor objects parsed from the file.
    """
    if not os.path.exists(file_name):
        raise Error("Custom replacements configuration file '{}' does not exist. ".format(file_name) +
                    refer_to_faq_msg(69))

    data = load_and_validate_json_config(file_name)
    result = list()
    validation_errors = list()
    for attrs in data:
        if 'id' not in attrs:
            raise Error('One of the custom replacements in the configuration file "{}" does not contain attribute '
                        '"id". '.format(file_name) +
                        refer_to_faq_msg(71))
        if 'match_kind' not in attrs:
            raise Error('One of the custom replacements in the configuration file "{}" does not contain attribute '
                        '"match_kind". Possible values are "points", "scope" and "general". '.format(file_name) +
                        refer_to_faq_msg(71))
        desc = CustomReplacementDescriptor.create_instance(attrs['match_kind'], attrs['id'], attrs)
        validation_errors.extend(desc.validate_data())
        result.append(desc)
    if len(validation_errors) > 0:
        raise Error("File '{}' validation failed:\n{}. ".format(file_name, "\n".join(validation_errors)) +
                    refer_to_faq_msg(72))
    return result
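A minimal usage sketch (the file name, the example id, and the printed attribute are hypothetical; the required keys "id" and "match_kind" are inferred from the checks above, and depending on the match kind the schema validation may require more attributes):

# Hypothetical usage: write a minimal one-entry config and parse it.
import json
import tempfile

config = [{"id": "MyReplacement", "match_kind": "general"}]
with tempfile.NamedTemporaryFile('w', suffix='.json', delete=False) as tmp:
    json.dump(config, tmp)

for desc in parse_custom_replacement_config_file(tmp.name):
    print(desc.id)  # each entry is a CustomReplacementDescriptor carrying the replacement id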
Code example #2
File: loader.py Project: mateusztabaka/openvino
def load_symbol_nodes(model_name,
                      input_symbol: str = None,
                      legacy_mxnet_model: bool = False):
    if input_symbol:
        json_name = input_symbol
        if legacy_mxnet_model:
            log.warning(
                'If you use --input_symbol with legacy MXNet models be sure that symbol and param names '
                + 'have correct format supported by MXNet')
    else:
        json_name = '%s-symbol.json' % model_name
        input_symbol = json_name

    if legacy_mxnet_model and (input_symbol == json_name):
        log.warning(
            'For legacy MXNet models Model Optimizer does not support conversion of old MXNet models '
            '(trained with MXNet version 1.0.0 and lower) with custom layers. '
            + refer_to_faq_msg(93))
        sym = mx.symbol.load(json_name)
        model_nodes = json.loads(sym.tojson())
    else:
        if os.path.isfile(json_name):
            with open(json_name) as f:  # use a context manager so the file handle is closed
                model_nodes = json.load(f)
        else:
            raise Error(
                'Specified input json {} does not exist. ' +
                refer_to_faq_msg(84), json_name)

    return model_nodes['nodes']
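A brief call sketch (model and file names are made up, and the files are assumed to exist): when no explicit symbol file is given, the loader derives '<model_name>-symbol.json' from the model prefix.

# Hypothetical calls: the first reads 'resnet-50-symbol.json' derived from the prefix,
# the second reads the explicitly provided symbol file.
nodes = load_symbol_nodes('resnet-50')
nodes = load_symbol_nodes('resnet-50', input_symbol='custom-symbol.json')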
Code example #3
def check_keys_valid(ov_function: Model, keys: list, search_outputs: bool):
    """
    Internal function: checks if keys from cmd line arguments correspond to ov_function's inputs/outputs
    Throws if some key is not found
    Throws if some different keys point to the same actual input/output
    """
    nodes_used = {}
    nodes = ov_function.inputs
    if search_outputs:
        nodes += ov_function.outputs

    for name in keys:
        node_found = False
        for ov_node in nodes:
            if name in ov_node.get_tensor().get_names():
                if ov_node in nodes_used:
                    raise Error(
                        'Keys for {} and {} point to the same model input/output.'.
                        format(name, nodes_used[ov_node]))
                nodes_used[ov_node] = name
                node_found = True
                break

        if not node_found:
            if not search_outputs:
                raise Error('Input with name {} wasn\'t found! {}'.format(
                    name, refer_to_faq_msg(83)))
            else:
                raise Error(
                    'Input/Output with name {} wasn\'t found! {}'.format(
                        name, refer_to_faq_msg(83)))
Code example #4
File: loader.py Project: mateusztabaka/openvino
def load_symbol_def(input_model_name,
                    input_symbol,
                    input_names: str = '',
                    nd_prefix_name: str = '',
                    pretrained_model_name: str = '',
                    legacy_mxnet_model: bool = False):
    if not nd_prefix_name and not pretrained_model_name:
        # model name always has extension 'param'
        try:
            model_name, iteration_number = parse_input_model(input_model_name)
        except ValueError as err:
            raise Error(
                'Input model name {} is not in an expected format, cannot extract iteration number. '
                + refer_to_faq_msg(48), input_model_name) from err

        if input_names:
            model_params = load_params(input_model_name,
                                       data_names=input_names.split(','))
        else:
            model_params = load_params(input_model_name)

    elif nd_prefix_name and pretrained_model_name and input_symbol:
        model_name, iteration_number = parse_input_model(pretrained_model_name)
        model_name = '-'.join(input_symbol.split('-')[:-1])
        model_params = build_params_file(nd_prefix_name, pretrained_model_name,
                                         input_names)
    else:
        raise Error(
            "Arguments --nd_prefix_name, --pretrained_model_name and --input_symbol should be provided. Please provide all or do not use any. "
            + refer_to_faq_msg(81))

    model_nodes = load_symbol_nodes(model_name, input_symbol,
                                    legacy_mxnet_model)

    return model_nodes, model_params, model_name, iteration_number
Code example #5
File: preprocessing.py Project: terfendail/openvino
def check_keys_valid(ov_function: Model, dict_to_validate: dict,
                     search_outputs: bool):
    """
    Internal function: checks if keys from cmd line arguments correspond to ov_function's inputs/outputs
    Throws if some key is not found
    Throws if some different keys point to the same actual input/output
    """
    nodes_used = {}
    nodes = ov_function.inputs
    if search_outputs:
        nodes += ov_function.outputs

    # We need to replace all node names from dict to tensor names
    rename_dict = {}
    # Find names for replacing
    for name in dict_to_validate.keys():
        for ov_node in nodes:
            if name in ov_node.get_tensor().get_names():
                break
            elif name == ov_node.get_node().get_friendly_name():
                assert len(ov_node.get_tensor().get_names()
                           ) > 0, 'Node must have at least one tensor name'
                new_name = list(ov_node.get_tensor().get_names())[0]
                rename_dict[name] = new_name
                break

    # Replace found node names with tensor names
    for name, new_name in rename_dict.items():
        assert name in dict_to_validate, 'Key {} is not in initial dict'.format(
            name)
        assert new_name not in dict_to_validate, 'Key {} is already in initial dict'.format(
            new_name)
        dict_to_validate[new_name] = dict_to_validate[name]
        del dict_to_validate[name]

    # validate the dict
    for name in dict_to_validate.keys():
        node_found = False
        for ov_node in nodes:
            if name in ov_node.get_tensor().get_names():
                if ov_node in nodes_used:
                    raise Error(
                        'Keys for {} and {} point to the same model input/output.'.
                        format(name, nodes_used[ov_node]))
                nodes_used[ov_node] = name
                node_found = True
                break

        if not node_found:
            if not search_outputs:
                raise Error('Input with name {} wasn\'t found! {}'.format(
                    name, refer_to_faq_msg(83)))
            else:
                raise Error(
                    'Input/Output with name {} wasn\'t found! {}'.format(
                        name, refer_to_faq_msg(83)))
Code example #6
    def extract(cls, node: Node) -> bool:
        """
        Extract conv parameters from node.parameters.
        node.parameters like file descriptor object.
        :param node: Convolution node
        :return:
        """
        pb = node.parameters
        kernel = read_token_value(pb, b'<PatchDim>')
        stride = read_token_value(pb, b'<PatchStep>')
        patch_stride = read_token_value(pb, b'<PatchStride>')

        read_learning_info(pb)

        collect_until_whitespace(pb)
        weights, weights_shape = read_binary_matrix(pb)

        collect_until_whitespace(pb)
        biases = read_binary_vector(pb)

        if (patch_stride - kernel) % stride != 0:
            raise Error(
                'Kernel size and stride do not correspond to the `patch_stride` attribute of the Convolution layer. ' +
                refer_to_faq_msg(93))

        output = biases.shape[0]
        if weights_shape[0] != output:
            raise Error('Weights shape does not correspond to the `output` attribute of Convolution layer. ' +
                        refer_to_faq_msg(93))

        mapping_rule = {
            'output': output,
            'patch_stride': patch_stride,
            'bias_term': None,
            'pad': np.array([[0, 0], [0, 0], [0, 0], [0, 0]], dtype=np.int64),
            'pad_spatial_shape': np.array([[0, 0], [0, 0]], dtype=np.int64),
            'dilation': np.array([1, 1, 1, 1], dtype=np.int64),
            'kernel': np.array([1, 1, 1, kernel], dtype=np.int64),
            'stride': np.array([1, 1, 1, stride], dtype=np.int64),
            'kernel_spatial': np.array([1, kernel], dtype=np.int64),
            'input_feature_channel': 1,
            'output_feature_channel': 0,
            'kernel_spatial_idx': [2, 3],
            'group': 1,
            'reshape_kernel': True,
        }

        mapping_rule.update(layout_attrs())
        embed_input(mapping_rule, 1, 'weights', weights)
        embed_input(mapping_rule, 2, 'biases', biases)

        mapping_rule['bias_addable'] = len(biases) > 0

        Convolution.update_node_stat(node, mapping_rule)
        return cls.enabled
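The consistency check above is easiest to see with concrete numbers (illustrative values only, not taken from a real model):

# Arithmetic behind the (patch_stride - kernel) % stride check:
# kernel=3, stride=1, patch_stride=5 -> (5 - 3) % 1 == 0, accepted
# kernel=3, stride=2, patch_stride=6 -> (6 - 3) % 2 == 1, rejected with the FAQ #93 error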
Code example #7
File: loader.py Project: Flex-plaidml-team/openvino
def parse_mean(file_path: str, in_shape: np.ndarray, mean_file_offsets: [tuple, None], caffe_pb2):
    blob = caffe_pb2.BlobProto()
    with open(file_path, 'rb') as file:
        data = file.read()

    if not data:
        raise Error('Mean file "{}" is empty. ' + refer_to_faq_msg(5),
                    file_path)

    try:
        blob.ParseFromString(data)
        data = np.array(blob.data)  # pylint: disable=no-member

        if blob.HasField('channels') or blob.HasField('height') or blob.HasField('width'):
            data = data.reshape(blob.channels, blob.height, blob.width)  # pylint: disable=no-member
        else:
            data = data.reshape(blob.shape.dim)  # pylint: disable=no-member
        # crop mean image according to input size
        if in_shape[2] > data.shape[1] or in_shape[3] > data.shape[2]:
            raise Error(
                'Input image of shape {} is larger than mean image {} from file "{}". ' +
                refer_to_faq_msg(4),
                in_shape,
                data.shape,
                file_path
            )

        if mean_file_offsets is not None and len(mean_file_offsets) == 2:
            offset_x = mean_file_offsets[0]
            offset_y = mean_file_offsets[1]
        else:
            offset_x = int((data.shape[1] - in_shape[2]) / 2)
            offset_y = int((data.shape[2] - in_shape[3]) / 2)

        mean = []
        for i in range(in_shape[1]):
            data_channel = np.zeros(in_shape[2] * in_shape[3], dtype=np.float32)
            for x in range(in_shape[2]):
                for y in range(in_shape[3]):
                    data_channel[x * in_shape[3] + y] = data[i, x + offset_x, y + offset_y]
            mean.append(data_channel)

        return mean

    except Exception as err:
        raise Error(
            'While processing mean file "{}": {}. Probably mean file has incorrect format. ' +
            refer_to_faq_msg(6),
            file_path,
            str(err)) from err
Code example #8
def build_net(graph: Graph):
    try:
        if 'GLOG_minloglevel' not in os.environ:  # os.environ is a mapping; hasattr() would always be False here
            os.environ['GLOG_minloglevel'] = '2'
        import caffe
        log.info('Partial inference via the framework is available')
    except ImportError:
        log.warning(
            'pyCaffe is not available. Partial inference via the framework is not '
            + 'possible')
        return

    try:
        net = caffe.Net(graph.proto_path, graph.caffemodel_path, caffe.TEST)
    except Exception as err:
        raise Error(
            'Error happened while constructing caffe.Net in the Caffe fallback function: {}. '
            + refer_to_faq_msg(12), str(err)) from err

    inputs_node_name = find_inputs(graph)

    reshape_flag = False
    for i in inputs_node_name:
        new_input_shape = graph.node[i]['shape'].astype(int)
        top_node = get_node_top(graph, i)
        caffe_shape = list(net.blobs[top_node].shape)
        if not np.all(caffe_shape == new_input_shape):
            net.blobs[top_node].reshape(*[int(x) for x in new_input_shape])
            reshape_flag = True

    if reshape_flag:
        net.reshape()

    try:
        net.forward()
    except KeyError as err:
        log.error('Error happened in Caffe net.forward: {}.'.format(str(err)))
        log.error(
            'It may point to the known bug in pycaffe when top and name of the layer do not match.'
        )
        log.error('Please make sure that the latest pycaffe is used.')
        raise Error(
            'Cannot infer shapes due to exception in Caffe: {}. ' +
            refer_to_faq_msg(13), str(err)) from err
    except Exception as err:
        raise Error(
            'Cannot infer shapes in Caffe net.forward due to exception: {}. ' +
            refer_to_faq_msg(13), str(err)) from err

    graph.__setattr__('caffe_net', net)
Code example #9
def read_counts_file(file_path):
    with open(file_path, 'r') as f:
        file_content = f.readlines()
    if len(file_content) > 1:
        raise Error('Expected the counts file to be a one-line file. ' +
                    refer_to_faq_msg(90))

    counts_line = file_content[0].strip().replace('[', '').replace(']', '')
    try:
        # note: np.fromstring is deprecated in recent NumPy releases; it still parses a space-separated list here
        counts = np.fromstring(counts_line, dtype=float, sep=' ')
    except TypeError:
        raise Error('Expected the counts file to contain a list of floats. ' +
                    refer_to_faq_msg(90))

    return counts
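A round-trip sketch (the file name and values are made up): the counts file is a single line of floats, optionally wrapped in brackets, which the function strips before parsing.

# Hypothetical round trip: write a one-line counts file and read it back.
import numpy as np

with open('counts.txt', 'w') as f:
    f.write('[ 12.0 7.5 3.25 ]\n')

counts = read_counts_file('counts.txt')
assert np.allclose(counts, [12.0, 7.5, 3.25])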
Code example #10
File: Cast.py Project: yury-intel/openvino
    def helper_value_propagation(node_name, value, dst_type):
        new_blob, finite_match_count, zero_match_count = convert_blob(
            value, dst_type)

        if finite_match_count:
            log.error(
                "{} elements of {} were clipped to infinity while converting an input blob for node '{}' to {}."
                " ".format(finite_match_count, new_blob.size, node_name,
                           dst_type) + refer_to_faq_msg(76))
        if zero_match_count:
            log.warning(
                "{} elements of {} were clipped to zero while converting an input blob for node '{}' to {}."
                " ".format(zero_match_count, new_blob.size, node_name,
                           dst_type) + refer_to_faq_msg(77))
        return new_blob
Code example #11
def caffe_native_node_infer(node: Node):
    """
    Infers shape of the unknown operation via Caffe if it is available.
    Requires graph to contain paths to both prototxt and caffemodel files.
    When it is visited for the first time, net object is created and written to graph.
    Next time, it just takes the built net from graph.

    Parameters
    ----------
    node: node to infer the shape for

    """
    log.error(
        "Caffe fallback is deprecated. It will be removed in future releases. Please use extensions for unsupported layers.\n"
        "See more information in the \"Custom Layers in the Model Optimizer\" chapter of the Model Optimizer Developer Guide",
        extra={'is_warning': True})
    log.info('Called "caffe_native_node_infer" for node "{}"'.format(node.id))

    graph = node.graph
    net = get_net(graph)
    if not net:
        raise Error(
            'Cannot infer shape for node "{}" because there is no Caffe available. '
            +
            'Please register python infer function for op = {} or use Caffe for shape inference. '
            + refer_to_faq_msg(14), node.soft_get('name'), node.soft_get('op'))

    for iout in range(len(node.out_nodes())):
        output_shape = int64_array(net.blobs[node.top].data.shape)
        node.out_node(iout).shape = output_shape
Code example #12
File: leaky_relu.py Project: mikhailk62/openvino
    def extract(cls, node):
        attrs = get_mxnet_layer_attrs(node.symbol_dict)
        act_type = attrs.str('act_type', 'leaky')
        if act_type == 'prelu':
            prelu_attrs = {'channel_shared': 1,
                           'filler_type': 'constant',
                           'filler_value': 0,
                           'min': 0,
                           'max': 1,
                           'mean': 0,
                           'std': 0,
                           'sparse': -1,
                           'variance_norm': "caffe.FillerParameter.FAN_IN"}
            PReLU.update_node_stat(node, prelu_attrs)
        elif act_type == 'elu':
            alpha = attrs.float('slope', 0.25)
            Elu.update_node_stat(node, {'alpha': alpha})
        elif act_type == 'leaky':
            negative_slope = attrs.float('slope', 0.25)
            if negative_slope == 0:
                ReLU.update_node_stat(node)
            else:
                LeakyReLU.update_node_stat(node, {'negative_slope': negative_slope})
        elif act_type == 'gelu':
            GeLUOP.update_node_stat(node, {'approximation_mode': 'erf'})
        else:
            raise Error(
                "Operation '{}' not supported. Please register it as custom op. " +
                refer_to_faq_msg(86),
                act_type)

        return LeakyReLUFrontExtractor.enabled
Code example #13
File: infer.py Project: yeonbok/openvino
def validate_batch_in_shape(shape, layer_name: str):
    """
    Raises Error #39 if shape is not valid for setting batch size
    Parameters
    ----------
    shape: current shape of layer under validation
    layer_name: name of layer under validation
    """
    if len(shape) == 0 or (shape[0] is not dynamic_dimension
                           and shape[0] not in (-1, 0, 1)):
        raise Error((
            'The input layer {} has a shape {} defined in the model. \n\n'
            'When you use the -b (--batch) option, Model Optimizer applies its value to the first '
            'element of the shape if it is equal to -1, 0 or 1. Otherwise, the situation is ambiguous: '
            'Model Optimizer cannot know in advance whether the layer has a batch dimension or not.\n\n'
            'For example, to set the batch dimension to 100 for the input layer "data" with shape (10,34), '
            'you cannot use --batch; pass --input_shape (100,34) instead of --batch 100. \n\n'
            'You can also tell Model Optimizer where the batch dimension is located by specifying --layout. \n\n'
            + refer_to_faq_msg(39)).format(layer_name, shape))
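A behaviour sketch (the layer name and shapes are made up): only shapes whose first element is -1, 0, 1, or the dynamic dimension pass the check.

validate_batch_in_shape([1, 3, 224, 224], 'data')   # batch dimension is 1: passes
validate_batch_in_shape([-1, 3, 224, 224], 'data')  # batch dimension is -1: passes
# validate_batch_in_shape([10, 34], 'data')         # batch dimension is 10: ambiguous, raises Error #39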
Code example #14
File: main.py Project: MaximProshin/openvino
def prepare_ir(argv: argparse.Namespace):
    argv = arguments_post_parsing(argv)
    t = tm.Telemetry()
    graph = None
    ngraph_function = None
    moc_front_end, available_moc_front_ends = get_moc_frontends(argv)
    if moc_front_end:
        fallback_reasons = check_fallback(argv)
        if len(fallback_reasons) == 0:
            t.send_event("mo", "conversion_method",
                         moc_front_end.get_name() + "_frontend")
            moc_front_end.add_extension(
                TelemetryExtension("mo", t.send_event, t.send_error,
                                   t.send_stack_trace))
            moc_front_end.add_extension(
                ProgressReporterExtension(progress_printer(argv)))
            ngraph_function = moc_pipeline(argv, moc_front_end)
            return graph, ngraph_function
        else:  # apply fallback
            reasons_message = ", ".join(fallback_reasons)
            load_extensions(argv, *list(deduce_framework_by_namespace(argv)))
            t.send_event("mo", "fallback_reason", reasons_message)
            log.warning(
                "The IR preparation was executed by the legacy MO path. "
                "This is a fallback scenario applicable only for some specific cases. "
                f"The detailed reason why fallback was executed: not supported {reasons_message} were used. "
                "You can specify --use_new_frontend flag to force using the Frontend MO path to avoid additional checks. "
                + refer_to_faq_msg(105))

    t.send_event("mo", "conversion_method", "mo_legacy")
    graph = unified_pipeline(argv)

    return graph, ngraph_function
Code example #15
File: loader.py Project: mikhailk62/openvino
    def load(self, graph: Graph):
        argv = graph.graph['cmd_params']
        try:
            model_nodes, model_params, model_name, iteration_number = load_symbol_def(
                argv.input_model, argv.input_symbol, argv.input,
                argv.nd_prefix_name, argv.pretrained_model_name,
                argv.legacy_mxnet_model)
        except (ValueError, mxnet.base.MXNetError) as e:
            raise FrameworkError(
                'The following error happened while loading mxnet model {}: {}. '
                + refer_to_faq_msg(53), argv.input_model, str(e)) from e

        if argv.nd_prefix_name and argv.pretrained_model_name and argv.save_params_from_nd:
            save_params_file(model_name, model_params._arg_params,
                             model_params._aux_params, iteration_number)

        update_extractors_with_extensions(mxnet_op_extractors)
        symbol2nx(graph, model_nodes, model_params, argv.input)
        graph.check_empty_graph(
            'symbol2nx. It may happen due to problems with loaded model')

        graph.graph['layout'] = 'NCHW'
        graph.graph['fw'] = 'mxnet'
        graph.graph['feature_dim'] = 1 if graph.graph['layout'] == 'NCHW' else 3

        extract_node_attrs(graph, mxnet_op_extractor)
        send_op_names_info('mxnet', graph)
        send_shapes_info('mxnet', graph)
Code example #16
File: loader.py Project: liubo-intel/openvino
def read_file_to_graph_def(graph_def: [tf_v1.GraphDef, tf_v1.MetaGraphDef],
                           graph_file_name: str = "",
                           is_binary: bool = True):
    """
    Reads file to protobuf
    :param graph_def: GraphDef orr MetaGraphDef object to store the network
    :param graph_file_name: path to file with graph
    :param is_binary: flag to switch between binary and test protobuf format of graph file
    :return: GraphDef or MetaGaphDef containing the network with cleared device info.
    """
    try:
        if is_binary:
            with open(graph_file_name, "rb") as f:
                graph_def.ParseFromString(f.read())
        else:
            with open(graph_file_name, "r") as f:
                text_format.Merge(f.read(), graph_def)
        nodes_to_clear_device = graph_def.node if isinstance(
            graph_def, tf_v1.GraphDef) else graph_def.graph_def.node
        for node in nodes_to_clear_device:
            node.device = ""
    except Exception as e:
        raise FrameworkError(
            'TensorFlow cannot read the model file: "{}" is incorrect TensorFlow model file. '
            '\nThe file should contain one of the following TensorFlow graphs:'
            '\n1. frozen graph in text or binary format'
            '\n2. inference graph for freezing with checkpoint (--input_checkpoint) in text or binary format'
            '\n3. meta graph'
            '\n\nMake sure that --input_model_is_text is provided for a model in text format. '
            'By default, a model is interpreted in binary format. Framework error details: {}. '
            + refer_to_faq_msg(43), graph_file_name, str(e)) from e
    return graph_def
Code example #17
def caffe_extractor(node: Node, lowered_keys_map: dict) -> (bool, dict):
    if node.has_valid('op') and node.op == 'Identity':
        return True, {}
    result = common_caffe_fields(node)
    supported = False
    name = None

    layer_type = result['type'].lower()
    if layer_type in lowered_keys_map:
        layer_type = lowered_keys_map[layer_type]
        assert layer_type in caffe_type_extractors
        name = layer_type

    if name:  # it is either standard or registered via CustomLayersMapping.xml
        attrs = caffe_type_extractors[name](node)
        # the registry intentionally returns None when no extractor is found
        if attrs is not None:
            result.update(attrs)
            supported = True

    if not supported:
        raise Error(
            'Found custom layer "{}". Model Optimizer does not support this layer. '
            .format(node.id) + 'Please implement the extension. ' +
            refer_to_faq_msg(45))

    if 'infer' not in result or not result['infer']:
        result.update(native_caffe_node_extractor(node))

    phase_attr = check_phase(node)
    result.update(phase_attr)
    return supported, result
Code example #18
def load_and_validate_json_config(config_file_name: str):
    """
    Reads and validates the custom replacement configuration file config_file_name.
    :param config_file_name: name of the file to read from.
    :return: A dictionary deserialized from the json config file.
    """
    try:
        with open(config_file_name, 'r') as f:
            json_config = json.load(f)
            try:
                import fastjsonschema as json_validate

                validator = json_validate.compile(schema_dict)
                validator(json_config)
            except ModuleNotFoundError:
                log.error(
                    "Module 'fastjsonschema' for json validation not installed. Please update requirements.",
                    extra={'is_warning': True})

    except Exception as e:
        raise Error(
            "Failed to parse custom replacements configuration file '{}': {}. "
            .format(config_file_name, e) + refer_to_faq_msg(70)) from e

    return json_config
Code example #19
File: loader.py Project: Flex-plaidml-team/openvino
def get_layers(proto):
    if len(proto.layer):
        return proto.layer
    elif len(proto.layers):
        return proto.layers
    else:
        raise Error('Invalid proto file: there is neither a "layer" nor a "layers" top-level message. ' +
                    refer_to_faq_msg(7))
Code example #20
def mxnet_op_extractor(node: Node):
    result = common_mxnet_fields(node)
    op = result['op']
    if op not in mxnet_op_extractors:
        raise Error(
            "Operation '{}' not supported. Please register it as custom op. " +
            refer_to_faq_msg(86), op)
    result_attr = mxnet_op_extractors[op](node)

    if result_attr is None:
        raise Error(
            'Model Optimizer does not support layer "{}". Please implement the extension. '
            .format(node.name) + refer_to_faq_msg(45))

    result.update(result_attr)
    supported = bool(result_attr)
    return supported, result
Code example #21
def partial_infer(graph: Graph, start_node: str = None):
    """
    Tries to execute constant parts of the graph and deduce as much as possible
    information following the data flow, e.g. calculate and propagate shapes and
    constant values. Partially or completely defined values are stored in data
    nodes (kind='data').
    """
    # We have to turn off strict mode because below we add and remove edges without attributes, which is prohibited
    graph.strict_mode = False
    cycle_nodes = graph.get_nodes_with_attributes(is_cyclic=True)
    cycle_nodes = [Node(graph, node).out_node().id for node in cycle_nodes]
    ebunch_cyclic = list(graph.out_edges(nbunch=cycle_nodes, data=True, keys=True))
    ebunch_reconnected = exit_bound_edges(graph, sources=cycle_nodes, end_node_attrs={'op': 'Exit'})
    graph.remove_edges_from(ebunch_cyclic)
    graph.add_edges_from(ebunch_reconnected)

    try:
        nodes = list(nx.topological_sort(graph))
    except nx.NetworkXUnfeasible:
        raise Error('Graph contains a cycle. Can not proceed. ' + refer_to_faq_msg(97))

    graph.remove_edges_from(ebunch_reconnected)
    graph.add_edges_from(ebunch_cyclic)
    graph.strict_mode = True

    # Mark all nodes as not inferred yet
    if start_node is not None:
        start_index = nodes.index(start_node)
        nx.set_node_attributes(G=graph.subgraph(nodes[start_index:]), name='is_partial_inferred', values=False)
    else:
        nx.set_node_attributes(G=graph, name='is_partial_inferred', values=False)

    nx.set_node_attributes(G=graph, name='executable',
                           values={n: True for n in graph.get_nodes_with_attributes(kind='data')})

    # first we infer constant sub-graphs so the reverse infer could use constant values sub-graphs. For example,
    # convolution weights may be reshuffled by some operation in the graph and are not directly consumed by the conv
    # node
    infer_nodes(graph, nodes, True)

    # we may need to deduce shape for Parameter node(s) if it is not defined
    need_reverse_infer = False
    for parameter in graph.get_op_nodes(op='Parameter'):
        if parameter.soft_get('shape', None) is None:
            need_reverse_infer = True

    if need_reverse_infer:
        reverse_infer(graph, nodes)

    infer_nodes(graph, nodes, False)

    not_fully_inferred = graph.get_nodes_with_attributes(is_not_fully_inferred=True)
    for n in not_fully_inferred:
        node = Node(graph, n)
        if node.has('infer') and node.infer is not None:
            node.infer(node)

    return graph
Code example #22
def tf_tensor_content(tf_dtype, shape, pb_tensor):
    type_helper = tf_data_type_decode.get(tf_dtype)
    if type_helper is None:
        raise Error("Data type is unsupported: {}. " +
                    refer_to_faq_msg(50), tf_dtype)

    decode_err_msg = 'Failed to parse a tensor with Unicode characters. Note that Inference Engine does not support ' \
                     'string literals, so the string constant should be eliminated from the graph.'
    if pb_tensor.tensor_content:
        value = mo_array(np.frombuffer(pb_tensor.tensor_content, type_helper[0]))
    else:
        # load typed value
        if type_helper[0] != str:  # np.str was just an alias for the builtin str and was removed in NumPy 1.24
            value = mo_array(type_helper[1](pb_tensor), dtype=type_helper[0])
        else:
            try:
                value = mo_array(type_helper[1](pb_tensor), dtype=type_helper[0])
            except UnicodeDecodeError:
                log.error(decode_err_msg, extra={'is_warning': True})
                value = mo_array(type_helper[1](pb_tensor))

    # Ignore an empty value, if len(shape) > 1
    # For example, value = [] and shape = [1, 1, 0]
    # This is needed to reshape this value later and to return reshaped value = [[[]]]
    # Otherwise there can be failures during partial inference, because we are storing an empty value with incorrect
    # shape
    if len(shape) == 0 or (len(shape) == 1 and shape.prod() == 0):
        try:
            value_length = len(value)
        except TypeError:
            # case, when value is a scalar
            value_length = 0
        if value_length == 1:
            # return scalar if shape is [] otherwise broadcast according to shape
            try:
                return mo_array(value[0], dtype=type_helper[0])
            except UnicodeDecodeError:
                log.error(decode_err_msg, extra={'is_warning': True})
                return mo_array(value[0])
        else:
            # no shape, return value as is
            return value

    if len(value) != shape.prod():
        log.warning("Shape and content size of tensor don't match, shape: {} content size: {}".
                    format(shape, len(value)))
        # broadcast semantics according to TensorFlow v1.5 documentation:
        # The argument value can be a constant value, or a list of values of type dtype. If value is a list,
        # then the length of the list must be less than or equal to the number of elements implied by the shape
        # argument (if specified). In the case where the list length is less than the number of elements specified
        # by shape, the last element in the list will be used to fill the remaining entries.
        value_flatten = value.flatten()
        add_value = value_flatten[-1]
        add_length = shape.prod() - len(value_flatten)
        value = np.concatenate([value_flatten, np.full([add_length], add_value)])

    return value.reshape(shape)
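A worked example of the fill-with-last-element broadcast implemented above, as standalone NumPy with made-up values: a value list shorter than the number of elements implied by the shape is padded with its last element.

# Requesting [1., 2., 3.] into shape (2, 3): the tail is filled with 3.0.
import numpy as np

value = np.array([1., 2., 3.])
shape = np.array([2, 3])
flat = value.flatten()
padded = np.concatenate([flat, np.full([shape.prod() - len(flat)], flat[-1])])
print(padded.reshape(shape))  # [[1. 2. 3.] [3. 3. 3.]]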
Code example #23
def register_caffe_python_extractor(op: Op, name: str = None):
    if not name and hasattr(op, 'op'):
        name = op.op
    if not name:
        raise Error(
            "Cannot register Op {}. Please call 'register_caffe_python_extractor' "
            "with the 'name' parameter set. ".format(op) + refer_to_faq_msg(87))
    CaffePythonFrontExtractorOp.registered_ops[name] = lambda node: extension_op_extractor(node, op)
Code example #24
    def update_custom_replacement_attributes(self, graph: Graph):
        if not self.has('instances'):
            raise Error("No instance(s) is(are) defined for the custom replacement '{}'. ".format(self.replacement_id) +
                        refer_to_faq_msg(66))
        if not isinstance(self.instances, dict):
            raise Error("The instance must be a single dictionary for the custom replacement with id '{}'. ".format(
                self.replacement_id) +
                        refer_to_faq_msg(67))

        start_points = self.get_internal_input_nodes(graph)
        end_points = self.get_internal_output_nodes(graph)

        matched_nodes = sub_graph_between_nodes(graph, start_points, end_points, include_control_flow=False)
        output_tensors = set()
        input_nodes_mapping = dict()  # key is the input tensor name, value is the pair: (input_port, output_node_name)
        for src_node_name, dst_node_name, edge_attrs in graph.edges(data=True):
            dst_node = graph.node[dst_node_name]

            # edge outside sub-graph into sub-graph
            if (src_node_name not in matched_nodes) and (dst_node_name in matched_nodes):
                tensor_name = src_node_name + ":" + str(edge_attrs['out'])
                if tensor_name not in input_nodes_mapping:
                    input_nodes_mapping[tensor_name] = list()
                input_nodes_mapping[tensor_name].append(('^' + dst_node_name + '$', edge_attrs['in']))

            # edge from inside sub-graph to outside sub-graph
            if (src_node_name in matched_nodes) and (dst_node_name not in matched_nodes):
                output_tensors.add(('^' + dst_node['pb'].input[edge_attrs['in']] + '$', edge_attrs['out']))

        for node_name in graph.nodes():
            node = Node(graph, node_name)
            if node_name in matched_nodes and len(node.out_nodes()) == 0 and node['pb'].op != 'Const':
                log.debug("Node {} doesn't have output edges. Consider it output".format(node_name))
                output_tensors.add(('^' + node_name + '$', 0))

        if not self.has('inputs'):
            self._replacement_desc['inputs'] = [[{'node': desc[0], 'port': desc[1]} for desc in inp]
                                                for inp in sorted(input_nodes_mapping.values())]
            log.debug('Updated inputs of sub-graph for instance "{}"'.format(self.instances))

        if not self.has('outputs'):
            self._replacement_desc['outputs'] = [{'node': node, 'port': port} for node, port in sorted(output_tensors)]
            log.debug('Updated outputs of sub-graph for instance "{}"'.format(self.instances))
Code example #25
    def add_custom_replacement_description_from_config(self, file_name: str):
        if not os.path.exists(file_name):
            raise Error(
                "Custom replacement configuration file '{}' doesn't exist. ".
                format(file_name) + refer_to_faq_msg(46))

        descriptions = parse_custom_replacement_config_file(file_name)
        for desc in descriptions:
            self.registry.setdefault(desc.id, list()).append(desc)
            log.info("Registered custom replacement with id '{}'".format(
                desc.id))
Code example #26
def check_for_duplicates(extractors_collection: dict):
    """
    Check if extractors_collection has case-insensitive duplicates, if it does,
    raise an exception with information about the duplicates
    """
    # Check that extractors_collection is in normal form, i.e. it doesn't have case-insensitive duplicates
    duplicates, keys = find_case_insensitive_duplicates(extractors_collection)
    if len(duplicates) > 0:
        raise Error('Extractors collection has case-insensitive duplicates {}. ' +
                    refer_to_faq_msg(47), duplicates)
    return {k: v[0] for k, v in keys.items()}
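A quick sketch of the failure mode (the extractor entries are hypothetical): two keys that differ only in case trigger the error.

# 'Conv' and 'conv' collide case-insensitively, so this raises the Error with FAQ #47.
extractors = {'Conv': lambda node: {}, 'conv': lambda node: {}}
check_for_duplicates(extractors)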
Code example #27
    def find_and_replace_pattern(self, graph: Graph):
        values = graph.graph['cmd_params'].mean_scale_values
        input_nodes = graph.get_op_nodes(op='Parameter')

        if not isinstance(values, dict):
            # The case when input names to apply mean/scales weren't specified
            if len(values) != len(input_nodes):
                raise Error('Numbers of inputs and mean/scale values do not match. ' + refer_to_faq_msg(61))

            data = np.copy(values)
            values = {}
            for idx, node in enumerate(input_nodes):
                values.update(
                    {
                        node.soft_get('name', node.id): {
                            'mean': data[idx][0],
                            'scale': data[idx][1]
                        }
                    }
                )

        for node_name, node_mean_scale_values in values.items():
            node_id = None
            node_name = get_node_name_with_port_from_input_value(node_name)
            try:
                node_id, direction, port = get_node_id_with_ports(graph, node_name, skip_if_no_port=False)
                assert direction != 'out', 'Only input port can be specified for mean/scale application'
            except Error as e:
                log.warning('node_name {} is not found in graph'.format(node_name))
            if Node(graph, node_id) not in input_nodes:
                # if the user cut off an input of the network, the input node name specified in --scale_values
                # or --mean_values does not correspond to a real input node generated by Model Optimizer. But
                # the initial input node name is stored in the Placeholder's attribute 'initial_node_name'
                new_node_id = None
                for placeholder in input_nodes:
                    try:
                        placeholder_port = int(placeholder.id.split("_")[-1])
                    except Exception as ex:
                        log.debug('Can not get the port number from the node {}'.format(placeholder.id))
                        log.debug('Port will be defined as None')
                        port = None
                    if placeholder.has('initial_node_name') and placeholder.initial_node_name == node_id and (
                            port is None or placeholder_port == port):
                        new_node_id = placeholder.id
                        break
                if new_node_id is None:
                    raise Error('Input with name {} wasn\'t found!'.format(node_name) +
                                refer_to_faq_msg(83))
                node_id = new_node_id

            input_node = Node(graph, node_id)
            AddMeanScaleValues.apply_scale(graph, input_node, node_mean_scale_values)
            AddMeanScaleValues.apply_mean_value(graph, input_node, node_mean_scale_values)
Code example #28
File: emitter.py Project: yury-intel/openvino
def generate_ie_ir(graph: Graph,
                   file_name: str,
                   input_names: tuple = (),
                   mean_offset: tuple = (),
                   mean_size: tuple = (),
                   meta_info: dict = dict()):
    """
    Extracts IE/IR attributes from kind='op' nodes in three ways:
      (1) node.IE xml scheme that sets correspondence from existing attributes to generated xml elements
      (2) input/output edges that don't have 'bin' attributes are transformed to input/output ports
      (3) input edges that has 'bin' attributes are handled in special way like weights/biases

    Args:
        graph: nx graph with FW-independent model
        file_name: name of the resulting IR
        input_names: names of input layers of the topology to add mean file to
        input_name: name of the layer which is referenced from pre-processing block if any
        mean_values: tuple of mean values for channels in RGB order
        scale_values:  tuple of mean values for channels in RGB order
        mean_offset: offset in binary file, where mean file values start
        mean_size: size of the mean file
    """
    net = Element('net')
    net.set('name', graph.name)
    net.set('version', str((graph.graph['ir_version'])))

    if mean_size or mean_offset:
        create_pre_process_block_for_image(net, input_names, mean_offset,
                                           mean_size)

    if 'mean_values' in graph.graph.keys():
        for input_name, values in graph.graph['mean_values'].items():
            create_pre_process_block(net, input_name, values)

    unsupported = UnsupportedOps(graph)

    serialize_network(graph, net, unsupported)
    add_quantization_statistics(graph, net)
    add_meta_data(net, meta_info, legacy_path=True)
    add_quantization_info_section(net, meta_info)
    xml_string = tostring(net)
    xml_doc = parseString(xml_string)
    pretty_xml_as_string = xml_doc.toprettyxml()
    if len(unsupported.unsupported):
        log.debug('Partially correct IR XML:\n{}'.format(pretty_xml_as_string))
        unsupported.report(
            log.error,
            "List of operations that cannot be converted to Inference Engine IR:"
        )
        raise Error('Part of the nodes was not converted to IR. Stopped. ' +
                    refer_to_faq_msg(24))
    with open(file_name, 'wb') as file:
        file.write(bytes(pretty_xml_as_string, "UTF-8"))
Code example #29
def dump_for_tensorboard(graph_def: tf_v1.GraphDef, logdir: str):
    try:
        # TODO: graph_def is a deprecated argument, use graph instead
        print('Writing an event file for the tensorboard...')
        with tf_v1.summary.FileWriter(logdir=logdir,
                                      graph_def=graph_def) as writer:
            writer.flush()
        print('Done writing an event file.')
    except Exception as err:
        raise Error(
            'Cannot write an event file for the tensorboard to directory "{}". '
            + refer_to_faq_msg(36), logdir) from err
Code example #30
def invert_sub_graph_between_nodes(graph: Graph,
                                   start_nodes: list,
                                   end_nodes: list,
                                   detect_extra_start_node: callable = None):
    """
    Finds nodes of the sub-graph between 'start_nodes' and 'end_nodes'. But doing it from start_nodes stepping
    backward by in edges.

    Input nodes for the sub-graph nodes are also added to the sub-graph. Constant inputs of the 'start_nodes'
    are also added to the sub-graph.
    :param graph: graph to operate on.
    :param start_nodes: list of nodes names that specifies start nodes.
    :param end_nodes: list of nodes names that specifies end nodes.
    :return: list of nodes of the identified sub-graph or None if the sub-graph cannot be extracted.
    """
    sub_graph_nodes = list()
    visited = set(start_nodes)
    d = deque(start_nodes)
    extra_start_nodes = []

    nx.set_node_attributes(G=graph, name='prev', values=None)
    while len(d) != 0:
        cur_node_name = d.popleft()
        sub_graph_nodes.append(cur_node_name)
        if cur_node_name not in start_nodes and \
                detect_extra_start_node is not None and detect_extra_start_node(Node(graph, cur_node_name)):
            extra_start_nodes.append(cur_node_name)
        else:
            if cur_node_name not in end_nodes:  # do not add output nodes of the end_nodes
                for src_node_name, _ in graph.in_edges(cur_node_name):
                    if src_node_name not in visited:
                        d.append(src_node_name)
                        visited.add(src_node_name)
                        graph.node[cur_node_name]['prev'] = src_node_name

    for node_name in sub_graph_nodes:
        # sub-graph should not contain Input nodes
        if graph.node[node_name].get('op', '') == 'Parameter':
            path = list()
            cur_node = node_name
            while cur_node and 'prev' in graph.node[cur_node]:
                path.append(str(cur_node))
                cur_node = graph.node[cur_node]['prev']
            log.debug("The path from input node is the following: {}".format(
                '\n'.join(path)))
            raise Error(
                'The matched sub-graph contains network input node "{}". '.
                format(node_name) + refer_to_faq_msg(75))
    if detect_extra_start_node is None:
        return sub_graph_nodes
    else:
        return sub_graph_nodes, extra_start_nodes