def common_pool_extender(op: Node):
    for attr in ['strides', 'pads_begin', 'pads_end', 'kernel', 'dilations']:
        Extender.attr_to_list(op, attr)
    op['stride'] = int64_array([1, 1] + op.strides)
    op['window'] = int64_array([1, 1] + op.kernel)
    op['kernel_spatial'] = op.kernel
    op['output_spatial_shape'] = None

    if op.has_valid('dilations'):
        op['dilation'] = int64_array([1, 1] + op.dilations)
    if op.has_valid('index_element_type'):
        op['index_element_type'] = destination_type_to_np_data_type(
            op.index_element_type)

    op['batch_dims'] = int64_array([0])
    op['channel_dims'] = int64_array([1])

    op['pool_method'] = 'max' if op.type == 'MaxPool' else 'avg'

    dim = len(op.pads_begin)

    assert dim in (1, 2, 3), '{}D {} not supported! Node name: {}'.format(
        dim, op.soft_get('type'), op.soft_get('name', op.id))

    pad = [[0, 0], [0, 0]]
    pad.extend([[op.pads_begin[i], op.pads_end[i]] for i in range(dim)])

    op['pad'] = int64_array(pad)

    op['spatial_dims'] = [i + 2 for i in range(dim)]

    if op.has_valid('rounding_type') and op.rounding_type == 'ceil':
        op['pooling_convention'] = 'full'
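
# Illustrative sketch (not part of the extender above): how the stride/window/pad
# attributes look for a 2D pooling with kernel 3x3, stride 2 and asymmetric padding.
# The values below are made up for the example.
import numpy as np

strides, kernel = [2, 2], [3, 3]
pads_begin, pads_end = [0, 1], [1, 1]

stride = np.array([1, 1] + strides, dtype=np.int64)   # [1, 1, 2, 2] -> N, C, H, W
window = np.array([1, 1] + kernel, dtype=np.int64)    # [1, 1, 3, 3]

pad = [[0, 0], [0, 0]] + [[pads_begin[i], pads_end[i]] for i in range(len(pads_begin))]
pad = np.array(pad, dtype=np.int64)                   # shape (4, 2): per-dimension [begin, end]

spatial_dims = [i + 2 for i in range(len(pads_begin))]  # [2, 3] -> spatial axes in NCHW
print(stride, window, pad.tolist(), spatial_dims)
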
    def add_input_data_to_prior_boxes(graph: Graph, input_names: str = ''):
        """
        PriorBox layer has a data input, unlike mxnet.
        Need to add a data input to _contrib_MultiBoxPrior
        for correct conversion to the PriorBox layer.

        Parameters
        ----------
        graph : Graph
           Graph with loaded model.
        input_names : str
           Comma-separated list of input node names; defaults to 'data'.
        """
        if not input_names:
            input_names = ('data', )
        else:
            input_names = input_names.split(',')

        input_nodes = {}
        for node in graph.nodes():
            node = Node(graph, node)
            if node.has_valid('op') and node.name in input_names:
                input_nodes.update({node.id: node})

        if len(input_nodes) > 0:
            for node in graph.nodes():
                node = Node(graph, node)
                if node.has_valid(
                        'op') and node.op == '_contrib_MultiBoxPrior':
                    node.add_input_port(idx=1)
                    graph.create_edge(list(input_nodes.values())[0],
                                      node,
                                      out_port=0,
                                      in_port=1)
def find_unsupported_ops(graph: Graph):
    """
    The function returns the list of node names that are not supported. Currently, nodes that produce non-FP32 data
    tensors or have an undefined 'type' attribute are considered unsupported.
    :param graph: current graph with operations. Data nodes are not yet added.
    :return: the list of node names which are not supported
    """
    unsupported = list()
    for node_name in graph.nodes():
        node = Node(graph, node_name)
        # op nodes that produce non-FP32 data or have no 'type' are considered unsupported
        if node.kind == 'op':
            if node.has_valid('type') or (node.has_valid('op')
                                          and node.op == 'Result'):
                for out_data_node in node.out_nodes().values():
                    if out_data_node.has_valid(
                            'data_type'
                    ) and out_data_node.data_type != np.float32:
                        log.info(
                            'Node "{}" produces non-FP32 output. Considering it unsupported'
                            .format(node_name))
                        unsupported.append(node.id)
            else:
                log.info(
                    'Node "{}" does not have a type. Considering it unsupported'.
                    format(node_name))
                unsupported.append(node.id)
    return unsupported
    def _one_input_infer(node: Node):
        input_shape = node.in_port(0).data.get_shape()
        node_name = node.soft_get('name', node.id)
        if input_shape is None:
            raise Error('Input shape is None for node {}'.format(node_name))

        if not node.has_valid('axis'):
            raise Error('The "axis" attribute is missing for node {}; it should be set in the Crop extractor'.format(node_name))

        output_shape = input_shape.copy()
        if node.has_valid('dim'):
            if len(node.dim) != len(node.axis):
                raise Error('Number of "axis" values {} should match number of "dim" values {} for node "{}"'
                            ''.format(node.axis, node.dim, node_name))
            output_shape[node.axis] = node.dim
        elif node.has_valid('crop_begin') and node.has_valid('crop_end'):
            if len(node.crop_begin) != len(node.axis) or len(node.crop_end) != len(node.axis):
                raise Error('Number of crop_begin ({}) / crop_end ({}) values should match number of "axis" values {} '
                            'for node "{}"'.format(node.crop_begin, node.crop_end, node.axis, node_name))
            if type(node.axis) in [list, tuple]:
                for i in range(len(node.axis)):
                    output_shape[node.axis[i]] = output_shape[node.axis[i]] - node.crop_begin[i] - node.crop_end[i]
            else:
                output_shape[node.axis] = output_shape[node.axis] - node.crop_begin - node.crop_end
        else:
            raise Error('Crop node {} should have either dim or crop_begin and crop_end attributes'.format(node_name))

        node.out_port(0).data.set_shape(output_shape)
        PermuteAttrs.create_permute_attrs(node, attrs=[('axis', 'input:0')])
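
# Illustrative sketch of the crop_begin/crop_end shape arithmetic above; the shape,
# axes and crop amounts below are made up for the example.
import numpy as np

input_shape = np.array([1, 3, 224, 224], dtype=np.int64)
axis, crop_begin, crop_end = [2, 3], [10, 10], [14, 14]

output_shape = input_shape.copy()
for i in range(len(axis)):
    # each cropped axis shrinks by the amount removed at both ends
    output_shape[axis[i]] = output_shape[axis[i]] - crop_begin[i] - crop_end[i]
print(output_shape)  # [  1   3 200 200]
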
    def add_reshapes_for_tf_subgraph_calls(graph: Graph):
        """
        Input and output tensors of the TFCustomSubgraphCall must be 4D because the IE layer accepts and produces
        only 4D tensors. This function adds reshape operations where necessary.
        :param graph: graph to operate on.
        :return: None.
        """
        for src_node_name, dst_node_name, edge_attrs in list(
                graph.edges(data=True)):
            src_node = Node(graph, src_node_name)
            dst_node = Node(graph, dst_node_name)
            if dst_node.kind == 'op' and dst_node.has_valid('type') and dst_node.type == 'TFCustomSubgraphCall' and \
                    src_node.has_valid('shape') and len(src_node.shape) != 4:
                log.info(
                    "There is a data tensor of shape '{}' which goes into the '{}' node"
                    .format(src_node.shape, dst_node.type))
                CustomSubgraphCall.add_reshape_before_op_node(
                    graph, src_node_name, dst_node_name, edge_attrs)

        for node in graph.get_op_nodes(op='TFCustomSubgraphCall'):
            for index, data_node in node.out_nodes().items():
                real_dims_count = len(data_node.shape)
                if real_dims_count != 4:
                    log.info(
                        "There is a data tensor of shape '{}' with real dims count '{}' which goes out of the '{}' "
                        "node".format(data_node.shape, real_dims_count,
                                      node.name))
                    CustomSubgraphCall.add_reshape_after_data_node(
                        graph, data_node.id)

                    # need to update shape of the op so IE generates XML with 4D tensors
                    out_shape = CustomSubgraphCall.make_shape_4d(
                        data_node['shape'])

                    data_node['shape'] = out_shape
 def pad_attribute_helper(node: Node, pad_type: str = 'begin'):
     assert pad_type in ['begin', 'end']
     if not node.has_valid('pad'):
         return None
     pad = get_backend_pad(node.pad, node.spatial_dims,
                           0 if pad_type == 'begin' else 1)
     if node.has_valid('auto_pad') and node.auto_pad != 'explicit':
         pad = [0 for _ in pad]
     return ','.join(map(str, pad))
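
# Illustrative sketch of what pad_attribute_helper serializes. _backend_pad is a local
# stand-in for get_backend_pad (assumed here to pick the begin/end column of the pad
# array for the spatial axes only); the pad values are made up.
import numpy as np

def _backend_pad(pad, spatial_dims, side):
    return [int(pad[d][side]) for d in spatial_dims]

pad = np.array([[0, 0], [0, 0], [1, 2], [3, 4]], dtype=np.int64)  # NCHW [begin, end] pairs
spatial_dims = [2, 3]
print(','.join(map(str, _backend_pad(pad, spatial_dims, 0))))  # '1,3' -> pads_begin string
print(','.join(map(str, _backend_pad(pad, spatial_dims, 1))))  # '2,4' -> pads_end string
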
def get_fw_tensor_debug_info(node: Node):
    while not node.has_valid('fw_tensor_debug_info') and not node.has_valid('output_sort_order') \
            and len(node.in_nodes()):
        try:
            node = node.in_node()
        except Exception as e:
            log.warning('Was not able to determine tensor debug info for node {}'.format(node.name))
            return "dummy_node_name"
    if node.has_valid('output_sort_order'):
        return node.soft_get('output_sort_order')
    return node.soft_get('fw_tensor_debug_info')
    def find_and_replace_pattern(self, graph: Graph):
        graph.stage = 'front'
        for node_id in graph.nodes(data=False):
            node = Node(graph, node_id)
            inputs = node.get_sorted_inputs()
            outputs = node.get_sorted_outputs()

            in_ports_count = node.in_ports_count if node.has_valid(
                'in_ports_count') else len(inputs)
            out_ports_count = node.out_ports_count if node.has_valid(
                'out_ports_count') else len(outputs)

            if len(outputs) > out_ports_count > 1:
                raise Error("Node {} has more children than it should: " +
                            "should be {} but there is {}".format(
                                node_id, out_ports_count, len(outputs)))

            node['_in_ports'] = {}
            node['_out_ports'] = {}
            if in_ports_count is not None:
                for idx in range(in_ports_count):
                    node.add_input_port(idx=idx)

            if out_ports_count is not None:
                for idx in range(out_ports_count):
                    node.add_output_port(idx=idx)
            idx = 0
            for in_node_id, edge_attrs in inputs:
                graph.remove_edge(in_node_id, node_id)
                if len(Node(graph, in_node_id).out_ports()) == 0:
                    Node(graph, in_node_id).add_output_port(0)
                in_node = Node(graph, in_node_id)
                in_node.out_port(edge_attrs['out']).connect(node.in_port(idx))
                # need to keep this attribute in edge for correct .mapping file generation and
                # for generation of "names" field in IR
                in_node.out_edge(
                    edge_attrs['out']
                )['fw_tensor_debug_info'] = edge_attrs['fw_tensor_debug_info']
                if idx < in_ports_count - 1:
                    idx = idx + 1

            idx = 0
            for out_node_id, edge_attrs in outputs:
                graph.remove_edge(node_id, out_node_id)
                if len(Node(graph, out_node_id).in_ports()) == 0:
                    Node(graph, out_node_id).add_input_port(0)
                node.out_port(idx).connect(
                    Node(graph, out_node_id).in_port(edge_attrs['in']))
                # need to keep this attribute in edge for correct .mapping file generation and
                # for generation of "names" field in IR
                node.out_edge(idx)['fw_tensor_debug_info'] = edge_attrs[
                    'fw_tensor_debug_info']
                if idx < out_ports_count - 1:
                    idx = idx + 1
 def get_concat_axis(concat: Node):
     # Concat axis may be stored as an attribute and as an input (TF) and this is not resolved yet
     # TODO: should be removed after Concat operation normalization
     assert concat.soft_get('type') == 'Concat'
     if concat.has_valid('axis'):
         return concat.axis
     if concat.has_valid('N'):
         axis_node = concat.in_port(concat.N).get_source().node
         if axis_node.has_valid('value'):
             return axis_node.value.item(0)
     return None
    def infer(node: Node):
        name = node.soft_get('name', node.id)
        connected_inputs = {idx: port for idx, port in node.in_ports().items() if not port.disconnected()}
        assert len(connected_inputs) == 1 and 0 in connected_inputs, \
            "AttributedPower should have 1 connected input port, but it doesn't for node: `{}`. Ports: {}" \
            "".format(name, connected_inputs)

        assert node.has_valid('scale'), \
            'AttributedPower operation should have the `scale` parameter set, but it does not for node {}'.format(name)
        assert node.has_valid('shift'), \
            'AttributedPower operation should have the `shift` parameter set, but it does not for node {}'.format(name)
        assert node.has_valid('power'), \
            'AttributedPower operation should have the `power` parameter set, but it does not for node {}'.format(name)

        eltwise_infer(node, lambda a: np.power(a * node.scale + node.shift, node.power))
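
# Standalone check of the lambda above: element-wise (a * scale + shift) ** power,
# with made-up parameter values.
import numpy as np

a = np.array([1.0, 2.0, 3.0])
scale, shift, power = 2.0, 1.0, 2.0
print(np.power(a * scale + shift, power))  # [ 9. 25. 49.]
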
    def get_non_interpolate_concat_sources(self, concat: Node):
        """
        Traverses Concat input ports up to find which of them are not connected to Interpolate operations directly
        or through identity operation sequence. Returns the list of Concat sources that satisfy the condition.
        """
        assert concat.soft_get('type') == 'Concat'
        sources, ports_to_omit = [], []
        if concat.has_valid('N'):
            # TODO: should be removed after Concat operation normalization
            ports_to_omit.append(concat.N)

        for in_port in concat.in_ports().values():
            if in_port.disconnected() or in_port.idx in ports_to_omit:
                continue
            next_node = in_port.get_source().node
            while next_node.soft_get(
                    'type') != 'Interpolate' and next_node.has_and_set(
                        'identity'):
                node = self.get_single_input_source_safely(next_node)
                if node is not None:
                    next_node = node
                else:
                    break
            if next_node.soft_get('type') != 'Interpolate':
                sources.append(in_port.get_connection().get_source())
        return sources
    def infer(node: Node):
        name = node.soft_get('name', node.id)

        connected_in_ports = {
            idx: port
            for idx, port in node.in_ports().items()
            if not port.disconnected()
        }
        assert len(connected_in_ports) >= 2 and 0 in connected_in_ports and 1 in connected_in_ports, \
            'FullyConnected should have 2 connected input ports, but it doesn\'t for node: `{}`. Ports: {}' \
            ''.format(name, connected_in_ports)

        assert node.has_valid('out-size')
        input_shape = node.in_port(0).data.get_shape()
        weights_shape = node.in_port(1).data.get_shape()
        assert input_shape is not None and weights_shape is not None, \
            'Incorrect FullyConnected input shapes. Node: {}. Shapes: {}'.format(name, [input_shape, weights_shape])
        assert weights_shape.size == 2
        out_size = node.soft_get('out-size')
        assert compatible_dims(weights_shape[0], out_size), \
            'weights_shape={}, out-size={}'.format(weights_shape, out_size)

        if 2 in connected_in_ports:
            bias_value = node.in_port(2).data.get_value()
            bias_shape = node.in_port(2).data.get_shape()
            assert bias_shape is not None, 'Shape was not inferred for biases of FullyConnected {}'.format(
                name)
            assert bias_value is not None, 'Value was not inferred for biases of FullyConnected {}'.format(
                name)
            assert compatible_shapes(bias_shape, [out_size]) or compatible_shapes(bias_shape, [1, out_size]), \
                'Incorrect FullyConnected bias shape `{}` for node {}. `out-size`={}'.format(bias_shape, node, out_size)

        node.out_port(0).data.set_shape([*input_shape[:-1], out_size])
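
# Illustrative sketch of the shape contract checked above: weights are [out-size, K],
# the last input dimension is contracted and replaced by out-size (values made up).
import numpy as np

input_shape = (8, 128)                                              # [..., K]
out_size = 256
weights = np.zeros((out_size, input_shape[-1]), dtype=np.float32)   # weights_shape[0] == out-size
bias = np.zeros((out_size,), dtype=np.float32)                      # [out-size] or [1, out-size]

data = np.zeros(input_shape, dtype=np.float32)
output = data @ weights.T + bias
print(output.shape)  # (8, 256) == (*input_shape[:-1], out_size)
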
def common_onnx_fields(node: Node):
    return {
        'kind': 'op',
        'name': node.id,
        # there is no reliable name for an ONNX node; the name can be empty, so we use the surrogate ID built by the loader
        'op': node.op if node.has_valid('op') else node.pb.op_type,
    }
    def array_infer(node: Node):
        size = node.in_node(0)
        assert size.value is not None

        # 0 port: handle
        if 0 in node.out_nodes().keys():
            if node.has_valid('element_shape'):
                element_shape = node['element_shape']
            else:
                element_shape = None

            out_node = node.out_node(0).id
            output_value = node.out_node(0).id
            node.graph.node[out_node]['value'] = np.array(output_value)

            output_shape = node.graph.node[out_node]['value'].shape
            node.graph.node[out_node]['shape'] = shape_array(output_shape)

            node.graph.node[out_node]['element_shape'] = shape_array(
                element_shape)
            node.graph.node[out_node]['size'] = size.value
        # 1 port flow
        if 1 in node.out_nodes().keys():
            output_value = None

            out_node = node.out_node(1).id
            node.graph.node[out_node][
                'value'] = None if output_value is None else np.array(
                    output_value)
            node.graph.node[out_node]['shape'] = shape_array(output_shape)
def reverse_infer(graph: Graph, nodes: list):
    nodes = reversed(nodes)
    for n in nodes:
        node = Node(graph, n)
        if node.has_valid('reverse_infer'):
            log.debug("Executed reverse infer for node '{}'".format(node.soft_get('name', node.id)))
            node.reverse_infer(node)
def caffe_extractor(node: Node, lowered_keys_map: dict) -> (bool, dict):
    if node.has_valid('op') and node.op == 'Identity':
        return True, {}
    result = common_caffe_fields(node)
    supported = False
    name = None

    layer_type = result['type'].lower()
    if layer_type in lowered_keys_map:
        layer_type = lowered_keys_map[layer_type]
        assert layer_type in caffe_type_extractors
        name = layer_type

    if name:  # it is either standard or registered via CustomLayersMapping.xml
        attrs = caffe_type_extractors[name](node)
        # intentionally so: the Python registry returns None if the extractor is not found
        if attrs is not None:
            result.update(attrs)
            supported = True

    if not supported:
        raise Error(
            'Found custom layer "{}". Model Optimizer does not support this layer. '
            .format(node.id) + 'Please implement the extension. ' +
            refer_to_faq_msg(45))

    if 'infer' not in result or not result['infer']:
        result.update(native_caffe_node_extractor(node))

    phase_attr = check_phase(node)
    result.update(phase_attr)
    return supported, result
    def infer(node: Node):
        node_name = node.soft_get('name', node.id)
        assert node.with_right_bound is not None, \
            "Attribute \"with_right_bound\" is not defined"
        assert len(node.in_nodes()) == 2, \
            "Incorrect number of inputs for {} node".format(node.id)
        if node.get_opset() != "extension":
            assert node.has_valid('output_type'), \
                '`output_type` attribute is not set for Bucketize node `{}`'.format(node_name)
            assert node.output_type in [np.int64, np.int32], \
                'Bucketize `output_type` attribute must be int32 or int64, `{}` found'.format(np.dtype(node.output_type).name)

        output_shape = node.in_port(0).data.get_shape()
        node.out_port(0).data.set_shape(output_shape)

        input_value = node.in_port(0).data.get_value()
        buckets_value = node.in_port(1).data.get_value()

        # compute the output value if all inputs are constant
        if input_value is not None and buckets_value is not None:
            node.out_port(0).data.set_value(
                mo_array(np.digitize(input_value,
                                     buckets_value,
                                     right=node.with_right_bound),
                         dtype=node.output_type))
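
# The constant-folding branch above is plain np.digitize; a standalone example showing
# how the right-bound flag changes the bucket of values equal to a boundary.
import numpy as np

input_value = np.array([[3, 8, 1], [5, 5, 9]])
buckets = np.array([0, 5, 10])  # boundaries must be sorted

print(np.digitize(input_value, buckets, right=False))  # value == boundary goes to the next bucket
print(np.digitize(input_value, buckets, right=True))   # value == boundary stays in the current bucket
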
    def replace_identityN(node: Node):
        graph = node.graph
        name = node.soft_get('name', node.id)

        assert node.has_valid(
            'data_types'), 'IdentityN {} has no `data_types` attribute'.format(
                name)
        dtypes = node.data_types

        for idx, port in node.in_ports().items():
            if not node.is_in_port_connected(
                    idx) or not node.is_out_port_connected(idx):
                # ATTENTION section in the description above
                continue
            assert idx < len(
                dtypes
            ), 'IdentityN {} has inconsistent `data_types` attribute {}'.format(
                name, dtypes)
            identity = Identity(graph, {
                'name': '{}/{}_port'.format(name, idx),
                'data_type': dtypes[idx]
            }).create_node()
            port.get_connection().set_destination(identity.in_port(0))
            node.out_port(idx).get_connection().set_source(
                identity.out_port(0))

        # ATTENTION section in the description above
        for in_port in node.in_ports().values():
            in_port.disconnect()
        for out_port in node.out_ports().values():
            out_port.disconnect()
    def get_fw_index(node: Node, idx: int) -> int:
        if not node.has_valid('rt_info'):
            return idx

        rt_info = node.rt_info
        if not rt_info.contains('old_api_map_order'):
            return idx

        old_api_map_version = rt_info.get_attribute_version(
            'old_api_map_order')
        old_api_map = rt_info.info['old_api_map_order', old_api_map_version]
        if 'inverse_order' not in old_api_map.info:
            return idx

        order = old_api_map.info['inverse_order']
        node_name = node.soft_get('name', node.id)

        if idx < 0:
            assert not node.out_port(0).disconnected(), 'Cannot normalize negative axis {} in node {} ' \
                                                        'as out port is disconnected.'.format(idx, node_name)
            data_rank = len(list(node.out_port(0).data.get_shape()))
            idx = data_rank + idx

        assert len(order) > idx >= 0, \
            'Channel index {} is incompatible with old_api_map in node {}.'.format(idx, node_name)
        return list(order).index(idx)
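
# Minimal sketch of the index remapping at the end of get_fw_index; the inverse_order
# value below is hypothetical (an NHWC-style permutation of a 4D tensor).
order = [0, 2, 3, 1]
data_rank = 4

idx = -3                       # framework channel index, possibly negative
if idx < 0:
    idx = data_rank + idx      # normalize to a non-negative axis -> 1
print(list(order).index(idx))  # 3: position of that axis in the framework order
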
 def infer(node: Node):
     if node.has_valid('element_size'):
         # element_size should be set by Kaldi loader or MemoryOffsetAdjustment or SplitRecurrentMemoryOffset
         node.out_port(0).data.set_shape(node.element_size)
     else:
         # for TDNN blocks
         copy_shape_infer(node)
    def insert_pre_processing(graph: Graph, input_node: Node, node_mean_scale_values: np.array,
                              preprocessing_name: str):
        assert preprocessing_name in ['scale', 'mean']
        if node_mean_scale_values.get(preprocessing_name) is None:
            return
        user_value = node_mean_scale_values[preprocessing_name]
        value = 1 / user_value if preprocessing_name == 'scale' else user_value * (-1)
        optimize_value = int(preprocessing_name == 'scale')
        op = Mul if preprocessing_name == 'scale' else Add

        if all([x == optimize_value for x in value]):
            return
        assert input_node.has_valid('shape')
        features_dim_idx = get_features_dim(graph.graph['layout'], len(input_node.shape))
        assert compatible_dims(value.size, input_node.shape[features_dim_idx]) or value.size == 1

        shape = np.ones(len(input_node.shape), dtype=np.int64)
        shape[features_dim_idx] = value.size
        value = value.reshape(shape)

        name = input_node.soft_get('name', input_node.id) + '/' + preprocessing_name
        preprocessing = create_op_with_const_inputs(graph, op=op, port_value_dict={1: value}, op_attrs={'name': name})

        for dst in input_node.out_port(0).get_destinations():
            if dst.node.soft_get('type') != 'ShapeOf':
                # After the insertion of additional operations model optimizer
                # should keep the link to the input layer. Parameter node in framework
                # should map to parameter node in IR.
                # For this reason 'fw_tensor_debug_info' should be kept in data node.
                dst.get_connection().set_source(preprocessing.out_port(0), "source")

        input_node.out_port(0).connect(preprocessing.in_port(0))
    def replace_op(self, graph: Graph, node: Node):
        node_name = node.soft_get('name', node.id)
        assert node.has_valid(
            'axis'
        ), 'The node "{}" does not have mandatory attribute "axis"'.format(
            node_name)

        flatten_node = FlattenONNX(graph, {
            'name': node_name + '/FlattenONNX_',
            'axis': node.axis
        }).create_node()
        shape_node = Shape(graph, {
            'name': node_name + '/ShapeOf_'
        }).create_node()
        logsoftmax_node = LogSoftmax(graph, {
            'name': node_name + '/LogSoftmax_',
            'axis': 1
        }).create_node()
        reshape_node = Reshape(graph, {}).create_node()

        rename_nodes([(node, node_name + '/delete'),
                      (reshape_node, node_name)])

        shape_node.out_port(0).connect(reshape_node.in_port(1))
        logsoftmax_node.out_port(0).connect(reshape_node.in_port(0))
        flatten_node.out_port(0).connect(logsoftmax_node.in_port(0))

        source = node.in_port(0).get_source()

        flatten_node.in_port(0).connect(source)
        shape_node.in_port(0).connect(source)

        return [reshape_node.id]
def arg_ops_infer(node: Node):
    shape = node.in_port(0).data.get_shape()
    node_name = node.soft_get('name', node.id)
    assert shape is not None, "Input shape for the node {} is None".format(node_name)

    # there are two inputs in TensorFlow. The second input is the axis for ArgMax
    connected_in_ports = [port for port in node.in_ports().values() if not port.disconnected()]
    if len(connected_in_ports) == 2:
        axis = node.in_port(1).data.get_value()
        if axis is None:
            log.debug('The second argument to {} is None'.format(node.soft_get('name', node.id)))
            return
        node.axis = axis
        # remove the unnecessary input
        node.in_port(1).disconnect()

    num_top_axes = shape.size
    if num_top_axes < 3:
        num_top_axes = 3

    out_shape = np.ones(num_top_axes, dtype=np.int64)

    if node.has_valid('axis'):
        axis = get_canonical_axis_index(shape, node.axis)
        node.axis = axis
        out_shape = shape.copy()
        out_shape[axis] = node.top_k
        PermuteAttrs.create_permute_attrs(node, attrs=[('axis', 'input:0')])
    else:
        out_shape[0] = shape[0]
        out_shape[2] = node.top_k
        if node.has_and_set('out_max_val'):
            out_shape[1] = 2

    node.out_port(0).data.set_shape(out_shape)
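
# Illustration of the two output-shape branches above, with made-up shapes.
import numpy as np

shape, top_k, out_max_val = np.array([2, 3, 224, 224], dtype=np.int64), 5, True

# axis is known: only that dimension is replaced by top_k
axis = 1
out_shape = shape.copy()
out_shape[axis] = top_k
print(out_shape)  # [  2   5 224 224]

# no axis: Caffe-style [N, 1 or 2, top_k, 1, ...] layout
num_top_axes = max(shape.size, 3)
out_shape = np.ones(num_top_axes, dtype=np.int64)
out_shape[0], out_shape[2] = shape[0], top_k
if out_max_val:
    out_shape[1] = 2
print(out_shape)  # [2 2 5 1]
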
    def replace_op(self, graph: Graph, node: Node):
        name = node.soft_get('name', node.id)
        axis = node.soft_get('axis', 0)

        rename_node(node=node, name=name + '/to_be_removed')
        cumsum_node = create_op_node_with_second_input(graph, CumSum,
                                                       int64_array(axis), {
                                                           'name': name,
                                                           'reverse': False,
                                                           'exclusive': False
                                                       })
        rename_node(cumsum_node, name)

        node.in_port(0).get_connection().set_destination(
            cumsum_node.in_port(0))
        if node.has_valid('mx_out_type') and node['mx_out_type'] is not None:
            rename_node(node=cumsum_node, name=name + '/CumSum')
            convert = Cast(graph, {
                'name': name,
                'dst_type': node['mx_out_type']
            }).create_node()
            rename_node(convert, name)
            cumsum_node.out_port(0).connect(convert.in_port(0))
            return [convert.id]
        else:
            return [cumsum_node.id]
    def infer(node: Node):
        if node.has_and_set('extra_inputs'):
            assert len(node.in_nodes()) == 8
        else:
            assert len(node.in_nodes()) == 5
        assert len(node.out_nodes()) in [1, 2]

        hidden_shape = node.in_node(1).shape.copy()
        cell_shape = node.in_node(2).shape.copy()

        mark_input_bins(node, start_port=3)
        node.out_node(0).shape = hidden_shape
        if len(node.out_nodes()) == 2:
            node.out_node(1).shape = cell_shape

        hidden_size = hidden_shape[1]

        if node.has_valid('hidden_size'):
            if node.hidden_size != hidden_size:
                raise Error(
                    "Input shape {} for hidden size doesn't match pre-defined hidden_size in node {}"
                    .format(node.in_node(1).shape, node.soft_get('name')))
        else:
            node['hidden_size'] = hidden_size

        assert cell_shape[1] == hidden_size

        input_shape = node.in_node(0).shape
        assert input_shape is not None
        assert compatible_dims(hidden_shape[0], cell_shape[0]) and \
               compatible_dims(cell_shape[0], input_shape[0]), 'States are not broadcast-able by batch for node {}' \
                                                               ''.format(node.soft_get('name', node.id))
    def mark_execution_condition_result_node(loop_node: Node,
                                             body_result_node: Node):
        assert body_result_node.id in loop_node.body
        assert body_result_node.soft_get('op') == 'Result'
        assert body_result_node.has_valid('internal_layer_id')
        assert len(
            loop_node.body.get_op_nodes(purpose='execution_condition')) == 0

        loop_node.output_port_map.append({
            'axis': None,
            'stride': None,
            'part_size': None,
            'start': None,
            'end': None,
            'external_port_id': -1,
            'purpose': 'execution_condition',
            'internal_layer_id': body_result_node['internal_layer_id']
        })
    def mark_current_iteration_parameter_node(loop_node: Node,
                                              body_parameter_node: Node):
        assert body_parameter_node.id in loop_node.body
        assert body_parameter_node.soft_get('op') == 'Parameter'
        assert body_parameter_node.has_valid('internal_layer_id')
        assert len(
            loop_node.body.get_op_nodes(purpose='current_iteration')) == 0

        loop_node.input_port_map.append({
            'axis': None,
            'stride': None,
            'part_size': None,
            'start': None,
            'end': None,
            'external_port_id': -1,
            'purpose': 'current_iteration',
            'internal_layer_id': body_parameter_node['internal_layer_id']
        })
    def insert_pre_processing(graph: Graph, input_node: Node,
                              node_mean_scale_values: np.array,
                              preprocessing_name: str):
        assert preprocessing_name in ['scale', 'mean']
        if node_mean_scale_values.get(preprocessing_name) is None:
            return
        user_value = node_mean_scale_values[preprocessing_name]
        value = 1 / user_value if preprocessing_name == 'scale' else user_value * (
            -1)
        optimize_value = int(preprocessing_name == 'scale')
        op = Mul if preprocessing_name == 'scale' else Add

        if all([x == optimize_value for x in value]):
            return
        assert input_node.has_valid('shape')
        features_dim_idx = get_features_dim(graph.graph['layout'],
                                            len(input_node.shape))
        assert compatible_dims(
            value.size, input_node.shape[features_dim_idx]) or value.size == 1

        shape = np.ones(len(input_node.shape), dtype=np.int64)
        shape[features_dim_idx] = value.size
        value = value.reshape(shape)

        name = input_node.soft_get('name',
                                   input_node.id) + '/' + preprocessing_name
        preprocessing = create_op_with_const_inputs(graph,
                                                    op=op,
                                                    port_value_dict={1: value},
                                                    op_attrs={'name': name})

        if input_node.is_out_port_connected(0) and len(
                input_node.out_port(0).get_destinations()) == 1:
            # There are models with pattern Parameter(uint8) -> Convert(float).
            # Adding mean/scale leads to the following:
            # Parameter(uint8) -> Mean/Scale -> Convert(float) which is incorrect.
            # To fix this mean and scale preprocessing node is inserted after Convert(float) node.
            out_node = input_node.out_port(0).get_destination().node
            convert_type = out_node.soft_get('dst_type')
            if out_node.soft_get('type') == "Convert" and (convert_type in [
                    np.float32, np.float16
            ]):
                input_node = out_node
                if convert_type != value.dtype:
                    new_value = value.astype(convert_type)
                    const_node = preprocessing.in_port(
                        1).get_connection().get_source().node
                    const_node['value'] = new_value

        for dst in input_node.out_port(0).get_destinations():
            if dst.node.soft_get('type') != 'ShapeOf':
                # After the insertion of additional operations model optimizer
                # should keep the link to the input layer. Parameter node in framework
                # should map to parameter node in IR.
                # For this reason 'fw_tensor_debug_info' should be kept in data node.
                dst.get_connection().set_source(preprocessing.out_port(0),
                                                "source")

        input_node.out_port(0).connect(preprocessing.in_port(0))
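
# Illustrative sketch of how the user mean/scale values become the constants fed to the
# inserted Add/Mul; NCHW layout and the numbers below are assumptions for the example.
import numpy as np

input_shape = np.array([1, 3, 224, 224], dtype=np.int64)
features_dim_idx = 1                      # channel axis in NCHW

mean = np.array([123.675, 116.28, 103.53])
scale = np.array([58.395, 57.12, 57.375])

add_value = mean * (-1)                   # constant for the Add inserted for 'mean'
mul_value = 1 / scale                     # constant for the Mul inserted for 'scale'

shape = np.ones(len(input_shape), dtype=np.int64)
shape[features_dim_idx] = mul_value.size  # broadcastable [1, C, 1, 1]
print(add_value.reshape(shape).shape, mul_value.reshape(shape).shape)  # (1, 3, 1, 1) twice
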
def infer_for_opset4(node: Node):
    assert len([p for p in node.in_ports().values() if not p.disconnected()]) in [3, 4], \
        "Interpolate-4 node {} must have 3 or 4 inputs".format(node.soft_get(node.name, node.id))
    assert node.has_valid('mode')
    assert node.has_valid('shape_calculation_mode')
    src_shape = node.in_port(0).data.get_shape()
    assert src_shape is not None

    input_rank = len(src_shape)

    pads_begin = correct_pad(node.soft_get('pads_begin', [0]), input_rank)
    pads_end = correct_pad(node.soft_get('pads_end', [0]), input_rank)
    node['pads_begin'] = pads_begin
    node['pads_end'] = pads_end

    if len(node.in_ports()) == 3:
        axes = list(range(0, input_rank))
    else:
        axes = node.in_port(3).get_source().data.get_value()
        assert axes is not None, \
            "Interpolate-4 node with name {} has None as 'axes' input".format(node.soft_get('name', node.id))

    axes = int64_array(axes)
    output_shape = src_shape + pads_begin + pads_end
    if node.shape_calculation_mode == 'sizes':
        dst_shape = node.in_port(1).data.get_value()
        assert dst_shape is not None
        correct_scales_using_dst_shape(node, dst_shape, src_shape, axes)
        for i, axis in enumerate(axes):
            output_shape[axis] = dst_shape[i]
    else:
        scales = node.in_port(2).data.get_value()
        assert scales is not None
        for i, axis in enumerate(axes):
            if output_shape[axis] is not dynamic_dimension and scales[
                    i] is not dynamic_dimension:
                output_shape[axis] = math.floor(scales[i] *
                                                output_shape[axis] + 1.0e-5)
            else:
                output_shape[axis] = dynamic_dimension_value

    if node.is_in_port_connected(3):
        PermuteInputs().set_input_permutation(node.in_node(3), node, 'input:0',
                                              'axis')

    node.out_port(0).data.set_shape(output_shape)
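
# The two shape_calculation_mode branches above reduce to the arithmetic below
# (illustrative shapes; pads assumed to be zero).
import math
import numpy as np

src_shape = np.array([1, 3, 100, 100], dtype=np.int64)
axes = np.array([2, 3], dtype=np.int64)

# 'sizes' mode: output dims along axes come directly from the 'sizes' input
sizes = np.array([240, 320], dtype=np.int64)
out_sizes = src_shape.copy()
out_sizes[axes] = sizes
print(out_sizes)   # [  1   3 240 320]

# 'scales' mode: output dims are floor(scale * dim + 1e-5)
scales = np.array([2.0, 0.5])
out_scales = src_shape.copy()
for i, axis in enumerate(axes):
    out_scales[axis] = math.floor(scales[i] * out_scales[axis] + 1.0e-5)
print(out_scales)  # [  1   3 200  50]
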
    def infer(node: Node):
        node_name = node.soft_get('name', node.id)
        connected_in_ports = [port for port in node.in_ports().values() if not port.disconnected()]
        num_inputs = len(connected_in_ports)
        assert node.has_valid('equation'), "Einsum node {} must contain `equation` attribute".format(node_name)
        equation = node.equation

        # parse the equation and extract input and output subscripts
        input_subscripts, output_subscript = Einsum.parse_equation(node_name, equation)

        # check that each operand has the corresponding input subscript
        assert len(input_subscripts) == num_inputs, "The number of input operands of Einsum node {} " \
                                                    "must match the number of input subscripts " \
                                                    "in `equation`".format(node_name)

        # check compatibility of dimension sizes with the same label and generate a dictionary of shapes for labels
        label_to_shape = {}
        for input_ind in range(num_inputs):
            input_shape = node.in_port(input_ind).data.get_shape()
            input_subscript = input_subscripts[input_ind]
            labels = Einsum.extract_subscript_labels(node_name, input_subscript)
            num_dims = len(input_shape)
            num_labels = len(labels)
            num_broadcasted_dims = num_dims - num_labels + 1
            dim_ind = 0
            label_ind = 0
            while label_ind < num_labels and dim_ind < num_dims:
                label = labels[label_ind]
                if label == "...":
                    sub_shape = input_shape[dim_ind:dim_ind + num_broadcasted_dims]
                    if label in label_to_shape.keys():
                        common_shape = bi_directional_shape_broadcasting(sub_shape, label_to_shape[label])
                        assert common_shape is not None, "The dimensions labeled by the ellipsis must be broadcastable " \
                                                         "for Einsum node {}".format(node_name)
                        label_to_shape[label] = common_shape
                    else:
                        label_to_shape[label] = sub_shape
                    dim_ind += num_broadcasted_dims
                else:
                    dim_size = input_shape[dim_ind]
                    sub_shape = shape_array([dim_size])
                    assert label not in label_to_shape.keys() or np.array_equal(label_to_shape[label], sub_shape), \
                        "Sizes of dimensions with the same label of Einsum node {} " \
                        "must be compatible".format(node_name)
                    label_to_shape[label] = sub_shape
                    dim_ind += 1
                label_ind += 1

        # generate output shape based on the output subscript
        output_shape = shape_array([])
        labels = Einsum.extract_subscript_labels(node_name, output_subscript)
        for label in labels:
            assert label in label_to_shape.keys(), "The label in the output subscript must appear" \
                                                   " in input subscripts in equation {} " \
                                                   "of Einsum node {}".format(equation, node_name)
            output_shape = np.ma.concatenate((output_shape, label_to_shape[label]))

        node.out_port(0).data.set_shape(output_shape)
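
# Quick cross-check of the shape logic above with np.einsum on a concrete equation
# (shapes are illustrative): the shared label 'c' is contracted, the output keeps 'a', 'b', 'd'.
import numpy as np

equation = "abc,cd->abd"
a = np.zeros((2, 3, 4))
b = np.zeros((4, 5))
print(np.einsum(equation, a, b).shape)  # (2, 3, 5)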