Code example #1
    def replace_pattern(self, graph: Graph, match: Dict[str, Node]):
        initial_fake_quantize = match['quantize']

        # Copy the FakeQuantize: the copy will quantize to integer levels, while the
        # original node is reused to dequantize those levels back to real values.
        new_fake_quantize = initial_fake_quantize.copy_node(dict(name=initial_fake_quantize.name + '/Copy',
                                                                 stop_value_propagation=False), graph)

        # Move the original input range (ports 1 and 2) to the quantizing copy.
        initial_fake_quantize.in_port(1).get_connection().set_destination(new_fake_quantize.in_port(1))
        initial_fake_quantize.in_port(2).get_connection().set_destination(new_fake_quantize.in_port(2))

        dst_type = data_type_str_to_np(graph.graph['cmd_params'].data_type)

        # The quantized values live in [0, levels - 1].
        i_min = np.array([0.], dtype=dst_type)
        i_max = np.array([initial_fake_quantize.levels - 1.], dtype=dst_type)

        new_out_low_node = Const(graph, dict(name=initial_fake_quantize.name + '/Copy/out_low',
                                             value=i_min)).create_node()
        new_out_high_node = Const(graph, dict(name=initial_fake_quantize.name + '/Copy/out_high',
                                              value=i_max)).create_node()

        # [0, levels - 1] becomes the output range of the copy and the input range
        # of the original (now dequantizing) FakeQuantize.
        new_out_low_node.out_port(0).connect(new_fake_quantize.in_port(3))
        new_out_high_node.out_port(0).connect(new_fake_quantize.in_port(4))
        new_out_low_node.out_port(0).connect(initial_fake_quantize.in_port(1))
        new_out_high_node.out_port(0).connect(initial_fake_quantize.in_port(2))

        # Route the quantized values through a Cast back to the model's float type
        # and into the original FakeQuantize, which now only rescales them.
        cast_node = Cast(graph, dict(name=initial_fake_quantize.name + "/Convert_to_float", dst_type=dst_type,
                                     stop_value_propagation=True)).create_node()
        new_fake_quantize.out_port(0).connect(cast_node.in_port(0))
        initial_fake_quantize.in_port(0).get_connection().set_destination(new_fake_quantize.in_port(0))
        cast_node.out_port(0).connect(initial_fake_quantize.in_port(0))

        cast_node['force_precision_in_ports'] = {0: 'uint8'}
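All of the examples on this page resolve the command-line precision with data_type_str_to_np. A minimal sketch of such a helper, assuming a plain string-to-dtype lookup table (the actual Model Optimizer implementation may support more aliases and richer error handling):

import numpy as np

# Hypothetical minimal stand-in for data_type_str_to_np: map a precision
# string such as 'FP32' or 'int64' to the corresponding numpy type.
_STR_TO_NP_TYPE = {
    'FP64': np.float64, 'FP32': np.float32, 'FP16': np.float16,
    'int64': np.int64, 'int32': np.int32, 'uint8': np.uint8,
}

def data_type_str_to_np(data_type_str: str):
    if data_type_str not in _STR_TO_NP_TYPE:
        raise ValueError('Unknown data type string: {}'.format(data_type_str))
    return _STR_TO_NP_TYPE[data_type_str]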
Code example #2
    def replace_pattern(graph: Graph, match: dict):
        node = match['node']
        for in_port, precision in node.force_precision_in_ports.items():
            if in_port in node.in_ports().keys() and not node.in_port(in_port).disconnected():
                cast = Cast(graph, {'name': node.name + '/Cast_' + str(in_port),
                                    'dst_type': data_type_str_to_np(precision)}).create_node()
                node.in_port(in_port).get_connection().insert_node(cast)
Code example #3
    def replace_pattern(graph: Graph, match: dict):
        node = match['conv']
        node_name = node.soft_get('name', node.id)

        # create Reshape before convolution
        # shape = [in_shape[0], in_shape[1]/patch_stride, 1, patch_stride]
        i_shape = Shape(graph, {'name': node_name + '/Shape'}).create_node()
        shape = Cast(
            graph, {
                'name':
                node_name + '/to_float',
                'dst_type':
                data_type_str_to_np(graph.graph['cmd_params'].data_type)
            }).create_node()
        i_shape.in_port(0).connect(node.in_port(0).get_source())
        shape.in_port(0).connect(i_shape.out_port(0))

        N, H = node_to_get_shape_value_of_indices(
            shape, [0]), node_to_get_shape_value_of_indices(shape, [1])

        div = create_op_with_const_inputs(
            graph, Div, {1: float_array([node.patch_stride])},
            {'name': node_name + '/div_stride_h'})
        div.in_port(0).connect(H.out_port(0))

        concat = create_op_with_const_inputs(
            graph, Concat, {
                2: float_array([1]),
                3: float_array([node.patch_stride])
            }, {
                'name': node_name + '/concat_all_dims',
                'in_ports_count': 4,
                'axis': 0
            })
        concat.in_port(0).connect(N.out_port(0))
        concat.in_port(1).connect(div.out_port(0))

        reshape_pattern = Cast(graph, {
            'name': node_name + '/to_int',
            'dst_type': np.int64
        }).create_node()
        concat.out_port(0).connect(reshape_pattern.in_port(0))

        reshape_in = Reshape(graph, {
            'name': node_name + '/reshape_in'
        }).create_node()
        reshape_in.in_port(1).connect(reshape_pattern.out_port(0))

        # create Reshape after Convolution
        reshape_out = create_op_node_with_second_input(
            graph, Reshape, int64_array([0, -1]),
            {'name': node_name + '/reshape_out'})

        # connect input_reshape_node
        source = node.in_port(0).get_source()
        node.in_port(0).get_connection().set_source(reshape_in.out_port(0))
        reshape_in.in_port(0).connect(source)
        # connect output_reshape_node
        node.out_port(0).get_connection().set_source(reshape_out.out_port(0))
        node.out_port(0).connect(reshape_out.in_port(0))
Code example #4
    def find_and_replace_pattern(self, graph: Graph):
        ir_data_type = data_type_str_to_np(graph.graph['cmd_params'].data_type)

        for node in graph.get_op_nodes():
            if node.op in operations_with_data_type_attributes:
                dst_type = operations_with_data_type_attributes[
                    node.op]['attr_name']
                node_name = node.soft_get('name', node.id)
                assert node.has_valid(
                    dst_type), '{} attribute is missing for node {}'.format(
                        dst_type, node_name)

                final_type = None
                if node[dst_type] == np.float64:
                    final_type = np.float32

                if node[dst_type] in [np.float32, np.float64] and ir_data_type == np.float16 and \
                        not node.has_and_set('returns_shape_value'):
                    final_type = np.float16
                elif node.has_and_set(
                        'returns_shape_value') and node.dst_type == np.float16:
                    # fall back to FP32 for all nodes with shape values
                    final_type = np.float32

                if final_type is not None:
                    log.warning(
                        'Change data type from {} to {} for node {}'.format(
                            node[dst_type], final_type, node_name))
                    node[dst_type] = final_type

                if final_type == np.float16:
                    assert_that_is_castable_to_fp16(node)
Code example #5
File: const.py Project: zkzt/openvino
    def __init__(self, graph, attrs: dict = None):
        super().__init__(
            graph, {
                'type': self.op,
                'op': self.op,
                'version': 'opset1',
                'infer': self.infer,
                'value': None,
                'shape': None,
                'data_type': None,
                'out_ports_count': 1,
                'type_infer': self.type_infer,
            }, attrs)
        if not isinstance(self.attrs['value'], np.ndarray):
            self.attrs['value'] = np.array(self.attrs['value'])

        self.attrs['shape'] = np.array(self.attrs['value'].shape,
                                       dtype=np.int64)
        if 'force_shape' in self.attrs and self.attrs[
                'force_shape'] is not None:
            self.attrs['shape'] = np.array(self.attrs['force_shape'],
                                           dtype=np.int64)

        self.attrs['data_type'] = self.attrs['value'].dtype
        if 'force_type' in self.attrs and self.attrs['force_type'] is not None:
            self.attrs['data_type'] = data_type_str_to_np(
                self.attrs['force_type'])
Code example #6
    def replace_sub_graph(self, graph: Graph, match: [dict, SubgraphMatch]):
        cmp = match['complex']
        complex_abs = match['abs']
        complex_abs_name = complex_abs.soft_get('name', complex_abs.id)

        power_type = data_type_str_to_np(graph.graph['cmd_params'].data_type)

        pow0 = create_op_with_const_inputs(
            graph, Pow, {1: power_type(2.0)},
            {'name': complex_abs_name + '/real_part_squared'})
        pow1 = create_op_with_const_inputs(
            graph, Pow, {1: power_type(2.0)},
            {'name': complex_abs_name + '/imag_part_squared'})

        cmp.in_port(0).get_connection().set_destination(pow0.in_port(0))
        cmp.in_port(1).get_connection().set_destination(pow1.in_port(0))

        add = Add(graph, {
            'name': complex_abs_name + '/squared_abs'
        }).create_node([pow0, pow1])
        sqrt = create_op_with_const_inputs(graph, Pow, {1: power_type(0.5)},
                                           {})
        add.out_port(0).connect(sqrt.in_port(0))

        complex_abs.out_port(0).get_connection().set_source(sqrt.out_port(0))

        rename_nodes([(complex_abs, complex_abs_name + '/to_be_removed'),
                      (sqrt, complex_abs_name)])
Code example #7
def calculate_prior_box_value(value: Node, value_to_div: Port,
                              value_to_add: Port):
    """
    :param value: Node with the value. A node with op='Split' is expected here
    :param value_to_div: Output port with the values to be divided by 2
    :param value_to_add: Output port with the values to be added to the values from the value_to_div port
    :return: Sub and Add nodes

    The sub-graph can be described by formulas:
    min = value[value_to_add] - (value[value_to_div] / 2)
    max = value[value_to_add] + (value[value_to_div] / 2)
    """
    graph = value.graph
    dtype = data_type_str_to_np(graph.graph['cmd_params'].data_type)
    _min = Sub(graph, dict(name=value.name + '/Sub')).create_node()
    div = create_op_node_with_second_input(graph,
                                           Div,
                                           np.array([2], dtype=dtype),
                                           op_attrs=dict(name=value.name +
                                                         '/Div'))
    div.in_port(0).connect(value_to_div)
    _min.in_port(0).connect(value_to_add)
    _min.in_port(1).connect(div.out_port(0))

    _max = Add(graph, dict(name=value.name + '/Add')).create_node()
    _max.in_port(0).connect(div.out_port(0))
    _max.in_port(1).connect(value_to_add)

    return _min, _max
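The formulas in the docstring are easy to sanity-check with plain numpy; the values below are hypothetical stand-ins for the tensors arriving at the two output ports:

import numpy as np

sizes = np.array([4.0, 8.0])      # values from the value_to_div port
centers = np.array([10.0, 20.0])  # values from the value_to_add port
box_min = centers - sizes / 2     # -> [ 8., 16.]
box_max = centers + sizes / 2     # -> [12., 24.]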
Code example #8
def build_range_test_graphs(start=0,
                            limit=10,
                            delta=1,
                            dst_type_str='FP16',
                            src_type_str='FP32',
                            returns_shape_value=None):
    nodes = {
        **valued_const_with_data('start', float32_array(start)),
        **valued_const_with_data('limit', float32_array(limit)),
        **valued_const_with_data('delta', float32_array(delta)),
        **regular_op_with_empty_data(
            'range', {
                'type': 'Range',
                'op': 'Range',
                'returns_shape_value': returns_shape_value,
                'output_type': data_type_str_to_np(src_type_str),
                'infer': Range.infer
            }),
        **result('res'),
    }

    nodes_ref = deepcopy(nodes)
    nodes_ref.update({
        **regular_op_with_empty_data(
            'range', {
                'type': 'Range',
                'op': 'Range',
                'returns_shape_value': returns_shape_value,
                'output_type': data_type_str_to_np(dst_type_str),
                'infer': Range.infer
            }),
    })

    edges = [
        *connect('start', '0:range'),
        *connect('limit', '1:range'),
        *connect('delta', '2:range'),
        *connect('range', 'res'),
    ]
    graph = build_graph(nodes, edges)
    graph_ref = build_graph(nodes_ref, edges)

    graph = partial_infer(graph)

    graph.graph['cmd_params'].data_type = dst_type_str
    convert_blobs(graph, dst_type_str)
    return graph, graph_ref
Code example #9
    def replace_pattern(self, graph: Graph, match: Dict[str, Node]):
        fake_quantize = match['fake_quantize']

        dst_type = match['const'].value.dtype
        if np.issubdtype(dst_type, np.floating):
            dst_type = data_type_str_to_np(graph.graph['cmd_params'].data_type)

        self.quantize_data(fake_quantize, dst_type)
        self.dequantize_data(fake_quantize, dst_type)
Code example #10
    def type_infer(node):
        # The output data type of a dynamic Power is complicated to predict,
        # so we default to the float data type when no actual value is available.
        value = node.out_port(0).data.get_value()
        if value is not None:
            node.out_port(0).set_data_type(value.dtype)
        else:
            node.out_port(0).set_data_type(
                data_type_str_to_np(node.graph.graph['cmd_params'].data_type))
Code example #11
    def find_and_replace_pattern(self, graph: Graph):
        for dequantize_node in graph.get_op_nodes(op='DequantizeLinear'):
            node_name = dequantize_node.soft_get('name', dequantize_node.id)
            axis = dequantize_node.soft_get('axis', None)
            scale_y_shape = dequantize_node.in_port(1).data.get_shape()
            model_data_type = data_type_str_to_np(
                graph.graph['cmd_params'].data_type)
            cast = Cast(graph, {
                'dst_type': model_data_type,
                'name': node_name + '/Cast'
            }).create_node()
            dequantize_node.in_port(0).get_connection().set_destination(
                cast.in_port(0))
            mul = Mul(graph, {}).create_node()

            is_second_port_connected = dequantize_node.is_in_port_connected(2)
            if is_second_port_connected:
                sub = Sub(graph, {'name': node_name + '/Sub'}).create_node()
                cast.out_port(0).connect(sub.in_port(0))
                dequantize_node.in_port(2).get_connection().set_destination(
                    sub.in_port(1))
                sub.out_port(0).connect(mul.in_port(0))
            else:
                cast.out_port(0).connect(mul.in_port(0))

            dequantize_node.in_port(1).get_connection().set_destination(
                mul.in_port(1))
            dequantize_node.out_port(0).get_connection().set_source(
                mul.out_port(0))
            rename_nodes([(dequantize_node, node_name + '/TBD'),
                          (mul, node_name)])

            assert scale_y_shape is not None
            if axis is not None and len(
                    scale_y_shape) > 0 and scale_y_shape[0] > 1:
                input_shape = cast.in_port(0).data.get_shape()
                target_shape = np.ones(len(input_shape), np.int64)
                target_shape[axis] = input_shape[axis]

                mul_reshape = create_op_with_const_inputs(
                    graph, Reshape, {1: int64_array(target_shape)},
                    {'name': node_name + '/Reshape/Mul'})
                mul.in_port(1).get_connection().set_destination(
                    mul_reshape.in_port(0))
                mul_reshape.out_port(0).connect(mul.in_port(1))

                if is_second_port_connected:
                    sub_reshape = create_op_with_const_inputs(
                        graph, Reshape, {1: int64_array(target_shape)},
                        {'name': node_name + '/Reshape/Sub'})
                    sub.in_port(1).get_connection().set_destination(
                        sub_reshape.in_port(0))
                    sub_reshape.out_port(0).connect(sub.in_port(1))
Code example #12
    def replace_sub_graph(self, graph: Graph, match: [dict, SubgraphMatch]):
        node = match['cast']
        if node.dst_type == np.float64:
            log.warning('Change data type from {} to {} for node {}'.format(
                node.dst_type, np.float32, node.name))
            node.dst_type = np.float32

        ir_data_type = data_type_str_to_np(
            node.graph.graph['cmd_params'].data_type)
        if node.dst_type == np.float32 and ir_data_type == np.float16:
            log.warning('Change data type from {} to {} for node {}'.format(
                node.dst_type, ir_data_type, node.name))
            node.dst_type = ir_data_type
Code example #13
    def replace_pattern(self, graph: Graph, match: Dict[str, Node]):
        fake_quantize = match['fake_quantize']

        dst_type = match['const'].value.dtype
        if np.issubdtype(dst_type, np.floating):
            dst_type = data_type_str_to_np(graph.graph['cmd_params'].data_type)

        quantized_type, mode = None, None
        for quantization_levels in sorted(self.QUANTIZATION_MAP):
            if quantization_levels >= fake_quantize.levels:
                quantized_type, mode = self.QUANTIZATION_MAP[quantization_levels]
                break

        self.quantize_data(fake_quantize, dst_type, quantized_type, mode)
        self.dequantize_data(fake_quantize, dst_type, quantized_type)
Code example #14
    def find_and_replace_pattern(self, graph: Graph):
        for node in graph.get_op_nodes(op='Cast'):
            if node.dst_type == np.float64:
                log.warning('Change data type from {} to {} for node {}'.format(node.dst_type, np.float32, node.name))
                node.dst_type = np.float32

            ir_data_type = data_type_str_to_np(node.graph.graph['cmd_params'].data_type)
            if node.dst_type == np.float32 and ir_data_type == np.float16 and not node.has_and_set('returns_shape_value'):
                log.warning('Change data type from {} to {} for node {}'.format(node.dst_type, ir_data_type, node.name))
                node.dst_type = ir_data_type
            elif node.has_and_set('returns_shape_value') and node.dst_type == np.float16:
                # fall back to FP32 for all Convert nodes with shape values
                log.warning('Change data type from {} to {} for node {} in ShapeOf subgraph'.
                            format(node.dst_type, np.float32, node.name))
                node.dst_type = np.float32
Code example #15
    def find_and_replace_pattern(self, graph: Graph):
        for node in graph.get_op_nodes(op='ThresholdedRelu'):
            name = node.soft_get('name', node.id)

            greater = create_op_with_const_inputs(graph, Greater, {1: float_array([node.alpha])})
            greater.in_port(0).connect(node.in_port(0).get_source())
            float_greater = Cast(graph,
                                 {'dst_type': data_type_str_to_np(graph.graph['cmd_params'].data_type)}).create_node()
            greater.out_port(0).connect(float_greater.in_port(0))

            mul = Mul(graph, {}).create_node()
            node.out_port(0).get_connection().set_source(mul.out_port(0))
            mul.in_port(0).connect(node.in_port(0).get_source())
            mul.in_port(1).connect(float_greater.out_port(0))

            rename_nodes([(node, name + '/TBR'), (mul, name)])
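The decomposition above implements ThresholdedRelu(x) = x * (x > alpha): Greater produces the boolean mask, Cast converts it to the model's float type, and Mul applies it to the input. A quick numpy check with a hypothetical alpha:

import numpy as np

x = np.array([-2.0, 0.5, 1.5, 3.0], dtype=np.float32)
alpha = 1.0
mask = (x > alpha).astype(np.float32)  # the Greater -> Cast pair
out = x * mask                         # the final Mul; -> [0., 0., 1.5, 3.]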
Code example #16
    def find_and_replace_pattern(self, graph: Graph):
        for complex_abs in graph.get_op_nodes(op='ComplexAbs'):
            complex_abs_name = complex_abs.soft_get('name', complex_abs.id)
            power_type = data_type_str_to_np(graph.graph['cmd_params'].data_type)

            squared = create_op_with_const_inputs(graph, Pow, {1: power_type(2.0)},
                                                  {'name': complex_abs_name + '/squared_parts'})
            complex_abs.in_port(0).get_connection().set_destination(squared.in_port(0))
            sum = create_op_with_const_inputs(graph, ReduceSum, {1: int64_array(-1)},
                                              {'name': complex_abs_name + '/squared_abs'},
                                              squared)
            sqrt = create_op_with_const_inputs(graph, Pow, {1: power_type(0.5)}, {}, sum)

            complex_abs.out_port(0).get_connection().set_source(sqrt.out_port(0))

            rename_nodes([(complex_abs, complex_abs_name + '/to_be_removed'), (sqrt, complex_abs_name)])
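This variant squares the real and imaginary parts with a single Pow, sums them over the last axis with ReduceSum, and takes the square root with Pow(0.5), i.e. |a + bi| = (a^2 + b^2) ** 0.5. A plain numpy equivalent on a tensor whose last axis holds the two parts (hypothetical values):

import numpy as np

parts = np.array([[3.0, 4.0], [5.0, 12.0]], dtype=np.float32)  # (real, imag) pairs
abs_values = np.sum(parts ** 2.0, axis=-1) ** 0.5              # -> [ 5., 13.]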
Code example #17
    def find_and_replace_pattern(self, graph: Graph):
        ir_data_type = data_type_str_to_np(graph.graph['cmd_params'].data_type)

        for node in graph.get_op_nodes(op='RandomUniform'):
            assert node.has_valid('output_type')

            if node.has_and_set('returns_shape_value'):
                continue

            if node.output_type != ir_data_type and np.issubdtype(
                    node.output_type, np.floating):
                node_name = node.soft_get('name', node.id)
                convert_node = Cast(graph, {
                    'name': node_name + "/cast",
                    'dst_type': ir_data_type
                }).create_node()
                node.out_port(0).get_connection().insert_node(convert_node)
Code example #18
    def replace_op(self, graph: Graph, node: Node):
        if node.has_and_set('inputs_preprocessed'):
            log.debug('Node "{}" has already been preprocessed'.format(
                node.soft_get('name')))
            return []
        # reshape tensor with batch indices to 2d
        unsqueeze_node = create_op_node_with_second_input(
            graph, Unsqueeze, int64_array([1]),
            {'name': node.name + '/Unsqueeze'}, node.in_node(2))

        convert_node = Cast(
            graph, {
                'name':
                unsqueeze_node.name + '/ToFloat',
                'dst_type':
                data_type_str_to_np(graph.graph['cmd_params'].data_type)
            }).create_node()

        convert_node.in_port(0).connect(unsqueeze_node.out_port(0))

        concat_op = Concat(
            graph, {
                'axis': 1,
                'name': node.name + '/concat_batch_indices_and_boxes',
                'in_ports_count': 2
            })
        concat_node = concat_op.create_node([convert_node, node.in_node(1)])

        # do not remove edge with crop_size because it is needed in the partial infer
        graph.remove_edge(node.in_node(1).id, node.id)

        # the input to the CropAndResize contains box coordinates in the YXYX layout, but the IE ROIPooling layer
        # expects coordinates in the XYXY layout, so a convolution is added here to swap the coordinates
        swapped_box_coordinates_node = add_convolution_to_swap_xy_coordinates(
            graph, concat_node, 5)

        # reshape the locations tensor to 2D so it can be passed to Eltwise, which will be converted to ScaleShift
        reshape_2d_node = create_op_node_with_second_input(
            graph, Reshape, int64_array([-1, 5]),
            dict(name=swapped_box_coordinates_node.id + '/reshape_2d_'),
            swapped_box_coordinates_node)
        graph.create_edge(reshape_2d_node, node, 0, 1)

        # do not replace any output edge
        return []
Code example #19
File: common.py Project: zoeysgithub/openvino
def convert_inputs_of_specific_ops(graph: Graph):
    type_port = {'Broadcast': {1: 'int64', 2: 'int64'},
                 'ConvolutionBackpropData': {2: 'int64'},
                 'Deconvolution': {2: 'int64'},
                 'Gather': {2: 'int64'},
                 'GroupConvolutionBackpropData': {2: 'int64'},
                 'Interpolate': {1: 'int64'},
                 'LRN': {1: 'int64'},
                 'NonMaxSuppression': {2: 'int64'},
                 'NormalizeL2': {1: 'int64'},
                 'OneHot': {1: 'int64'},
                 'Pad': {1: 'int64', 2: 'int64'},
                 'PriorBox': {0: 'int64', 1: 'int64'},
                 'PriorBoxClustered': {0: 'int64', 1: 'int64'},
                 'ReduceLogicalAnd': {1: 'int64'},
                 'ReduceLogicalOr': {1: 'int64'},
                 'ReduceMax': {1: 'int64'},
                 'ReduceMean': {1: 'int64'},
                 'ReduceMin': {1: 'int64'},
                 'ReduceProd': {1: 'int64'},
                 'ReduceSum': {1: 'int64'},
                 'Reshape': {1: 'int64'},
                 'Squeeze': {1: 'int64'},
                 'StridedSlice': {1: 'int64', 2: 'int64', 3: 'int64'},
                 'Split': {1: 'int64'},
                 'Tile': {1: 'int64'},
                 'Transpose': {1: 'int64'},
                 'Unsqueeze': {1: 'int64'},
                 'VariadicSplit': {1: 'int64', 2: 'int64'},
                 }

    for node in graph.get_op_nodes():
        if node.soft_get('type') in type_port:
            ports_to_update = type_port[node.soft_get('type')]
            for port_id, precision in ports_to_update.items():
                if port_id in node.in_ports() and not node.in_port(port_id).disconnected():
                    log.debug('Converting value for the input port "{}" of op "{}" to "{}".'
                              ''.format(port_id, node.soft_get('name', node.id), precision))
                    in_port = node.in_port(port_id)
                    np_type = data_type_str_to_np(precision)
                    if in_port.get_source().node.type == 'Const':
                        convert_const_node_value_type(node.in_port(port_id).get_source().node, np_type)
                    else:
                        in_port.get_connection().insert_node(Cast(graph, {'dst_type': np_type}).create_node())
Code example #20
    def replace_op(self, graph: Graph, node: Node):
        node_name = node.soft_get('name', node.id)
        model_data_type = data_type_str_to_np(graph.graph['cmd_params'].data_type)
        cast = Cast(graph, {'dst_type': model_data_type, 'name': node_name + '/Cast'}).create_node()
        node.in_port(0).get_connection().set_destination(cast.in_port(0))
        mul = Mul(graph, {}).create_node()

        if node.is_in_port_connected(2):
            sub = Sub(graph, {'name': node_name + '/Sub'}).create_node()
            cast.out_port(0).connect(sub.in_port(0))
            node.in_port(2).get_connection().set_destination(sub.in_port(1))
            sub.out_port(0).connect(mul.in_port(0))
        else:
            cast.out_port(0).connect(mul.in_port(0))

        node.in_port(1).get_connection().set_destination(mul.in_port(1))
        rename_nodes([(node, node_name + '/TBD'), (mul, node_name)])

        return [mul.id]
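The sub-graph built above computes the standard DequantizeLinear formula y = (x - x_zero_point) * x_scale, where the Sub branch exists only when the zero-point input (port 2) is connected. A numpy check with hypothetical quantized values:

import numpy as np

x = np.array([0, 128, 255], dtype=np.uint8)          # quantized input (port 0)
x_scale = np.float32(0.5)                            # scale (port 1)
x_zero_point = np.int32(128)                         # zero point (port 2)
y = (x.astype(np.float32) - x_zero_point) * x_scale  # -> [-64., 0., 63.5]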
Code example #21
File: common.py Project: srinivasdasu24/dldt
def convert_outputs_of_specific_ops(graph: Graph):
    type_port = {
        'ShapeOf': {
            0: 'int32'
        },
        'NonMaxSuppression': {
            0: 'int32'
        },
    }

    for node in graph.get_op_nodes():
        if node.soft_get('type') in type_port:
            ports_to_update = type_port[node.soft_get('type')]
            for port_id, precision in ports_to_update.items():
                if port_id in node.out_ports():
                    log.debug(
                        'Insert Convert after op "{}" to type "{}"'.format(
                            node.soft_get('name', node.id), precision))
                    node.out_port(port_id).get_connection().insert_node(
                        Cast(graph, {
                            'dst_type': data_type_str_to_np(precision)
                        }).create_node())
Code example #22
def build_cast_test_graphs(input_data, dst_type_str='FP16'):
    nodes = {
        **valued_const_with_data('input', float32_array(input_data)),
        **regular_op_with_empty_data(
            'cast', {
                'type': 'Convert',
                'op': 'Cast',
                'dst_type': np.float32,
                'infer': Cast.infer
            }),
        **result('res'),
    }

    nodes_ref = deepcopy(nodes)
    nodes_ref.update({
        **regular_op_with_empty_data(
            'cast', {
                'type': 'Convert',
                'op': 'Cast',
                'dst_type': data_type_str_to_np(dst_type_str),
                'infer': Cast.infer
            }),
    })

    edges = [
        *connect('input', 'cast'),
        *connect('cast', 'res'),
    ]
    graph = build_graph(nodes, edges)
    graph_ref = build_graph(nodes_ref, edges)

    graph = partial_infer(graph)

    graph.graph['cmd_params'].data_type = dst_type_str
    convert_blobs(graph, dst_type_str)
    return graph, graph_ref
Code example #23
    def type_infer(node: Node):
        if node.has_valid('dst_type'):
            node.out_port(0).set_data_type(node.dst_type)
        else:
            node.out_port(0).set_data_type(
                data_type_str_to_np(node.graph.graph['cmd_params'].data_type))
Code example #24
    def replace_sub_graph(self, graph: Graph, match: dict):
        seq_len_tf = match['seq_len']
        transpose_tf = match['transpose']
        ctc_greedy_decoder_tf = match['ctc_greedy_decoder']
        cast_tf = match['cast']
        ctc_loss_tf = match['ctc_loss']
        sparse_to_dense_tf = match['sparse_to_dense']

        output_sparse_to_dense_name = sparse_to_dense_tf.soft_get(
            'name', sparse_to_dense_tf.id)
        output_ctc_loss_name = ctc_loss_tf.soft_get('name', ctc_loss_tf.id)
        ctc_greedy_decoder_tf_name = ctc_greedy_decoder_tf.soft_get(
            'name', ctc_greedy_decoder_tf.id)

        log.debug(
            'Found CTCLossFrontReplacer pattern after {} with name {}'.format(
                ctc_greedy_decoder_tf.op, ctc_greedy_decoder_tf.name))

        # create a sequence mask node and a sub-graph transforming the mask into a sequence length, and connect it to consumers
        seq_len_tf_shape = seq_len_tf.soft_get('shape', None)
        if seq_len_tf_shape is None or len(seq_len_tf_shape) != 2:
            raise Error(
                'The sequence length that is the second input to the CTCGreedyDecoder node "{}"'
                ' must be specified in a mask format.'.format(
                    ctc_greedy_decoder_tf_name))
        log.error(
            'The format of input sequence length has been changed to a mask format',
            extra={'is_warning': True})
        seq_len_tf_type = seq_len_tf.soft_get('data_type', None)
        seq_len_tf_name = seq_len_tf.soft_get('name', seq_len_tf.id)
        seq_mask_placeholder = Parameter(
            graph, {
                'name': seq_len_tf_name,
                'shape': seq_len_tf_shape,
                'data_type': seq_len_tf_type
            }).create_node()
        reduce_to_seq_len_node = create_op_with_const_inputs(
            graph, ReduceSum, {1: np.array(1, dtype=np.int32)}, {
                'name': seq_len_tf_name + '/ReduceToSeqLen',
                'keep_dims': False
            })
        reduce_to_seq_len_node.in_port(0).connect(
            seq_mask_placeholder.out_port(0))
        seq_len_tf.out_port(0).get_connection().set_source(
            reduce_to_seq_len_node.out_port(0))

        cast_fp_type = data_type_str_to_np(graph.graph['cmd_params'].data_type)
        casted_seq_mask_node = Cast(graph, {
            'name': seq_len_tf_name + '/CastToFP32',
            'dst_type': cast_fp_type
        }).create_node()
        casted_seq_mask_node.in_port(0).connect(
            seq_mask_placeholder.out_port(0))
        permuted_casted_seq_mask = create_op_with_const_inputs(
            graph, Transpose, {1: int64_array([1, 0])},
            {'name': seq_len_tf_name + '/Permute'})
        permuted_casted_seq_mask.in_port(0).connect(
            casted_seq_mask_node.out_port(0))
        rename_nodes([(seq_len_tf, seq_len_tf_name + '/AbandonedName'),
                      (seq_mask_placeholder, seq_len_tf_name)])

        # create CTCGreedyDecoder node and set mask node
        ctc_merge_repeated_i = ctc_greedy_decoder_tf.soft_get(
            'ctc_merge_repeated', ctc_greedy_decoder_tf.id)
        ctc_greedy_decoder = CTCGreedyDecoderOp(
            graph, {
                'name': output_sparse_to_dense_name,
                'ctc_merge_repeated': ctc_merge_repeated_i
            }).create_node()
        ctc_greedy_decoder.in_port(1).connect(
            permuted_casted_seq_mask.out_port(0))
        rename_nodes([(sparse_to_dense_tf,
                       output_sparse_to_dense_name + '/AbandonedName'),
                      (ctc_greedy_decoder, output_sparse_to_dense_name)])

        # create CTCLoss node and set attributes
        assert ctc_loss_tf.has_valid('preprocess_collapse_repeated'), \
            'The CTCLoss node "{}" misses "preprocess_collapse_repeated" attribute'.format(output_ctc_loss_name)
        assert ctc_loss_tf.has_valid('ctc_merge_repeated'), \
            'The CTCLoss node "{}" misses "ctc_merge_repeated" attribute'.format(output_ctc_loss_name)
        assert ctc_loss_tf.has_valid('unique'), \
            'The CTCLoss node "{}" misses "unique" attribute'.format(output_ctc_loss_name)
        preprocess_collapse_repeated = ctc_loss_tf.preprocess_collapse_repeated
        ctc_merge_repeated = ctc_loss_tf.ctc_merge_repeated
        unique = ctc_loss_tf.unique
        ctc_loss = CTCLoss(
            graph, {
                'name': output_ctc_loss_name,
                'preprocess_collapse_repeated': preprocess_collapse_repeated,
                'ctc_merge_repeated': ctc_merge_repeated,
                'unique': unique
            }).create_node()
        rename_nodes([(ctc_loss_tf, output_ctc_loss_name + '/AbandonedName'),
                      (ctc_loss, output_ctc_loss_name)])

        # connect logits
        ctc_greedy_decoder_tf.in_port(0).get_connection().set_destination(
            ctc_greedy_decoder.in_port(0))
        ctc_loss.in_port(0).disconnect()
        transpose_tf.in_port(0).get_connection().add_destination(
            ctc_loss.in_port(0))

        # connect logit lengths
        ctc_greedy_decoder_tf.in_port(1).disconnect()
        ctc_loss.in_port(1).connect(reduce_to_seq_len_node.out_port(0))

        # connect labels to ctc_loss
        squeeze_op = create_op_with_const_inputs(graph, Squeeze,
                                                 {1: int64_array([2, 3])})
        cast_labels_op = Cast(
            graph, {
                'name': output_sparse_to_dense_name + '/CastLabels',
                'dst_type': np.int32
            }).create_node()
        squeeze_op.in_port(0).connect(ctc_greedy_decoder.out_port(0))
        cast_labels_op.in_port(0).connect(squeeze_op.out_port(0))
        ctc_loss.in_port(2).connect(cast_labels_op.out_port(0))

        # connect label lengths
        equal_op = create_op_with_const_inputs(
            graph, Equal, {1: np.array([-1], dtype=np.int32)},
            {'name': output_sparse_to_dense_name + '/Equal'})
        equal_op.in_port(0).connect(cast_labels_op.out_port(0))
        labels_shape_op = Shape(
            graph, {
                'name': output_sparse_to_dense_name + '/ShapeOf'
            }).create_node()
        labels_shape_op.in_port(0).connect(equal_op.out_port(0))
        broadcast_one = create_op_with_const_inputs(
            graph, Broadcast, {0: np.array([1], dtype=np.int32)}, {
                'mode': 'numpy',
                'name': output_sparse_to_dense_name + '/One'
            })
        broadcast_one.in_port(1).connect(labels_shape_op.out_port(0))
        broadcast_zero = create_op_with_const_inputs(
            graph, Broadcast, {0: np.array([0], dtype=np.int32)}, {
                'mode': 'numpy',
                'name': output_sparse_to_dense_name + '/Zero'
            })
        broadcast_zero.in_port(1).connect(labels_shape_op.out_port(0))

        select_node = Select(graph, {
            'name': output_sparse_to_dense_name + '/Select'
        }).create_node()
        select_node.in_port(0).connect(equal_op.out_port(0))
        select_node.in_port(1).connect(broadcast_zero.out_port(0))
        select_node.in_port(2).connect(broadcast_one.out_port(0))
        label_length_node = create_op_with_const_inputs(
            graph,
            ReduceSum, {1: int64_array([1])},
            op_attrs={
                'name': output_sparse_to_dense_name + '/LabelLength',
                'keep_dims': False
            })
        label_length_node.in_port(0).connect(select_node.out_port(0))
        ctc_loss.in_port(3).connect(label_length_node.out_port(0))

        # set source for output of new sub-graph and remove old nodes
        ctc_loss_tf.out_port(0).get_connection().set_source(
            ctc_loss.out_port(0))
        graph.remove_nodes_from([
            ctc_greedy_decoder_tf.id, ctc_loss_tf.id, cast_tf.id,
            sparse_to_dense_tf.id
        ])
Code example #25
def replace_resize(graph: Graph, resize: Node):
    log.debug("Converting of ONNX Resize-11 to Interpolate-4 "
              "is triggered for node {}.".format(
                  resize.soft_get('name', resize.id)))

    input_shape = resize.in_port(0).data.get_shape()
    input_rank = len(input_shape)
    resize_name = resize.soft_get('name', resize.id)
    if input_rank not in {4, 5}:
        log.warning(
            'The input shape is not 4D or 5D for op with name {}'.format(
                resize_name))
        return

    num_of_inputs = len([
        port for port in resize.in_ports().values() if not port.disconnected()
    ])
    assert num_of_inputs in {3, 4}, \
        "Number of inputs of ONNXResize (with name {}) should be equal to 3 or 4".format(resize_name)

    assert resize.soft_get('coordinate_transformation_mode') != 'tf_crop_and_resize', \
        'Mode tf_crop_and_resize is not supported for op {} with name {}'.format(resize.op, resize_name)

    layout = graph.graph['layout']

    if input_rank == 4:
        begin_dim = get_height_dim(layout, input_rank)
        end_dim = get_width_dim(layout, input_rank) + 1
    else:
        begin_dim = get_depth_dim(layout, input_rank)
        end_dim = get_width_dim(layout, input_rank) + 1

    sizes_ss = create_op_with_const_inputs(
        graph, StridedSlice, {
            1: int64_array([begin_dim]),
            2: int64_array([end_dim]),
            3: int64_array([1])
        }, {
            'name': resize_name + '/StridedSlice_sizes',
            'begin_mask': int64_array([1]),
            'end_mask': int64_array([1]),
            'new_axis_mask': int64_array([0]),
            'shrink_axis_mask': int64_array([0]),
            'ellipsis_mask': int64_array([0])
        })
    scales_ss = create_op_with_const_inputs(
        graph, StridedSlice, {
            1: int64_array([begin_dim]),
            2: int64_array([end_dim]),
            3: int64_array([1])
        }, {
            'name': resize_name + '/StridedSlice_scales',
            'begin_mask': int64_array([1]),
            'end_mask': int64_array([1]),
            'new_axis_mask': int64_array([0]),
            'shrink_axis_mask': int64_array([0]),
            'ellipsis_mask': int64_array([0])
        })
    axes_node = Const(
        graph, {
            'name': resize_name + '/axis',
            'value': int64_array(np.arange(begin_dim, end_dim))
        }).create_node()

    shape_calculation_mode = 'scales' if num_of_inputs == 3 else 'sizes'

    interpolate_node = Interpolate(
        graph, {
            'version': 'opset4',
            'mode': convert_mode(resize.mode),
            'coordinate_transformation_mode':
            resize.coordinate_transformation_mode,
            'cube_coeff': resize.cube_coeff,
            'nearest_mode': resize.nearest_mode,
            'pads_begin': int64_array([0]),
            'pads_end': int64_array([0]),
            'antialias': 0,
            'shape_calculation_mode': shape_calculation_mode,
            'in_ports_count': 4
        }).create_node()

    axes_node.out_port(0).connect(interpolate_node.in_port(3))
    shape_of = Shape(graph, {'name': resize_name + '/ShapeOf'}).create_node()

    add_node = create_op_with_const_inputs(graph, Add,
                                           {1: float_array([1.0e-5])},
                                           {'name': resize_name + '/Add'})

    input_data_type = data_type_str_to_np(graph.graph['cmd_params'].data_type)

    if num_of_inputs == 3:
        cast_shape_to_float = Cast(graph, {
            'dst_type': input_data_type
        }).create_node()
        mul_node = Mul(graph, {'name': resize_name + '/Mul'}).create_node()
        shape_of.out_port(0).connect(cast_shape_to_float.in_port(0))
        cast_shape_to_float.out_port(0).connect(mul_node.in_port(0))
        cast_add_result_to_int = Cast(graph, {
            'dst_type': np.int64
        }).create_node()
        floor_node = Floor(graph, {
            'name': resize_name + '/Floor'
        }).create_node()
        mul_node.out_port(0).connect(add_node.in_port(0))
        add_node.out_port(0).connect(floor_node.in_port(0))
        floor_node.out_port(0).connect(cast_add_result_to_int.in_port(0))
        cast_add_result_to_int.out_port(0).connect(sizes_ss.in_port(0))
        sizes_ss.out_port(0).connect(interpolate_node.in_port(1))
        scales_ss.out_port(0).connect(interpolate_node.in_port(2))

        connection_of_resize_input = resize.in_port(0).get_connection()
        connection_of_resize_input.set_destination(interpolate_node.in_port(0))

        connection_of_scales = resize.in_port(2).get_connection()
        connection_of_scales.set_destination(scales_ss.in_port(0))

        connection_of_resize_input.get_source().connect(shape_of.in_port(0))
        connection_of_scales.get_source().connect(mul_node.in_port(1))
    else:
        cast_shape_to_float = Cast(graph, {
            'dst_type': input_data_type
        }).create_node()
        cast_sizes_to_float = Cast(graph, {
            'dst_type': input_data_type
        }).create_node()
        div_node = Div(graph, {'name': resize_name + '/Div'}).create_node()
        cast_sizes_to_float.out_port(0).connect(div_node.in_port(0))
        cast_shape_to_float.out_port(0).connect(div_node.in_port(1))
        shape_of.out_port(0).connect(cast_shape_to_float.in_port(0))
        div_node.out_port(0).connect(add_node.in_port(0))
        add_node.out_port(0).connect(scales_ss.in_port(0))
        scales_ss.out_port(0).connect(interpolate_node.in_port(2))
        sizes_ss.out_port(0).connect(interpolate_node.in_port(1))

        connection_of_resize_input = resize.in_port(0).get_connection()
        connection_of_resize_input.set_destination(interpolate_node.in_port(0))

        connection_of_sizes = resize.in_port(3).get_connection()
        connection_of_sizes.set_destination(sizes_ss.in_port(0))

        connection_of_resize_input.get_source().connect(shape_of.in_port(0))
        connection_of_sizes.get_source().connect(
            cast_sizes_to_float.in_port(0))

    rename_nodes([(resize, resize_name + '/delete'),
                  (interpolate_node, resize_name)])
    resize.out_port(0).get_connection().set_source(
        interpolate_node.out_port(0))
Code example #26
def create_ref_net_in_scales_mode(precision, input_shape, output_shape, sizes_value, scales_value, attrs):
    input_data_type = np_data_type_to_destination_type(data_type_str_to_np(precision))
    input_rank = len(input_shape)
    epsilon = np.array([1.0e-5])
    spatial_dims = spatial_dimensions(input_shape)
    begin_dim = spatial_dims[0]
    end_dim = input_rank

    spatial_scales_value = scales_value[spatial_dims]

    nodes_attrs = {
        'input': {'kind': 'op', 'type': 'Parameter'},
        'input_data': {'shape': input_shape, 'kind': 'data'},
        'shape_of': {'kind': 'op', 'type': 'ShapeOf'},
        'shape_of_data': {'shape': int64_array([input_rank]), 'kind': 'data'},
        'shape_to_float': {'kind': 'op', 'type': 'Convert', 'destination_type': input_data_type},
        'shape_to_float_data': {'shape': int64_array([input_rank]), 'kind': 'data'},
        'mul': {'kind': 'op', 'type': 'Multiply'},
        'mul_scales_const_data': {'kind': 'data', 'value': scales_value},
        'mul_scales_const': {'kind': 'op', 'type': 'Const'},
        'mul_scales_data': {'shape': int64_array([input_rank]), 'kind': 'data'},
        'mul_data': {'shape': int64_array([input_rank]), 'kind': 'data'},
        'eps_const_data': {'kind': 'data', 'value': epsilon},
        'eps_const': {'kind': 'op', 'type': 'Const'},
        'eps_data': {'shape': int64_array([1]), 'kind': 'data'},
        'add': {'kind': 'op', 'type': 'Add'},
        'add_data': {'shape': int64_array([input_rank]), 'kind': 'data'},
        'floor': {'type': 'Floor', 'kind': 'op'},
        'floor_data': {'shape': int64_array([input_rank]), 'kind': 'data'},
        'to_int': {'kind': 'op', 'type': 'Convert', 'destination_type': 'i64'},
        'to_int_data': {'shape': int64_array([input_rank]), 'kind': 'data'},
        'strided_slice': {
            'kind': 'op', 'type': 'StridedSlice', 'begin_mask': 0,
            'end_mask': 0, 'new_axis_mask': 0,
            'shrink_axis_mask': 0, 'ellipsis_mask': 0
        },
        'strided_slice_data': {'shape': int64_array([len(spatial_scales_value)]), 'kind': 'data'},
        'begin_const_data': {'kind': 'data', 'value': int64_array([begin_dim])},
        'begin_const': {'kind': 'op', 'type': 'Const'},
        'begin_data': {'shape': int64_array([1]), 'kind': 'data'},
        'end_const_data': {'kind': 'data', 'value': int64_array([end_dim])},
        'end_const': {'kind': 'op', 'type': 'Const'},
        'end_data': {'shape': int64_array([1]), 'kind': 'data'},
        'stride_const_data': {'kind': 'data', 'value': int64_array([1])},
        'stride_const': {'kind': 'op', 'type': 'Const'},
        'stride_data': {'shape': int64_array([1]), 'kind': 'data'},
        'scales_const_data': {'kind': 'data', 'value': spatial_scales_value},
        'scales_const': {'kind': 'op', 'type': 'Const'},
        'scales_data': {'shape': int64_array([len(spatial_scales_value)]), 'kind': 'data'},
        'axes_const_data': {'kind': 'data', 'value': spatial_dims},
        'axes_const': {'kind': 'op', 'type': 'Const'},
        'axes_data': {'shape': int64_array([len(spatial_dims)]), 'kind': 'data'},
        'interpolate': attrs,
        'interpolate_data': {'shape': output_shape, 'kind': 'data'},
        'result': {'kind': 'op', 'type': 'Result'},
    }
    edges = [
        ('input', 'input_data'),
        ('input_data', 'interpolate', {'in': 0, 'out': 0}),
        ('input_data', 'shape_of', {'in': 0, 'out': 0}),
        ('shape_of', 'shape_of_data'),
        ('shape_of_data', 'shape_to_float'),
        ('shape_to_float', 'shape_to_float_data'),
        ('shape_to_float_data', 'mul', {'in': 0}),
        ('mul_scales_const_data', 'mul_scales_const'),
        ('mul_scales_const', 'mul_scales_data'),
        ('mul_scales_data', 'mul', {'in': 1}),
        ('mul', 'mul_data'),
        ('eps_const_data', 'eps_const'),
        ('eps_const', 'eps_data'),
        ('mul_data', 'add', {'in': 0}),
        ('eps_data', 'add', {'in': 1}),
        ('add', 'add_data'),
        ('add_data', 'floor'),
        ('floor', 'floor_data'),
        ('floor_data', 'to_int'),
        ('to_int', 'to_int_data'),
        ('to_int_data', 'strided_slice', {'in': 0}),
        ('strided_slice', 'strided_slice_data'),
        ('begin_const_data', 'begin_const'),
        ('begin_const', 'begin_data'),
        ('begin_data', 'strided_slice', {'in': 1}),
        ('end_const_data', 'end_const'),
        ('end_const', 'end_data'),
        ('end_data', 'strided_slice', {'in': 2}),
        ('stride_const_data', 'stride_const'),
        ('stride_const', 'stride_data'),
        ('stride_data', 'strided_slice', {'in': 3}),
        ('strided_slice_data', 'interpolate', {'in': 1}),
        ('scales_const_data', 'scales_const'),
        ('scales_const', 'scales_data'),
        ('scales_data', 'interpolate', {'in': 2}),
        ('axes_const_data', 'axes_const'),
        ('axes_const', 'axes_data'),
        ('axes_data', 'interpolate', {'in': 3}),
        ('interpolate', 'interpolate_data'),
        ('interpolate_data', 'result')
    ]

    return build_graph(nodes_attrs, edges)
Code example #27
    def replace_pattern(self, graph: Graph, match: Dict[str, Node]):
        group_norm_node = match['op']
        group_norm_num_input_dims = len(
            group_norm_node.in_port(0).data.get_shape())

        # node computing initial GroupNorm input shape
        initial_shape_op_node = Shape(graph, {
            'name': group_norm_node.name + '/Shape'
        }).create_node()
        initial_shape_op_node.in_port(0).connect(
            group_norm_node.in_port(0).get_source())

        initial_shape_op_node_float = Cast(
            graph, {
                'name':
                initial_shape_op_node.name + '/to_float',
                'dst_type':
                data_type_str_to_np(graph.graph['cmd_params'].data_type)
            }).create_node()
        initial_shape_op_node.out_port(0).connect(
            initial_shape_op_node_float.in_port(0))

        initial_batch_dim_node = node_to_get_batch_value(
            initial_shape_op_node_float)
        initial_features_dim_node = node_to_get_features_dimension_value(
            initial_shape_op_node_float)
        initial_spatial_dims_node_int = node_to_get_spatial_dimensions_value(
            initial_shape_op_node)
        initial_spatial_dims_node = Cast(
            graph, {
                'name':
                initial_spatial_dims_node_int.name + '/to_float',
                'dst_type':
                data_type_str_to_np(graph.graph['cmd_params'].data_type)
            }).create_node()
        initial_spatial_dims_node_int.out_port(0).connect(
            initial_spatial_dims_node.in_port(0))

        group_size_node = Const(
            graph, {
                'value': int64_array([group_norm_node.num_groups]),
                'name': group_norm_node.name + '/GroupSize'
            }).create_node()

        # calculate "features // group_size" value
        reciprocal_group_size_node = Const(
            graph, {
                'value': np.array([1.0 / group_norm_node.num_groups]),
                'name': group_norm_node.name + '/ReciprocalGroupSize'
            }).create_node()

        c_div_g_node = Mul(graph, {}).create_node()
        c_div_g_node.in_port(0).connect(initial_features_dim_node.out_port(0))
        c_div_g_node.in_port(1).connect(reciprocal_group_size_node.out_port(0))

        batch_mul_group_size_node = Mul(graph, {}).create_node()
        batch_mul_group_size_node.in_port(0).connect(
            initial_batch_dim_node.out_port(0))
        batch_mul_group_size_node.in_port(1).connect(
            group_size_node.out_port(0))

        # create new node which concatenates several dims to one
        new_shape_node_float = new_shape_node_from_shape_nodes([
            batch_mul_group_size_node, c_div_g_node, initial_spatial_dims_node
        ])
        new_shape_node = Cast(graph, {
            'name': new_shape_node_float.name + '/to_int64',
            'dst_type': np.int64
        }).create_node()
        new_shape_node_float.out_port(0).connect(new_shape_node.in_port(0))

        reshape_for_mvn_node = Reshape(graph, {}).create_node()

        group_norm_node.in_port(0).get_connection().set_destination(
            reshape_for_mvn_node.in_port(0))
        reshape_for_mvn_node.in_port(1).connect(new_shape_node.out_port(0))

        # Reshape the gamma and beta constants to correct layout from [C] to [1,C], [1,C,1], [1,C,1,1] etc
        gamma_beta_shape = np.ones([group_norm_num_input_dims], dtype=np.int64)
        gamma_beta_shape[1] = -1

        gamma_value = group_norm_node.in_port(1).get_source().data.get_value()
        beta_value = group_norm_node.in_port(2).get_source().data.get_value()
        assert gamma_value is not None, 'The gamma should be constant'
        assert beta_value is not None, 'The beta should be constant'
        gamma_value = np.reshape(gamma_value, gamma_beta_shape)
        group_norm_node.in_port(1).get_source().data.set_value(gamma_value)
        beta_value = np.reshape(beta_value, gamma_beta_shape)
        group_norm_node.in_port(2).get_source().data.set_value(beta_value)

        # MVN
        mvn_node = MVN(
            graph, {
                'name': group_norm_node.name + '/MVN',
                'normalize_variance': 1,
                'eps': group_norm_node.eps,
                'eps_mode': 'inside_sqrt'
            }).create_node()
        mvn_node.in_port(0).connect(reshape_for_mvn_node.out_port(0))

        # MVN axes
        _, rank = get_shape_and_rank_nodes_by_port(
            mvn_node.in_port(0).get_connection().get_source(),
            return_as_a_scalar=True)
        rng = create_op_with_const_inputs(graph, Range, {
            0: int64_array(1),
            2: int64_array(1)
        }, {
            'name': group_norm_node.name + '/Range',
            'output_type': np.int64
        })
        mvn_node.in_port(1).connect(rng.out_port(0))
        rng.in_port(1).connect(rank.out_port(0))

        # reshape to the initial shape before multiplying with gamma and adding beta
        reshape_to_initial_shape_node = Reshape(graph, {}).create_node()
        reshape_to_initial_shape_node.in_port(0).connect(mvn_node.out_port(0))
        reshape_to_initial_shape_node.in_port(1).connect(
            initial_shape_op_node.out_port(0))

        mul_node = Mul(graph, {'name': mvn_node.name + '/Mul'}).create_node()
        mul_node.in_port(0).connect(reshape_to_initial_shape_node.out_port(0))
        group_norm_node.in_port(1).get_connection().set_destination(
            mul_node.in_port(1))

        add_node = Add(graph, {'name': mul_node.name + '/Add'}).create_node()
        add_node.in_port(0).connect(mul_node.out_port(0))
        group_norm_node.in_port(2).get_connection().set_destination(
            add_node.in_port(1))

        group_norm_node.out_port(0).get_connection().set_source(
            add_node.out_port(0))
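The shape arithmetic behind this decomposition can be checked with plain numpy, using hypothetical sizes: the input [N, C, H, W] is reshaped to [N * G, C / G, H, W] so that MVN normalizes each group independently, and reshaped back before gamma and beta are applied:

import numpy as np

N, C, H, W, G = 2, 8, 4, 4, 4  # hypothetical batch, channels, spatial dims, groups
x = np.random.rand(N, C, H, W).astype(np.float32)
grouped = x.reshape(N * G, C // G, H, W)  # groups folded into the batch dimension
restored = grouped.reshape(N, C, H, W)    # inverse reshape applied after MVN
assert np.array_equal(restored, x)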
Code example #28
def replace_resize(graph: Graph, resize: Node):
    log.debug("Converting of ONNX Resize-10 to Interpolate-4 "
              "is triggered for node {}.".format(
                  resize.soft_get('name', resize.id)))

    resize_name = resize.soft_get('name', resize.id)

    rank_node = Rank(graph, {'name': resize_name + '/max_axes'}).create_node()
    range_node = create_op_with_const_inputs(graph, Range, {
        0: int64_array(2),
        2: int64_array(1)
    }, {'name': resize_name + '/axes'})

    sizes_ss = create_op_with_const_inputs(graph, StridedSlice, {
        1: int64_array([2]),
        2: int64_array([0]),
        3: int64_array([1])
    }, {
        'name': resize_name + '/sizes_ss',
        'begin_mask': int64_array([1]),
        'end_mask': int64_array([0]),
        'new_axis_mask': int64_array([0]),
        'shrink_axis_mask': int64_array([0]),
        'ellipsis_mask': int64_array([0])
    })
    scales_ss = create_op_with_const_inputs(
        graph, StridedSlice, {
            1: int64_array([2]),
            2: int64_array([0]),
            3: int64_array([1])
        }, {
            'name': resize_name + '/scales_ss',
            'begin_mask': int64_array([1]),
            'end_mask': int64_array([0]),
            'new_axis_mask': int64_array([0]),
            'shrink_axis_mask': int64_array([0]),
            'ellipsis_mask': int64_array([0])
        })

    rank_node.out_port(0).connect(range_node.in_port(1))

    interpolate_node = Interpolate(
        graph, {
            'version': 'opset4',
            'mode': 'linear_onnx' if resize.mode == 'linear' else 'nearest',
            'coordinate_transformation_mode': 'asymmetric',
            'cube_coeff': -0.75,
            'nearest_mode': 'simple',
            'pads_begin': int64_array([0]),
            'pads_end': int64_array([0]),
            'antialias': 0,
            'shape_calculation_mode': 'scales',
            'in_ports_count': 4
        }).create_node()

    range_node.out_port(0).connect(interpolate_node.in_port(3))
    shape_of = Shape(graph, {'name': resize_name + '/ShapeOf'}).create_node()

    # When we calculate 'sizes' input as floor(input_shape * scales), we can get incorrect 'sizes' if, e.g.,
    # scales = [1.0, 1.0, 1.33333, 2.0], input_shape = [1, 3, 30, 200], because
    # input_shape * scales = [1, 3, 39.9999, 400], and floor(input_shape * scales)[2] == 39, not 40.
    # Maybe we need to calculate 'sizes' input as floor(input_shape * scales + eps), where eps is some small
    # floating point number, e.g. 1.0e-5. But, in this case, if scales = [1.0, 1.0, 1.33333, 2.0],
    # input_shape = [1, 3, 30, 200], floor(input_shape * scales + eps) = 39, not 40, because
    # input_shape[2] * scales[2] + 1.0e-5 = 39.99991.
    # Hence, we need to calculate 'sizes' as floor(input_shape * (scales + eps)).
    add_node = create_op_with_const_inputs(graph, Add,
                                           {1: float_array([1.0e-5])},
                                           {'name': resize_name + '/Add'})

    input_data_type = data_type_str_to_np(graph.graph['cmd_params'].data_type)

    cast_shape_to_float = Cast(graph, {
        'dst_type': input_data_type
    }).create_node()

    shape_of.out_port(0).connect(cast_shape_to_float.in_port(0))
    mul_node = Mul(graph, {
        'name': resize_name + '/Mul'
    }).create_node([cast_shape_to_float, add_node])
    floor_node = Floor(graph, {
        'name': resize_name + '/Floor'
    }).create_node([mul_node])
    cast_mul_result_to_int = Cast(graph, {
        'dst_type': np.int64
    }).create_node([floor_node])
    cast_mul_result_to_int.out_port(0).connect(sizes_ss.in_port(0))
    sizes_ss.out_port(0).connect(interpolate_node.in_port(1))

    scales_ss.out_port(0).connect(interpolate_node.in_port(2))

    connection_of_resize_input = resize.in_port(0).get_connection()
    connection_of_resize_input.set_destination(interpolate_node.in_port(0))

    connection_of_scales = resize.in_port(1).get_connection()
    connection_of_scales.set_destination(scales_ss.in_port(0))

    connection_of_resize_input.get_source().connect(shape_of.in_port(0))
    connection_of_resize_input.get_source().connect(rank_node.in_port(0))
    connection_of_scales.get_source().connect(add_node.in_port(0))

    rename_nodes([(resize, resize_name + '/delete'),
                  (interpolate_node, resize_name)])
    resize.out_port(0).get_connection().set_source(
        interpolate_node.out_port(0))
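The epsilon trick explained in the comment inside the example above is easy to reproduce with numpy: adding eps to the product does not help, while adding it to the scales does:

import numpy as np

shape = np.array([1.0, 3.0, 30.0, 200.0])
scales = np.array([1.0, 1.0, 1.33333, 2.0])
eps = 1.0e-5

np.floor(shape * scales)          # -> [1., 3., 39., 400.] (39 instead of 40)
np.floor(shape * scales + eps)    # still 39: 39.9999 + 1e-5 = 39.99991
np.floor(shape * (scales + eps))  # -> [1., 3., 40., 400.]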
Code example #29
    def type_infer(node: Node):
        node.out_port(0).set_data_type(
            data_type_str_to_np(node.graph.graph['cmd_params'].data_type))
Code example #30
    def transform_graph(self, graph: Graph, replacement_descriptions: dict):
        parameter_node = graph.get_op_nodes(op='Parameter')[0]
        parameter_node['data_type'] = data_type_str_to_np(
            parameter_node.graph.graph['cmd_params'].data_type)
        parameter_node.out_port(0).disconnect()

        # remove existing Result operations to remove unsupported sub-graph
        graph.remove_nodes_from(
            [node.id
             for node in graph.get_op_nodes(op='Result')] + ['detections'])

        # find the op that produces the result of applying the mean value and scale to the input tensor,
        # then connect it to the input of the first convolution of the model; this removes the image
        # pre-processing, which includes padding and resizing, from the model
        preprocessing_input_node_id = replacement_descriptions[
            'preprocessing_input_node']
        assert preprocessing_input_node_id in graph.nodes, 'The node with name "{}" is not found in the graph. This ' \
                                                           'should be the last node before image normalization and is specified' \
                                                           ' in the json file.'.format(preprocessing_input_node_id)
        preprocessing_input_node = Node(graph, preprocessing_input_node_id)
        consumer_node = preprocessing_input_node.out_port(
            0).get_connection().get_destination().node
        consumer_node.in_port(0).get_connection().set_source(
            parameter_node.out_port(0))

        preprocessing_output_node_id = replacement_descriptions[
            'preprocessing_output_node']
        assert preprocessing_output_node_id in graph.nodes, 'The node with name "{}" is not found in the graph. This ' \
                                                            'node should provide scaled image output and is specified' \
                                                            ' in the json file.'.format(preprocessing_output_node_id)
        preprocessing_output_node = Node(graph, preprocessing_output_node_id)
        preprocessing_output_node.out_port(0).disconnect()

        convolution_nodes = [
            n for n in graph.pseudo_topological_sort()
            if n.soft_get('type') == 'Convolution'
        ]
        convolution_nodes[0].in_port(0).get_connection().set_source(
            preprocessing_output_node.out_port(0))

        # create prior boxes (anchors) generator
        aspect_ratios = replacement_descriptions['aspect_ratios']
        assert len(aspect_ratios) % 2 == 0
        aspect_ratios = list(zip(aspect_ratios[::2], aspect_ratios[1::2]))
        priors_generator = self.AnchorGenerator(
            min_level=int(replacement_descriptions['min_level']),
            aspect_ratios=aspect_ratios,
            num_scales=int(replacement_descriptions['num_scales']),
            anchor_scale=replacement_descriptions['anchor_scale'])

        prior_boxes = []
        for i in range(100):
            inp_name = 'box_net/box-predict{}/BiasAdd'.format('_%d' %
                                                              i if i else '')
            if inp_name not in graph:
                break
            widths, heights = priors_generator.get(i)
            prior_box_op = PriorBoxClusteredOp(
                graph, {
                    'width': np.array(widths),
                    'height': np.array(heights),
                    'clip': 0,
                    'flip': 0,
                    'variance': replacement_descriptions['variance'],
                    'offset': 0.5
                })
            prior_boxes.append(
                prior_box_op.create_node(
                    [Node(graph, inp_name), parameter_node]))

        # concatenate prior box operations
        concat_prior_boxes = Concat(graph, {'axis': -1}).create_node()
        for idx, node in enumerate(prior_boxes):
            concat_prior_boxes.add_input_port(idx)
            concat_prior_boxes.in_port(idx).connect(node.out_port(0))

        conf = Sigmoid(graph, dict(name='concat/sigmoid')).create_node(
            [Node(graph, 'concat')])
        reshape_size_node = Const(graph, {
            'value': int64_array([0, -1])
        }).create_node([])
        logits = Reshape(graph, dict(name=conf.name + '/Flatten')).create_node(
            [conf, reshape_size_node])
        deltas = Reshape(graph, dict(name='concat_1/Flatten')).create_node(
            [Node(graph, 'concat_1'), reshape_size_node])

        # revert convolution boxes prediction weights from yxYX to xyXY (convolutions share weights and bias)
        weights = Node(graph, 'box_net/box-predict/pointwise_kernel')
        weights.value = weights.value.reshape(-1, 4)[:, [1, 0, 3, 2]].reshape(
            weights.shape)
        bias = Node(graph, 'box_net/box-predict/bias')
        bias.value = bias.value.reshape(-1,
                                        4)[:, [1, 0, 3, 2]].reshape(bias.shape)

        detection_output_node = DetectionOutput(
            graph,
            dict(
                name='detections',
                num_classes=int(replacement_descriptions['num_classes']),
                share_location=1,
                background_label_id=int(
                    replacement_descriptions['num_classes']) + 1,
                nms_threshold=replacement_descriptions['nms_threshold'],
                confidence_threshold=replacement_descriptions[
                    'confidence_threshold'],
                top_k=100,
                keep_top_k=100,
                code_type='caffe.PriorBoxParameter.CENTER_SIZE',
            )).create_node([deltas, logits, concat_prior_boxes])

        output_op = Result(graph, dict(name='output'))
        output_op.create_node([detection_output_node])