Example #1
def build_range_test_graphs(start=0, limit=10, delta=1, dst_type_str='FP16',
                            src_type_str='FP32', returns_shape_value=None):
    nodes = {
        **valued_const_with_data('start', float32_array(start)),
        **valued_const_with_data('limit', float32_array(limit)),
        **valued_const_with_data('delta', float32_array(delta)),
        **regular_op_with_empty_data('range', {'type': 'Range', 'op': 'Range',
                                               'returns_shape_value': returns_shape_value,
                                               'output_type': data_type_str_to_np(src_type_str),
                                               'infer': Range.infer}),
        **result('res'),
    }

    nodes_ref = deepcopy(nodes)
    nodes_ref.update({
        **regular_op_with_empty_data('range', {'type': 'Range', 'op': 'Range',
                                               'returns_shape_value': returns_shape_value,
                                               'output_type': data_type_str_to_np(dst_type_str),
                                               'infer': Range.infer}),
    })

    edges = [
        *connect('start', '0:range'),
        *connect('limit', '1:range'),
        *connect('delta', '2:range'),
        *connect('range', 'res'),
    ]
    graph = build_graph(nodes, edges)
    graph_ref = build_graph(nodes_ref, edges)

    graph = partial_infer(graph)

    graph.graph['cmd_params'].data_type = dst_type_str
    convert_blobs(graph, dst_type_str)
    return graph, graph_ref
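A minimal usage sketch for the helper above, assuming the `compare_graphs` utility from the Model Optimizer unit-test helpers (the helper name and its (flag, message) return value are assumptions here):

graph, graph_ref = build_range_test_graphs(start=0, limit=10, delta=1,
                                           dst_type_str='FP16')
# compare the transformed graph against the reference, walking back from 'res'
flag, resp = compare_graphs(graph, graph_ref, 'res', check_op_attrs=True)
assert flag, resp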
Example #2
def calculate_prior_box_value(value: Node, value_to_div: Port,
                              value_to_add: Port):
    """
    :param value: Node with value. This is expected to be a node with op='Split'
    :param value_to_div: Output port with values to be divided by 2
    :param value_to_add: Output port with values to be added to values from value_to_div port
    :return: Sub and Add nodes

    The sub-graph can be described by formulas:
    min = value[value_to_add] - (value[value_to_div] / 2)
    max = value[value_to_add] + (value[value_to_div] / 2)
    """
    graph = value.graph
    dtype = data_type_str_to_np(graph.graph['cmd_params'].data_type)
    _min = Sub(graph, dict(name=value.name + '/Sub')).create_node()
    div = create_op_node_with_second_input(graph,
                                           Div,
                                           mo_array([2], dtype=dtype),
                                           op_attrs=dict(name=value.name +
                                                         '/Div'))
    div.in_port(0).connect(value_to_div)
    _min.in_port(0).connect(value_to_add)
    _min.in_port(1).connect(div.out_port(0))

    _max = Add(graph, dict(name=value.name + '/Add')).create_node()
    _max.in_port(0).connect(div.out_port(0))
    _max.in_port(1).connect(value_to_add)

    return _min, _max
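The docstring formulas reduce to plain arithmetic on the port values; a standalone numpy illustration, independent of the graph machinery:

import numpy as np

size = np.array([4.0, 6.0])      # values arriving on value_to_div
center = np.array([10.0, 20.0])  # values arriving on value_to_add

_min = center - size / 2  # -> [ 8., 17.]
_max = center + size / 2  # -> [12., 23.]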
Example #3
    def find_and_replace_pattern(self, graph: Graph):
        ir_data_type = data_type_str_to_np(graph.graph['cmd_params'].data_type)

        for node in graph.get_op_nodes():
            if node.op in operations_with_data_type_attributes:
                dst_type = operations_with_data_type_attributes[
                    node.op]['attr_name']
                node_name = node.soft_get('name', node.id)
                assert node.has_valid(
                    dst_type), '{} attribute is missing for node {}'.format(
                        dst_type, node_name)

                final_type = None
                if node[dst_type] == np.float64:
                    final_type = np.float32

                if node[dst_type] in [np.float32, np.float64] and ir_data_type == np.float16 and \
                        not node.has_and_set('returns_shape_value'):
                    final_type = np.float16
                elif node.has_and_set('returns_shape_value'
                                      ) and node[dst_type] == np.float16:
                    # revert to FP32 for all nodes with shape values
                    final_type = np.float32

                if final_type is not None:
                    log.warning(
                        'Change data type from {} to {} for node {}'.format(
                            node[dst_type], final_type, node_name))
                    node[dst_type] = final_type

                if final_type == np.float16:
                    assert_that_is_castable_to_fp16(node)
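The precision-adjustment rule above can be summarized as a standalone function; a sketch that assumes the decision depends only on the node's current type, the IR type, and the returns_shape_value flag:

import numpy as np

def adjust_type(node_type, ir_data_type, returns_shape_value):
    # FP64 always narrows to FP32; FP32/FP64 narrow to FP16 when the IR is
    # FP16, unless the output carries shape values, which must stay FP32.
    final_type = None
    if node_type == np.float64:
        final_type = np.float32
    if node_type in (np.float32, np.float64) and ir_data_type == np.float16 \
            and not returns_shape_value:
        final_type = np.float16
    elif returns_shape_value and node_type == np.float16:
        final_type = np.float32
    return final_type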
Example #4
def build_cast_test_graphs(input_data, dst_type_str='FP16'):
    nodes = {
        **valued_const_with_data('input', float32_array(input_data)),
        **regular_op_with_empty_data('cast', {'type': 'Convert', 'op': 'Cast',
                                              'dst_type': np.float32,
                                              'infer': Cast.infer}),
        **result('res'),
    }

    nodes_ref = deepcopy(nodes)
    nodes_ref.update({
        **regular_op_with_empty_data('cast', {'type': 'Convert', 'op': 'Cast',
                                              'dst_type': data_type_str_to_np(dst_type_str),
                                              'infer': Cast.infer}),
    })

    edges = [
        *connect('input', 'cast'),
        *connect('cast', 'res'),
    ]
    graph = build_graph(nodes, edges)
    graph_ref = build_graph(nodes_ref, edges)

    graph = partial_infer(graph)

    graph.graph['cmd_params'].data_type = dst_type_str
    convert_blobs(graph, dst_type_str)
    return graph, graph_ref
Example #5
    def __init__(self, graph, attrs: dict = None):
        super().__init__(
            graph, {
                'type': self.op,
                'op': self.op,
                'version': 'opset1',
                'infer': self.infer,
                'value': None,
                'shape': None,
                'data_type': None,
                'out_ports_count': 1,
                'type_infer': self.type_infer,
            }, attrs)
        if not isinstance(self.attrs['value'], np.ndarray):
            self.attrs['value'] = np.array(self.attrs['value'])

        self.attrs['shape'] = np.array(self.attrs['value'].shape,
                                       dtype=np.int64)
        if 'force_shape' in self.attrs and self.attrs[
                'force_shape'] is not None:
            self.attrs['shape'] = np.array(self.attrs['force_shape'],
                                           dtype=np.int64)

        self.attrs['data_type'] = self.attrs['value'].dtype
        if 'force_type' in self.attrs and self.attrs['force_type'] is not None:
            self.attrs['data_type'] = data_type_str_to_np(
                self.attrs['force_type'])
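A plain-dict sketch of the attribute normalization performed in this __init__, assuming data_type_str_to_np('FP16') yields np.float16:

import numpy as np

attrs = {'value': [[1, 2], [3, 4]], 'force_shape': None, 'force_type': 'FP16'}

value = np.array(attrs['value'])
shape = np.array(value.shape, dtype=np.int64)  # -> [2, 2]
if attrs.get('force_shape') is not None:
    shape = np.array(attrs['force_shape'], dtype=np.int64)

data_type = value.dtype  # -> int64
if attrs.get('force_type') is not None:
    data_type = np.float16  # assumed result of data_type_str_to_np('FP16')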
Example #6
    def replace_sub_graph(self, graph: Graph, match: [dict, SubgraphMatch]):
        cmp = match['complex']
        complex_abs = match['abs']
        complex_abs_name = complex_abs.soft_get('name', complex_abs.id)

        power_type = data_type_str_to_np(graph.graph['cmd_params'].data_type)

        pow0 = create_op_with_const_inputs(
            graph, Pow, {1: power_type(2.0)},
            {'name': complex_abs_name + '/real_part_squared'})
        pow1 = create_op_with_const_inputs(
            graph, Pow, {1: power_type(2.0)},
            {'name': complex_abs_name + '/imag_part_squared'})

        cmp.in_port(0).get_connection().set_destination(pow0.in_port(0))
        cmp.in_port(1).get_connection().set_destination(pow1.in_port(0))

        add = Add(graph, {
            'name': complex_abs_name + '/squared_abs'
        }).create_node([pow0, pow1])
        sqrt = create_op_with_const_inputs(graph, Pow, {1: power_type(0.5)},
                                           {})
        add.out_port(0).connect(sqrt.in_port(0))

        complex_abs.out_port(0).get_connection().set_source(sqrt.out_port(0))

        rename_nodes([(complex_abs, complex_abs_name + '/to_be_removed'),
                      (sqrt, complex_abs_name)])
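Numerically, the replacement computes |a + bi| = sqrt(a^2 + b^2); a quick numpy check of the decomposition:

import numpy as np

real = np.array([3.0], dtype=np.float32)
imag = np.array([4.0], dtype=np.float32)

# pow0, pow1, add and sqrt from the transformation above:
abs_value = (real ** 2.0 + imag ** 2.0) ** 0.5  # -> [5.]
assert np.allclose(abs_value, np.abs(real + 1j * imag))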
Example #7
    def replace_pattern(graph: Graph, match: dict):
        node = match['node']
        for in_port, precision in node.force_precision_in_ports.items():
            if in_port in node.in_ports().keys() and not node.in_port(in_port).disconnected():
                cast = Cast(graph, {'name': node.name + '/Cast_' + str(in_port),
                                    'dst_type': data_type_str_to_np(precision)}).create_node()
                node.in_port(in_port).get_connection().insert_node(cast)
Example #8
    def find_and_replace_pattern(self, graph: Graph):
        for dequantize_node in graph.get_op_nodes(op='DequantizeLinear'):
            node_name = dequantize_node.soft_get('name', dequantize_node.id)
            axis = dequantize_node.soft_get('axis', None)
            scale_y_shape = dequantize_node.in_port(1).data.get_shape()
            model_data_type = data_type_str_to_np(
                graph.graph['cmd_params'].data_type)
            cast = Cast(graph, {
                'dst_type': model_data_type,
                'name': node_name + '/Cast'
            }).create_node()
            dequantize_node.in_port(0).get_connection().set_destination(
                cast.in_port(0))
            mul = Mul(graph, {'can_be_fused': False}).create_node()

            is_second_port_connected = dequantize_node.is_in_port_connected(2)
            if is_second_port_connected:
                # it is necessary not to replace the subtract, so the pattern in offline transformations still matches
                # See ConvertQuantizeDequantize transformation in ngraph
                sub = Sub(graph, {
                    'name': node_name + '/Sub',
                    'zero_point_sub': True
                }).create_node()
                cast.out_port(0).connect(sub.in_port(0))
                dequantize_node.in_port(2).get_connection().set_destination(
                    sub.in_port(1))
                sub.out_port(0).connect(mul.in_port(0))
            else:
                cast.out_port(0).connect(mul.in_port(0))

            dequantize_node.in_port(1).get_connection().set_destination(
                mul.in_port(1))
            dequantize_node.out_port(0).get_connection().set_source(
                mul.out_port(0))
            rename_nodes([(dequantize_node, node_name + '/TBD'),
                          (mul, node_name)])

            assert scale_y_shape is not None
            if axis is not None and len(
                    scale_y_shape) > 0 and scale_y_shape[0] > 1:
                input_shape = cast.in_port(0).data.get_shape()
                target_shape = np.ones(len(input_shape), np.int64)
                target_shape[axis] = input_shape[axis]

                mul_reshape = create_op_with_const_inputs(
                    graph, Reshape, {1: int64_array(target_shape)},
                    {'name': node_name + '/Reshape/Mul'})
                mul.in_port(1).get_connection().set_destination(
                    mul_reshape.in_port(0))
                mul_reshape.out_port(0).connect(mul.in_port(1))

                if is_second_port_connected:
                    sub_reshape = create_op_with_const_inputs(
                        graph, Reshape, {1: int64_array(target_shape)},
                        {'name': node_name + '/Reshape/Sub'})
                    sub.in_port(1).get_connection().set_destination(
                        sub_reshape.in_port(0))
                    sub_reshape.out_port(0).connect(sub.in_port(1))
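The decomposition implements the ONNX DequantizeLinear semantics, y = (cast(x) - zero_point) * scale; a plain-numpy sketch of the per-tensor case:

import numpy as np

x = np.array([0, 128, 255], dtype=np.uint8)
scale = np.float32(0.5)
zero_point = np.uint8(128)

# Cast -> Sub (when port 2 is connected) -> Mul, as built above
y = (x.astype(np.float32) - np.float32(zero_point)) * scale  # -> [-64., 0., 63.5]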
Example #9
    def replace_pattern(self, graph: Graph, match: Dict[str, Node]):
        fake_quantize = match['fake_quantize']

        dst_type = match['const'].value.dtype
        if np.issubdtype(dst_type, np.floating):
            dst_type = data_type_str_to_np(graph.graph['cmd_params'].data_type)

        quantized_type, mode = None, None
        for quantization_levels in sorted(self.QUANTIZATION_MAP):
            if quantization_levels >= fake_quantize.levels:
                quantized_type, mode = self.QUANTIZATION_MAP[
                    quantization_levels]
                break

        self.quantize_data(fake_quantize, dst_type, quantized_type, mode)
        self.dequantize_data(fake_quantize, dst_type, quantized_type)
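The loop picks the narrowest QUANTIZATION_MAP entry whose level count covers the FakeQuantize. A sketch with assumed map contents (the real map lives on the transformation class and may differ):

import numpy as np

QUANTIZATION_MAP = {256: (np.int8, 'signed'), 65536: (np.int16, 'signed')}  # assumed

def pick_quantized_type(levels):
    for quantization_levels in sorted(QUANTIZATION_MAP):
        if quantization_levels >= levels:
            return QUANTIZATION_MAP[quantization_levels]
    return None, None

print(pick_quantized_type(255))  # -> (<class 'numpy.int8'>, 'signed')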
Example #10
    def find_and_replace_pattern(self, graph: Graph):
        ir_data_type = data_type_str_to_np(graph.graph['cmd_params'].data_type)

        for node in graph.get_op_nodes(op='RandomUniform'):
            assert node.has_valid('output_type')

            if node.has_and_set('returns_shape_value'):
                continue

            if node.output_type != ir_data_type and np.issubdtype(
                    node.output_type, np.floating):
                node_name = node.soft_get('name', node.id)
                convert_node = Cast(graph, {
                    'name': node_name + "/cast",
                    'dst_type': ir_data_type
                }).create_node()
                node.out_port(0).get_connection().insert_node(convert_node)
Example #11
    def replace_op(self, graph: Graph, node: Node):
        if node.has_and_set('inputs_preprocessed'):
            log.debug('Node "{}" has already been preprocessed'.format(
                node.soft_get('name')))
            return []
        # reshape tensor with batch indices to 2d
        unsqueeze_node = create_op_node_with_second_input(
            graph, Unsqueeze, int64_array([1]),
            {'name': node.name + '/Unsqueeze'}, node.in_node(2))

        convert_node = Cast(
            graph, {
                'name':
                unsqueeze_node.name + '/ToFloat',
                'dst_type':
                data_type_str_to_np(graph.graph['cmd_params'].data_type)
            }).create_node()

        convert_node.in_port(0).connect(unsqueeze_node.out_port(0))

        concat_op = Concat(
            graph, {
                'axis': 1,
                'name': node.name + '/concat_batch_indices_and_boxes',
                'in_ports_count': 2
            })
        concat_node = concat_op.create_node([convert_node, node.in_node(1)])

        # do not remove edge with crop_size because it is needed in the partial infer
        graph.remove_edge(node.in_node(1).id, node.id)

        # the input to the CropAndResize contains box coordinates in the YXYX layout, but the IE layer ROIPooling
        # expects coordinates in the XYXY layout, so a convolution is added here to swap the coordinates
        swapped_box_coordinates_node = add_convolution_to_swap_xy_coordinates(
            graph, concat_node, 5)

        # reshape the locations tensor to 2D so it can be passed to Eltwise, which will be converted to ScaleShift
        reshape_2d_node = create_op_node_with_second_input(
            graph, Reshape, int64_array([-1, 5]),
            dict(name=swapped_box_coordinates_node.id + '/reshape_2d_'),
            swapped_box_coordinates_node)
        graph.create_edge(reshape_2d_node, node, 0, 1)

        # do not replace any output edge
        return []
Example #12
def convert_inputs_of_specific_ops(graph: Graph):
    type_port = {'Broadcast': {1: 'int64', 2: 'int64'},
                 'ConvolutionBackpropData': {2: 'int64'},
                 'Deconvolution': {2: 'int64'},
                 'Gather': {2: 'int64'},
                 'GroupConvolutionBackpropData': {2: 'int64'},
                 'Interpolate': {1: 'int64'},
                 'LRN': {1: 'int64'},
                 'NonMaxSuppression': {2: 'int64'},
                 'NormalizeL2': {1: 'int64'},
                 'OneHot': {1: 'int64'},
                 'Pad': {1: 'int64', 2: 'int64'},
                 'PriorBox': {0: 'int64', 1: 'int64'},
                 'PriorBoxClustered': {0: 'int64', 1: 'int64'},
                 'ReduceLogicalAnd': {1: 'int64'},
                 'ReduceLogicalOr': {1: 'int64'},
                 'ReduceMax': {1: 'int64'},
                 'ReduceMean': {1: 'int64'},
                 'ReduceMin': {1: 'int64'},
                 'ReduceProd': {1: 'int64'},
                 'ReduceSum': {1: 'int64'},
                 'Reshape': {1: 'int64'},
                 'Squeeze': {1: 'int64'},
                 'StridedSlice': {1: 'int64', 2: 'int64', 3: 'int64'},
                 'Split': {1: 'int64'},
                 'Tile': {1: 'int64'},
                 'Transpose': {1: 'int64'},
                 'Unsqueeze': {1: 'int64'},
                 'VariadicSplit': {1: 'int64', 2: 'int64'},
                 }

    for node in graph.get_op_nodes():
        if node.soft_get('type') in type_port:
            ports_to_update = type_port[node.soft_get('type')]
            for port_id, precision in ports_to_update.items():
                if port_id in node.in_ports() and not node.in_port(port_id).disconnected():
                    log.debug('Converting value for the input port "{}" of op "{}" to "{}".'
                              ''.format(port_id, node.soft_get('name', node.id), precision))
                    in_port = node.in_port(port_id)
                    np_type = data_type_str_to_np(precision)
                    if in_port.get_source().node.type == 'Const':
                        convert_const_node_value_type(node.in_port(port_id).get_source().node, np_type)
                    else:
                        in_port.get_connection().insert_node(Cast(graph, {'dst_type': np_type}).create_node())
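For reference, a minimal stand-in for data_type_str_to_np itself, showing the string-to-numpy mapping assumed throughout these examples (the real helper supports more aliases and raises a descriptive error on unknown strings):

import numpy as np

def data_type_str_to_np_sketch(data_type_str):
    # minimal assumed mapping; not the actual Model Optimizer implementation
    return {
        'FP16': np.float16,
        'FP32': np.float32,
        'FP64': np.float64,
        'int32': np.int32,
        'int64': np.int64,
    }[data_type_str]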
Example #13
    def find_and_replace_pattern(self, graph: Graph):
        for node in graph.get_op_nodes(op='ThresholdedRelu'):
            name = node.soft_get('name', node.id)

            greater = create_op_with_const_inputs(
                graph, Greater, {1: float_array([node.alpha])})
            greater.in_port(0).connect(node.in_port(0).get_source())
            float_greater = Cast(
                graph, {
                    'dst_type':
                    data_type_str_to_np(graph.graph['cmd_params'].data_type)
                }).create_node()
            greater.out_port(0).connect(float_greater.in_port(0))

            mul = Mul(graph, {}).create_node()
            node.out_port(0).get_connection().set_source(mul.out_port(0))
            mul.in_port(0).connect(node.in_port(0).get_source())
            mul.in_port(1).connect(float_greater.out_port(0))

            rename_nodes([(node, name + '/TBR'), (mul, name)])
            graph.remove_node(node.id)
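The decomposition computes ThresholdedRelu(x) = x * (x > alpha); in plain numpy:

import numpy as np

alpha = 1.0
x = np.array([-2.0, 0.5, 3.0], dtype=np.float32)
y = x * (x > alpha).astype(np.float32)  # zero where x <= alpha -> [0., 0., 3.]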
Example #14
    def find_and_replace_pattern(self, graph: Graph):
        for complex_abs in graph.get_op_nodes(op='ComplexAbs'):
            complex_abs_name = complex_abs.soft_get('name', complex_abs.id)
            power_type = data_type_str_to_np(
                graph.graph['cmd_params'].data_type)

            squared = create_op_with_const_inputs(
                graph, Pow, {1: power_type(2.0)},
                {'name': complex_abs_name + '/squared_parts'})
            complex_abs.in_port(0).get_connection().set_destination(
                squared.in_port(0))
            sum = create_op_with_const_inputs(
                graph, ReduceSum, {1: int64_array(-1)},
                {'name': complex_abs_name + '/squared_abs'}, squared)
            sqrt = create_op_with_const_inputs(graph, Pow,
                                               {1: power_type(0.5)}, {}, sum)

            complex_abs.out_port(0).get_connection().set_source(
                sqrt.out_port(0))

            rename_nodes([(complex_abs, complex_abs_name + '/to_be_removed'),
                          (sqrt, complex_abs_name)])
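This variant assumes the complex tensor packs real and imaginary parts along the last axis, so a single Pow plus a ReduceSum over axis -1 replaces the two separate squarings of the previous ComplexAbs decomposition:

import numpy as np

packed = np.array([[3.0, 4.0], [5.0, 12.0]], dtype=np.float32)  # [..., (re, im)]
abs_value = np.sum(packed ** 2.0, axis=-1) ** 0.5  # -> [ 5., 13.]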
Example #15
    def type_infer(node: Node):
        node.out_port(0).set_data_type(data_type_str_to_np(node.graph.graph['cmd_params'].data_type))
Example #16
    def replace_pattern(self, graph: Graph, match: Dict[str, Node]):
        group_norm_node = match['op']
        group_norm_num_input_dims = len(group_norm_node.in_port(0).data.get_shape())

        # node computing initial GroupNorm input shape
        initial_shape_op_node = Shape(graph, {'name': group_norm_node.name + '/Shape'}).create_node()
        initial_shape_op_node.in_port(0).connect(group_norm_node.in_port(0).get_source())

        initial_shape_op_node_float = Cast(
            graph, {'name': initial_shape_op_node.name + '/to_float',
                    'dst_type': data_type_str_to_np(graph.graph['cmd_params'].data_type)}).create_node()
        initial_shape_op_node.out_port(0).connect(initial_shape_op_node_float.in_port(0))

        initial_batch_dim_node = node_to_get_batch_value(initial_shape_op_node_float)
        initial_features_dim_node = node_to_get_features_dimension_value(initial_shape_op_node_float)
        initial_spatial_dims_node_int = node_to_get_spatial_dimensions_value(initial_shape_op_node)
        initial_spatial_dims_node = Cast(
            graph, {'name': initial_spatial_dims_node_int.name + '/to_float',
                    'dst_type': data_type_str_to_np(graph.graph['cmd_params'].data_type)}).create_node()
        initial_spatial_dims_node_int.out_port(0).connect(initial_spatial_dims_node.in_port(0))

        group_size_node = Const(graph, {'value': int64_array([group_norm_node.num_groups]),
                                        'name': group_norm_node.name + '/GroupSize'}).create_node()

        # calculate "features // group_size" value
        reciprocal_group_size_node = Const(graph, {'value': np.array([1.0 / group_norm_node.num_groups]),
                                                   'name': group_norm_node.name + '/ReciprocalGroupSize'}).create_node()

        c_div_g_node = Mul(graph, {}).create_node()
        c_div_g_node.in_port(0).connect(initial_features_dim_node.out_port(0))
        c_div_g_node.in_port(1).connect(reciprocal_group_size_node.out_port(0))

        batch_mul_group_size_node = Mul(graph, {}).create_node()
        batch_mul_group_size_node.in_port(0).connect(initial_batch_dim_node.out_port(0))
        batch_mul_group_size_node.in_port(1).connect(group_size_node.out_port(0))

        # create new node which concatenates several dims to one
        new_shape_node_float = new_shape_node_from_shape_nodes([batch_mul_group_size_node, c_div_g_node,
                                                                initial_spatial_dims_node])
        new_shape_node = Cast(graph,
                              {'name': new_shape_node_float.name + '/to_int64', 'dst_type': np.int64}).create_node()
        new_shape_node_float.out_port(0).connect(new_shape_node.in_port(0))

        reshape_for_mvn_node = Reshape(graph, {}).create_node()

        group_norm_node.in_port(0).get_connection().set_destination(reshape_for_mvn_node.in_port(0))
        reshape_for_mvn_node.in_port(1).connect(new_shape_node.out_port(0))

        # Reshape the gamma and beta constants to correct layout from [C] to [1,C], [1,C,1], [1,C,1,1] etc
        gamma_beta_shape = np.ones([group_norm_num_input_dims], dtype=np.int64)
        gamma_beta_shape[1] = -1

        gamma_value = group_norm_node.in_port(1).get_source().data.get_value()
        beta_value = group_norm_node.in_port(2).get_source().data.get_value()
        assert gamma_value is not None, 'The gamma should be constant'
        assert beta_value is not None, 'The beta should be constant'
        gamma_value = np.reshape(gamma_value, gamma_beta_shape)
        group_norm_node.in_port(1).get_source().data.set_value(gamma_value)
        beta_value = np.reshape(beta_value, gamma_beta_shape)
        group_norm_node.in_port(2).get_source().data.set_value(beta_value)

        # MVN
        mvn_node = MVN(graph, {'name': group_norm_node.name + '/MVN',
                               'normalize_variance': 1,
                               'eps': group_norm_node.eps,
                               'eps_mode': 'inside_sqrt'}).create_node()
        mvn_node.in_port(0).connect(reshape_for_mvn_node.out_port(0))

        # MVN axes
        _, rank = get_shape_and_rank_nodes_by_port(mvn_node.in_port(0).get_connection().get_source(),
                                                   return_as_a_scalar=True)
        rng = create_op_with_const_inputs(graph, Range, {0: int64_array(1), 2: int64_array(1)},
                                          {'name': group_norm_node.name + '/Range', 'output_type': np.int64})
        mvn_node.in_port(1).connect(rng.out_port(0))
        rng.in_port(1).connect(rank.out_port(0))

        # reshape to the initial shape before multiplying with gamma and adding beta
        reshape_to_initial_shape_node = Reshape(graph, {}).create_node()
        reshape_to_initial_shape_node.in_port(0).connect(mvn_node.out_port(0))
        reshape_to_initial_shape_node.in_port(1).connect(initial_shape_op_node.out_port(0))

        mul_node = Mul(graph, {'name': mvn_node.name + '/Mul'}).create_node()
        mul_node.in_port(0).connect(reshape_to_initial_shape_node.out_port(0))
        group_norm_node.in_port(1).get_connection().set_destination(mul_node.in_port(1))

        add_node = Add(graph, {'name': mul_node.name + '/Add'}).create_node()
        add_node.in_port(0).connect(mul_node.out_port(0))
        group_norm_node.in_port(2).get_connection().set_destination(add_node.in_port(1))

        group_norm_node.out_port(0).get_connection().set_source(add_node.out_port(0))
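The whole replacement is the standard GroupNorm reshape trick: fold [N, C, ...] into [N*G, C/G, ...], normalize with MVN over every non-batch axis, reshape back, then apply gamma and beta. A compact numpy reference, under the assumption that MVN with eps_mode='inside_sqrt' means (x - mean) / sqrt(var + eps):

import numpy as np

def group_norm_reference(x, gamma, beta, num_groups, eps):
    n, c = x.shape[:2]
    spatial = x.shape[2:]
    g = x.reshape((n * num_groups, c // num_groups) + spatial)
    axes = tuple(range(1, g.ndim))        # MVN over all but the batch axis
    mean = g.mean(axis=axes, keepdims=True)
    var = g.var(axis=axes, keepdims=True)
    g = (g - mean) / np.sqrt(var + eps)   # eps_mode='inside_sqrt'
    x = g.reshape((n, c) + spatial)
    shape = (1, c) + (1,) * len(spatial)  # gamma/beta: [C] -> [1, C, 1, ...]
    return x * gamma.reshape(shape) + beta.reshape(shape)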
Example #17
def create_ref_net_in_scales_mode(precision, input_shape, output_shape, sizes_value, scales_value,
                                  attrs):
    input_data_type = np_data_type_to_destination_type(data_type_str_to_np(precision))
    input_rank = len(input_shape)
    epsilon = np.array([1.0e-5])
    spatial_dims = spatial_dimensions(input_shape)
    begin_dim = spatial_dims[0]
    end_dim = input_rank

    spatial_scales_value = scales_value[spatial_dims]

    nodes_attrs = {
        'input': {'kind': 'op', 'type': 'Parameter'},
        'input_data': {'shape': input_shape, 'kind': 'data'},
        'shape_of': {'kind': 'op', 'type': 'ShapeOf'},
        'shape_of_data': {'shape': int64_array([input_rank]), 'kind': 'data'},
        'shape_to_float': {'kind': 'op', 'type': 'Convert', 'destination_type': input_data_type},
        'shape_to_float_data': {'shape': int64_array([input_rank]), 'kind': 'data'},
        'mul': {'kind': 'op', 'type': 'Multiply'},
        'mul_scales_const_data': {'kind': 'data', 'value': scales_value},
        'mul_scales_const': {'kind': 'op', 'type': 'Const'},
        'mul_scales_data': {'shape': int64_array([input_rank]), 'kind': 'data'},
        'mul_data': {'shape': int64_array([input_rank]), 'kind': 'data'},
        'eps_const_data': {'kind': 'data', 'value': epsilon},
        'eps_const': {'kind': 'op', 'type': 'Const'},
        'eps_data': {'shape': int64_array([1]), 'kind': 'data'},
        'add': {'kind': 'op', 'type': 'Add'},
        'add_data': {'shape': int64_array([input_rank]), 'kind': 'data'},
        'floor': {'type': 'Floor', 'kind': 'op'},
        'floor_data': {'shape': int64_array([input_rank]), 'kind': 'data'},
        'to_int': {'kind': 'op', 'type': 'Convert', 'destination_type': 'i64'},
        'to_int_data': {'shape': int64_array([input_rank]), 'kind': 'data'},
        'strided_slice': {
            'kind': 'op', 'type': 'StridedSlice', 'begin_mask': 0,
            'end_mask': 0, 'new_axis_mask': 0,
            'shrink_axis_mask': 0, 'ellipsis_mask': 0
        },
        'strided_slice_data': {'shape': int64_array([len(spatial_scales_value)]), 'kind': 'data'},
        'begin_const_data': {'kind': 'data', 'value': int64_array([begin_dim])},
        'begin_const': {'kind': 'op', 'type': 'Const'},
        'begin_data': {'shape': int64_array([1]), 'kind': 'data'},
        'end_const_data': {'kind': 'data', 'value': int64_array([end_dim])},
        'end_const': {'kind': 'op', 'type': 'Const'},
        'end_data': {'shape': int64_array([1]), 'kind': 'data'},
        'stride_const_data': {'kind': 'data', 'value': int64_array([1])},
        'stride_const': {'kind': 'op', 'type': 'Const'},
        'stride_data': {'shape': int64_array([1]), 'kind': 'data'},
        'scales_const_data': {'kind': 'data', 'value': spatial_scales_value},
        'scales_const': {'kind': 'op', 'type': 'Const'},
        'scales_data': {'shape': int64_array([len(spatial_scales_value)]), 'kind': 'data'},
        'axes_const_data': {'kind': 'data', 'value': spatial_dims},
        'axes_const': {'kind': 'op', 'type': 'Const'},
        'axes_data': {'shape': int64_array([len(spatial_dims)]), 'kind': 'data'},
        'interpolate': attrs,
        'interpolate_data': {'shape': output_shape, 'kind': 'data'},
        'result': {'kind': 'op', 'type': 'Result'},
    }
    edges = [
        ('input', 'input_data'),
        ('input_data', 'interpolate', {'in': 0, 'out': 0}),
        ('input_data', 'shape_of', {'in': 0, 'out': 0}),
        ('shape_of', 'shape_of_data'),
        ('shape_of_data', 'shape_to_float'),
        ('shape_to_float', 'shape_to_float_data'),
        ('shape_to_float_data', 'mul', {'in': 0}),
        ('mul_scales_const_data', 'mul_scales_const'),
        ('mul_scales_const', 'mul_scales_data'),
        ('mul_scales_data', 'mul', {'in': 1}),
        ('mul', 'mul_data'),
        ('eps_const_data', 'eps_const'),
        ('eps_const', 'eps_data'),
        ('mul_data', 'add', {'in': 0}),
        ('eps_data', 'add', {'in': 1}),
        ('add', 'add_data'),
        ('add_data', 'floor'),
        ('floor', 'floor_data'),
        ('floor_data', 'to_int'),
        ('to_int', 'to_int_data'),
        ('to_int_data', 'strided_slice', {'in': 0}),
        ('strided_slice', 'strided_slice_data'),
        ('begin_const_data', 'begin_const'),
        ('begin_const', 'begin_data'),
        ('begin_data', 'strided_slice', {'in': 1}),
        ('end_const_data', 'end_const'),
        ('end_const', 'end_data'),
        ('end_data', 'strided_slice', {'in': 2}),
        ('stride_const_data', 'stride_const'),
        ('stride_const', 'stride_data'),
        ('stride_data', 'strided_slice', {'in': 3}),
        ('strided_slice_data', 'interpolate', {'in': 1}),
        ('scales_const_data', 'scales_const'),
        ('scales_const', 'scales_data'),
        ('scales_data', 'interpolate', {'in': 2}),
        ('axes_const_data', 'axes_const'),
        ('axes_const', 'axes_data'),
        ('axes_data', 'interpolate', {'in': 3}),
        ('interpolate', 'interpolate_data'),
        ('interpolate_data', 'result')
    ]

    return build_graph(nodes_attrs, edges)
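The reference graph derives the Interpolate sizes input from the scales as sizes = floor(shape * scales + eps) and then slices out the spatial dimensions; in numpy terms (assuming NCHW, i.e. spatial_dimensions returns [2, 3] for a 4D shape):

import numpy as np

input_shape = np.array([1, 3, 100, 200], dtype=np.int64)
scales_value = np.array([1.0, 1.0, 0.5, 0.5])
epsilon = 1.0e-5
spatial_dims = np.array([2, 3])  # assumed result of spatial_dimensions(input_shape)

sizes = np.floor(input_shape.astype(np.float32) * scales_value + epsilon).astype(np.int64)
sizes = sizes[spatial_dims[0]:len(input_shape)]  # StridedSlice -> [50, 100]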
Example #18
    def transform_graph(self, graph: Graph, replacement_descriptions: dict):
        parameter_node = graph.get_op_nodes(op='Parameter')[0]
        parameter_node['data_type'] = data_type_str_to_np(
            parameter_node.graph.graph['cmd_params'].data_type)

        # remove existing Result operations to remove unsupported sub-graph
        graph.remove_nodes_from(
            [node.id
             for node in graph.get_op_nodes(op='Result')] + ['detections'])

        # find the op that is the final result of applying the mean value and scale to the input tensor,
        # then connect it to the input of the first convolution of the model; this removes the image
        # pre-processing (padding and resizing) from the model
        preprocessing_input_node_id = replacement_descriptions[
            'preprocessing_input_node']
        assert preprocessing_input_node_id in graph.nodes, 'The node with name "{}" is not found in the graph. This ' \
                                                           'should be the last node before image normalization and is ' \
                                                           'specified in the json file.'.format(preprocessing_input_node_id)
        preprocessing_input_node = Node(graph, preprocessing_input_node_id)
        consumer_node = preprocessing_input_node.out_port(
            0).get_connection().get_destination().node
        consumer_node.in_port(0).get_connection().set_source(
            parameter_node.out_port(0))

        preprocessing_output_node_id = replacement_descriptions[
            'preprocessing_output_node']
        assert preprocessing_output_node_id in graph.nodes, 'The node with name "{}" is not found in the graph. This ' \
                                                            'node should provide scaled image output and is specified' \
                                                            ' in the json file.'.format(preprocessing_output_node_id)
        preprocessing_output_node = Node(graph, preprocessing_output_node_id)
        preprocessing_output_node.out_port(0).disconnect()

        convolution_nodes = [
            n for n in graph.pseudo_topological_sort()
            if n.soft_get('type') == 'Convolution'
        ]
        convolution_nodes[0].in_port(0).get_connection().set_source(
            preprocessing_output_node.out_port(0))

        # create prior boxes (anchors) generator
        aspect_ratios = replacement_descriptions['aspect_ratios']
        assert len(aspect_ratios) % 2 == 0
        aspect_ratios = list(zip(aspect_ratios[::2], aspect_ratios[1::2]))
        priors_generator = self.AnchorGenerator(
            min_level=int(replacement_descriptions['min_level']),
            aspect_ratios=aspect_ratios,
            num_scales=int(replacement_descriptions['num_scales']),
            anchor_scale=replacement_descriptions['anchor_scale'])

        prior_boxes = []
        for i in range(100):
            inp_name = 'box_net/box-predict{}/BiasAdd'.format('_%d' %
                                                              i if i else '')
            if inp_name not in graph:
                break
            widths, heights = priors_generator.get(i)
            prior_box_op = PriorBoxClusteredOp(
                graph, {
                    'width': mo_array(widths),
                    'height': mo_array(heights),
                    'clip': 0,
                    'flip': 0,
                    'variance': replacement_descriptions['variance'],
                    'offset': 0.5
                })
            prior_boxes.append(
                prior_box_op.create_node(
                    [Node(graph, inp_name), parameter_node]))

        # concatenate prior box operations
        concat_prior_boxes = Concat(graph, {'axis': -1}).create_node()
        for idx, node in enumerate(prior_boxes):
            concat_prior_boxes.add_input_port(idx)
            concat_prior_boxes.in_port(idx).connect(node.out_port(0))

        conf = Sigmoid(graph, dict(name='concat/sigmoid')).create_node(
            [Node(graph, 'concat')])
        reshape_size_node = Const(graph, {
            'value': int64_array([0, -1])
        }).create_node([])
        logits = Reshape(graph, dict(name=conf.name + '/Flatten')).create_node(
            [conf, reshape_size_node])
        deltas = Reshape(graph, dict(name='concat_1/Flatten')).create_node(
            [Node(graph, 'concat_1'), reshape_size_node])

        # reorder the box-prediction convolution weights from yxYX to xyXY (the convolutions share weights and bias)
        weights = Node(graph, 'box_net/box-predict/pointwise_kernel')
        weights.value = weights.value.reshape(-1, 4)[:, [1, 0, 3, 2]].reshape(
            weights.shape)
        bias = Node(graph, 'box_net/box-predict/bias')
        bias.value = bias.value.reshape(-1,
                                        4)[:, [1, 0, 3, 2]].reshape(bias.shape)

        detection_output_node = DetectionOutput(
            graph,
            dict(
                name='detections',
                share_location=1,
                background_label_id=int(
                    replacement_descriptions['num_classes']) + 1,
                nms_threshold=replacement_descriptions['nms_threshold'],
                confidence_threshold=replacement_descriptions[
                    'confidence_threshold'],
                top_k=100,
                keep_top_k=100,
                code_type='caffe.PriorBoxParameter.CENTER_SIZE',
            )).create_node([deltas, logits, concat_prior_boxes])

        output_op = Result(graph, dict(name='output'))
        output_op.create_node([detection_output_node])
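The weight and bias shuffle near the end swaps the box-regression outputs from yxYX to xyXY ordering; the same index trick on a plain array:

import numpy as np

# reinterpret a flat prediction vector as rows of (y, x, Y, X) and
# reorder each row to (x, y, X, Y), as done for the convolution weights above
flat = np.arange(8, dtype=np.float32)  # two boxes
swapped = flat.reshape(-1, 4)[:, [1, 0, 3, 2]].reshape(flat.shape)
# -> [1., 0., 3., 2., 5., 4., 7., 6.]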
Example #19
    def type_infer(node: Node):
        if node.has_valid('dst_type'):
            node.out_port(0).set_data_type(node.dst_type)
        else:
            node.out_port(0).set_data_type(
                data_type_str_to_np(node.graph.graph['cmd_params'].data_type))