Code example #1
def replace_resize(graph: Graph, resize: Node):
    log.debug("Converting of ONNX Resize-11 to Interpolate-4 "
              "is triggered for node {}.".format(resize.soft_get('name', resize.id)))

    input_shape = resize.in_port(0).data.get_shape()
    input_rank = len(input_shape)
    resize_name = resize.soft_get('name', resize.id)
    if input_rank not in {4, 5}:
        log.warning('The input shape is not 4D or 5D for op with name {}'.format(resize_name))
        return

    num_of_inputs = len([port for port in resize.in_ports().values() if not port.disconnected()])
    assert num_of_inputs in {3, 4}, \
        "Number of inputs of ONNXResize (with name {}) should be equal to 3 or 4".format(resize_name)

    assert resize.soft_get('coordinate_transformation_mode') != 'tf_crop_and_resize', \
        'Mode tf_crop_and_resize is not supported for op {} with name {}'.format(resize.op, resize_name)

    layout = graph.graph['layout']

    if input_rank == 4:
        begin_dim = get_height_dim(layout, input_rank)
        end_dim = get_width_dim(layout, input_rank) + 1
    else:
        begin_dim = get_depth_dim(layout, input_rank)
        end_dim = get_width_dim(layout, input_rank) + 1

    sizes_ss = create_op_with_const_inputs(graph, StridedSlice,
                                           {1: int64_array([begin_dim]),
                                            2: int64_array([end_dim]),
                                            3: int64_array([1])},
                                           {'name': resize_name + '/StridedSlice_sizes',
                                            'begin_mask': int64_array([1]),
                                            'end_mask': int64_array([1]),
                                            'new_axis_mask': int64_array([0]),
                                            'shrink_axis_mask': int64_array([0]),
                                            'ellipsis_mask': int64_array([0])})
    scales_ss = create_op_with_const_inputs(graph, StridedSlice,
                                            {1: int64_array([begin_dim]),
                                             2: int64_array([end_dim]),
                                             3: int64_array([1])},
                                            {'name': resize_name + '/StridedSlice_scales',
                                             'begin_mask': int64_array([1]),
                                             'end_mask': int64_array([1]),
                                             'new_axis_mask': int64_array([0]),
                                             'shrink_axis_mask': int64_array([0]),
                                             'ellipsis_mask': int64_array([0])})
    axes_node = Const(graph,
                      {'name': resize_name + '/axis',
                       'value': int64_array(np.arange(begin_dim, end_dim))}).create_node()

    shape_calculation_mode = 'scales' if num_of_inputs == 3 else 'sizes'

    interpolate_node = Interpolate(graph, {'version': 'opset4',
                                           'mode': convert_mode(resize.mode),
                                           'coordinate_transformation_mode': resize.coordinate_transformation_mode,
                                           'cube_coeff': resize.cube_coeff,
                                           'nearest_mode': resize.nearest_mode,
                                           'pads_begin': int64_array([0]),
                                           'pads_end': int64_array([0]),
                                           'antialias': 0,
                                           'shape_calculation_mode': shape_calculation_mode,
                                           'in_ports_count': 4}).create_node()

    axes_node.out_port(0).connect(interpolate_node.in_port(3))
    shape_of = Shape(graph, {'name': resize_name + '/ShapeOf'}).create_node()

    add_node = create_op_with_const_inputs(graph, Add,
                                           {1: float_array([1.0e-5])},
                                           {'name': resize_name + '/Add'})

    input_data_type = data_type_str_to_np(graph.graph['cmd_params'].data_type)

    if num_of_inputs == 3:
        cast_shape_to_float = Cast(graph, {'dst_type': input_data_type}).create_node()
        mul_node = Mul(graph, {'name': resize_name + '/Mul'}).create_node()
        shape_of.out_port(0).connect(cast_shape_to_float.in_port(0))
        cast_shape_to_float.out_port(0).connect(mul_node.in_port(0))
        cast_add_result_to_int = Cast(graph, {'dst_type': np.int64}).create_node()
        floor_node = Floor(graph, {'name': resize_name + '/Floor'}).create_node()
        mul_node.out_port(0).connect(add_node.in_port(0))
        add_node.out_port(0).connect(floor_node.in_port(0))
        floor_node.out_port(0).connect(cast_add_result_to_int.in_port(0))
        cast_add_result_to_int.out_port(0).connect(sizes_ss.in_port(0))
        sizes_ss.out_port(0).connect(interpolate_node.in_port(1))
        scales_ss.out_port(0).connect(interpolate_node.in_port(2))

        connection_of_resize_input = resize.in_port(0).get_connection()
        connection_of_resize_input.set_destination(interpolate_node.in_port(0))

        connection_of_scales = resize.in_port(2).get_connection()
        connection_of_scales.set_destination(scales_ss.in_port(0))

        connection_of_resize_input.get_source().connect(shape_of.in_port(0))
        connection_of_scales.get_source().connect(mul_node.in_port(1))
    else:
        cast_shape_to_float = Cast(graph, {'dst_type': input_data_type}).create_node()
        cast_sizes_to_float = Cast(graph, {'dst_type': input_data_type}).create_node()
        div_node = Div(graph, {'name': resize_name + '/Div'}).create_node()
        cast_sizes_to_float.out_port(0).connect(div_node.in_port(0))
        cast_shape_to_float.out_port(0).connect(div_node.in_port(1))
        shape_of.out_port(0).connect(cast_shape_to_float.in_port(0))
        div_node.out_port(0).connect(add_node.in_port(0))
        add_node.out_port(0).connect(scales_ss.in_port(0))
        scales_ss.out_port(0).connect(interpolate_node.in_port(2))
        sizes_ss.out_port(0).connect(interpolate_node.in_port(1))

        connection_of_resize_input = resize.in_port(0).get_connection()
        connection_of_resize_input.set_destination(interpolate_node.in_port(0))

        connection_of_sizes = resize.in_port(3).get_connection()
        connection_of_sizes.set_destination(sizes_ss.in_port(0))

        connection_of_resize_input.get_source().connect(shape_of.in_port(0))
        connection_of_sizes.get_source().connect(cast_sizes_to_float.in_port(0))

    rename_nodes([(resize, resize_name + '/delete'), (interpolate_node, resize_name)])
    resize.out_port(0).get_connection().set_source(interpolate_node.out_port(0))
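In the three-input case the Mul/Add/Floor chain computes the output sizes from the input shape and scales before the spatial slice is taken. A minimal NumPy sketch of that arithmetic, with illustrative values that are not from the source:

import numpy as np

# sizes = floor(input_shape * scales + 1e-5); StridedSlice then keeps only
# the spatial part [begin_dim:end_dim] before feeding Interpolate.
input_shape = np.array([1, 3, 10, 20], dtype=np.int64)     # NCHW
scales = np.array([1.0, 1.0, 2.0, 0.5], dtype=np.float32)
sizes = np.floor(input_shape * scales + 1.0e-5).astype(np.int64)
print(sizes[2:4], scales[2:4])  # [20 10] and [2.  0.5] go to ports 1 and 2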
Code example #2
 def test_trailing_one(self):
     self.assertListEqual(list(match_shapes(int64_array([1, 32, 64, 60, 1]), int64_array([8, 4, 64, 3, 20]))), [1, 8, 4, 64, 3, 20, 1])
Code example #3
 def test_many_to_one_with_trailing(self):
     self.assertListEqual(list(match_shapes(int64_array([2, 3, 4, 5]), int64_array([120, 1, 1]))), [2, 3, 4, 5, 1, 1])
Code example #4
 def test_2(self):
     self.assertListEqual(list(split_input_permute_dimension(0, int64_array([0, 1, 3, 2]))), [0, 1, 2, 4, 3])
Code example #5
 def test_basic(self):
     self.assertListEqual(list(match_shapes(int64_array([1, 32, 64, 60]), int64_array([8, 4, 64, 3, 20]))), [1, 8, 4, 64, 3, 20])
Code example #6
 def test_2(self):
     self.assertListEqual(list(split_dims_indices(int64_array([8, 4, 64, 3, 20]), int64_array([1, 8, 4, 64, 3, 20, 1, 1]))), [0, 4, 4])
Code example #7
 def test_6(self):
     self.assertListEqual(list(split_dims_indices(int64_array([1, 20, 64]), int64_array([1, 1, 20, 64]))), [1])
Code example #8
    def replace_pattern(graph: Graph, match: dict):
        node = match['op']

        if node.name == 'iteration_number_out':
            return

        # calculate the length of context at which the state of inference becomes meaningful
        inputs = graph.get_op_nodes(op='Parameter')

        in_nodes = []
        for inp in inputs:
            for ins in inp.out_port(0).get_destinations():
                in_nodes.append(ins.node.name)

        context_len = 1
        try:
            subgraph = invert_sub_graph_between_nodes(
                graph, [node.in_port(0).get_source().node.name], in_nodes)
        except Error:
            return

        for n in subgraph:
            n_node = Node(graph, n)
            if n_node.kind == 'op' and n_node.op == 'Splice':
                context_len += len(n_node.context) - 1

        if context_len == 1:
            return

        in_node_port = node.in_port(0).get_source()
        in_node_shape = node.in_port(0).data.get_shape()
        node.in_port(0).disconnect()

        # add Select before saving state to avoid saving garbage
        select_node = Select(graph, {
            'name': 'select_' + node.name
        }).create_node()
        zero_else = Const(graph, {
            'name': 'zero_else',
            'value': np.zeros(in_node_shape)
        }).create_node()
        select_node.in_port(1).connect(in_node_port)
        select_node.in_port(2).connect(zero_else.out_port(0))

        # check if we have already appropriate iteration counter
        existing_counters = find_pattern_matches(
            graph,
            nodes=[('mem_in',
                    dict(op='Memory',
                         index=1,
                         shape=int64_array([context_len]))),
                   ('mem_in_data', dict()),
                   ('crop_mem_in',
                    dict(op='Crop',
                         axis=int64_array([1]),
                         offset=int64_array([1]),
                         dim=int64_array([context_len - 1]))),
                   ('crop_mem_in_data', dict()),
                   ('concat', dict(op='Concat', axis=1)),
                   ('concat_data', dict()), ('const_1', dict(op='Const')),
                   ('const_1_data', dict()),
                   ('mem_out',
                    dict(op='Memory',
                         index=0,
                         shape=int64_array([context_len]))),
                   ('crop_out',
                    dict(op='Crop',
                         axis=int64_array([1]),
                         offset=int64_array([0]),
                         dim=int64_array([1]))), ('crop_out_data', dict()),
                   ('select', dict(op='Select'))],
            edges=[('mem_in', 'mem_in_data'), ('mem_in_data', 'crop_mem_in'),
                   ('crop_mem_in', 'crop_mem_in_data'),
                   ('crop_mem_in_data', 'concat', {
                       'in': 0
                   }), ('const_1', 'const_1_data'),
                   ('const_1_data', 'concat', {
                       'in': 1
                   }), ('concat', 'concat_data'), ('concat_data', 'mem_out'),
                   ('concat_data', 'crop_out'), ('crop_out', 'crop_out_data'),
                   ('crop_out_data', 'select')])
        counter_match = next(existing_counters, None)
        if counter_match is not None:
            input_port = Node(graph, inverse_dict(counter_match)['crop_out']).out_port(0)
        else:
            mem_out = Memory(
                graph, {
                    'name': 'iteration_number',
                    'size': 2,
                    'index': 1,
                    'id': 'iteration_' + node.name,
                    'shape': int64_array([context_len]),
                    'dst_type': np.int32
                }).create_node()
            cut_first = Crop(
                graph, {
                    'name': 'cut_first',
                    'axis': int64_array([1]),
                    'offset': int64_array([1]),
                    'dim': int64_array([context_len - 1])
                }).create_node()
            cut_first.in_port(0).connect(mem_out.out_port(0))
            ones = Const(graph, {
                'name': 'ones',
                'value': np.ones([1, 1], dtype=np.int32)
            }).create_node()
            concat = Concat(graph, {
                'name': 'concat_ones',
                'in_ports_count': 2,
                'axis': 1
            }).create_node()
            concat.in_port(0).connect(cut_first.out_port(0))
            concat.in_port(1).connect(ones.out_port(0))
            mem_in = Memory(
                graph, {
                    'name': 'iteration_number_out',
                    'size': 2,
                    'index': 0,
                    'id': 'iteration_' + node.name,
                    'shape': int64_array([context_len])
                }).create_node()
            mem_in.in_port(0).connect(concat.out_port(0))
            res = Result(graph, {}).create_node()
            mem_in.out_port(0).connect(res.in_port(0))
            cut_last = Crop(
                graph, {
                    'name': 'cut_last',
                    'axis': int64_array([1]),
                    'offset': int64_array([0]),
                    'dim': int64_array([1])
                }).create_node()
            cut_last.in_port(0).connect(concat.out_port(0))
            input_port = cut_last.out_port(0)

        select_node.in_port(0).connect(input_port)
        select_node.out_port(0).connect(node.in_port(0))
        select_node.out_port(0).data.set_shape(in_node_shape)
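The Memory/Crop/Concat cycle above implements a step counter: a buffer of length context_len shifts in a one per inference step, and its first element becomes 1 only after context_len steps, at which point the Select starts passing real data instead of zeros. A standalone NumPy simulation of that behavior (a sketch for illustration, not part of the transformation):

import numpy as np

context_len = 5
state = np.zeros((1, context_len), dtype=np.int32)  # initial Memory value
for step in range(1, 8):
    # cut_first drops the oldest element, concat_ones appends a fresh 1
    state = np.concatenate([state[:, 1:], np.ones((1, 1), np.int32)], axis=1)
    ready = int(state[0, 0])      # cut_last: the condition fed to Select
    print(step, state[0], ready)  # ready flips to 1 once step == context_len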
Code example #9
    def replace_pattern(self, graph: Graph, match: Dict[str, Node]):
        node = match['crop']
        assert node.has_valid('axis')
        node_axis = self.list_to_ndarray(node.axis)

        in_shape = node.in_port(0).data.get_shape()
        shape_rank = in_shape.size
        axis_mask = int64_array(
            [1 if i in node_axis else 0 for i in range(shape_rank)])
        begin_mask = axis_mask.copy()
        end_mask = axis_mask.copy()

        ss = StridedSlice(
            graph, {
                'name': node.soft_get('name', node.id) + '/strided_slice',
                'begin_mask': begin_mask,
                'end_mask': end_mask,
                'new_axis_mask': np.zeros(len(end_mask)),
                'shrink_axis_mask': np.zeros(len(end_mask)),
                'ellipsis_mask': np.zeros(len(end_mask))
            }).create_node()

        if len(node.in_nodes()) == 2 and node.has_valid('offset'):
            # Crop Type 1
            begin = Const(graph, {
                'value': self.mask_normalizer(shape_rank, node_axis, node.offset),
                'name': ss.name + '/begin'
            }).create_node()
            shape = Shape(graph, {'name': ss.name + '/shape_of_crop'}).create_node()
            end = Add(graph, {'name': ss.name + '/end'}).create_node()
            node.in_port(1).get_connection().get_source().connect(shape.in_port(0))
            node.in_port(1).disconnect()
            shape.out_port(0).connect(end.in_port(0))
            begin.out_port(0).connect(end.in_port(1))
        elif node.has_valid('dim') and node.has_valid('offset'):
            # Crop Type 2
            node_dim = self.list_to_ndarray(node.dim)
            node_offset = self.list_to_ndarray(node.offset)
            assert node_dim.size == node_offset.size == node_axis.size

            begin = Const(graph, {
                'value': self.mask_normalizer(shape_rank, node_axis, node_offset),
                'name': ss.name + '/begin'
            }).create_node()
            end_values = np.array([node_offset[i] + node_dim[i] for i in range(len(node_dim))])
            end = Const(graph, {
                'value': self.mask_normalizer(shape_rank, node_axis, end_values),
                'name': ss.name + '/end'
            }).create_node()
        elif node.has_valid('crop_begin') and node.has_valid('crop_end'):
            # Crop Type 3
            node_crop_begin = self.list_to_ndarray(node.crop_begin)
            node_crop_end = self.list_to_ndarray(node.crop_end)
            assert len(node_crop_begin) == len(node_crop_end) == len(node_axis)

            begin = Const(graph, {
                'value': self.mask_normalizer(shape_rank, node_axis, node_crop_begin),
                'name': ss.name + '/begin'
            }).create_node()
            shape = Shape(graph, {'name': ss.name + '/shape'}).create_node()

            end = Add(graph, {'name': ss.name + '/end'}).create_node()
            const = Const(graph, {
                'value': -1 * self.mask_normalizer(shape_rank, node_axis, node_crop_end),
                'name': ss.name + '/const'
            }).create_node()

            node.in_port(0).get_connection().get_source().connect(shape.in_port(0))
            shape.out_port(0).connect(end.in_port(0))
            const.out_port(0).connect(end.in_port(1))

        else:
            raise Exception("Unknown type of Crop")

        source = node.in_port(0).get_connection().get_source()

        stride = Const(
            graph, {
                'value': np.ones(shape_rank, dtype=np.int64),
                'name': ss.name + '/stride'
            }).create_node()

        source.connect(ss.in_port(0))
        begin.out_port(0).connect(ss.in_port(1))
        end.out_port(0).connect(ss.in_port(2))
        stride.out_port(0).connect(ss.in_port(3))

        node.in_port(0).disconnect()
        node.out_port(0).get_connection().set_source(ss.out_port(0))

        ss['force_precision_in_ports'] = {1: 'int64', 2: 'int64', 3: 'int64'}
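For "Crop Type 2" the StridedSlice bounds reduce to offset and offset + dim, spread over the full rank (the masks make the untouched axes irrelevant). A rough NumPy sketch of that computation under assumed values, standing in for mask_normalizer:

import numpy as np

# rank-4 input, crop on axes [2, 3] with offset [1, 2] and dim [4, 5]
axis, offset, dim = np.array([2, 3]), np.array([1, 2]), np.array([4, 5])
begin = np.zeros(4, dtype=np.int64)
begin[axis] = offset          # -> [0 0 1 2]
end = np.zeros(4, dtype=np.int64)
end[axis] = offset + dim      # -> [0 0 5 7]
print(begin, end)             # begin_mask/end_mask disable the other axes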
Code example #10
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

import numpy as np
import unittest

from extensions.front.mxnet.gluoncv_ssd_anchors import SsdAnchorsReplacer
from mo.front.common.partial_infer.utils import int64_array
from mo.utils.ir_engine.compare_graphs import compare_graphs
from mo.utils.unittest.graph import build_graph

nodes_attributes = {
    'slice_like': {'kind': 'op', 'op': 'slice_like'},
    'model_reshape0': {'kind': 'op', 'op': 'Reshape'},
    'model_reshape0_const': {'kind': 'op', 'op': 'Const', 'value': int64_array([1, -1, 4])},
    'model_reshape1': {'kind': 'op', 'op': 'Reshape'},
    'model_reshape1_const': {'kind': 'op', 'op': 'Const', 'value': int64_array([1, -1, 4])},
    'model_reshape2': {'kind': 'op', 'op': 'Reshape'},
    'model_reshape2_const': {'kind': 'op', 'op': 'Const', 'value': int64_array([1, -1])},
    'reshape0': {'kind': 'op', 'op': 'Reshape'},
    'reshape0_const': {'kind': 'op', 'op': 'Const', 'value': int64_array([1, -1])},
    'concat': {'kind': 'op', 'op': 'Concat'},
    'reshape1': {'kind': 'op', 'op': 'Reshape'},
    'reshape1_const': {'kind': 'op', 'op': 'Const', 'value': int64_array([1, 2, -1])},
    'split': {'kind': 'op', 'op': 'Split', 'num_splits': 2},
    'split_const': {'kind': 'op', 'op': 'Const', 'value': int64_array(1)},
    'reshape2': {'kind': 'op', 'op': 'Reshape'},
    'reshape2_const': {'kind': 'op', 'op': 'Const', 'value': int64_array([-1, 4])},
    'value': {'kind': 'op', 'op': 'Split', 'num_splits': 4},
    'value_const': {'kind': 'op', 'op': 'Const', 'value': int64_array(1)},
    'div_1': {'kind': 'op', 'op': 'Div'},
Code example #11
    def replace_sub_graph(self, graph: Graph, match: Dict[str, Node]):
        node = match['op']
        name = node.name

        min_port_tuple = (node.in_port(1).get_source().node,
                          node.in_port(1).get_source().idx)
        max_port_tuple = (node.in_port(2).get_source().node,
                          node.in_port(2).get_source().idx)

        node.in_port(1).disconnect()
        node.in_port(2).disconnect()

        # make sure min < max
        min_less_max = Less(graph, {
            'name': name + '/if_min_less_max'
        }).create_node([min_port_tuple, max_port_tuple])
        minimum = Select(graph, {
            'name': name + '/minimum'
        }).create_node([min_less_max, min_port_tuple, max_port_tuple])
        maximum = Select(graph, {
            'name': name + '/maximum'
        }).create_node([min_less_max, max_port_tuple, min_port_tuple])

        # to create zero of limits data type, we multiply it by integer zero
        zero = create_op_node_with_second_input(graph,
                                                Mul,
                                                int64_array(0),
                                                {'name': name + '/zero'},
                                                input_node=minimum)

        # if 0 < min < max: min_adj = 0 and max_adj = max - min
        min_greater_zero = Greater(graph, {
            'name': name + '/if_minimum_greater_zero'
        }).create_node([minimum, zero])
        max_minus_min = Sub(graph, {
            'name': name + '/max_minus_min'
        }).create_node([maximum, minimum])
        minimum = Select(graph, {
            'name': name + '/first_adj_min'
        }).create_node([min_greater_zero, zero, minimum])
        maximum = Select(graph, {
            'name': name + '/first_adj_max'
        }).create_node([min_greater_zero, max_minus_min, maximum])

        # if min < max < 0: min_adj = min - max and max_adj = 0
        max_less_zero = Less(graph, {
            'name': name + '/if_max_less_zero'
        }).create_node([maximum, zero])
        min_minus_max = Sub(graph, {
            'name': name + '/min_minus_max'
        }).create_node([minimum, maximum])
        minimum = Select(graph, {
            'name': name + '/second_adj_min'
        }).create_node([max_less_zero, min_minus_max, minimum])
        maximum = Select(graph, {
            'name': name + '/second_adj_max'
        }).create_node([max_less_zero, zero, maximum])

        # scale = (max - min) / (2 ^ num_bits - 1),
        float_range = Sub(graph, {
            'name': name + '/float_range'
        }).create_node([maximum, minimum])
        quant_min_value, quant_max_value = int(node.narrow_range), 2 ** node.num_bits - 1
        int_range = Const(graph, dict(name=name + '/int_range',
                                      value=quant_max_value - quant_min_value)).create_node()
        scale = Div(graph, {
            'name': name + '/scale'
        }).create_node([float_range, int_range])
        # min_adj = scale * round(min / scale)
        descaled_min = Div(graph, {
            'name': name + '/descaled_min'
        }).create_node([minimum, scale])
        rounded_descaled_min = Round(graph, {
            'name': name + '/rounded_descaled_min'
        }).create_node([descaled_min])
        min_adj = Mul(graph, {
            'name': name + '/min_adj'
        }).create_node([scale, rounded_descaled_min])
        # max_adj = max + min_adj - min.
        adjustment = Sub(graph, {
            'name': name + '/limits_adjustment'
        }).create_node([min_adj, minimum])
        max_adj = Add(graph, {
            'name': name + '/max_adj'
        }).create_node([maximum, adjustment])

        # FakeQuantize operation has 5 inputs instead of 3 inputs in TensorFlow
        node.add_input_port(3, skip_if_exist=True)
        node.add_input_port(4, skip_if_exist=True)

        node.in_port(1).connect(min_adj.out_port(0))
        node.in_port(2).connect(max_adj.out_port(0))
        node.in_port(3).connect(min_adj.out_port(0))
        node.in_port(4).connect(max_adj.out_port(0))

        FakeQuantize.update_node_stat(node, {'levels': node['levels']})
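The Select/Sub/Div/Round chain mirrors TensorFlow's limit nudging: the [min, max] range is shifted so that zero falls exactly on the quantization grid. A small NumPy check of the same math, assuming num_bits=8 and narrow_range=False:

import numpy as np

min_, max_ = -0.3, 1.1
scale = (max_ - min_) / (2 ** 8 - 1)      # float_range / int_range
min_adj = scale * np.round(min_ / scale)  # min_adj = scale * round(min / scale)
max_adj = max_ + min_adj - min_           # max_adj = max + min_adj - min
print(scale, min_adj, max_adj)            # adjusted limits fed to FakeQuantize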
Code example #12
    def infer(node: Node):
        node_name = node.soft_get('name', node.id)
        connected_in_ports = [
            port for port in node.in_ports().values()
            if not port.disconnected()
        ]
        assert len(connected_in_ports) == 2, \
            "Incorrect number of inputs for {} node".format(node_name)

        data_shape = node.in_port(0).data.get_shape()
        data_value = node.in_port(0).data.get_value()
        indices_shape = node.in_port(1).data.get_shape()
        indices_value = node.in_port(1).data.get_value()

        assert node.has_valid('batch_dims'), \
            "Node {} must contain `batch_dims` attribute".format(node_name)
        batch_dims = node.batch_dims

        # check that the number of batch dimensions is less than the rank of both the data and indices tensors
        assert batch_dims < len(data_shape), "Number of batch dimensions must be less than a rank of data"
        assert batch_dims < len(indices_shape), "Number of batch dimensions must be less than a rank of indices"

        # check that batch dimensions of data and indices are the same
        for batch_dim in range(batch_dims):
            assert data_shape[batch_dim] == indices_shape[batch_dim], \
                "The dimension {} for data and indices tensors must be the same".format(batch_dim)

        # check ranks of input tensors
        assert len(data_shape) > 0, "Data must not be a scalar"
        assert len(indices_shape) > 0, "Indices must not be a scalar"
        assert (batch_dims + indices_shape[-1]) <= len(data_shape), \
            "Length of a tuple with indices must not exceed a rank of data tensor excluding batch dimensions"

        # compute output shape
        number_batches = [np.prod(data_shape[:batch_dims]).tolist()] if batch_dims > 0 else list()
        slice_shape = list(data_shape[(batch_dims + indices_shape[-1]):])
        output_shape = number_batches + list(indices_shape[batch_dims:-1]) + slice_shape
        node.out_port(0).data.set_shape(int64_array(output_shape))

        # compute the output value if all input values are defined
        if data_value is not None and indices_value is not None:
            output_value = np.zeros(output_shape, dtype=data_value.dtype)
            if batch_dims == 0:
                output_indices_range = int64_array(indices_shape[:-1])
                for output_index in np.ndindex(tuple(output_indices_range)):
                    indices_tuple = indices_value[output_index]
                    output_value[output_index] = data_value[tuple(indices_tuple.T)]
            else:
                batch_dims_range = int64_array(indices_shape[:batch_dims])
                for batch_indices in np.ndindex(tuple(batch_dims_range)):
                    # compute the batch index in the output tensor
                    batch_ind = 0
                    num_elements = 1
                    for ind in reversed(range(len(batch_dims_range))):
                        batch_ind += batch_indices[ind] * num_elements
                        num_elements *= batch_dims_range[ind]
                    output_indices_range = int64_array(indices_shape[batch_dims:-1])
                    for output_index in np.ndindex(tuple(output_indices_range)):
                        tmp_ind = batch_indices + output_index
                        indices_tuple = tuple(indices_value[tmp_ind].T)
                        full_input_ind = batch_indices + indices_tuple
                        full_output_ind = tuple(np.array([batch_ind]).T) + output_index
                        output_value[full_output_ind] = data_value[full_input_ind]
            node.out_port(0).data.set_value(output_value)
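The output shape rule can be checked by hand: the batch dimensions collapse into a single product, the outer indices dimensions follow, and the unindexed tail of data closes the shape. A worked example with assumed values:

import numpy as np

data_shape, indices_shape, batch_dims = [2, 3, 4], [2, 1], 1
number_batches = [int(np.prod(data_shape[:batch_dims]))]           # [2]
slice_shape = data_shape[batch_dims + indices_shape[-1]:]          # [4]
output_shape = number_batches + indices_shape[batch_dims:-1] + slice_shape
print(output_shape)  # [2, 4]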
Code example #13
File: perm_inputs.py  Project: xinfushe/openvino
def order(op_node: Node, port_info: str, input_port: int):
    """
        Performs layout change related transformation of the data on the in_port_idx port of op_node.
        Translates ordered shape indexes from one layout to another according to permutation

        Transformation inserts two Gather operations

        1 Gather reorders data to new layout according to direct permutation:
            actual data to translate as 1-port input indexes of Gather and
            permutation as 0-port input data
        2 Gather translates shape indexes from one layout to another according to inverse permutation
            permutation as 0-port input data and
            actual data to translate as 1-port input indexes of Gather

    For example:
        NHWC Transpose operation has 0-port input with data of shape [1, 2, 3, 4] and
        1-port input with new order indices [0, 1, 3, 2].

        After translating such operation to NCHW layout:
            0-port input shape = [1, 4, 2, 3]

        1 phase (after first Gather insertion):
            1-port input order indices = [0, 2, 1, 3]
        2 phase (after second Gather insertion):
            1-port input order indices = [0, 3, 2, 1]
    """
    graph = op_node.graph
    permutation_data_node = get_node_with_permutation(op_node, port_info)
    assert permutation_data_node.has_and_set('permutation'), 'Data node "{}" does not have permutation for node {}, ' \
                                                             'port_info "{}".'.format(permutation_data_node.id,
                                                                                      op_node.id, port_info)
    permutation = permutation_data_node.permutation
    if len(permutation.perm) == 0:
        return

    data_node = op_node.in_node(input_port)

    gather_name = op_node.soft_get('name', op_node.id) + '/OrderGather_1'
    const = Const(
        graph, {
            'value': permutation.perm,
            'name': gather_name + '/const',
            'need_shape_inference': True
        }).create_node_with_data()
    axis_const = Const(graph, {
        'value': int64_array(0),
        'name': gather_name + '/axis'
    }).create_node_with_data()
    gather = Gather(graph, {
        'name': gather_name,
        'need_shape_inference': True
    }).create_node_with_data([data_node, const, axis_const])

    gather_1_name = op_node.soft_get('name', op_node.id) + '/OrderGather_2'
    const_1 = Const(
        graph, {
            'value': permutation.inv,
            'name': gather_1_name + '/const',
            'need_shape_inference': True
        }).create_node_with_data()
    axis_const_1 = Const(graph, {
        'value': int64_array(0),
        'name': gather_1_name + '/axis'
    }).create_node_with_data()
    gather_1 = Gather(graph, {
        'name': gather_1_name,
        'need_shape_inference': True
    }).create_node_with_data([const_1, gather, axis_const_1])

    attrs = graph.get_edge_data(data_node.id, op_node.id, key=0).copy()
    graph.add_edge(gather_1.id, op_node.id, **attrs)
    graph.remove_edge(data_node.id, op_node.id)
    op_node['need_shape_inference'] = True
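The two Gather stages reduce to plain index arithmetic: the first takes the order values at the positions given by the direct permutation, the second maps every resulting index through the inverse permutation. A NumPy sketch reproducing the docstring's example (illustrative only):

import numpy as np

perm = np.array([0, 3, 1, 2])   # direct NHWC -> NCHW permutation
inv = np.array([0, 2, 3, 1])    # its inverse
order = np.array([0, 1, 3, 2])  # Transpose order in the source (NHWC) layout

stage1 = order[perm]   # first Gather  -> [0 2 1 3]
stage2 = inv[stage1]   # second Gather -> [0 3 2 1], the order in NCHW layout
print(stage1, stage2)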
Code example #14
    def create_normalize_l2_net_non_fusable(shape, axes, output_axes,
                                            ir_version, use_new_frontend):
        tf_net = TestNormalizeL2.build_tf_graph(shape, axes)

        reduced_shape = permute_nchw_to_nhwc(shape).copy()
        for axis in axes:
            reduced_shape[axis] = 1
        reduced_shape = permute_nchw_to_nhwc(reduced_shape)

        eltwise_shapes = int64_array(np.ones(len(shape)))
        nodes_attributes = {
            'input': {'kind': 'op', 'type': 'Parameter'},
            'input_data': {'shape': shape, 'kind': 'data'},
            'power_const_input_data': {'shape': int64_array([1]), 'kind': 'data', 'value': np.array([2.0])},
            'power_const': {'kind': 'op', 'type': 'Const'},
            'power_const_data': {'shape': eltwise_shapes, 'kind': 'data'},
            'power': {'kind': 'op', 'type': 'Power'},
            'power_data': {'shape': shape, 'kind': 'data'},
            'reduce': {'kind': 'op', 'type': 'ReduceSum', 'keep_dims': True},
            'reduce_data': {'shape': reduced_shape, 'kind': 'data'},
            'reduce_axes_input_data': {'shape': int64_array([len(axes)]), 'kind': 'data', 'value': int64_array(output_axes)},
            'reduce_axes': {'kind': 'op', 'type': 'Const'},
            'reduce_axes_data': {'shape': int64_array([len(axes)]), 'kind': 'data'},
            'maximum_const_input_data': {'shape': int64_array([1]), 'kind': 'data', 'value': np.array([1e-12])},
            'maximum_const': {'kind': 'op', 'type': 'Const'},
            'maximum_const_data': {'shape': eltwise_shapes, 'kind': 'data'},
            'maximum': {'kind': 'op', 'type': 'Maximum'},
            'maximum_data': {'shape': reduced_shape, 'kind': 'data'},
            'power2_const_input_data': {'shape': int64_array([1]), 'kind': 'data', 'value': np.array([-0.5])},
            'power2_const': {'kind': 'op', 'type': 'Const'},
            'power2_const_data': {'shape': eltwise_shapes, 'kind': 'data'},
            'power2': {'kind': 'op', 'type': 'Power'},
            'power2_data': {'shape': reduced_shape, 'kind': 'data'},
            'multiply': {'kind': 'op', 'type': 'Multiply'},
            'multiply_data': {'shape': shape, 'kind': 'data'},
            'result': {'kind': 'op', 'type': 'Result'},
        }

        ref_net = build_graph(nodes_attributes, [
            ('input', 'input_data'),
            ('input_data', 'power', {'out': 0, 'in': 0}),
            ('power_const_input_data', 'power_const'),
            ('power_const', 'power_const_data'),
            ('power_const_data', 'power', {'out': 0, 'in': 1}),
            ('power', 'power_data'),
            ('power_data', 'reduce', {'out': 0, 'in': 0}),
            ('reduce_axes_input_data', 'reduce_axes'),
            ('reduce_axes', 'reduce_axes_data'),
            ('reduce_axes_data', 'reduce', {'out': 0, 'in': 1}),
            ('reduce', 'reduce_data'),
            ('reduce_data', 'maximum', {'out': 0, 'in': 0}),
            ('maximum_const_input_data', 'maximum_const'),
            ('maximum_const', 'maximum_const_data'),
            ('maximum_const_data', 'maximum', {'out': 0, 'in': 1}),
            ('maximum', 'maximum_data'),
            ('maximum_data', 'power2', {'out': 0, 'in': 0}),
            ('power2_const_input_data', 'power2_const'),
            ('power2_const', 'power2_const_data'),
            ('power2_const_data', 'power2', {'out': 0, 'in': 1}),
            ('power2', 'power2_data'),
            ('input_data', 'multiply', {'out': 0, 'in': 0}),
            ('power2_data', 'multiply', {'out': 0, 'in': 1}),
            ('multiply', 'multiply_data'),
            ('multiply_data', 'result'),
        ])

        if use_new_frontend:
            ref_net = None
        return tf_net, ref_net
Code example #15
 def test_not_matchabale_shapes(self):
     self.assertIsNone(match_shapes(int64_array([5, 7]), int64_array([7, 5])))
Code example #16
    def find_and_replace_pattern(self, graph: Graph):
        reverse_nodes = graph.get_op_nodes(op='Reverse')
        for reverse in reverse_nodes:
            reverse_name = reverse.soft_get('name', reverse.id)

            assert reverse.in_port(1).disconnected()
            assert reverse.has_valid('axis')

            in_shape_rank = len(reverse.in_port(0).data.get_shape())
            # 1. Add new dimension as batch for rank = 1 to have batch != seq_axis
            if in_shape_rank == 1:
                unsq_node = create_op_node_with_second_input(
                    graph, Unsqueeze, int64_array([0]),
                    {'name': reverse_name + "/Unsqueeze"})
                reverse.in_port(0).get_source().connect(unsq_node.in_port(0))
                new_in = unsq_node.out_port(0)
                batch_axis = 0
                seq_axis = 1
            else:
                new_in = reverse.in_port(0).get_source()
                seq_axis = reverse['axis']
                batch_axis = 0 if seq_axis != 0 else 1

            # 2. For ReverseSequence 1-port input is seq_lengths => create this input node as
            # shape[seq_axis] broadcasted to shape[batch_axis]
            # in ---> ShapeOf ----> Gather(seq_axis)  ----> Broadcast----->
            #            |                                      |
            #            | -------> Gather(batch_axis)----------|
            shape_node = Shape(graph, {
                'name': reverse_name + "/Shape"
            }).create_node()
            new_in.connect(shape_node.in_port(0))
            seq_axis_node = node_to_get_shape_value_of_indices(
                shape_node, [seq_axis])
            batch_node = node_to_get_shape_value_of_indices(
                shape_node, [batch_axis])
            broadcast_node = Broadcast(graph, {
                'name': reverse_name + "/Broadcast"
            }).create_node()
            broadcast_node.in_port(0).connect(seq_axis_node.out_port(0))
            broadcast_node.in_port(1).connect(batch_node.out_port(0))

            # 3. Create new ReverseSequence node and reconnect all inputs/outputs to it
            rename_node(reverse, reverse_name + '/to_delete')
            reverse_sequence = ReverseSequence(
                graph, {
                    'name': reverse_name,
                    'seq_axis': seq_axis,
                    'batch_axis': batch_axis
                }).create_node()
            reverse_sequence.in_port(0).connect(new_in)
            reverse_sequence.in_port(1).connect(broadcast_node.out_port(0))

            # 4. remove added dimension for rank = 1
            if in_shape_rank == 1:
                rename_node(reverse_sequence,
                            reverse_name + '/ReverseSequence')
                squeeze_node = create_op_node_with_second_input(
                    graph, Squeeze, int64_array([0]), {'name': reverse_name})
                squeeze_node.in_port(0).connect(reverse_sequence.out_port(0))
                reverse.out_port(0).get_connection().set_source(
                    squeeze_node.out_port(0))
            else:
                reverse.out_port(0).get_connection().set_source(
                    reverse_sequence.out_port(0))

        # 5. Delete old Reverse node
        graph.remove_nodes_from([reverse.id for reverse in reverse_nodes])
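Step 2 is plain shape arithmetic: seq_lengths must contain shape[seq_axis] repeated once per batch element. A quick NumPy rendering of that subgraph with assumed values:

import numpy as np

shape = np.array([4, 10, 8])  # input shape with batch_axis=0, seq_axis=1
seq_lengths = np.broadcast_to(shape[1], shape[:1])  # Gather + Broadcast
print(seq_lengths)  # [10 10 10 10]: one length per batch element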
Code example #17
 def test_1(self):
     self.assertListEqual(list(split_dims_indices(int64_array([1, 32, 64, 60]), int64_array([1, 8, 4, 64, 3, 20]))), [1, 3])
Code example #18
    def test(self):
        nodes_attributes = {
            'switch_2_input': {'shape': int64_array([1, 3]), 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},
            'switches_input': {'shape': int64_array([1, 3]), 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},
            'switch_input_0': {'kind': 'op', 'op': 'SomeOp'},
            'switch_1_input_0': {'kind': 'op', 'op': 'SomeOp'},
            'switch': {'kind': 'op', 'op': 'Switch'},
            'switch_1': {'kind': 'op', 'op': 'Switch'},
            'switch_2': {'kind': 'op', 'op': 'Switch'},
            'some_op': {'kind': 'op', 'op': 'Max'},
            'identity': {'kind': 'op', 'op': 'Identity'},
            'merge': {'kind': 'op', 'op': 'Merge'},
            'select': {'kind': 'op', 'op': 'Select'},
            'last': {'type': None, 'value': None, 'kind': 'op', 'op': 'Result'},
        }

        # check two cases when switch_2 goes to 0-th and 1-st input port of the Merge
        for merge_input_port in range(2):
            graph = build_graph(nodes_attributes, [
                ('switch_2_input', 'switch_2', {'in': 0}),
                ('switch_input_0', 'switch', {'in': 0}),
                ('switch_1_input_0', 'switch_1', {'in': 0}),
                ('switches_input', 'switch', {'in': 1, 'out': 0}),
                ('switches_input', 'switch_1', {'in': 1, 'out': 0}),
                ('switches_input', 'switch_2', {'in': 1, 'out': 0}),
                ('switch', 'some_op', {'in': 0}),
                ('switch_1', 'some_op', {'in': 1}),
                ('some_op', 'identity', {'in': 0}),
                ('switch_2', 'merge', {'in': merge_input_port}),
                ('identity', 'merge', {'in': 1 - merge_input_port}),
                ('merge', 'last', {'in': 0}),
            ], nodes_with_edges_only=True)
            graph.stage = 'front'
            SwitchMergeOptimization().find_and_replace_pattern(graph)

            graph_ref = build_graph(nodes_attributes, [
                ('switches_input', 'select', {'in': 0}),
                ('switch_2_input', 'select', {'in': 1}),
                ('switch_input_0', 'some_op', {'in': 0}),
                ('switch_1_input_0', 'some_op', {'in': 1}),
                ('some_op', 'identity', {'in': 0}),
                ('identity', 'select', {'in': 2}),
                ('select', 'last', {'in': 0}),
            ], nodes_with_edges_only=True)

            (flag, resp) = compare_graphs(graph, graph_ref, 'last', check_op_attrs=True)
            self.assertTrue(flag, resp)
Code example #19
 def test_4(self):
     self.assertListEqual(list(split_dims_indices(int64_array([120, 1]), int64_array([2, 3, 4, 5, 1]))), [0, 0, 0])
Code example #20
    def find_and_replace_pattern(self, graph: Graph):
        shape_ops = graph.get_op_nodes(op='ShapeOf')

        # 1. Inserting Gather to N*C format on constant shape paths
        for shape in shape_ops:
            source_port = shape.in_port(0).get_source()
            if is_output_data_in_correct_layout(source_port.node, source_port.idx):
                continue  # data is already in N*C format

            name = shape.soft_get('name', shape.id)
            rank = source_port.data.get_shape().size

            if rank in [4, 5]:
                index = int64_array([0, *list(range(2, rank)), 1])
            else:
                continue  # data is layout independent

            gather = create_op_with_const_inputs(
                graph, op=Gather,
                port_value_dict={1: index, 2: int64_array(0)},
                op_attrs={'name': name + '/GatherNCHWtoNHWC'})
            shape.out_port(0).get_connection().insert_node(gather)

        # 2. Inserting Gather/Transpose to NC* format
        shape_sub_graph_end_points = self.find_shape_subgraph_endpoints(
            [shape.out_port(0) for shape in shape_ops])
        for in_port in shape_sub_graph_end_points:
            name = in_port.node.soft_get('name', in_port.node.id)
            shape = in_port.data.get_shape()

            should_switch_layout = not any([
                is_output_data_in_correct_layout(port.node, port.idx)
                for port in in_port.node.out_ports().values() if not port.disconnected()
            ])
            should_insert_gather = should_switch_layout and len(shape) == 1 and shape.item(0) in [4, 5]
            should_insert_transpose = should_switch_layout and len(shape) in [4, 5]

            if should_insert_gather:
                # turn input permutation off: it is performed by the following Gather insertion
                in_port.__setattr__('input_permutation', None)
                index = int64_array([0, shape.item(0) - 1, *range(1, shape.item(0) - 1)])
                gather = create_op_with_const_inputs(
                    graph, op=Gather,
                    port_value_dict={1: index, 2: int64_array(0)},
                    op_attrs={'name': name + '/GatherNHWCtoNCHW'})
                in_port.get_connection().insert_node(gather)
            elif should_insert_transpose:
                # turn input permutation off: it is performed by the following Transpose insertion
                in_port.__setattr__('input_permutation', None)
                order = int64_array([0, len(shape) - 1, *range(1, len(shape) - 1)])
                transpose = create_op_with_const_inputs(
                    graph, op=Transpose,
                    port_value_dict={1: order},
                    op_attrs={'name': name + '/TransposeNHWCtoNCHW',
                              'override_output_shape': True})
                mark_input_as_in_correct_layout(transpose, 0)
                mark_output_as_in_correct_layout(transpose, 0)
                in_port.get_connection().insert_node(transpose)
            else:
                continue  # data is layout independent
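The two index vectors built above are the standard rank-4/5 layout permutations. A quick check for rank 4 (values only, not the pass itself):

import numpy as np

rank = 4
nchw_to_nhwc = np.array([0, *range(2, rank), 1])             # [0 2 3 1]
nhwc_to_nchw = np.array([0, rank - 1, *range(1, rank - 1)])  # [0 3 1 2]
print(nchw_to_nhwc, nhwc_to_nchw)  # each is the inverse of the other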
Code example #21
 def test_1(self):
     self.assertListEqual(list(split_input_permute_dimension(1, int64_array([0, 2, 3, 1]))), [0, 3, 4, 1, 2])
Code example #22
    def extract(node):
        pads = onnx_attr(node, 'pads', 'ints', dst_type=int64_array)
        auto_pad = onnx_attr(node, 'auto_pad', 's', default=None, dst_type=get_onnx_autopad)

        if pads is not None:
            if len(pads) % 2 != 0:
                raise Error(
                    'ConvTranspose node {} specifies pads = {} which has an odd number of elements. '
                    'The model is not correct.', node.soft_get('name'), pads)
            pads = np.transpose(pads.reshape([2, -1]))

        final_pads = int64_array([[0, 0], [0, 0], *pads]) if pads is not None else None

        dilations = onnx_attr(node, 'dilations', 'ints', default=None)
        final_dilations = int64_array([1, 1, *dilations]) if dilations is not None else None

        strides = onnx_attr(node, 'strides', 'ints', default=None)
        final_strides = int64_array([1, 1, *strides]) if strides is not None else None

        kernel_shape = onnx_attr(node, 'kernel_shape', 'ints', dst_type=int64_array)

        if kernel_shape is None:
            raise Error(
                'ConvTranspose node {} doesn\'t have explicitly defined kernel_shape. It is not supported.',
                node.soft_get('name'))

        output_padding = onnx_attr(node, 'output_padding', 'ints', default=None)
        final_output_padding = int64_array([0, 0, *output_padding]) if output_padding is not None else None

        output_shape = onnx_attr(node, 'output_shape', 'ints', default=None, dst_type=int64_array)

        attrs = {
            'type': 'Deconvolution',
            'op': 'Deconv2D',
            'auto_pad': auto_pad,
            'bias_addable': True,
            'bias_term': None,  # will be deduced later; not really needed
            'pad': final_pads,
            'dilation': final_dilations,
            'output_spatial_shape': output_shape,
            'output_shape': None,
            'output_padding': final_output_padding,
            'stride': final_strides,
            'group': onnx_attr(node, 'group', 'i', default=1),
            'output': None,
            'spatial_dims': None,  # will be calculated in the infer function
            'channel_dims': int64_array([1]),
            'batch_dims': int64_array([0]),
            'layout': 'NCHW',
            'input_feature_channel': 0,
            'output_feature_channel': 1,
            'get_pad': ConvTransposeFrontExtractor.get_pad,
            'get_output_feature_dim': lambda node: node.kernel_shape[node.output_feature_channel] * node.group,
        }

        # update the attributes of the node
        Convolution.update_node_stat(node, attrs)
        return __class__.enabled
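ONNX stores pads as all begins followed by all ends; the reshape([2, -1]) plus transpose pairs them per spatial axis before the batch and channel rows are prepended. A standalone sketch with assumed values:

import numpy as np

pads = np.array([1, 2, 3, 4], dtype=np.int64)   # [x1_begin, x2_begin, x1_end, x2_end]
pads = np.transpose(pads.reshape([2, -1]))      # [[1 3] [2 4]]: (begin, end) per axis
final_pads = np.array([[0, 0], [0, 0], *pads])  # prepend the N and C rows
print(final_pads)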
Code example #23
 def test_5(self):
     self.assertListEqual(list(split_input_permute_dimension(3, int64_array([0, 1, 2, 3]))), [0, 1, 2, 3, 4])
Code example #24
    def replace_sub_graph(self, graph: Graph, match: dict):
        if not check_applicability(match):
            return

        reshape = match['reshape']
        div_name = match['division'].name

        input_shape = Shape(graph, dict(name=div_name + '/shape/MVN_T_')).create_node()
        shape_of_reshape = reshape.in_port(1).get_connection().get_source().node.value
        c1, c2 = shape_of_reshape[1], shape_of_reshape[2]
        c = c1 * c2

        new_reshape = create_op_node_with_second_input(
            graph, Reshape, int64_array([0, 0, 0, c1, c2]),
            dict(name=div_name + '/first_reshape/MVN_T_'))
        permute_order = int64_array([0, 1, 2, 4, 3])
        first_permute = create_op_node_with_second_input(
            graph, Transpose, permute_order,
            dict(name=div_name + '/first_permute/MVN_T_'), new_reshape)

        add = match['add']
        variance = match['variance']
        eps_port_num = 0 if add.in_port(0).get_connection().get_source().node.id != variance.id else 1
        eps = add.in_port(eps_port_num).get_connection().get_source().node
        mvn_node = create_op_with_const_inputs(
            graph, MVN, {1: int64_array([1, 2, 3])},
            dict(name=div_name + '/MVN/MVN_T_',
                 eps=eps.value,
                 normalize_variance=1,
                 eps_mode='inside_sqrt'))
        first_permute.out_port(0).connect(mvn_node.in_port(0))

        second_permute = create_op_node_with_second_input(
            graph, Transpose, permute_order,
            dict(name=div_name + '/second_permute/MVN_T_'), mvn_node)
        new_reshape2 = Reshape(graph, dict(name=div_name + '/second_reshape/MVN_T_')).create_node()
        second_permute.out_port(0).connect(new_reshape2.in_port(0))
        gamma_val = np.reshape(
            match['gamma_identity'].in_port(0).get_connection().get_source().node.value,
            int64_array([1, 1, 1, c]))
        new_mul = create_op_node_with_second_input(
            graph, Mul, gamma_val, dict(name=match['mul'].name + '/MVN_T_'), new_reshape2)
        beta_val = np.reshape(
            match['beta_identity'].in_port(0).get_connection().get_source().node.value,
            int64_array([1, 1, 1, c]))
        new_add2 = create_op_node_with_second_input(
            graph, Add, beta_val, dict(name=match['add2'].name + '/MVN_T_'), new_mul)

        transpose_connection = match['transpose'].in_port(0).get_connection()
        before_transpose = transpose_connection.get_source().node
        transpose_connection.set_destination(new_reshape.in_port(0))
        input_shape.out_port(0).connect(new_reshape2.in_port(1))
        before_transpose.out_port(0).connect(input_shape.in_port(0))
        match['transpose2'].out_port(0).get_connection().set_source(new_add2.out_port(0))
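The shape juggling splits the channel dimension C into c1 x c2, applies MVN over axes [1, 2, 3], and then restores the original layout. A short sketch of the shape bookkeeping with assumed values:

import numpy as np

n, h, w, c1, c2 = 1, 4, 4, 2, 8
c = c1 * c2                              # channel count restored for gamma/beta
x = np.zeros((n, h, w, c))
x5 = x.reshape(n, h, w, c1, c2)          # first_reshape: pattern [0, 0, 0, c1, c2]
x5t = np.transpose(x5, (0, 1, 2, 4, 3))  # first_permute: order [0, 1, 2, 4, 3]
print(x5t.shape)                         # (1, 4, 4, 8, 2) is what MVN sees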
Code example #25
 def test_ones_in_the_middle(self):
     self.assertListEqual(list(match_shapes(int64_array([32, 1, 2, 3, 1, 8]), int64_array([4, 2, 1, 4, 6, 1, 1, 8]))), [4, 2, 1, 4, 1, 2, 3, 1, 1, 8])
Code example #26
 def test_intersection_of_input_output_dimensions(self):  # is this test correct? Looks like yes...
     self.assertListEqual(list(match_shapes(int64_array([10, 20, 7]), int64_array([5, 4, 1, 70]))), [5, 2, 2, 1, 10, 7])
Code example #27
 def test_many_to_one(self):
     self.assertListEqual(list(match_shapes(int64_array([2, 3, 4, 5]), int64_array([120]))), [2, 3, 4, 5])
Code example #28
 def test_trailing_ones(self):
     self.assertListEqual(list(match_shapes(int64_array([1, 1, 10]), int64_array([1, 5, 1, 1, 2, 1]))), [1, 1, 5, 1, 1, 2, 1])
Code example #29
 def test_equal_shapes(self):
     self.assertListEqual(list(match_shapes(int64_array([2, 3, 4, 5]), int64_array([2, 3, 4, 5]))), [2, 3, 4, 5])
Code example #30
# SPDX-License-Identifier: Apache-2.0

import unittest

import numpy as np

from mo.front.common.partial_infer.utils import int64_array
from mo.middle.passes.convert_data_type import convert_blobs, SUPPORTED_DATA_TYPES
from mo.utils.error import Error
from mo.utils.unittest.graph import build_graph

nodes_attributes = {
    'data_node': {
        'kind': 'data',
        'value': None,
        'shape': int64_array([5])
    },
    'op_node': {
        'kind': 'op',
        'op': 'Result'
    }
}


class TestConvertBlob(unittest.TestCase):
    def test_convert_blob_to_fp32_from_fp64(self):
        graph = build_graph(nodes_attributes,
                            [('data_node', 'op_node', {'bin': 1})],
                            # the snippet breaks off here in the source; the remainder is an
                            # assumed completion: supply an FP64 blob and expect an FP32 result
                            {'data_node': {'value': np.array([4.0, 3.0, 2.0, 1.0, 0.0], dtype=np.float64)}})

        convert_blobs(graph, "FP32")
        result_value = graph.node['data_node']['value']
        self.assertEqual(result_value.dtype, np.float32)
        self.assertListEqual(list(result_value), [4, 3, 2, 1, 0])