Example 1
    def replace_op(self, graph: Graph, node: Node):
        in_node = node.in_node()
        out_nodes = list(node.out_nodes().values())
        graph.remove_edge(node.in_node().id, node.id)
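        # the producer node will be re-attached below through the new Add node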

        scalar_value_op = Const(
            graph,
            dict(value=node.scalar,
                 shape=node.scalar.shape,
                 symbol_dict={'name': node.id + '/const'}))
        add_op = Add(
            graph,
            dict(name=node.id + '/add_',
                 symbol_dict={'name': node.id + '/add_'}))
        add_node = add_op.create_node(
            inputs=[in_node, scalar_value_op.create_node()])

        for out_node in out_nodes:
            edge_attrs = graph.get_edge_data(node.id, out_node.id)[0]
            graph.remove_edge(node.id, out_node.id)
            graph.add_edges_from([(add_node.id, out_node.id, edge_attrs)])

        return [add_node.id]
Example 2
    def find_and_replace_pattern(self, graph: Graph):
        for expand_dims_node in graph.get_op_nodes(op='ExpandDims'):
            if len(expand_dims_node.in_nodes()) == 1:
                expand_axis = expand_dims_node.expand_axis
                if not isinstance(expand_axis, np.ndarray):
                    expand_axis = int64_array([expand_axis]).flatten()
                unsqueeze_node = Unsqueeze(graph, {
                    'name': expand_dims_node.id
                }).create_node()
                unsqueeze_dims_node = Const(graph, {
                    'name': expand_dims_node.id + '/Dims',
                    'value': expand_axis
                }).create_node()
                expand_dims_node.in_port(0).get_connection().set_destination(
                    unsqueeze_node.in_port(0))
                expand_dims_node.out_port(0).get_connection().set_source(
                    unsqueeze_node.out_port(0))
                unsqueeze_node.in_port(1).connect(
                    unsqueeze_dims_node.out_port(0))
            else:
                log.error(
                    'The ExpandDims node {} has more than 1 input'.format(
                        expand_dims_node.soft_get('name')))
Example 3
    def replace_sub_graph(self, graph: Graph, match: dict):
        ph = match['placeholder']
        if ph.name in graph.graph['freeze_placeholder']:
            name = ph.name
            if ph.has_and_set('data_type'):
                data_type = ph.data_type
            else:
                data_type = SUPPORTED_DATA_TYPES[graph.graph['cmd_params'].data_type][0]
            string_value = graph.graph['freeze_placeholder'][name]
            try:
                if data_type != np.bool:
                    value = np.array(string_value, dtype=data_type)
                elif data_type == np.bool and graph.graph['fw'] == 'tf':
                    from mo.front.tf.common import tf_data_type_cast
                    if isinstance(string_value, list):
                        casted_list = list()
                        for v in np.array(string_value):
                            casted_list.append(tf_data_type_cast[ph.data_type](v))
                        value = np.array(casted_list, dtype=data_type)
                    else:
                        value = tf_data_type_cast[ph.data_type](string_value)
                else:
                    raise Error("Can not cast value {} to {} data_type".format(string_value, data_type))
            except Exception:
                raise Error("Can not cast value {} to {} data_type".format(string_value, data_type))
            try:
                value = np.reshape(a=value, newshape=ph.shape)
            except Exception:
                raise Error("Can not reshape value {} to shape {}".format(value, ph.shape))
            out_edges = list(graph.out_edges(ph.id, data=True))
            new_node = Const(graph).create_node(
                attrs={'value': value, 'data_type': data_type, 'name': name + '/const_placeholder',
                       'shape': ph.shape})
            graph.erase_node(ph)
            graph.add_edges_from([(new_node.id, v, attrs) for u, v, attrs in out_edges])
            log.info("Placeholder node \"{}\" was replaced with Const node \"{}\" with value \"{}\"".format(
                name, new_node.name, value))
Example 4
    def replace_sub_graph(graph: Graph, match: dict):
        strided_slice_node = match['strided_slice']
        const_node = match['const']
        reshape_node = match['reshape']
        pack_node = match['pack']

        if not const_node.has_valid('value') or not is_value_is_constant(
                const_node.value, -1):
            log.debug(
                'The pattern does not correspond to flatten. The second reshape dimension is not -1. It is {}'
                .format(const_node.soft_get('value')))
            return
        if len(pack_node.in_nodes()) != 2:
            log.debug(
                'The pattern does not correspond to flatten. The "Pack" operation produces a tensor with {} items '
                'but should produce just 2.'.format(len(pack_node.in_nodes())))
            return

        expected_values = [
            0, 1, 1
        ]  # expected values to a StridedSlice to get the batch size
        for ind in range(3):
            if not strided_slice_node.in_node(ind + 1).has_valid('value') or \
                    not is_value_is_constant(strided_slice_node.in_node(ind + 1).value, expected_values[ind]):
                log.debug(
                    'The pattern does not correspond to flatten because of the input with index {}. The value is '
                    '"{}".'.format(ind, strided_slice_node.in_node(ind + 1).soft_get('value')))
                return

        reshape_node.in_port(1).disconnect()
        reshape_const_node = Const(graph, {
            'value': int64_array([0, -1])
        }).create_node()
        reshape_node.in_port(1).connect(reshape_const_node.out_port(0))
        reshape_node['special_zero'] = True
        log.debug('The node "{}" is actually a Flatten node'.format(
            reshape_node.soft_get('name')))
Example 5
    def test_replace_node_one_consumer(self):
        graph = build_graph(
            {
                'input_1': {
                    'type': 'Placeholder',
                    'value': None,
                    'kind': 'op'
                },
                'input_2': {
                    'type': 'Placeholder',
                    'value': None,
                    'kind': 'op'
                },
                'old': {
                    'type': 'Identity',
                    'value': None,
                    'kind': 'op',
                    'is_output': True
                },
                'output': {
                    'type': 'OpOutput',
                    'value': None,
                    'kind': 'op'
                },
            }, [('input_1', 'old'), ('input_2', 'old'), ('old', 'output')])

        new_node = Const(graph, {
            'name': 'new'
        }).create_node([Node(graph, 'input_1'),
                        Node(graph, 'input_2')])
        replace_node(Node(graph, 'old'), new_node)

        self.assertEqual(len(graph.nodes()), 4)
        self.assertEqual(len(graph.edges()), 3)
        self.assertEqual(new_node['is_output'], True)
        self.assertListEqual(list(graph.out_edges('new')), [('new', 'output')])
Example 6
    def insert_transpose(node, in_port_idx):
        graph = node.graph
        name = node.soft_get('name', node.id)

        assert in_port_idx in node.in_ports() and not node.in_port(in_port_idx).disconnected(), \
            'Input port with index {} should be connected for node {}'.format(in_port_idx, name)

        in_port = node.in_port(in_port_idx)
        port_shape = in_port.data.get_shape()
        assert port_shape is not None, \
            'Shape is unknown for input port with index {} for node {}'.format(in_port_idx, name)

        transpose_order = list(range(port_shape.size))
        transpose_order[-1], transpose_order[-2] = transpose_order[-2], transpose_order[-1]
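        # swapping the two innermost axes makes the Transpose act as a batched matrix transpose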

        order = Const(graph, {'value': int64_array(transpose_order)}).create_node()
        transpose = Transpose(graph, {'name': name + '/{}_port_transpose'.format(in_port_idx)}).create_node()

        port_source = in_port.get_source()
        in_port.get_connection().set_source(transpose.out_port(0))
        transpose.in_port(0).connect(port_source)
        transpose.in_port(1).connect(order.out_port(0))

        transpose['override_output_shape'] = True
Example 7
    def replace_sub_graph(self, graph: Graph, match: dict):
        slice_like = match['slice_like']
        const = slice_like.in_nodes()[0]
        crop_shape = slice_like.in_nodes()[1]

        variants_dict = {'mul_scalar1x': 0.1, 'mul_scalar2x': 0.2, 'mul_scalar1y': 0.1, 'mul_scalar2y': 0.2}
        for matches in find_pattern_matches(graph, self.variants_pattern['nodes'], self.variants_pattern['edges'], None, None):
            for k, v in matches.items():
                if v in variants_dict:
                    variants_dict[v] = Node(graph, k).in_nodes()[1].value[0]

        variants = np.array([variants_dict['mul_scalar1x'], variants_dict['mul_scalar1y'],
                             variants_dict['mul_scalar2x'], variants_dict['mul_scalar2y']] * int(const.value.size / 4)).reshape(const.value.shape)
        priorbox_variants = Const(graph, dict(value=variants, name=const.id + '/priorbox_variants')).create_node()
        variants_slice_like = SliceLike(graph, dict(axes=slice_like.axes,
                                                    name=slice_like.id + '/variants_slice_like')).create_node()
        variants_slice_like.in_port(0).connect(priorbox_variants.out_port(0))
        variants_slice_like.in_port(1).connect(crop_shape.out_port(0))

        concat = match['reshape3'].out_port(0).get_destination().node
        assert concat.op == 'Concat'
        concat_nodes_count = len(concat.in_nodes())
        concat.add_input_port(concat_nodes_count)
        concat.in_port(concat_nodes_count).get_connection().set_source(variants_slice_like.out_port(0))
Example 8
    def replace_pattern(graph: Graph, match: dict):
        node = match['fc']
        name = node.soft_get('name', node.id)
        add = match['add']

        if 2 in node.in_ports() and not node.in_port(2).disconnected():
            return

        out_size = node.soft_get('out-size', None)
        assert out_size is not None, \
            "FullyConnected should have `out-size` parameter, but it doesn't for node {}".format(name)

        tensor_port, value_port = get_tensor_in_port(add), get_value_in_port(add)
        if value_port is None:
            return

        shift_shape = value_port.data.get_shape()
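        # the Add can be fused as a bias only if its constant operand broadcasts to [1, out_size]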
        if not any([
                np.array_equal(int64_array(suitable_shape), shift_shape)
                for suitable_shape in [[1, out_size], [1, 1], [out_size], [1],
                                       []]
        ]):
            return

        broadcasted_value = np.broadcast_to(value_port.data.get_value(),
                                            [1, out_size])
        const = Const(graph, {
            'name': name + '/Bias_',
            'value': broadcasted_value
        }).create_node()

        node.add_input_port(2, skip_if_exist=True)
        const.out_port(0).connect(node.in_port(2))
        add.out_port(0).get_connection().set_source(tensor_port.get_source())
        node.in_port(2).bin = 'biases'
Example 9
    def unsqueeze_num_directions(graph: Graph, match: dict):
        """ Assuming considered LSTM/GRU/RNN node should has num_directions in output shape and add Reshape
            to match it.
        """

        rnn_layer = match['rnn_layer']
        # num_directions is at position 1 in the output shape, and at position 0 in the hidden and cell states
        # please refer to docs in this transform

        direction_dim = [1, 0, 0]  # index of dimension with direction index
        for i in rnn_layer.out_nodes():
            old_data_node = rnn_layer.out_node(i)
            old_shape = old_data_node.shape.copy()
            new_shape = np.delete(old_shape, direction_dim[i])

            data = Op._create_data_node(graph,
                                        name=rnn_layer.name +
                                        '/Out/{}/'.format(i),
                                        attrs={'shape': new_shape})
            graph.remove_edge(rnn_layer.id, old_data_node.id)
            graph.add_edge(rnn_layer.id, data.id, key=0, out=i)

            reshape = Reshape(graph, dict(dim=old_shape))

            reshape_dim_data = Const(
                graph, {
                    'name':
                    rnn_layer.name + '/SqueezeNumDirections/{}/Dim'.format(i),
                    'value':
                    old_shape
                }).create_node_with_data()
            reshape.create_node_with_data(
                [data, reshape_dim_data],
                dict(name=rnn_layer.name +
                     '/SqueezeNumDirections/{}'.format(i)),
                data_nodes=[old_data_node])
Example 10
    def replace_pattern(self, graph: Graph, match: dict):
        reverse = match['reverse']
        input_data_shape = reverse.in_node(0).shape

        if len(input_data_shape) == 1:
            raise Error(
                'Reverse operation name = {} isn\'t supported because of 1D input.'
                .format(reverse.name))

        assert reverse.in_port(1).disconnected()

        seq_axis = reverse['axis']
        # We need to choose arbitrary batch_axis != sequence_axis
        batch_axis = int(not seq_axis)
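        # int(not seq_axis) is 1 when seq_axis == 0 and 0 for any other axis,
        # so batch_axis always differs from seq_axis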

        # 1. For ReverseSequence the input on port 1 is seq_lengths => create this input node
        seq_lengths = np.ones(
            input_data_shape[batch_axis]) * input_data_shape[seq_axis]
        const = Const(graph, dict(value=seq_lengths)).create_node()

        # 2. Create new ReverseSequence node and reconnect all inputs/outputs to it
        reverse_sequence = ReverseSequence(
            graph, {
                'name': reverse.name + '/ReverseSequence/',
                'seq_axis': seq_axis,
                'batch_axis': batch_axis
            }).create_node()

        reverse.in_port(0).get_connection().set_destination(
            reverse_sequence.in_port(0))
        const.out_port(0).connect(reverse_sequence.in_port(1))
        reverse.out_port(0).get_connection().set_source(
            reverse_sequence.out_port(0))

        # 3. Delete old Reverse node
        graph.remove_node(reverse.id)
Example 11
    def swap_pad_and_unsqueeze(self, pad: Node, unsqueeze: Node):
        # insert additional items to the pads in the position specified by the Unsqueeze axis
        unsqueeze_axis = unsqueeze.in_port(1).data.get_value()
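        # Pad ports 1 and 2 carry pads_begin and pads_end; a zero entry is inserted
        # into both at the position of the new axis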
        for port_id in [1, 2]:
            current_value = pad.in_port(
                port_id).get_connection().data.get_value()
            new_value_node = Const(
                pad.graph, {
                    'value': np.insert(current_value, unsqueeze_axis.item(),
                                       0),
                    'override_output_shape': True
                }).create_node()
            pad.in_port(port_id).disconnect()
            pad.in_port(port_id).connect(new_value_node.out_port(0))

        # swap Pad and Unsqueeze layers
        unsqueeze.in_port(0).disconnect()
        pad.in_port(0).get_connection().set_destination(unsqueeze.in_port(0))
        unsqueeze.out_port(0).get_connection().set_source(pad.out_port(0))
        unsqueeze.out_port(0).connect(pad.in_port(0))

        # output shapes of Pad and Unsqueeze changed so need to recalculate them
        pad['override_output_shape'] = True
        unsqueeze['override_output_shape'] = True
Example 12
def resolve_shared_inputs(node: Node, port_ids_to_duplicate: List[int]):
    """
    Duplicates shared constants that are consumed by more than one node.
    If a constant is consumed by several ports of one node, no duplication is done.
    """
    graph = node.graph

    for port_id in port_ids_to_duplicate:
        dst_port_map = defaultdict(list)
        for dst in node.in_port(
                port_id).get_source().get_connection().get_destinations():
            dst_port_map[dst.node].append(dst.idx)
        del dst_port_map[node]
        value = node.in_port(port_id).data.get_value()
        if value is None:
            log.debug(
                'Can not duplicate due to no data for in_port {} of node {}'.
                format(port_id, node.name))
            continue
        for dst_node, idxs in dst_port_map.items():
            const = Const(graph, {'value': np.array(value)}).create_node()
            for idx in idxs:
                dst_node.in_port(idx).disconnect()
                const.out_port(0).connect(dst_node.in_port(idx))
            const.infer(const)
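
A minimal usage sketch (hypothetical names; assumes `conv` is a Node whose input port 1 reads a Const shared with other consumers):

    resolve_shared_inputs(node=conv, port_ids_to_duplicate=[1])
    # `conv` keeps the original Const; every other consumer now reads its own copy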
Example 13
    def replace_op(self, graph: Graph, node: Node):
        # save the original node name to use it in the new Pad op instance
        original_name = node.soft_get('name', node.id)
        rename_node(node, original_name + '/TBR')

        new_pad = Pad(graph, {'mode': node.soft_get('mode', None)}).create_node()
        rename_node(new_pad, original_name)

        node.in_port(0).get_connection().set_destination(new_pad.in_port(0))

        if node.soft_get('mode') == 'constant':
            # the input with fill value is an optional third input in ONNX
            if not node.in_port(2).disconnected():
                node.in_port(2).get_connection().set_destination(new_pad.in_port(3))
            else:
                new_pad.in_port(3).connect(Const(graph, {'value': 0.0}).create_node().out_port(0))

        # convert ONNX representation of the pads as [2 * N] to MO representation: [N] and [N]
        split_pads = create_op_with_const_inputs(graph, Split, {1: int64_array(0)}, {'num_splits': 2})
        node.in_port(1).get_connection().set_destination(split_pads.in_port(0))
        split_pads.out_port(0).connect(new_pad.in_port(1))
        split_pads.out_port(1).connect(new_pad.in_port(2))

        return [new_pad.id]
Example 14
    def replace_pattern(self, graph: Graph, match: dict):
        """
            Adds layers of type 'Const' that produce a blob from the 'bin' file. The pass finds data nodes with one
            output that doesn't have an edge with the 'bin' attribute (or with two outputs where at least one output
            has no 'bin' attr) and generates a Const op node before the node, plus a data node before the Const node.
            The data node before the 'Const' node is needed because the op node dumps input tensors to the bin file.
        """
        node = match['data']
        if len(node.in_nodes()) > 0:
            return

        if self._check_bin_attrs(node):
            if node.has_valid('value'):
                const_node_name = node.soft_get('name', node.id)
                const_node_name = graph.unique_id(re.sub(r'\/Output_\d+\/Data_(.?)+', '', const_node_name))
                log.debug("Added Const node '{}'".format(const_node_name))
                const_node = Const(graph, {'name': const_node_name, 'value': node.value,
                                           'force_shape': node.soft_get('force_shape', None),
                                           'override_output_shape': node.has_valid('force_shape'),
                                           'force_type': node.soft_get('force_type', None),
                                           'correct_data_type': node.soft_get('correct_data_type', None),
                                           }).create_node()
                const_node.add_input_port(0)
                graph.add_edges_from([(const_node_name, node.id, {'out': 0})])

                node_copy = node.copy_node()
                const_node.type_infer(const_node)
                graph.add_edges_from([(node_copy.id, const_node_name, {'in': 0, 'bin': 'custom'})])
            elif not self._check_that_node_from_body(node):
                log.debug('node = {}'.format(node.graph.node[node.id]))
                raise Error(
                    'Discovered data node without inputs and value, node.name = {}, consumer.name = {}. ' +
                    refer_to_faq_msg(23),
                    node.soft_get('name'),
                    node.out_node().soft_get('name') if len(node.out_nodes()) else "<no consumer>"
                )
Example 15
    def convert_onnx_slice_opset10(node: Node):
        """
        Converts the Slice node from ONNX opset10 to StridedSlice.
        :param node: Slice node
        :return: None
        """
        graph = node.graph

        input_shape = node.in_port(0).data.get_shape()
        output_shape = node.out_port(0).data.get_shape()
        starts = node.in_port(1).data.get_value()
        ends = node.in_port(2).data.get_value()
        if starts is None or ends is None:
            raise Error(
                'The input with starts or ends is not constant for node {}'.
                format(node.id))

        # in ONNX the value for 'ends' is usually -1 which is translated to maximum possible value of int64. This
        # value must be converted to maximum of int32 because such big values do not fit into the int32 which is
        # supported by the StridedSlice layer
        ends = np.clip(ends, np.iinfo(np.int32).min, np.iinfo(np.int32).max)
        if node.is_in_port_connected(3):
            axes = node.in_port(3).data.get_value()
            if axes is None:
                raise Error(
                    'The input with axes is not constant for node {}'.format(
                        node.id))
        else:
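            # the axes input is optional in ONNX; when absent the slice applies
            # to the first len(starts) axes in order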
            axes = int64_array(list(range(starts.size)))

        if node.is_in_port_connected(4):
            steps = node.in_port(4).data.get_value()
            if steps is None:
                raise Error(
                    'The input with steps is not constant for node {}'.format(
                        node.id))
        else:
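            # the steps input is optional as well; it defaults to stride 1 along every sliced axis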
            steps = np.ones([starts.size])

        ss_begin_mask = np.zeros(len(input_shape), dtype=np.int32)
        ss_end_mask = np.zeros(len(input_shape), dtype=np.int32)
        ss_begin = np.zeros(len(input_shape), dtype=np.int32)
        ss_end = np.zeros(len(input_shape), dtype=np.int32)
        ss_steps = np.ones(len(input_shape), dtype=np.int32)

        # prepare inputs and attributes for the StridedSlice layer
        for i, axis in enumerate(axes):
            if starts[i] != 0:
                ss_begin_mask[axis] = 1
                ss_begin[axis] = starts[i]

            ss_end_mask[axis] = 1
            ss_end[axis] = ends[i]

            ss_steps[axis] = steps[i]

        slice_node_name = node.soft_get('name', node.id)

        begin_node = Const(graph, {
            'value': ss_begin,
            'name': slice_node_name + '/begin'
        }).create_node()
        end_node = Const(graph, {
            'value': ss_end,
            'name': slice_node_name + '/end'
        }).create_node()
        strides_node = Const(graph, {
            'value': ss_steps,
            'name': slice_node_name + '/stride'
        }).create_node()

        ss = StridedSlice(
            graph,
            dict(new_axis_mask=np.zeros(len(output_shape), dtype=np.int32),
                 shrink_axis_mask=np.zeros(len(output_shape), dtype=np.int32),
                 ellipsis_mask=np.zeros(len(output_shape), dtype=np.int32),
                 begin_mask=ss_begin_mask,
                 end_mask=ss_end_mask)).create_node()
        rename_nodes([(node, slice_node_name + '_delete'),
                      (ss, slice_node_name)])
        node.in_port(0).get_connection().set_destination(ss.in_port(0))
        begin_node.out_port(0).connect(ss.in_port(1))
        end_node.out_port(0).connect(ss.in_port(2))
        strides_node.out_port(0).connect(ss.in_port(3))
        node.out_port(0).get_connection().set_source(ss.out_port(0))
Example 16
    def replace_pattern(self, graph: Graph, match: dict):
        node = match['slice']

        input = node.in_node(0)
        output_data = node.out_node()

        # ONNX 10 opset case
        if len(node.in_nodes()) >= 3 and node.has_valid(
                'format') and node['format'] == 'onnx':
            self.convert_onnx_slice_opset10(node)
            return

        # Caffe case
        if not node.has_valid('start') or not node.has_valid('end'):
            return

        begin = node.start
        end = node.end
        axis = node.axis if node.has_valid('axis') else np.arange(begin.size)

        # Check whether the operation uses only one axis or not
        axes_begin = np.zeros(len(input.shape), dtype=np.int32)
        axes_end = np.zeros(len(input.shape), dtype=np.int32)
        ss_begin = np.zeros(len(input.shape), dtype=np.int32)
        ss_end = np.zeros(len(input.shape), dtype=np.int32)
        dims = 0
        axes = np.zeros(begin.size)
        for i in range(len(axis)):
            if begin[i] != 0 or end[i] < input.shape[axis[i]]:
                dims += 1
                axes[i] = 1
                if begin[i] != 0:
                    axes_begin[axis[i]] = 1
                    ss_begin[axis[i]] = begin[i]
                if end[i] < input.shape[axis[i]]:
                    axes_end[axis[i]] = 1
                    ss_end[axis[i]] = end[i]
        axes = np.array(axes, dtype=bool)
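        # `axes` is now a boolean mask marking which entries of begin/end actually restrict the tensor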

        slice_node_name = node.soft_get('name', node.id)

        if dims == 1 or dims == 0:
            # If Slice uses only one axis, or no axis at all, then
            # convert Slice to StridedSlice
            ss = StridedSlice(
                graph,
                dict(new_axis_mask=np.zeros(len(output_data.shape),
                                            dtype=np.int32),
                     shrink_axis_mask=np.zeros(len(output_data.shape),
                                               dtype=np.int32),
                     ellipsis_mask=np.zeros(len(output_data.shape),
                                            dtype=np.int32),
                     begin_mask=axes_begin,
                     end_mask=axes_end)).create_node()

            convert_negative_indices(ss_begin, input.shape)
            convert_negative_indices(ss_end, input.shape)

            begin_node = Const(graph, {
                'value': ss_begin,
                'name': slice_node_name + '/begin'
            }).create_node()
            end_node = Const(graph, {
                'value': ss_end,
                'name': slice_node_name + '/end'
            }).create_node()

            rename_nodes([(node, slice_node_name + '_delete'),
                          (ss, slice_node_name)])

            node.in_port(0).get_connection().set_destination(ss.in_port(0))
            begin_node.out_port(0).connect(ss.in_port(1))
            end_node.out_port(0).connect(ss.in_port(2))
            node.out_port(0).get_connection().set_source(ss.out_port(0))
        else:
            # If Slice uses more than one axis, use the Crop layer
            crop = Crop(
                graph,
                dict(axis=axis[axes],
                     offset=begin[axes],
                     dim=end[axes] - begin[axes])).create_node()
            rename_nodes([(node, slice_node_name + '_delete'),
                          (crop, slice_node_name)])

            node.in_port(0).get_connection().set_destination(crop.in_port(0))
            node.out_port(0).get_connection().set_source(crop.out_port(0))
Example 17
    def replace_op(self, graph: Graph, node: Node):
        # split input to (i_part, f_part, c_part, o_part, ct_1)
        split_node_axis = Const(graph, {'value': np.int64(1)}).create_node()
        split_node = Split(graph, {
            'name': 'Split_lstm_input_',
            'num_splits': 5
        }).create_node()
        node.in_port(0).get_connection().set_destination(split_node.in_port(0))
        split_node.in_port(1).connect(split_node_axis.out_port(0))

        # i_t = Sigmoid(i_part + w_ic*ct_1)
        i_scale_attrs = {'name': 'i_scaleshift', 'bias_term': False}
        i_scale = ScaleShiftOp(graph, i_scale_attrs).create_node()
        input_as_const(i_scale, i_scale_attrs, 1, 'weights', node.i_weights)
        split_node.out_port(4).connect(i_scale.in_port(0))

        sum_i_c = Add(graph, {'name': 'sum_i_c_'}).create_node()
        split_node.out_port(0).connect(sum_i_c.in_port(0))
        i_scale.out_port(0).connect(sum_i_c.in_port(1))

        i_sigmoid = Sigmoid(graph, {'name': 'i_sigmoid'}).create_node()
        sum_i_c.out_port(0).connect(i_sigmoid.in_port(0))

        # f_t = Sigmoid(f_part + w_fc*ct_1)
        f_scale_attrs = {'name': 'f_scaleshift', 'bias_term': False}
        f_scale = ScaleShiftOp(graph, f_scale_attrs).create_node()
        input_as_const(f_scale, f_scale_attrs, 1, 'weights', node.f_weights)
        split_node.out_port(4).connect(f_scale.in_port(0))

        sum_f_c = Add(graph, {'name': 'sum_f_c_'}).create_node()
        split_node.out_port(1).connect(sum_f_c.in_port(0))
        f_scale.out_port(0).connect(sum_f_c.in_port(1))

        f_sigmoid = Sigmoid(graph, {'name': 'f_sigmoid'}).create_node()
        sum_f_c.out_port(0).connect(f_sigmoid.in_port(0))

        # c_t = f_t*ct_1 + i_t * tanh(c_part)
        c_tanh = Tanh(graph, {'name': 'c_tanh'}).create_node()
        split_node.out_port(2).connect(c_tanh.in_port(0))

        prod_i_c_tanh = Mul(graph, {'name': 'prod_i_c_tanh_'}).create_node()
        i_sigmoid.out_port(0).connect(prod_i_c_tanh.in_port(0))
        c_tanh.out_port(0).connect(prod_i_c_tanh.in_port(1))

        prod_f_ct_1 = Mul(graph, {'name': 'prod_f_ct_1_'}).create_node()
        f_sigmoid.out_port(0).connect(prod_f_ct_1.in_port(0))
        split_node.out_port(4).connect(prod_f_ct_1.in_port(1))

        sum_f_i = Add(graph, {'name': 'sum_f_i_'}).create_node()
        prod_f_ct_1.out_port(0).connect(sum_f_i.in_port(0))
        prod_i_c_tanh.out_port(0).connect(sum_f_i.in_port(1))

        #  o_t = Sigmoid(o_part + w_oc*c_t)
        o_scale_attrs = {'name': 'o_scaleshift', 'bias_term': False}
        o_scale = ScaleShiftOp(graph, o_scale_attrs).create_node()
        input_as_const(o_scale, o_scale_attrs, 1, 'weights', node.o_weights)
        sum_f_i.out_port(0).connect(o_scale.in_port(0))

        sum_o_c = Add(graph, {'name': 'sum_o_c_'}).create_node()
        split_node.out_port(3).connect(sum_o_c.in_port(0))
        o_scale.out_port(0).connect(sum_o_c.in_port(1))

        o_sigmoid = Sigmoid(graph, {'name': 'o_sigmoid'}).create_node()
        sum_o_c.out_port(0).connect(o_sigmoid.in_port(0))

        # m_t = o_t * Tanh(c_t)
        c_t_tanh = Tanh(graph, {'name': 'c_t_tanh'}).create_node()
        sum_f_i.out_port(0).connect(c_t_tanh.in_port(0))

        prod_o_c_t_tanh = Mul(graph, {
            'name': 'prod_o_c_t_tanh_'
        }).create_node()
        o_sigmoid.out_port(0).connect(prod_o_c_t_tanh.in_port(0))
        c_t_tanh.out_port(0).connect(prod_o_c_t_tanh.in_port(1))

        # add concat to create 1 output
        concat = Concat(graph, {'name': 'Concat_c_m'}).create_node()
        concat.add_sequence_of_ports('in', range(2))
        sum_f_i.out_port(0).connect(concat.in_port(0))
        prod_o_c_t_tanh.out_port(0).connect(concat.in_port(1))

        return [concat.id]
Example 18
    def replace_pattern(self, graph: Graph, match: Dict[str, Node]):
        log.debug('UpsampleToResample is triggered')
        upsample = match['upsample']
        input_shape = upsample.in_port(0).data.get_shape()
        input_shape_rank = len(input_shape)
        if input_shape_rank not in [4, 5]:
            log.warning('The input shape is not 4D or 5D for op {}'.format(
                upsample.soft_get('name')))
            return

        if len(upsample.in_nodes()) == 2:
            if upsample.in_node(1).value is None:
                return
            scales = upsample.in_node(1).value
            assert scales.shape == (4, )
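            # scales holds one factor per [N, C, H, W] dimension; only spatial scaling is
            # supported, so the N and C factors must be 1 (checked below)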
            if not (math.isclose(scales[0], 1, rel_tol=1e-5)
                    and math.isclose(scales[1], 1, rel_tol=1e-5)):
                return
            height_scale = scales[2]
            width_scale = scales[3]
        else:
            height_scale = upsample['height_scale']
            width_scale = upsample['width_scale']

        if 1 in upsample.in_ports() and not upsample.in_port(1).disconnected():
            upsample.in_port(1).disconnect()

        factor = Const(graph, {
            'value': np.array([height_scale, width_scale])
        }).create_node()

        shape = Shape(graph, {'name': upsample.name + '/0_port'}).create_node()

        layout = graph.graph['layout']
        if input_shape_rank == 4:
            begin = Const(graph, {
                'value': int64_array([get_height_dim(layout, input_shape_rank)])
            }).create_node()
        else:
            begin = Const(graph, {
                'value': int64_array([get_depth_dim(layout, input_shape_rank)])
            }).create_node()
        end = Const(graph, {
            'value': int64_array([get_width_dim(layout, input_shape_rank) + 1])
        }).create_node()

        stride = Const(graph, {'value': int64_array([1])}).create_node()
        ss = StridedSlice(
            graph, {
                'name': upsample.name + '/ss_0_port',
                'begin_mask': np.array([1]),
                'end_mask': np.array([0]),
                'new_axis_mask': np.array([0]),
                'shrink_axis_mask': int64_array([0]),
                'ellipsis_mask': int64_array([0])
            }).create_node()

        mul = Mul(graph, {
            'name': upsample.name + '/factor_mul_'
        }).create_node()

        source = upsample.in_port(0).get_connection().get_source()
        source.connect(shape.in_port(0))
        shape.out_port(0).connect(ss.in_port(0))
        begin.out_port(0).connect(ss.in_port(1))
        end.out_port(0).connect(ss.in_port(2))
        stride.out_port(0).connect(ss.in_port(3))
        ss.out_port(0).connect(mul.in_port(0))
        factor.out_port(0).connect(mul.in_port(1))

        # Create Interpolate operation
        if input_shape_rank == 4:
            axes = int64_array([
                get_height_dim(layout, input_shape_rank),
                get_width_dim(layout, input_shape_rank)
            ])
        else:
            axes = int64_array([
                get_depth_dim(layout, input_shape_rank),
                get_height_dim(layout, input_shape_rank),
                get_width_dim(layout, input_shape_rank)
            ])

        resample_op = Interpolate(
            graph,
            dict(name='Interpolate/{}'.format(upsample.name),
                 axes=axes,
                 mode=upsample.attrs()['mode'],
                 antialias=0,
                 convert_to_resample=True)).create_node()

        upsample.add_input_port(1, skip_if_exist=True)
        assert upsample.in_port(1).disconnected()
        mul.out_port(0).connect(resample_op.in_port(1))

        upsample.in_port(0).get_connection().set_destination(
            resample_op.in_port(0))
        upsample.out_port(0).get_connection().set_source(
            resample_op.out_port(0))
Example 19
    def replace_pattern(self, graph: Graph, match: dict):
        if match['rnn_layer']['op'] == 'LSTM':
            return

        rnn_layer = match['rnn_layer']

        # Build TensorIterator body first
        body = Graph(name=rnn_layer.name + '/sub_graph')
        body.graph = graph.graph

        # 1. Input squeeze Reshape
        inputs = [
            Op._create_data_node(
                body, rnn_layer.name + '/inport/' + str(inp), {
                    'shape': rnn_layer.in_node(inp).shape.copy(),
                    'value': rnn_layer.in_node(inp).value.copy()
                    if rnn_layer.in_node(inp).value is not None and inp in [1, 2] else None
                }) for inp in [0, 4, 1, 2]
        ]  # X, h_init, WR, B

        inputs[0].shape[rnn_layer.sequence_dim] = 1
        input_squeeze = Squeeze(
            body,
            dict(name=rnn_layer.name + '/input_squeeze', internal_layer_id=0))
        input_squeeze_dim = Const(
            body,
            dict(name=rnn_layer.name + '/input_squeeze_dim',
                 value=rnn_layer.sequence_dim)).create_node_with_data()
        inputs[0] = input_squeeze.create_node_with_data(
            [inputs[0], input_squeeze_dim],
            edge_attrs=[{
                'internal_port_id': 0
            }])

        # 2. Output unsqueeze Reshape
        outputs = [
            Op._create_data_node(
                body, rnn_layer.name + '/outport/' + str(out), {
                    'shape': rnn_layer.out_node(out).shape.copy()
                    if out in rnn_layer.out_nodes() else None
                }) for out in [0]
        ]
        for out in outputs:
            add_opoutput(body, out.id, 0, False)

        outputs[0].shape = np.delete(outputs[0].shape.copy(),
                                     rnn_layer.sequence_dim)
        output_unsqueeze_dim = Const(
            body,
            dict(name=rnn_layer.name + '/output_unsqueeze_dim',
                 value=rnn_layer.sequence_dim)).create_node_with_data()
        output_unsqueeze = Unsqueeze(
            body,
            dict(name=rnn_layer.name + '/output_unsqueeze/',
                 internal_layer_id=2))

        additional_attrs = dict(activations=rnn_layer.activations,
                                activation_alpha=rnn_layer.activation_alpha,
                                activation_beta=rnn_layer.activation_beta,
                                clip=rnn_layer.clip)
        if rnn_layer.op == 'GRU':
            additional_attrs['linear_before_reset'] = rnn_layer.linear_before_reset

        # 3. ***Cell
        rnn_cell_op = self.get_rnn_cell(rnn_layer['op'])(
            body,
            dict(hidden_size=rnn_layer.hidden_size,
                 name=rnn_layer.name + '/{}Cell'.format(rnn_layer.op),
                 **additional_attrs,
                 internal_layer_id=1))

        gru_cell = rnn_cell_op.create_node_with_data(
            inputs,
            data_nodes=outputs,
            edge_attrs=[{}, {'internal_port_id': 1}, {'internal_port_id': 2},
                        {'bin': 'weights'}, {'bin': 'biases'}])

        # internal ports for outputs of cell
        gru_cell.in_node().out_edge(0)['internal_port_id'] = 4  # h_state

        gru_cell = output_unsqueeze.create_node_with_data(
            [gru_cell, output_unsqueeze_dim])
        gru_cell.in_node().out_edge(0)['internal_port_id'] = 3
        add_opoutput(body, gru_cell.id, 0, False)

        # 4. TensorIterator layer creating
        assert rnn_layer.direction in ['forward', 'reverse']
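        # a reverse RNN is modelled by iterating over the sequence axis backwards (stride -1)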
        if rnn_layer.direction == 'forward':
            stride = 1
            start = None
            end = None
        else:
            assert rnn_layer.direction == 'reverse'
            stride = -1
            start = -1
            end = 0

        # stacked h_state
        output_port_map = [{
            'external_port_id': 3,
            'internal_layer_id': 2,
            'internal_port_id': 3,
            'axis': rnn_layer.sequence_dim,
            'stride': stride,
            'start': start,
            'end': end,
            'part_size': 1,
        }]

        # Adding last h_state to outputs
        if len(rnn_layer.out_nodes()) == 2:
            output_port_map.extend([{
                'external_port_id': 4,
                'internal_layer_id': 1,
                'internal_port_id': 4,
            }])

        ti_op = TensorIterator(graph, {
            'name': rnn_layer.name + '/TensorIterator',
            'body': body,
            'in_ports_count': 4,
            'out_ports_count': len(rnn_layer.out_nodes()),
            'input_port_map': [
                {
                    'external_port_id': 0,
                    'internal_layer_id': 0,
                    'internal_port_id': 0,
                    'axis': rnn_layer.sequence_dim,
                    'stride': stride,
                    'start': start,
                    'end': end,
                    'part_size': 1,
                },
                {
                    'external_port_id': 1,
                    'internal_layer_id': 1,
                    'internal_port_id': 1,
                },
            ],
            'output_port_map': output_port_map,
            # only for h state
            'back_edges': [
                {
                    'from_layer': 1,
                    'from_port': 4,
                    'to_layer': 1,
                    'to_port': 1,
                },
            ]
        })

        assert sorted(rnn_layer.out_nodes().keys()) == list(range(len(rnn_layer.out_nodes()))), \
            "There are gaps in output ports of GRUSequence operation. Node {}".format(rnn_layer.id)

        outs = ti_op.create_node_with_data(
            [rnn_layer.in_node(i) for i in [0, 4]],  # X, h_init
            data_nodes=[
                rnn_layer.out_node(i)
                for i in range(len(rnn_layer.out_nodes()))
            ],
            edge_attrs=[{
                'external_port_id': 0
            }, {
                'external_port_id': 1
            }])

        if not isinstance(outs, list):
            outs = [outs]

        graph.remove_node(rnn_layer.id)
        outs[0].in_edge(0)['external_port_id'] = 3
        for i, out in enumerate(outs[1:]):
            external_port_id = 4 + i
            out.in_edge()['external_port_id'] = external_port_id

        ti = outs[0].in_node()
        TensorIterator.cover_body_input_data_nodes_with_parameter_ops(ti)
        TensorIterator.cover_body_constant_data_nodes_with_const_ops(ti)
        TensorIterator.normalize_internal_ids(ti)
Example 20
    def replace_pattern(self, graph: Graph, match: dict):

        merge = match['merge']
        power = Pow(graph, {
            'name': merge.name + '/reciprocal_',
            'type': 'PNORM'
        }).create_node()
        const1 = Const(graph, {
            'value': -1.0,
            'name': merge.name + '/negate_const'
        }).create_node()
        merge.in_port(0).get_connection().set_destination(power.in_port(0))
        const1.out_port(0).connect(power.in_port(1))

        concat_node = Concat(
            graph, {
                'axis': 0,
                'name': merge.name + '/Concat_',
                'override_output_shape': True
            }).create_node()
        const3 = Const(graph, {
            'name': merge.name + '/const_reduce',
            'value': 0
        }).create_node()

        for ii, idx in enumerate(
                range(merge.significant, merge.to_significant + 1, 1)):
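            # one branch per exponent idx: scale the reciprocal by 10^idx, cast to float32,
            # then scale back by 10^-idx before the reduction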
            const_node = Const(
                graph, {
                    'value': float_array(math.pow(10.0, idx)),
                    'name': merge.name + '/Const_' + str(ii)
                }).create_node()

            mul_node = Mul(graph, {
                'name': merge.name + '/Mul_' + str(ii)
            }).create_node()
            const_node.out_port(0).connect(mul_node.in_port(0))

            power.out_port(0).connect(
                mul_node.in_port(1))  # connect to the graph node
            mul_node2 = Mul(graph, {
                'name': merge.name + '/Mul_Div_' + str(ii)
            }).create_node()

            const_node2 = Const(
                graph, {
                    'value': float_array(math.pow(10.0, -1 * idx)),
                    'name': merge.name + '/Const_Pow_' + str(ii)
                }).create_node()
            cast_node = Cast(
                graph, {
                    'name': merge.name + '/Cast_' + str(idx),
                    'dst_type': np.float32
                }).create_node()

            mul_node.out_port(0).connect(cast_node.in_port(0))
            const_node2.out_port(0).connect(mul_node2.in_port(1))
            cast_node.out_port(0).connect(mul_node2.in_port(0))
            concat_node.add_input_port(ii, skip_if_exist=True)
            concat_node.in_port(ii).get_connection().set_source(
                mul_node2.out_port(0))

        reducesum_node = ReduceMean(
            graph, {
                'name': merge.id + '/_pnorm_reduced_sum',
                'keep_dims': False,
                'in_ports_count': 2,
                'need_shape_inference': None,
                'infer': reduce_infer
            }).create_node()

        const3.out_port(0).connect(reducesum_node.in_port(1))
        reducesum_node.in_port(0).get_connection().set_source(
            concat_node.out_port(0))

        reshape = Reshape(graph, {
            'name': merge.name + '/Reshape_Node'
        }).create_node()
        reshape_dim = Const(graph, {
            'value': np.array([1, 5]),
            'name': merge.id + '/Reshape_Dim'
        }).create_node()
        reducesum_node.out_port(0).connect(reshape.in_port(0))
        reshape.in_port(1).connect(reshape_dim.out_port(0))
        merge.out_port(0).get_connection().set_source(reshape.out_port(0))
Example 21
    def replace_sub_graph(self, graph: Graph, match: dict):
        node = match['flatten']
        name = node.soft_get('name', node.id)

        assert node.has_valid('axis'), 'Flatten {} has no mandatory `axis` attribute'.format(name)
        assert node.has_valid('end_axis'), 'Flatten {} has no mandatory `end_axis` attribute'.format(name)

        axis = node.axis
        end_axis = node.end_axis

        if end_axis == -1 and axis >= 0:
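            # static case: zeros keep the first `axis` dims as-is and a single -1 collapses the rest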
            begin_dims = Const(graph, {
                'value': int64_array([0] * axis)
            }).create_node()
            middle_dim = Const(graph, {
                'value': int64_array([-1])
            }).create_node()
            end_dims = Const(graph, {'value': int64_array([])}).create_node()
        else:
            rank = Rank(graph, {'name': name + '/input_rank'}).create_node()
            node.in_port(0).get_source().connect(rank.in_port(0))

            shape = Shape(graph, {'name': name + '/input_shape'}).create_node()
            node.in_port(0).get_source().connect(shape.in_port(0))

            begin_dims = get_shape_values_by_range_idxs(shape=shape,
                                                        rank=rank,
                                                        begin=0,
                                                        end=axis)
            middle_dims = get_shape_values_by_range_idxs(shape=shape,
                                                         rank=rank,
                                                         begin=axis,
                                                         end=end_axis,
                                                         include_end=True)
            end_dims = get_shape_values_by_range_idxs(shape=shape,
                                                      rank=rank,
                                                      begin=end_axis,
                                                      end=-1,
                                                      include_begin=False,
                                                      include_end=True)

            middle_dim = create_op_node_with_second_input(
                graph, ReduceProd, int64_array([0]), {'keep_dims': True})
            middle_dims.out_port(0).connect(middle_dim.in_port(0))

        dim = new_shape_node_from_shape_nodes(
            [begin_dims, middle_dim, end_dims])

        original_name = node.soft_get('name')
        abandoned_name = original_name + '/ShouldBeDeleted'
        reshape_node = Reshape(graph, {}).create_node()
        # Keep a node with the same name to avoid confusion when renaming
        rename_nodes([(node, abandoned_name), (reshape_node, original_name)])
        reshape_node.in_port(1).connect(dim.out_port(0))

        node.out_port(0).get_connection().set_source(reshape_node.out_port(0))
        node.in_port(0).get_connection().set_destination(
            reshape_node.in_port(0))
Example 22
    def replace_pattern(self, graph: Graph, match: dict):
        conv = match['conv']

        assert len(conv.out_nodes()) == 1, \
            "Convolution operation {} should have 1 output data node".format(conv.id)
        out_data = conv.out_node()

        assert out_data.has_valid('shape'), \
            'Output shape is undefined for {} in back phase'.format(conv.id)
        out_shape = out_data.shape

        if out_shape.size != 3:
            return

        assert len(conv.in_nodes()) >= 1, \
            "Convolution operation {} should have at least 1 input data node".format(conv.id)
        inp_data = conv.in_node()

        assert inp_data.has_valid('shape'), \
            'Input shape is undefined for {} in back phase'.format(conv.id)
        inp_shape = inp_data.shape
        new_inp_shape = np.insert(inp_shape, 2, 1)
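        # e.g. [N, C, W] -> [N, C, 1, W]: a fake H dimension lets the 1D convolution run as a 2D one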

        # setting to None to be overwritten by infer function
        conv.kernel_spatial_idx = None
        conv.spatial_dims = None

        # inserting fake H dimension
        conv.dilation = np.insert(conv.dilation, 2, 1)
        conv.kernel_spatial = np.append([1], conv.kernel_spatial)
        conv.pad = np.insert(conv.pad, 2, [0, 0], axis=0)
        conv.stride = np.insert(conv.stride, 2, 1)

        weights_node = conv.in_node(1)
        weights_node.value = np.reshape(
            weights_node.value, np.insert(weights_node.value.shape, 2, 1))
        weights_node.shape = np.array(weights_node.value.shape, dtype=np.int64)

        reshape = Reshape(graph, {
            'name': conv.name + '/reshape'
        }).create_node()
        reshape_dim = Const(graph, {
            'value': new_inp_shape,
            'name': reshape.id + '/Dim'
        }).create_node()
        conv.in_port(0).get_connection().insert_node(reshape)
        reshape.in_port(1).connect(reshape_dim.out_port(0))

        reshape_back = Reshape(graph, {
            'name': conv.name + '/reshape_back'
        }).create_node()
        reshape_back_dim = Const(graph, {
            'value': out_shape,
            'name': reshape_back.id + '/Dim'
        }).create_node()
        conv.out_port(0).get_connection().insert_node(reshape_back)
        reshape_back.in_port(1).connect(reshape_back_dim.out_port(0))

        # run shape inference manually for several nodes to override shapes of the model nodes which changed behaviour
        reshape_dim.infer(reshape_dim)
        reshape.infer(reshape)
        conv.infer(conv)
Example 23
    def replace_pattern(self, graph: Graph, match: dict):
        node = match['node']

        if 2 in node.in_ports() and not node.in_port(2).disconnected():
            in_rank = node.in_port(0).data.get_shape().size

            shape_src = node.in_port(2).get_source()
            node.in_port(2).disconnect()

            begin = Const(graph, {
                'value': np.array([2], dtype=np.int32)
            }).create_node()
            end = Const(graph, {
                'value': np.array([in_rank], dtype=np.int32)
            }).create_node()
            stride = Const(graph, {
                'value': np.array([1], dtype=np.int32)
            }).create_node()

            ss_0 = StridedSlice(
                graph, {
                    'name': node.name + '/ss_0_port',
                    'begin_mask': np.array([1], dtype=np.int32),
                    'end_mask': np.array([0], dtype=np.int32),
                    'new_axis_mask': np.array([0], dtype=np.int32),
                    'shrink_axis_mask': np.array([0], dtype=np.int32),
                    'ellipsis_mask': np.array([0], dtype=np.int32)
                }).create_node()

            shape_src.connect(ss_0.in_port(0))
            begin.out_port(0).connect(ss_0.in_port(1))
            end.out_port(0).connect(ss_0.in_port(2))
            stride.out_port(0).connect(ss_0.in_port(3))

            ss_0.out_port(0).connect(node.in_port(2))

            del node['pad']

        group = node.soft_get('group', 1)

        if group != 1:
            assert group > 1

            weights_shape = node.in_port(1).data.get_shape()
            assert weights_shape is not None
            I = node.in_port(0).data.get_shape()[1]
            assert I % group == 0
            assert node.output % group == 0

            new_shape = int64_array(
                [group, I / group, node.output / group, *weights_shape[2:]])
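            # weights [I, O, ...spatial] are regrouped into [group, I / group, O / group, ...spatial]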

            assert np.prod(weights_shape) == np.prod(new_shape), \
                'Initial weights shape {}, grouped weights shape {}'.format(weights_shape, new_shape)
            reshape = create_op_node_with_second_input(
                graph, Reshape, int64_array(new_shape),
                {'override_output_shape': True},
                node.in_port(1).get_source().node)

            node.in_port(1).get_connection().set_source(reshape.out_port(0))

            node['type'] = 'GroupConvolutionBackpropData'
        else:
            node['type'] = 'ConvolutionBackpropData'
Example 24
    def replace_pattern(self, graph: Graph, match: dict):
        assert match['operator'].has('multiplication_transparent_ports')

        quantize = match['quantize']

        port = match['operator'].input_ports_with(match['quantized'])
        assert len(port) >= 1
        if len(port) > 1:
            log.debug(
                'BinarizeWeightsM1P1 cannot apply transformation for data {} because it is consumed more'
                ' than once'.format(match['quantized'].name))
            return

        assert len(port) == 1
        port = port[0]
        applicable = [
            pair for pair in match['operator'].multiplication_transparent_ports
            if pair[0] == port
        ]
        if len(applicable) == 0:
            return

        # Look at 3-rd and 4-th inputs of FakeQuantize -- they have constants that should be passed through.
        # Assume that the constant that should be passed through is a scalar.
        output_low = quantize.in_node(3)
        output_high = quantize.in_node(4)
        assert len(output_low.out_nodes()) == 1
        assert len(output_high.out_nodes()) == 1

        if not output_low.has_valid('value') or not output_high.has_valid(
                'value'):
            return

        output_low = output_low.value
        output_high = output_high.value

        operator = match['operator']

        weights = operator.in_node(1).value
        weights_rounded = np.round(weights)
        weights_consistent = np.all(np.isclose(weights, weights_rounded)) and \
                             set(np.unique(weights_rounded)).issubset({-1, 1})

        if weights_consistent and np.all(np.isclose(output_low, 0)) and np.all(
                np.isclose(output_high, 1)):
            reduction_indices = set(range(len(weights.shape))) - set(
                [operator.output_feature_channel])
            weights_reduced = np.add.reduce(weights,
                                            axis=tuple(reduction_indices))
            weights_reduced = weights_reduced.reshape(
                [len(weights_reduced), 1, 1])  # FIXME: works for NCHW only

            add_term = Const(graph, {'value': weights_reduced}).create_node()
            add = Add(graph, {}).create_node()
            add.in_port(1).connect(add_term.out_port(0))
            mul_term = Const(graph, {'value': np.array(0.5)}).create_node()
            mul = Mul(graph, {}).create_node()
            mul.in_port(1).connect(mul_term.out_port(0))
            add.out_port(0).connect(mul.in_port(0))

            operator.out_port(0).get_connection().set_source(mul.out_port(0))
            add.in_port(0).connect(operator.out_port(0))

            operator['pad_value'] = float(-1.0)
        elif weights_consistent and np.all(np.isclose(
                output_low, -1)) and np.all(np.isclose(output_high, +1)):
            pass
        else:
            log.debug(
                'ConvToBinaryConv: cannot apply transformation because input range is neither in [0, +1] nor '
                'in [-1, +1].')
            return

        operator['type'] = 'BinaryConvolution'
        operator['mode'] = 'xnor-popcount'
        operator['pad_value'] = operator.soft_get('pad_value', float(0))
        operator['input'] = operator.in_node(0).shape[1]
        # Weights are not bit-packed yet; there should be a separate transformation to do that

        assert output_low.size == 1
        assert output_high.size == 1

        output_low = quantize.in_node(3)
        output_high = quantize.in_node(4)

        # Make sure that low/high values are exactly 0/1
        output_low.value = np.zeros(output_low.shape)
        output_high.value = np.ones(output_high.shape)
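
The rewrite above hinges on a simple identity. For activations x01 in {0, 1} and weights w in {-1, +1}, an xnor-popcount convolution effectively sees xpm = 2*x01 - 1, and the original result is recovered by the inserted Add (sum of weights over the non-output-channel axes) followed by Mul(0.5). A minimal numpy sketch of both the weight check and that identity (as read from the code, standalone of the Graph machinery):

import numpy as np

# The consistency check from above, in isolation: weights must round cleanly
# and the rounded values must form a subset of {-1, +1}.
w = np.array([1., -1., 1., 1.])
assert np.all(np.isclose(w, np.round(w))) and set(np.unique(np.round(w))).issubset({-1, 1})

# The Add/Mul identity: y01 = (y_pm + sum(w)) / 2.
x01 = np.array([1., 0., 1., 1.])
y_pm = np.dot(w, 2 * x01 - 1)                 # what the binary convolution computes
assert np.isclose(np.dot(w, x01), (y_pm + w.sum()) * 0.5)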
Example no. 25
def parse_specifier(string, graph, layer_node_map):
    pos = string.find(b'(')
    if pos == -1:
        # node name
        input_name = string.split(b' ')[0].decode('utf-8').replace('\n', '')

        if input_name not in layer_node_map:
            node_name = graph.unique_id(prefix=input_name)
            graph.add_node(node_name, parameters=[], op="", kind='op')
            layer_node_map[input_name] = node_name
        else:
            node_name = layer_node_map[input_name]
        return node_name

    spec = string[:pos]
    args = get_args_for_specifier(string[pos:])
    if spec == b'Append':
        nodes = [parse_specifier(arg, graph, layer_node_map) for arg in args]
        layer_name = 'Append_'
        for node in nodes:
            layer_name = layer_name + node + "_"

        if layer_name not in layer_node_map:
            concat_name = graph.unique_id(prefix=layer_name)
            graph.add_node(concat_name,
                           parameters=None,
                           op='concat',
                           kind='op')
            layer_node_map[layer_name] = concat_name
            Node(graph, concat_name).add_sequence_of_ports('in', range(len(nodes)))
            for i, node in enumerate(nodes):
                out_port = len(Node(graph, node).out_nodes())
                Node(graph, node).add_output_port(out_port)
                graph.create_edge(
                    Node(graph, node), Node(graph, concat_name), out_port, i,
                    create_edge_attrs(node, concat_name, node, i, out_port))
        else:
            concat_name = layer_node_map[layer_name]
        return concat_name
    elif spec == b'Offset':
        node = parse_specifier(args[0], graph, layer_node_map)
        t = int(args[1])
        if len(args) > 2:
            raise Error("ModelOptimizer supports only 2 arguments for Offset")
        layer_name = 'Offset_' + node + '_'
        if t < 0:
            layer_name = layer_name + '_' + str(-t)
        else:
            layer_name = layer_name + str(t)

        if layer_name not in layer_node_map:
            memory_name = graph.unique_id(prefix=layer_name)
            layer_node_map[layer_name] = memory_name
            memory_name_2 = memory_name + '_out'
            graph.add_node(memory_name,
                           parameters=dict(t=t,
                                           pair_name=memory_name_2,
                                           has_default=False),
                           op='MemoryOffset',
                           kind='op')
            out_port = len(Node(graph, node).out_nodes())
            in_port = len(Node(graph, memory_name).in_nodes())
            Node(graph, memory_name).add_input_port(in_port)
            Node(graph, node).add_output_port(out_port, skip_if_exist=True)
            graph.create_edge(
                Node(graph, node), Node(graph, memory_name), out_port, in_port,
                create_edge_attrs(node, memory_name, node, in_port, out_port))
        else:
            memory_name = layer_node_map[layer_name]
        return memory_name
    elif spec == b'Sum':
        nodes = [parse_specifier(arg, graph, layer_node_map) for arg in args]

        layer_name = 'Sum_'
        for node in nodes:
            layer_name = layer_name + node + "_"

        if layer_name not in layer_node_map:
            sum_name = graph.unique_id(prefix=layer_name)
            graph.add_node(sum_name, parameters=None, op='Add', kind='op')
            layer_node_map[layer_name] = sum_name
        else:
            sum_name = layer_node_map[layer_name]

        for i, node in enumerate(nodes):
            out_port = len(Node(graph, node).out_nodes())
            Node(graph, node).add_output_port(out_port, skip_if_exist=True)
            Node(graph, sum_name).add_input_port(i)
            graph.add_edge(node, sum_name,
                           **create_edge_attrs(node, sum_name, node, i))

        return sum_name
    elif spec == b'IfDefined':
        node_id = parse_specifier(args[0], graph, layer_node_map)
        node = Node(graph, node_id)
        if node.op == 'MemoryOffset':
            node['parameters']['has_default'] = True
        return node_id
    elif spec == b'ReplaceIndex':
        node = parse_specifier(args[0], graph, layer_node_map)
        return node
    elif spec == b'Scale':
        node_name = parse_specifier(args[1], graph, layer_node_map)
        scale_value = float(args[0])
        layer_name = '{}/Mul/{}'.format(node_name, scale_value)

        if layer_name not in layer_node_map:
            scale_name = graph.unique_id(prefix=layer_name)
            scale_node = Mul(graph, {'name': scale_name}).create_node()

            layer_node_map[layer_name] = scale_name

            scale_const_name = 'Const_{}'.format(scale_value)
            const_node = Const(graph, {
                'name': scale_const_name,
                'value': float_array([scale_value])
            }).create_node()

            node = Node(graph, node_name)
            graph.create_edge(
                const_node, scale_node, 0, 0,
                create_edge_attrs(const_node.id, scale_node.id, const_node.id))
            out_port = len(node.out_nodes())
            graph.create_edge(
                node, scale_node, out_port, 1,
                create_edge_attrs(node_name, scale_node.id, node_name, 1,
                                  out_port))
        else:
            scale_name = layer_node_map[layer_name]

        return scale_name
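
parse_specifier leans on get_args_for_specifier, which is not shown in this snippet. A plausible stand-in (a hypothetical sketch, not the actual helper) splits the parenthesized tail into top-level, comma-separated byte-string arguments while respecting nesting:

def get_args_for_specifier_sketch(string: bytes) -> list:
    # Hypothetical helper: `string` starts at the opening '(' of the specifier;
    # split its contents on commas at nesting depth zero.
    assert string.startswith(b'(')
    args, depth, current = [], 0, b''
    for byte in string[1:-1]:  # drop the outer parentheses
        ch = bytes([byte])
        if ch == b'(':
            depth += 1
        elif ch == b')':
            depth -= 1
        if ch == b',' and depth == 0:
            args.append(current.strip())
            current = b''
        else:
            current += ch
    if current.strip():
        args.append(current.strip())
    return args

# e.g. get_args_for_specifier_sketch(b'(Offset(input, -1), input)')
# -> [b'Offset(input, -1)', b'input']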
Example no. 26
    def replace_pattern(graph: Graph, match: dict):
        node = match['conv']
        node_name = node.soft_get('name', node.id)

        # create Reshape before convolution
        # shape = [in_shape[0], in_shape[1]/patch_stride, 1, patch_stride]
        shape = Shape(graph, {'name': node_name + '/Shape'}).create_node()
        shape.in_port(0).connect(node.in_port(0).get_source())

        split = create_op_with_const_inputs(graph, VariadicSplit, {
            1: int64_array(0),
            2: int64_array([1, -1])
        }, {
            'name': shape.name + '/split_batch',
            'out_ports_count': 2
        }, shape)

        pow_node = create_op_node_with_second_input(
            graph, Pow, int64_array([-1]),
            {'name': node_name + '/patch_stride/inverse'})
        conv_patch_stride = Const(
            graph, {
                'value': int64_array([node.patch_stride]),
                'name': node_name + '/patch_stride/'
            }).create_node()
        pow_node.in_port(0).connect(conv_patch_stride.out_port(0))

        mul = Mul(graph, {
            'name': node_name + '/mul_inverse_stride_h'
        }).create_node()
        mul.in_port(0).connect(split.out_port(1))
        mul.in_port(1).connect(pow_node.out_port(0))

        concat = create_op_with_const_inputs(
            graph, Concat, {2: int64_array([1])}, {
                'name': node_name + '/concat_all_dims',
                'in_ports_count': 4,
                'axis': 0
            })

        concat.in_port(0).connect(split.out_port(0))
        concat.in_port(1).connect(mul.out_port(0))
        concat.in_port(3).connect(conv_patch_stride.out_port(0))

        reshape_in = Reshape(graph, {
            'name': node_name + '/reshape_in'
        }).create_node()
        reshape_in.in_port(1).connect(concat.out_port(0))

        # create Reshape after Convolution
        reshape_out = create_op_node_with_second_input(
            graph, Reshape, int64_array([0, -1]),
            {'name': node_name + '/reshape_out'})

        # connect input_reshape_node
        source = node.in_port(0).get_source()
        node.in_port(0).get_connection().set_source(reshape_in.out_port(0))
        reshape_in.in_port(0).connect(source)
        # connect output_reshape_node
        node.out_port(0).get_connection().set_source(reshape_out.out_port(0))
        node.out_port(0).connect(reshape_out.in_port(0))
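
Numerically, the Shape/VariadicSplit/Pow/Mul/Concat subgraph above assembles the target shape [N, C/patch_stride, 1, patch_stride] at runtime. The same arithmetic in plain numpy (a sketch assuming a 2D input [N, C] and an illustrative patch_stride):

import numpy as np

in_shape = np.array([8, 40], dtype=np.int64)  # [N, C], illustrative values
patch_stride = 10

n, c = in_shape[0:1], in_shape[1:2]           # VariadicSplit(axis=0, sizes=[1, -1])
h = c // patch_stride                         # the graph does this as Mul(C, Pow(patch_stride, -1))
new_shape = np.concatenate([n, h, [1], [patch_stride]])  # Concat(axis=0)
print(new_shape)                              # [ 8  4  1 10]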
Example no. 27
    def replace_pattern(graph: Graph, match: dict):
        log.debug(
            '================== GNMTBeforeConditionFind ==================')
        input_sequence_lengths = match['Max'].in_port(0).get_source().node
        encoder_sequence_lengths = looking_for_op_in_list([
            port.node
            for port in input_sequence_lengths.out_port(0).get_destinations()
        ], 'Identity')

        # The Sequence_length node in the encoder is found via the chain:
        # Sequence_length -> CheckSeqLen -> Max -> Maximum -> Minimum

        check_seq_len = looking_for_op_in_list([
            port.node for port in encoder_sequence_lengths.out_port(
                0).get_destinations()
        ], 'Identity')
        reduce_max = looking_for_op_in_list([
            port.node for port in check_seq_len.out_port(0).get_destinations()
        ], 'ReduceMax')
        maximum = reduce_max.out_port(0).get_destinations()[0].node
        assert maximum.op == 'Maximum'
        minimum = maximum.out_port(0).get_destinations()[0].node
        assert minimum.op == 'Minimum'

        tensor_seq_len = looking_for_op_in_list([
            minimum.in_port(port).get_source().node
            for port in minimum.in_ports()
        ], 'StridedSlice')

        # Create node for multiplying seq_len by 2
        const = Const(graph, {
            'name': 'FakeSeqLenMultiplyer',
            'value': np.array(2)
        }).create_node()
        mul_op = Mul(graph, {'name': 'FakeSeqLen'}).create_node()

        const.out_port(0).get_connection().set_destination(mul_op.in_port(1))
        tensor_seq_len.out_port(0).get_connection().add_destination(
            mul_op.in_port(0))

        # Connect seq_len * 2 to TensorArray from GNMT loop
        ta_writes = [
            port.node
            for port in match['Identity_1'].out_port(0).get_destinations()
            if port.node.op == 'TensorArrayWriteV3'
        ]

        for ta_write in ta_writes:
            ta = ta_write.in_port(0).get_source().node.in_port(
                0).get_source().node

            ta.in_port(0).disconnect()
            ta.in_port(0).get_connection().set_source(mul_op.out_port(0))

        if not graph.graph['cmd_params'].static_shape:
            log.error(
                "The model cannot be converted in a reshape-able way.\n"
                "The Model Optimizer key static_shape was turned on to prevent related errors.\n"
                "Changing input shapes of the model via the InferenceEngine reshape method "
                "will not succeed",
                extra={'is_warning': True})
            graph.graph['cmd_params'].static_shape = True
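
looking_for_op_in_list is used throughout this pass but not defined in the snippet. A plausible stand-in (an assumption about its behavior, not the actual helper) returns the first node in the list whose op matches:

def looking_for_op_in_list_sketch(nodes: list, op: str):
    # Hypothetical helper: first node with the requested op, or None.
    for node in nodes:
        if node is not None and node.soft_get('op') == op:
            return node
    return None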
Example no. 28
    def find_and_replace_pattern(self, graph: Graph):
        # Iterate over all data nodes and keep those with more than one unique StridedSlice consumer
        for input_data in list(graph.get_data_nodes()):
            # Skip constant data nodes
            if input_data.value is not None:
                continue

            input_shape = np.array(input_data.shape)

            # Get all unique StridedSlice consumers
            out_nodes = [node for node in input_data.out_nodes()
                         if node.op == 'StridedSlice' and node.in_node(0).name == input_data.name]
            sorted_out_nodes = sorted(out_nodes, key=lambda n: list(n.slices))
            out_nodes = unique_by(sorted_out_nodes, strided_slices_equality)
            if len(out_nodes) <= 1:
                continue

            valid_for_replacement = True

            for node in out_nodes:
                if len(node.slices) != len(out_nodes[0].slices):
                    valid_for_replacement = False

            # Detect dimension for splitting
            split_channel_dim = None
            for dim_id, s in enumerate(out_nodes[0].slices):
                l, r, stride = s.start, s.stop, s.step
                if l != 0 or r != input_shape[dim_id]:
                    if split_channel_dim is None:
                        split_channel_dim = dim_id
                    else:
                        valid_for_replacement = False

            if split_channel_dim is None:
                valid_for_replacement = False

            # split_dims contains tuples with split range and output data node
            split_dims = []
            for out_id, node in enumerate(out_nodes):
                # Check that each StridedSlice has stride 1 and slices only the feature channel
                for id, s in enumerate(node.slices):
                    l, r, stride = s.start, s.stop, s.step
                    # We don't support StridedSlice with stride != 1
                    if stride != 1:
                        valid_for_replacement = False
                    if id == split_channel_dim:
                        split_dims.append((s.start, s.stop, node.out_node()))

            if not valid_for_replacement:
                continue

            # Check feature split intersection
            final_data_nodes_list = []
            sorted_split_dims = sorted(split_dims, key=lambda item: (item[0], item[1]))

            # check if we have similar StridedSlice operations with different outputs
            prev_sd = sorted_split_dims[0]
            to_remove = []
            for i in range(1, len(sorted_split_dims)):
                if (sorted_split_dims[i][0] == prev_sd[0] and sorted_split_dims[i][1] == prev_sd[1]
                        and sorted_split_dims[i][2].name != prev_sd[2].name):
                    cur_node = sorted_split_dims[i][2]
                    for out in cur_node.out_nodes():
                        attrs = deepcopy(graph.get_edge_data(cur_node.id, out.id)[0])
                        graph.remove_edge(cur_node.id, out.id)
                        graph.add_edge(prev_sd[2].id, out.id, **attrs)
                    to_remove.append(i)

            for ind in reversed(to_remove):
                sorted_split_dims.pop(ind)

            size_splits = []
            prev_r = 0
            for l, r, out in sorted_split_dims:
                # Split dims shouldn't intersect
                if l < prev_r:
                    valid_for_replacement = False
                prev_r = r

            if prev_r > input_shape[split_channel_dim]:
                valid_for_replacement = False

            if not valid_for_replacement:
                continue

            prev_r = 0
            for l, r, out in sorted_split_dims:
                # Save missing tensor part
                if l > prev_r:
                    shape = np.array(input_shape)
                    size_splits.append(l - prev_r)
                    shape[split_channel_dim] = l - prev_r
                    data_node = Op._create_data_node(graph, 'fake_data_'+out_nodes[0].name, {'shape': shape})
                    add_opoutput(graph, data_node.id, 0, False)
                    final_data_nodes_list.append(data_node)

                prev_r = r
                size_splits.append(r - l)
                final_data_nodes_list.append(out)

            if prev_r < input_shape[split_channel_dim]:
                # Add last part of tensor
                shape = input_shape.copy()
                shape[split_channel_dim] = input_shape[split_channel_dim] - prev_r
                size_splits.append(input_shape[split_channel_dim] - prev_r)
                data_node = Op._create_data_node(graph, 'fake_data_'+out_nodes[0].name, {'shape': shape})
                add_opoutput(graph, data_node.id, 0, False)
                final_data_nodes_list.append(data_node)

            for node in out_nodes:
                if not np.all([x == 0 for x in node.shrink_axis_mask]):
                    out_node = node.out_node()
                    if np.any(node['shrink_axis_mask']):
                        self.add_squeeze_for_shrink(graph, node)
                    if np.any(node['new_axis_mask']):
                        self.add_unsqueeze_for_new(graph, node)

                    for i in range(len(final_data_nodes_list)):
                        if final_data_nodes_list[i].name == out_node.name:
                            final_data_nodes_list[i] = node.out_node()
                            break

            # Insert Split layer and remove old StridedSlice layers
            # 1. Remove connections from input_data to StridedSlice ops
            out_data_nodes = []
            name_for_future_split = out_nodes[0].name
            for node in out_nodes:
                out_data_nodes.append(node.out_node())
                graph.remove_edge(input_data.id, node.id)
                graph.remove_edge(node.id, node.out_node().id)
                graph.remove_node(node.id)
                log.debug("Removed: {}".format(node.id))

            # 2. Create Split layer and reorder outputs
            name = name_for_future_split + "/Split"
            axis_const = Const(graph, {'value': int64_array(split_channel_dim),
                                       'name': name + '/Axis'}).create_node_with_data()
            size_splits_const = Const(graph, {'value': int64_array(size_splits),
                                              'name': name + '/Sizes'}).create_node_with_data()
            split = VariadicSplit(graph, dict(name=name, out_ports_count=len(size_splits)))

            split.create_node_with_data(inputs=[input_data, axis_const, size_splits_const],
                                        data_nodes=final_data_nodes_list)
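
The equivalence this pass relies on is easy to check with plain numpy: non-overlapping stride-1 slices that jointly cover one axis are exactly a variadic split along that axis. A minimal sketch, independent of the Graph machinery:

import numpy as np

x = np.random.rand(1, 8, 4, 4)
split_channel_dim = 1
sorted_split_dims = [(0, 3), (3, 8)]                 # (l, r) per surviving StridedSlice
size_splits = [r - l for l, r in sorted_split_dims]  # [3, 5]

parts = np.split(x, np.cumsum(size_splits)[:-1], axis=split_channel_dim)
for (l, r), part in zip(sorted_split_dims, parts):
    assert np.array_equal(part, x[:, l:r])           # each Split output == its slice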
Example no. 29
    def replace_pattern(self, graph: Graph, match: dict):
        node = match['node']
        node_name = node.soft_get('name', node.id)

        if 2 in node.in_ports() and not node.in_port(2).disconnected():
            # Third input represents output shape. Cutting its value according to scheme:
            # [N, C, spatial_dim_0, ..., spatial_dim_n] -> [spatial_dim_0, ..., spatial_dim_n]
            in_rank = node.in_port(0).data.get_shape().size

            shape_src = node.in_port(2).get_source()
            node.in_port(2).disconnect()

            ss_0 = create_op_with_const_inputs(
                graph, StridedSlice, {
                    1: np.array([2], dtype=np.int32),
                    2: np.array([in_rank], dtype=np.int32),
                    3: np.array([1], dtype=np.int32)
                }, {
                    'name': node_name + '/ss_0_port',
                    'begin_mask': np.array([1], dtype=np.int32),
                    'end_mask': np.array([0], dtype=np.int32),
                    'new_axis_mask': np.array([0], dtype=np.int32),
                    'shrink_axis_mask': np.array([0], dtype=np.int32),
                    'ellipsis_mask': np.array([0], dtype=np.int32)
                })

            shape_src.connect(ss_0.in_port(0))
            ss_0.out_port(0).connect(node.in_port(2))

            # Specification: *padding amount* is deduced from relation of input and output spatial shapes
            del node['pad']

        elif node.has_valid('original_output_spatial_shape'):
            # node had fixed output spatial shape set in original framework, so we restore it here
            const = Const(
                graph, {
                    'value': int64_array(node.original_output_spatial_shape),
                    'name': node_name + '/original_spatial_shape'
                }).create_node()
            node.add_input_port(2, skip_if_exist=True)
            const.out_port(0).connect(node.in_port(2))

            # Specification: *padding amount* is deduced from relation of input and output spatial shapes
            del node['pad']

        group = node.soft_get('group', 1)

        if group != 1:
            assert group > 1

            weights_shape = node.in_port(1).data.get_shape()
            assert weights_shape is not None
            I = node.in_port(0).data.get_shape()[1]
            assert I % group == 0
            assert node.output % group == 0

            new_shape = int64_array(
                [group, I // group, node.output // group, *weights_shape[2:]])

            assert np.prod(weights_shape) == np.prod(new_shape), \
                'Initial weights shape {}, grouped weights shape {}'.format(weights_shape, new_shape)
            reshape = create_op_node_with_second_input(
                graph, Reshape, int64_array(new_shape),
                {'override_output_shape': True},
                node.in_port(1).get_source().node)

            node.in_port(1).get_connection().set_source(reshape.out_port(0))

            node['type'] = 'GroupConvolutionBackpropData'
        else:
            node['type'] = 'ConvolutionBackpropData'
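
The grouped reshape above only regroups the deconvolution weights; the total element count is unchanged, which the assert guards. A minimal numpy check of the same arithmetic (assuming ONNX-style deconvolution weights laid out as [C_in, C_out/group, *kernel]):

import numpy as np

group, I, O = 2, 8, 16                           # illustrative values
weights_shape = np.array([I, O // group, 3, 3])  # assumed [C_in, C_out/group, *kernel]
new_shape = np.array([group, I // group, O // group, *weights_shape[2:]])
assert np.prod(weights_shape) == np.prod(new_shape)  # 576 == 576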
Example no. 30
    def replace_pattern(graph: Graph, match: dict):
        node = match['op']
        pair_node = Node(graph, node.pair_name)

        if node.t >= 0:
            raise Error('Does not support IfDefined with t >= 0')

        if node.in_port(0).get_source() is not None:
            input_port = node.in_port(0).get_source()
            op_output_id = node.out_port(0).get_destination().node.id
            out_port = pair_node.out_port(0)
            node_name = node.name
            pair_name = pair_node.name
        else:
            input_port = pair_node.in_port(0).get_source()
            op_output_id = pair_node.out_port(0).get_destination().node.id
            out_port = node.out_port(0)
            node_name = pair_node.name
            pair_name = node.name

        in_shape = input_port.data.get_shape()
        node_t = abs(node.t)

        init_value_memory_out = Const(
            graph, {
                'name': 'init_value_' + pair_name,
                'value': np.zeros(
                    int64_array([in_shape[0], in_shape[1] * node_t])),
                'shape': int64_array([in_shape[0], in_shape[1] * node_t])
            }).create_node()
        memory_out = ReadValue(graph, {
            'name': pair_name,
            'variable_id': node_name + pair_name
        }).create_node()
        init_value_memory_out.out_port(0).connect(memory_out.in_port(0))

        if node_t > 1:
            crop_concat = Crop(
                graph, {
                    'name': 'Memory_crop',
                    'dim': np.array([in_shape[1] * (node_t - 1)]),
                    'offset': np.array([in_shape[1]]),
                    'axis': np.array([1])
                }).create_node()
            memory_out.out_port(0).connect(crop_concat.in_port(0))
            concat = Concat(graph, {'name': 'Memory_concat'}).create_node()
            concat.add_sequence_of_ports('in', range(2))
            crop_concat.out_port(0).connect(concat.in_port(0))
            concat.in_port(1).connect(input_port)

            memory_in = Assign(graph, {
                'name': node_name,
                'variable_id': node_name + pair_name
            }).create_node()
            concat.out_port(0).connect(memory_in.in_port(0))
            out = Result(graph, {'name': 'Memory_output'}).create_node()
            memory_in.out_port(0).connect(out.in_port(0))

            crop_out = Crop(
                graph, {
                    'name': 'Memory_crop_out',
                    'dim': np.array([in_shape[1]]),
                    'offset': np.array([0]),
                    'axis': np.array([1])
                }).create_node()
            memory_out.out_port(0).connect(crop_out.in_port(0))
            out_port.get_connection().set_source(crop_out.out_port(0))
        else:
            memory_in = Assign(graph, {
                'name': node_name,
                'variable_id': node_name + pair_name
            }).create_node()
            memory_in.in_port(0).connect(input_port)
            out = Result(graph, {'name': 'Memory_output'}).create_node()
            memory_in.out_port(0).connect(out.in_port(0))
            out_port.get_connection().set_source(memory_out.out_port(0))

        graph.remove_node(op_output_id)
        graph.remove_node(node.id)
        graph.remove_node(pair_node.id)
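
For |t| > 1 the Crop/Concat pair implements a sliding window over the last |t| frames held in the ReadValue/Assign state. The same data flow in plain numpy (a sketch of the update, not of the IR ops themselves):

import numpy as np

N, C, t = 1, 4, 3                     # batch, frame width, |node.t|
state = np.zeros((N, C * t))          # init_value_memory_out

def step(state, frame):
    kept = state[:, C:]                            # Crop 'Memory_crop': drop the oldest frame
    next_state = np.concatenate([kept, frame], 1)  # Concat 'Memory_concat' -> Assign
    delayed = state[:, :C]                         # Crop 'Memory_crop_out': delayed output
    return next_state, delayed

frame = np.ones((N, C))
state, out = step(state, frame)       # `out` stays zero until t frames have been fed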