Example #1
    def _create_data_node(graph: Graph, name: str, attrs: dict = None):
        if attrs is None:
            attrs = {}

        data_node = graph.unique_id(name)
        default_attrs = dict(kind='data',
                             name=data_node,
                             value=None,
                             shape=None,
                             data_type=None,
                             infer=None)
        default_attrs.update(attrs)
        graph.add_node(data_node, **add_attrs_props(default_attrs))
        data_node = Node(graph, data_node)
        return data_node
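
A minimal usage sketch for the helper above, assuming it is callable as shown and that Graph and Node come from the surrounding Model Optimizer module (import paths and constructor details are assumptions, not verified API):

    # Hypothetical sketch; Graph() construction details may differ per MO version.
    graph = Graph()
    data = _create_data_node(graph, 'my_data')
    # The helper registers a data-kind node and returns a Node wrapper for it.
    assert data.kind == 'data'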
Example #2
    def add_node(self, attrs: dict = None):
        new_attrs = {}
        new_attrs.update(self.attrs)
        if attrs is not None:
            new_attrs.update(attrs)
        id_prefix = new_attrs['name'] if 'name' in new_attrs else ''
        id = self.graph.unique_id(id_prefix)
        new_attrs['name'] = id
        new_attrs = add_attrs_props(new_attrs)
        update_ie_fields(new_attrs, self.ir_version)
        self.substitute_ie_attrs(new_attrs)
        self.graph.add_node(id, **new_attrs)

        node = Node(self.graph, id)
        return node
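
A hedged sketch of how this method might be invoked, assuming op is an instance of the Op class that defines it (the attrs content here is purely illustrative):

    # Hypothetical sketch: create the underlying op node with merged attributes.
    node = op.add_node({'name': 'my_op'})
    # The returned Node wraps the freshly added graph node; its id is derived
    # from the given name via graph.unique_id().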
Example #3
def create_input_data_node(graph: Graph,
                           name: str,
                           value: np.ndarray,
                           attrs: dict = None):
    if attrs is None:
        attrs = {}
    data_node = graph.unique_id(name)
    default_attrs = dict(kind='data',
                         name=data_node,
                         value=np.array(value),
                         shape=np.array(value.shape),
                         data_type=None,
                         infer=None)
    default_attrs.update(attrs)
    graph.add_node(data_node, **add_attrs_props(default_attrs))
    return Node(graph, data_node)
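
Since create_input_data_node derives both value and shape from the array it receives, a hedged sketch of the call (names are illustrative only):

    # Hypothetical sketch: seed a constant input from a NumPy array.
    const_value = np.zeros((1, 3, 224, 224), dtype=np.float32)
    const_node = create_input_data_node(graph, 'const_input', const_value)
    # No explicit attrs are needed: shape and value are captured from the array.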
Example #4
    def create_data_node(graph: Graph, op_node: Node, attrs: dict = None, edge_attrs: dict = None, out_port=0):
        assert op_node is not None and op_node.kind == 'op'
        assert out_port not in op_node.out_nodes()

        if attrs is None:
            attrs = {}

        data_node = graph.unique_id(op_node.id)
        default_attrs = dict(kind='data', name=data_node, value=None, shape=None, data_type=None, infer=None)
        default_attrs.update(attrs)
        graph.add_node(data_node, **add_attrs_props(default_attrs))
        data_node = Node(graph, data_node)
        if edge_attrs is not None:
            graph.add_edges_from([(op_node.id, data_node.id, {'out': out_port, **edge_attrs})])
        else:
            graph.add_edges_from([(op_node.id, data_node.id, {'out': out_port})])
        return data_node
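
A hedged sketch of the intended call pattern: the helper attaches a fresh data node to a given output port of an existing op node (some_op is hypothetical):

    # Hypothetical sketch: some_op must already exist in `graph` with kind == 'op'
    # and must not yet have an output on port 0, per the asserts above.
    out_data = create_data_node(graph, some_op, out_port=0)
    assert some_op.out_node(0).id == out_data.id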
Example #5
    def _create_data_node(graph: nx.MultiDiGraph,
                          name: str,
                          attrs: dict = None):
        if attrs is None:
            attrs = {}

        data_node = unique_id(graph, name)
        default_attrs = dict(kind='data',
                             precision="FP32",
                             name=data_node,
                             value=None,
                             shape=None,
                             data_type=None,
                             infer=None)
        default_attrs.update(attrs)
        graph.add_node(data_node, **add_attrs_props(default_attrs))
        data_node = Node(graph, data_node)
        return data_node
Example #6
    def create_and_connect_input_data_node(graph: Graph,
                                           op_node: Node,
                                           attrs: dict = None,
                                           edge_attrs: dict = None):
        assert op_node is not None and op_node.kind == 'op'
        if attrs is None:
            attrs = {}
        if edge_attrs is None:
            edge_attrs = {}

        data_node = graph.unique_id(op_node.id)
        default_attrs = dict(kind='data',
                             name=data_node,
                             value=None,
                             shape=None,
                             data_type=None,
                             infer=None)
        default_attrs.update(attrs)
        graph.add_node(data_node, **add_attrs_props(default_attrs))
        data_node = Node(graph, data_node)
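        # NOTE: edge_attrs is expected to carry the input port index under 'in';
        # with the empty-dict default above, the lookup below would raise KeyError.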
        op_node.add_input_port(edge_attrs['in'], skip_if_exist=True)
        graph.add_edges_from([(data_node.id, op_node.id, edge_attrs)])
        return data_node
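
Accordingly, callers are expected to supply at least the input port index; a hedged sketch (some_op is illustrative):

    # Hypothetical sketch: create a data node and wire it into port 0 of some_op.
    in_data = create_and_connect_input_data_node(graph, some_op,
                                                 edge_attrs={'in': 0})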
Example #7
    def create_and_connect_input_data_node(graph: nx.MultiDiGraph,
                                           op_node: Node,
                                           attrs: dict = None,
                                           edge_attrs: dict = None):
        assert op_node is not None and op_node.kind == 'op'
        if attrs is None:
            attrs = {}
        if edge_attrs is None:
            edge_attrs = {}

        data_node = unique_id(graph, op_node.id)
        default_attrs = dict(kind='data',
                             precision="FP32",
                             name=data_node,
                             value=None,
                             shape=None,
                             data_type=None,
                             infer=None)
        default_attrs.update(attrs)
        graph.add_node(data_node, **add_attrs_props(default_attrs))
        data_node = Node(graph, data_node)
        graph.add_edges_from([(data_node.id, op_node.id, edge_attrs)])
        return data_node
Example #8
    def replace_pattern(self, graph: Graph, match: dict):
        """
        Converts the NasNet-specific subgraph Pad->StridedSlice->AvgPool to
        Conv->Crop->AvgPool.
        """
        input = match['input']

        pad_op = match['pad_op']

        sslice = match['sslice']
        sslice_out = match['sslice_out']
        begin = []
        end = []
        stride = []
        for s in sslice.slices:
            begin.append(s.start)
            end.append(s.stop)
            stride.append(s.step)

        if not np.array_equal(pad_op.pads,
                              np.array([[0, 0], [0, 1], [0, 1], [0, 0]])):
            log.error(" Pad values doesn't match!")
            return

        if not np.array_equal(begin, np.array([0, 1, 1, 0])):
            log.error("StridedSlice has wrong begin")
            return

        if not np.array_equal(sslice.end_mask, np.array(
            [0, 0, 0, 0])) or not np.array_equal(sslice.begin_mask,
                                                 np.array([0, 1, 1, 0])):
            log.error("StridedSlice has wrong masks")
            return

        # Cut edges: input -x-> Pad and StridedSlice -x-> AvgPool
        graph.remove_edge(input.id, pad_op.id)
        graph.remove_edge(sslice.id, sslice_out.id)

        # Pad -> Conv
        conv_node = graph.unique_id(pad_op.name + '/Conv_')
        conv_weights_node = graph.unique_id(pad_op.name + '/ConvW_')
        conv_weights = np.ones((input.shape[3], 1, 1, 1))
        conv_output = graph.unique_id(pad_op.name + '/ConvOut_')
        output_shape = np.array([
            input.shape[0], input.shape[1] + 1, input.shape[2] + 1,
            input.shape[3]
        ])

        graph.add_node(
            conv_node,
            **add_attrs_props(
                dict(kind='op',
                     type='Convolution',
                     name=conv_node,
                     op='Conv2D',
                     stride=np.array([1, 1, 1, 1]),
                     dilation=np.array([1, 1, 1, 1]),
                     group=input.shape[3],
                     bias_addable=True,
                     bias_term=False,
                     spatial_dims=np.array([1, 2]),
                     kernel_spatial=np.array([1, 1]),
                     pad=np.array([[0, 0], [0, 1], [0, 1], [0, 0]]),
                     output_shape=output_shape,
                     channel_dims=np.array([3]),
                     output=input.shape[3],
                     in_ports_count=3,
                     out_ports_count=1)))

        graph.add_node(
            conv_weights_node,
            **add_attrs_props(
                dict(kind='data',
                     name=conv_weights_node,
                     value=np.array(conv_weights),
                     shape=np.array(conv_weights.shape),
                     data_type=input.data_type,
                     infer=None,
                     spatial_dims=np.array([0, 1]),
                     input_channel_dim=2,
                     output_channel_dim=3,
                     dims_number=4,
                     can_be_bias=True)))
        graph.add_node(
            conv_output,
            **add_attrs_props(
                dict(kind='data',
                     name=conv_output,
                     value=None,
                     shape=output_shape,
                     data_type=input.data_type)))

        # StridedSlice -> Crop
        crop = Crop(
            graph,
            dict(name=sslice.name + '/Crop_',
                 axis=np.array([1, 2]),
                 dim=np.array([output_shape[1] - 1, output_shape[2] - 1]),
                 offset=np.array([1, 1])))
        crop.create_node_with_data([Node(graph, conv_output)],
                                   data_nodes=sslice_out)

        # Connect : Conv->Crop->AvgPool
        graph.add_edges_from([
            (input.id, conv_node, {
                'in': 0
            }),
            (conv_weights_node, conv_node, {
                'in': 1,
                'bin': 'weights'
            }),
            (conv_node, conv_output, {
                'out': 0
            }),
        ])
        update_ie_fields(graph.node[conv_node], graph.graph['ir_version'])
Example #9
    def create_node_with_data(self,
                              inputs: list = None,
                              attrs: dict = None,
                              data_nodes: [Node, np.ndarray, list] = None,
                              edge_attrs: list = None):
        """
        Creates a new node with given inputs and attrs and also creates data node that
        holds the op output value. Inputs should be data nodes (not op nodes).
        Work for ops with a single output port only.
        Edge attributes in edge_attrs go in order of items in 'inputs'
        """
        if inputs is None:
            inputs = []
        if attrs is None:
            attrs = {}
        # No need to extract port, because input node should be a data node,
        # so there is no choice.
        new_op_node = self.add_node(attrs)

        # TODO Preserve debug information
        inputs_with_edge_attrs = []
        for i, inp in enumerate(inputs):
            if inp is None:
                continue
            edge_attr = {'in': i}
            if edge_attrs is not None and i < len(edge_attrs):
                edge_attr.update(edge_attrs[i])
            inputs_with_edge_attrs.append((inp.id, new_op_node.id, edge_attr))
            new_op_node.add_input_port(i, skip_if_exist=True)

        self.graph.add_edges_from(inputs_with_edge_attrs)

        # TODO: Extend to the case when multiple output ports
        old_data_value = [None]
        old_data_shape = [None]
        if data_nodes is None:
            data_node = self.graph.unique_id()
            self.graph.add_node(
                data_node,
                **add_attrs_props(
                    dict(kind='data',
                         name=data_node,
                         value=None,
                         shape=None,
                         data_type=None,
                         infer=None)))
            data_nodes = [Node(self.graph, data_node)]
        else:
            if type(data_nodes) not in [list, np.ndarray]:
                data_nodes = [data_nodes]
            old_data_value = [
                data_node.value.copy()
                if data_node.has_valid('value') else None
                for data_node in data_nodes
            ]
            old_data_shape = [
                data_node.shape.copy()
                if data_node.has_valid('shape') else None
                for data_node in data_nodes
            ]
        for id, data_node in enumerate(data_nodes):
            self.graph.add_edges_from([(new_op_node.id, data_node.id, {
                'out': id
            })])

        if new_op_node.has_valid('infer'):
            if log.getLogger().isEnabledFor(log.DEBUG):
                log.debug(
                    'Start running infer function for individual op node with attributes: {}'
                    ''.format(str(new_op_node)))
            new_op_node.infer(new_op_node)
            if new_op_node.has('nchw_layout'):
                for out_node in new_op_node.out_nodes().values():
                    out_node['nchw_layout'] = new_op_node.nchw_layout
            assert all(
                old_value is None for old_value in old_data_value) or all([
                    strict_compare_tensors(old_data_value[id], data_node.value)
                    for id, data_node in enumerate(data_nodes)
                ])
            assert all(old_shape is None for old_shape in old_data_shape) or all(
                [strict_compare_tensors(old_data_shape[id], data_node.shape)
                 for id, data_node in enumerate(data_nodes)]), \
                "After re-inference of {} node, old and new shapes do not match. Old shapes: {}, new shapes: {}." \
                "".format(new_op_node.soft_get('name'), [old_data_shape[id] for id in range(len(data_nodes))],
                          [data_node.shape for data_node in data_nodes])
            for data_node in data_nodes:
                if log.getLogger().isEnabledFor(log.DEBUG):
                    log.debug(
                        'Finished running infer function, data nodes attributes: {}'
                        .format(data_node))
        return data_nodes[0] if len(data_nodes) == 1 else data_nodes
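
A hedged sketch of the calling pattern the docstring describes, assuming op is an Op subclass instance and the inputs are pre-existing data nodes (all names are illustrative, not a verified Model Optimizer API):

    # Hypothetical sketch: `op` is an Op instance; data_in and weights_data are
    # data nodes already present in the graph.
    out_data = op.create_node_with_data(inputs=[data_in, weights_data],
                                        attrs={'name': 'fused_op'},
                                        edge_attrs=[{}, {'bin': 'weights'}])
    # With a single output port, the single output data node is returned directly.
    assert out_data.kind == 'data'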
Example #10
def muladd_to_scaleshift_action(graph: Graph, match: dict):
    mul = match['mul']
    add = match['add']
    output = match['output']

    # The pass works correctly only when the node has a single output
    if len(mul.out_port(0).get_destinations()) > 1:
        return

    if mul.soft_get('can_be_scaleshift') is False or add.soft_get(
            'can_be_scaleshift') is False:
        return

    mul_weights_id = get_value_id(mul)
    mul_input_id = get_tensor_id(mul)
    add_weights_id = get_value_id(add)

    if mul_weights_id is None:
        log.debug("Mul->Add to ScaleShift: Mul {} has no weights".format(
            mul.name))
        return
    if mul_input_id is None:
        log.debug("Mul->Add to ScaleShift: Mul {} has no input".format(
            mul.name))
        return
    if add_weights_id is None:
        log.debug("Mul->Add to ScaleShift: Add {} has no weights".format(
            add.name))
        return

    input = mul.in_node(mul_input_id)
    weights = mul.in_node(mul_weights_id)
    bias = add.in_node(add_weights_id)

    # Transform values
    weights.value = np.squeeze(weights.value)
    weights.shape = np.array(weights.value.shape, dtype=np.int64)

    bias.value = np.squeeze(bias.value)
    bias.shape = np.array(bias.value.shape, dtype=np.int64)

    # Broadcast weights if they are scalar
    if weights.value.ndim == 0 and bias.value.ndim == 1:
        weights.value = np.full(bias.shape, weights.value.item())
        weights.shape = np.array(weights.value.shape, dtype=np.int64)

    if not np.array_equal(bias.shape, weights.shape):
        log.warning('Mul->Add to ScaleShift conversion stopped: {} != {}'.format(
            weights.shape, bias.shape))
        return

    if bias.value.ndim != weights.value.ndim or bias.value.size != weights.value.size:
        log.debug(
            "Skipping Mul->Add to ScaleShift conversion for nodes {}, {} because of different weights "
            "and biases".format(mul.name, add.name))
        return

    if bias.value.size == 1 and weights.value.size == 1:
        log.debug(
            "Skipping Mul->Add to ScaleShift conversion for nodes {}, {}. Will be converted to Power"
            "".format(mul.name, add.name))
        return

    op_name = "ScaleShift"

    log.debug(
        "Fusing Mul->Add to {}. Input nodes: {} and {}, bias.shape = {}, weights.shape = {}"
        "".format(op_name, mul.id, add.id, bias.shape, weights.shape))

    graph.remove_edge(input.node, mul.id)
    graph.remove_edge(weights.node, mul.id)
    graph.remove_edge(bias.node, add.id)
    graph.remove_edge(add.node, output.id)

    op_node = graph.unique_id(mul.name + '/Fused{}_'.format(op_name))

    graph.add_node(
        op_node,
        **add_attrs_props(
            dict(kind='op',
                 type=op_name,
                 name=op_node,
                 op=op_name,
                 data_type=input.data_type)))
    scsh = Node(graph, op_node)
    scsh.add_input_port(0)
    scsh.add_input_port(1)
    scsh.add_input_port(2)
    scsh.add_output_port(0)

    update_ie_fields(graph.node[op_node])

    graph.add_edges_from([(input.node, op_node, {
        'in': 0
    }), (weights.node, op_node, {
        'in': 1,
        'bin': 'weights'
    }), (bias.node, op_node, {
        'in': 2,
        'bin': 'biases'
    }), (op_node, output.node, {
        'out': 0
    })])

    return
Example #11
def _fuse_add(graph: nx.MultiDiGraph,
              node: Node,
              fuse_nodes: list,
              backward: bool = True):
    """
    This function takes Add node and Convolution/FC nodes for further fusion and then deletes Add node
    In case if Convolution/FC Bias absence it will be created
    """
    is_fused = False
    const_id, tensor_id = get_value_id(node), get_tensor_id(node)

    if const_id is None or tensor_id is None:
        log.warning(
            'Cannot do fuse_add for node {} because this node has wrong inputs'
            .format(node.id))
        return False

    # if len(node.in_node(const_id).shape) > 2 or any([x == 0 for x in node.in_node(const_id).shape]):
    #     log.warning('Cannot do fuse_add for node {} because this node has wrong shape'.format(node.id))
    #     return False

    for fuse_node in fuse_nodes:
        if fuse_node.soft_get('can_be_fused') is False:
            log.warning(
                'Node {} can\'t be used in fusing due to user specified attr can_be_fused = False'
                .format(fuse_node.id))
            return False
        if not fuse_node.has_valid('layout'):
            log.warning('Node {} has no layout attr'.format(fuse_node.id))
            return False
        if len(fuse_node.in_nodes()) < 2:
            log.warning('Node {} has no weights node'.format(fuse_node.id))
            return False

    for fuse_node in fuse_nodes:
        value = np.array(node.in_node(const_id).value)

        # If forward, broadcast value
        if not backward:
            # Integer port-count ratio: float repeats would break np.repeat/np.tile.
            cnt = fuse_node.in_node(1).shape[-1] // node.in_node(const_id).shape[0]
            if fuse_node.layout == 'NCHW':
                tmp = []
                for val in value:
                    tmp = np.concatenate((tmp, np.repeat(val, cnt)))
                value = np.array(tmp)
            else:
                value = np.tile(value, cnt)

        value = np.squeeze(value)

        # Create a bias data node if one does not exist
        if len(fuse_node.in_nodes()) <= 2:
            bias_data = unique_id(graph, "bias_data")
            data_type = fuse_node.in_node(1).data_type
            # Broadcast if scalar
            if value.size == 1:
                id = fuse_node.in_node(
                    1).output_channel_dim if backward else fuse_node.in_node(
                        1).input_channel_dim
                vshape = fuse_node.in_node(1).shape[id]
                value = np.full(vshape, value.item())

            if not backward:
                value = np.dot(fuse_node.in_node(1).value, value)

            shape = value.shape

            graph.add_node(
                bias_data,
                **add_attrs_props(
                    dict(kind='data',
                         precision="FP32",
                         name=bias_data,
                         value=value,
                         shape=shape,
                         data_type=data_type)))
            graph.add_edges_from([(bias_data, fuse_node.id, {
                'in': 2,
                'bin': 'biases'
            })])
            fuse_node['bias_term'] = True
        else:
            if not backward:
                fuse_node.in_node(2).value += np.dot(
                    fuse_node.in_node(1).value, value)
            else:
                fuse_node.in_node(2).value += value

        log.debug('Fused: {} to {}'.format(node.name, fuse_node.name))
        is_fused = True

    if is_fused:
        # Delete Add node
        out_node = node.out_node()
        op_data_node = node.in_node(tensor_id)
        op_const_node = node.in_node(const_id)
        op_node = op_data_node.in_node(0)
        graph.remove_edge(node.id, out_node.id)
        graph.remove_edge(op_node.id, op_data_node.id)
        graph.remove_edge(op_const_node.id, node.id)
        # Connect nodes after deleting
        graph.add_edge(op_node.id, out_node.id, out=0)
        for idx in reversed(range(len(op_data_node.out_nodes()))):
            out_data = op_data_node.out_nodes()[idx]
            edge_attrs = graph.get_edge_data(op_data_node.id, out_data.id)[0]
            if out_data.id != node.id:
                graph.remove_edge(op_data_node.id, out_data.id)
                graph.add_edges_from([(out_node.id, out_data.id, edge_attrs)])

    return is_fused
Example #12
def muladd_to_scaleshift_action(graph: nx.MultiDiGraph, match: dict):
    mul = match['mul']
    add = match['add']
    output = match['output']

    if mul.soft_get('can_be_scaleshift') is False or add.soft_get(
            'can_be_scaleshift') is False:
        return

    mul_weights_id = get_value_id(mul)
    mul_input_id = get_tensor_id(mul)
    add_weights_id = get_value_id(add)

    if mul_weights_id is None:
        log.debug("Mul->Add to ScaleShift: Mul {} has no weights".format(
            mul.name))
        return
    if mul_input_id is None:
        log.debug("Mul->Add to ScaleShift: Mul {} has no input".format(
            mul.name))
        return
    if add_weights_id is None:
        log.debug("Mul->Add to ScaleShift: Add {} has no weights".format(
            add.name))
        return

    input = mul.in_node(mul_input_id)
    weights = mul.in_node(mul_weights_id)
    bias = add.in_node(add_weights_id)

    # Transform values
    weights.value = np.squeeze(weights.value)
    weights.shape = weights.value.shape

    bias.value = np.squeeze(bias.value)
    bias.shape = bias.value.shape

    # Broadcast weights if they are scalar
    if weights.value.ndim == 0 and bias.value.ndim == 1:
        weights.value = np.full(bias.shape, weights.value.item())
        weights.shape = weights.value.shape

    if bias.shape != weights.shape:
        log.warning('Mul->Add to ScaleShift conversion stopped: {} != {}'.format(
            weights.shape, bias.shape))
        return

    if bias.value.ndim != weights.value.ndim or bias.value.size != weights.value.size:
        log.debug(
            "Skipping Mul->Add to scaleshift or power conversion for nodes {}, {} because of different weights "
            "and biases".format(mul.name, add.name))
        return

    op_name = "ScaleShift"
    if bias.value.size == 1 and weights.value.size == 1:
        op_name = "Power"

    log.debug(
        "Fusing Mul->Add to {}. Input nodes: {} and {}, bias.shape = {}, weights.shape = {}"
        "".format(op_name, mul.id, add.id, bias.shape, weights.shape))

    graph.remove_edge(input.node, mul.id)
    graph.remove_edge(weights.node, mul.id)
    graph.remove_edge(bias.node, add.id)
    graph.remove_edge(add.node, output.id)

    op_node = unique_id(graph, mul.name + '/Fused{}_'.format(op_name))
    if op_name == 'ScaleShift':
        graph.add_node(
            op_node,
            **add_attrs_props(
                dict(kind='op',
                     precision="FP32",
                     type=op_name,
                     name=op_node,
                     op=op_name,
                     data_type=input.data_type)))
        update_ie_fields(graph.node[op_node])
        graph.add_edges_from([(input.node, op_node, {
            'in': 0
        }), (weights.node, op_node, {
            'in': 1,
            'bin': 'weights'
        }), (bias.node, op_node, {
            'in': 2,
            'bin': 'biases'
        }), (op_node, output.node, {
            'out': 0
        })])
    else:
        graph.add_node(
            op_node,
            **add_attrs_props(
                dict(kind='op',
                     precision="FP32",
                     type=op_name,
                     name=op_node,
                     op=op_name,
                     data_type=input.data_type,
                     power=1,
                     scale=weights.value.item(),
                     shift=bias.value.item())))
        update_ie_fields(graph.node[op_node])
        graph.add_edges_from([(input.node, op_node, {
            'in': 0
        }), (op_node, output.node, {
            'out': 0
        })])

    return