def create_data_node(graph: Graph, op_node: Node, attrs: dict = None, edge_attrs: dict = None, out_port=0):
    """Create a fresh 'data' node and connect it to ``out_port`` of ``op_node``.

    :param graph: graph to add the node to.
    :param op_node: producing op node; must have kind 'op' and no node already
        attached at ``out_port``.
    :param attrs: extra attributes merged over the default data-node attributes.
    :param edge_attrs: extra attributes for the op->data edge (may override 'out').
    :param out_port: output port index of ``op_node`` to connect from.
    :return: the newly created data Node.
    """
    assert op_node is not None and op_node.kind == 'op'
    assert out_port not in op_node.out_nodes()

    node_id = graph.unique_id(op_node.id)
    node_attrs = dict(kind='data', name=node_id, value=None, shape=None, data_type=None, infer=None)
    node_attrs.update(attrs or {})
    graph.add_node(node_id, **add_attrs_props(node_attrs))
    data_node = Node(graph, node_id)

    # Caller-supplied edge attributes are applied last, so they may override 'out'
    # (same precedence as the original {'out': out_port, **edge_attrs} form).
    out_edge = {'out': out_port}
    if edge_attrs is not None:
        out_edge.update(edge_attrs)
    graph.add_edges_from([(op_node.id, data_node.id, out_edge)])
    return data_node
def update_node(self, node: Node, attrs: dict = None):
    """Update/create attributes on ``node`` from ``self.attrs`` merged with ``attrs``.

    ``attrs`` (when truthy) takes precedence over ``self.attrs``; the merged dict
    is post-processed by add_attrs_props / update_ie_fields / substitute_ie_attrs
    before being written onto the node.
    """
    merged = dict(self.attrs)
    if attrs:
        merged.update(attrs)
    merged = add_attrs_props(merged)
    update_ie_fields(merged, self.ir_version)
    self.substitute_ie_attrs(merged)
    for key, value in merged.items():
        node[key] = value
    node.update_node()
def add_node(self, attrs: dict = None):
    """Add a new op node to ``self.graph`` and return it as a Node.

    Merges ``self.attrs`` with ``attrs`` (the latter wins), generates a unique
    node id seeded from the 'name' attribute if present, then post-processes the
    attributes (add_attrs_props / update_ie_fields / substitute_ie_attrs) before
    insertion into the graph.

    :param attrs: extra attributes merged over ``self.attrs``; may be None.
    :return: the newly created Node.
    """
    new_attrs = {}
    new_attrs.update(self.attrs)
    if attrs is not None:
        new_attrs.update(attrs)
    id_prefix = new_attrs['name'] if 'name' in new_attrs else ''
    # Renamed from 'id' to avoid shadowing the builtin.
    node_id = self.graph.unique_id(id_prefix)
    # The node's 'name' is replaced by the unique id so it matches the graph key.
    new_attrs['name'] = node_id
    new_attrs = add_attrs_props(new_attrs)
    update_ie_fields(new_attrs, self.ir_version)
    self.substitute_ie_attrs(new_attrs)
    self.graph.add_node(node_id, **new_attrs)
    node = Node(self.graph, node_id)
    return node
def _create_data_node(graph: Graph, name: str, attrs: dict = None):
    """Create a detached 'data' node (no edges) with a unique id seeded by ``name``.

    :param graph: graph to add the node to.
    :param name: prefix used to generate the unique node id.
    :param attrs: extra attributes merged over the default data-node attributes.
    :return: the newly created data Node.
    """
    node_id = graph.unique_id(name)
    node_attrs = dict(kind='data', name=node_id, value=None, shape=None, data_type=None, infer=None)
    if attrs is not None:
        node_attrs.update(attrs)
    graph.add_node(node_id, **add_attrs_props(node_attrs))
    return Node(graph, node_id)
def create_input_data_node(graph: Graph, name: str, value: np.ndarray, attrs: dict = None):
    """Create a detached constant 'data' node holding ``value``.

    The node's 'value' and 'shape' attributes are initialized from ``value``
    (via mo_array); no edges are created.

    :param graph: graph to add the node to.
    :param name: prefix used to generate the unique node id.
    :param value: array-like constant; must expose ``.shape``.
        Note: annotation fixed from ``np.array`` (a factory function) to
        ``np.ndarray`` (the actual type).
    :param attrs: extra attributes merged over the defaults.
    :return: the newly created data Node.
    """
    if attrs is None:
        attrs = {}
    data_node = graph.unique_id(name)
    default_attrs = dict(kind='data', name=data_node, value=mo_array(value), shape=mo_array(value.shape),
                         data_type=None, infer=None)
    default_attrs.update(attrs)
    graph.add_node(data_node, **add_attrs_props(default_attrs))
    return Node(graph, data_node)
def create_and_connect_input_data_node(graph: Graph, op_node: Node, attrs: dict = None, edge_attrs: dict = None):
    """Create a 'data' node and connect it as an input of ``op_node``.

    :param graph: graph to add the node to.
    :param op_node: consuming op node; must have kind 'op'.
    :param attrs: extra attributes merged over the default data-node attributes.
    :param edge_attrs: attributes for the data->op edge; 'in' selects the input
        port (defaults to 0). Previously a missing 'in' key raised KeyError.
    :return: the newly created data Node.
    """
    assert op_node is not None and op_node.kind == 'op'
    if attrs is None:
        attrs = {}
    # Copy to avoid mutating the caller's dict; default the input port to 0
    # instead of crashing with KeyError when 'in' is absent.
    edge_attrs = dict(edge_attrs) if edge_attrs is not None else {}
    edge_attrs.setdefault('in', 0)

    data_node = graph.unique_id(op_node.id)
    default_attrs = dict(kind='data', name=data_node, value=None, shape=None, data_type=None, infer=None)
    default_attrs.update(attrs)
    graph.add_node(data_node, **add_attrs_props(default_attrs))
    data_node = Node(graph, data_node)

    op_node.add_input_port(edge_attrs['in'], skip_if_exist=True)
    graph.add_edges_from([(data_node.id, op_node.id, edge_attrs)])
    return data_node
def muladd_to_scaleshift_action(graph: Graph, match: dict):
    """Fuse a matched Mul->Add sub-graph into a single ScaleShift op.

    Expects ``match`` with 'mul', 'add' and 'output' nodes. Bails out (returning
    None) when the pattern is not fusable: multiple consumers, nodes marked as
    not convertible, missing weights/inputs, incompatible weight/bias shapes, or
    the scalar case that is handled by the Power conversion instead.

    Side effects on success: weights/bias values are squeezed in place, the old
    edges are removed and a new ScaleShift op node is wired between the input,
    weights, bias and output data nodes.
    """
    mul = match['mul']
    add = match['add']
    output = match['output']

    # Pass works correctly only in case when node have only 1 output
    if len(mul.out_port(0).get_destinations()) > 1:
        return

    if mul.soft_get('can_be_scaleshift') is False or add.soft_get('can_be_scaleshift') is False:
        return

    mul_weights_id = get_value_id(mul)
    mul_input_id = get_tensor_id(mul)
    add_weights_id = get_value_id(add)

    if mul_weights_id is None:
        log.debug("Mul->Add to ScaleShift: Mul {} has no weights".format(mul.name))
        return
    if mul_input_id is None:
        log.debug("Mul->Add to ScaleShift: Mul {} has no input".format(mul.name))
        return
    if add_weights_id is None:
        log.debug("Mul->Add to ScaleShift: Add {} has no weights".format(add.name))
        return

    # Renamed from 'input' to avoid shadowing the builtin.
    input_node = mul.in_node(mul_input_id)
    weights = mul.in_node(mul_weights_id)
    bias = add.in_node(add_weights_id)

    # Transform values: drop singleton dimensions and keep 'shape' consistent.
    weights.value = np.squeeze(weights.value)
    weights.shape = int64_array(weights.value.shape)

    bias.value = np.squeeze(bias.value)
    bias.shape = int64_array(bias.value.shape)

    # Broadcast weights if they are scalar
    if weights.value.ndim == 0 and bias.value.ndim == 1:
        weights.value = np.full(bias.shape, weights.value.item(), dtype=weights.value.dtype)
        weights.shape = int64_array(weights.value.shape)

    # BUG FIX: 'bias.shape != weights.shape' compared two numpy arrays, whose
    # truth value is ambiguous for multi-element shapes (raises ValueError).
    # np.array_equal also handles shapes of different rank safely.
    if not np.array_equal(bias.shape, weights.shape):
        log.warning('Mul->Add to ScaleShift conversion stopped {} != {}'.format(weights.shape, bias.shape))
        return

    if bias.value.ndim != weights.value.ndim or bias.value.size != weights.value.size:
        log.debug("Skipping Mul->Add to ScaleShift conversion for nodes {}, {} because of different weights "
                  "and biases".format(mul.name, add.name))
        return

    if bias.value.size == 1 and weights.value.size == 1:
        log.debug("Skipping Mul->Add to ScaleShift conversion for nodes {}, {}. Will be converted to Power"
                  "".format(mul.name, add.name))
        return

    op_name = "ScaleShift"
    log.debug("Fusing Mul->Add to {}. Input nodes: {} and {}, bias.shape = {}, weights.shape = {}"
              "".format(op_name, mul.id, add.id, bias.shape, weights.shape))

    # Detach the matched sub-graph before inserting the fused op.
    graph.remove_edge(input_node.node, mul.id)
    graph.remove_edge(weights.node, mul.id)
    graph.remove_edge(bias.node, add.id)
    graph.remove_edge(add.node, output.id)

    op_node = graph.unique_id(mul.name + '/Fused{}_'.format(op_name))
    graph.add_node(op_node, **add_attrs_props(
        dict(kind='op', type=op_name, name=op_node, op=op_name, data_type=input_node.data_type)))
    scsh = Node(graph, op_node)
    scsh.add_input_port(0)
    scsh.add_input_port(1)
    scsh.add_input_port(2)
    scsh.add_output_port(0)

    update_ie_fields(graph.node[op_node])

    graph.add_edges_from([
        (input_node.node, op_node, {'in': 0}),
        (weights.node, op_node, {'in': 1, 'bin': 'weights'}),
        (bias.node, op_node, {'in': 2, 'bin': 'biases'}),
        (op_node, output.node, {'out': 0})
    ])
    return
def create_node_with_data(self, inputs: list = None, attrs: dict = None,
                          data_nodes: [Node, np.ndarray, list] = None, edge_attrs: list = None):
    """
    Creates a new node with given inputs and attrs and also creates data node that
    holds the op output value. Inputs should be data nodes (not op nodes).
    Work for ops with a single output port only.
    Edge attributes in edge_attrs go in order of items in 'inputs'
    """
    if inputs is None:
        inputs = []
    if attrs is None:
        attrs = {}
    # No need to extract port, because input node should be a data node,
    # so there is no choice.
    new_op_node = self.add_node(attrs)
    # TODO Preserve debug information
    # Connect each non-None input data node to consecutive input ports of the op;
    # None entries are skipped entirely (their index is not connected).
    inputs_with_edge_attrs = []
    for i, inp in enumerate(inputs):
        if inp is None:
            continue
        edge_attr = {'in': i}
        # edge_attrs is positional, aligned with 'inputs'; entries past its end get defaults.
        if edge_attrs is not None and i < len(edge_attrs):
            edge_attr.update(edge_attrs[i])
        inputs_with_edge_attrs.append((inp.id, new_op_node.id, edge_attr))
        new_op_node.add_input_port(i, skip_if_exist=True)
    self.graph.add_edges_from(inputs_with_edge_attrs)
    # TODO: Extend to the case when multiple output ports
    # old_data_value/old_data_shape snapshot pre-existing values/shapes of supplied
    # data_nodes so re-inference can be checked against them below.
    old_data_value = [None]
    old_data_shape = [None]
    if data_nodes is None:
        # No output nodes supplied: create one fresh, empty data node.
        data_node = self.graph.unique_id()
        self.graph.add_node(
            data_node,
            **add_attrs_props(
                dict(kind='data', name=data_node, value=None, shape=None, data_type=None, infer=None)))
        data_nodes = [Node(self.graph, data_node)]
    else:
        if type(data_nodes) not in [list, np.ndarray]:
            data_nodes = [data_nodes]
        old_data_value = [
            data_node.value.copy() if data_node.has_valid('value') else None
            for data_node in data_nodes
        ]
        old_data_shape = [
            data_node.shape.copy() if data_node.has_valid('shape') else None
            for data_node in data_nodes
        ]
    # Output edge 'out' index follows the position of the data node in data_nodes.
    for id, data_node in enumerate(data_nodes):
        self.graph.add_edges_from([(new_op_node.id, data_node.id, {
            'out': id
        })])
    if new_op_node.has_valid('infer'):
        if log.getLogger().isEnabledFor(log.DEBUG):
            log.debug(
                'Start running infer function for individual op node with attributes: {}'
                ''.format(str(new_op_node)))
        new_op_node.infer(new_op_node)
        # Propagate the nchw_layout marker to all produced data nodes.
        if new_op_node.has('nchw_layout'):
            for out_node in new_op_node.out_nodes().values():
                out_node['nchw_layout'] = new_op_node.nchw_layout
        # Sanity check: re-inference must reproduce any pre-existing values/shapes
        # captured above (or there were none to compare against).
        assert all(
            old_value is None for old_value in old_data_value) or all([
                strict_compare_tensors(old_data_value[id], data_node.value)
                for id, data_node in enumerate(data_nodes)
            ])
        assert all(old_shape is None for old_shape in old_data_shape) or all(
            [strict_compare_tensors(old_data_shape[id], data_node.shape)
             for id, data_node in enumerate(data_nodes)]), \
            "After re-inference of {} node, old and new shapes do not match. Old shapes: {}, new shapes: {}." \
            "".format(new_op_node.soft_get('name'),
                      [old_data_shape[id] for id in range(len(data_nodes))],
                      [data_node.shape for data_node in data_nodes])
        for data_node in data_nodes:
            if log.getLogger().isEnabledFor(log.DEBUG):
                log.debug(
                    'Finished running infer function, data nodes attributes: {}'
                    .format(data_node))
    # Single output is returned unwrapped; multiple outputs as a list.
    return data_nodes[0] if len(data_nodes) == 1 else data_nodes