    def process_transforms(self, light_graph, prune=True):
        """
        Returns a new LightGraph object that is the result of performing the
        transformations returned by self.get_transforms(light_graph) on the
        given light_graph.
        """
        transforms = self.concat_transforms(self.get_transforms(light_graph))

        # Remember that light_graph is immutable, so we mutate nodes,
        # input_edges, and output_edges locally, then create a new LightGraph
        nodes = light_graph.nodes()
        input_edges = light_graph.input_edges()
        output_edges = light_graph.output_edges()
        output_node_names = light_graph.output_node_names()
        meta_graph_info = self.get_meta_graph_info(
            light_graph.meta_graph_info())

        self._add_nodes(transforms.to_add, nodes, input_edges, output_edges)
        self._replace_nodes(transforms.to_replace, nodes, input_edges,
                            output_edges)
        self._reroute_nodes(transforms.to_reroute, nodes, input_edges,
                            output_edges)
        self._output_swap(transforms.to_output_swap, output_node_names)

        # Create transformed graph and prune it
        transformed_graph = lgf_graph.LightGraph(
            nodes,
            input_edges=input_edges,
            output_edges=output_edges,
            output_node_names=output_node_names,
            meta_graph_info=meta_graph_info)
        if prune:
            transformed_graph = transformed_graph.prune_graph()

        return transformed_graph
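
A minimal usage sketch for this method, mirroring the FoldPhasifyConstants call in matmul_graph further below; light_graph, hw_spec, sw_config, and sim_params are assumed to already exist in the surrounding test setup, and any transform class with this constructor signature would work:

    folder = fold_phasify_constants.FoldPhasifyConstants(hw_spec, sw_config, sim_params)
    transformed = folder.process_transforms(light_graph)  # prune defaults to True
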
    def sv_max_graph(inp_shape=(1, 100),
                     inp_dtype_t=dtypes_pb2.DT_QINT,
                     inp_dtype_p=8,
                     num_nodes=1):
        inp1 = lgf_pb2.EdgeInfo()
        inp1.name = "inp_ten1"
        inp1.port = 0
        inp1.dtype.t = inp_dtype_t
        inp1.dtype.p = inp_dtype_p
        inp1.shape.d.extend(inp_shape)

        nodes = []
        last_edge = inp1
        for i in range(num_nodes):
            outp = lgf_pb2.EdgeInfo()
            outp.CopyFrom(inp1)
            outp.name = "out_ten_{0}".format(i)

            n = lgf_pb2.LNF()
            n.name = outp.name
            n.sv_max.SetInParent()
            n.inputs.add().CopyFrom(last_edge)
            n.outputs.add().CopyFrom(outp)
            n.sv_max.scalar = 64
            n.supported = True
            last_edge = n.outputs[0]
            nodes.append(n)

        return lgf_graph.LightGraph(nodes,
                                    input_edges=[inp1],
                                    output_edges=[last_edge])
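
A quick usage sketch for this builder; the assertions simply restate what the loop above constructs and assume the LightGraph accessors (nodes(), input_edges(), output_edges()) used elsewhere in these examples:

    g = sv_max_graph(num_nodes=3)
    assert len(g.nodes()) == 3
    assert g.input_edges()[0].name == "inp_ten1"
    assert g.output_edges()[0].name == "out_ten_2"
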
Example #3
    def _get_phases_and_dequant_scales(self, new_opu_node, light_graph,
                                       phasify_node, phasify_subgraph_nodes,
                                       transform_result):
        # Create a subgraph
        subgraph = lgf_graph.LightGraph(phasify_subgraph_nodes,
                                        output_edges=phasify_node.outputs)

        # Run the graph
        subgraph_inputs = utils.create_inference_inputs([], [])
        runner = graph_runner.GraphRunner(subgraph, self._hw_specs,
                                          self._sw_config, self._sim_params)
        out_inf = runner.run_single_batch(subgraph_inputs)

        # Get numpy arrays
        phasify_output_arrays = [
            utils.tensor_pb_to_array(named_tensor.data, np.float32)
            for named_tensor in out_inf.results
        ]

        # Get the adc scales node
        adc_scales_node = light_graph.get_node_by_name(phasify_node.inputs[
            lgf_pb2.PhasifyNode.ADC_SCALES_INPUT_INDEX].name)

        # Create constant nodes
        phases_node = self.create_const_node(
            phasify_output_arrays[lgf_pb2.PhasifyNode.PHASES_OUTPUT_INDEX],
            new_opu_node.name + "_phases",
            new_opu_node.inputs[lgf_pb2.MatMulNode.PHASES_INDEX].dtype,
            lgf_pb2.ConstNode.WEIGHTS)
        phases_node.const.weight_rows = self._get_weight_rows(
            light_graph, phasify_node)
        if not self._sw_config.disable_block_sparsity:
            self._add_block_sparsity(
                phases_node,
                phasify_output_arrays[lgf_pb2.PhasifyNode.PHASES_OUTPUT_INDEX])
        dequant_scales_node = self.create_const_node(
            phasify_output_arrays[
                lgf_pb2.PhasifyNode.DEQUANT_SCALES_OUTPUT_INDEX],
            new_opu_node.name + "_dequant_scales",
            new_opu_node.inputs[lgf_pb2.MatMulNode.DEQUANT_SCALES_INDEX].dtype,
            lgf_pb2.ConstNode.DEQUANT_SCALE)
        new_adc_scales_node = self.create_const_node(
            phasify_output_arrays[lgf_pb2.PhasifyNode.ADC_SCALES_OUTPUT_INDEX],
            adc_scales_node.name,
            new_opu_node.inputs[lgf_pb2.MatMulNode.ADC_SCALES_INDEX].dtype,
            adc_scales_node.const.const_type)

        # Update the new opu node inputs
        new_opu_node.inputs[lgf_pb2.MatMulNode.PHASES_INDEX].CopyFrom(
            phases_node.outputs[0])
        new_opu_node.inputs[lgf_pb2.MatMulNode.DEQUANT_SCALES_INDEX].CopyFrom(
            dequant_scales_node.outputs[0])
        new_opu_node.inputs[lgf_pb2.MatMulNode.ADC_SCALES_INDEX].CopyFrom(
            new_adc_scales_node.outputs[0])

        transform_result.CopyFrom(
            self.create_transform_result(
                to_add=[phases_node, dequant_scales_node],
                to_replace=[new_adc_scales_node]))
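
The "create a subgraph, run it, convert the results to numpy" pattern above can be captured in a small helper; the sketch below is hypothetical (the name _run_constant_subgraph is not from the original class) but uses only the calls shown in this example (GraphRunner, run_single_batch, create_inference_inputs, tensor_pb_to_array):

    def _run_constant_subgraph(self, nodes, output_edges):
        # Run a side graph that needs no inputs and return its outputs as arrays
        subgraph = lgf_graph.LightGraph(nodes, output_edges=output_edges)
        runner = graph_runner.GraphRunner(subgraph, self._hw_specs,
                                          self._sw_config, self._sim_params)
        out_inf = runner.run_single_batch(utils.create_inference_inputs([], []))
        return [
            utils.tensor_pb_to_array(named_tensor.data, np.float32)
            for named_tensor in out_inf.results
        ]
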
Example #4
    def _get_fetches(self, sess):
        # Get the subgraph of nodes necessary to run the output ops
        output_node_names = [
            self.get_node_name_and_output_index(ten_name)[0]
            for ten_name in self._output_tensor_names
        ]
        subgraph = tf.graph_util.extract_sub_graph(sess.graph_def,
                                                   output_node_names)
        subgraph_node_names = {n.name for n in subgraph.node}

        # Get fetches
        fetches = set()
        if self._input_tensor_names:
            fetches = fetches.union(set(self._input_tensor_names))
        if self._output_tensor_names:
            fetches = fetches.union(set(self._output_tensor_names))
        for op in sess.graph.get_operations():
            # Skip ops that the output does not depend on
            if op.node_def.name not in subgraph_node_names:
                continue

            # Go through inputs of the op. TF seems to ignore output tensors of
            # an op if no other node in the graph asks for that tensor.
            op_lnf = self._init_lnf_from_tf_node_def(op.node_def)
            ignore_op = self._ignore_nodes_filter.matches(
                op_lnf, lgf_graph.LightGraph([op_lnf]))

            for inp in op.inputs:
                # Skip constant tensors
                if inp.op.type == "Const":
                    continue

                # If we have (ignore) --> (ignore), we can skip
                inp_lnf = self._init_lnf_from_tf_node_def(inp.op.node_def)
                ignore_inp = self._ignore_nodes_filter.matches(
                    inp_lnf, lgf_graph.LightGraph([inp_lnf]))
                if ignore_op and ignore_inp:
                    continue

                # Otherwise, we will need the tensor because we need
                # inputs and outputs of all (not ignore) nodes. Cases here are
                # (not ignore) --> (not ignore), (ignore) --> (not ignore),
                # (not ignore) --> (ignore)
                fetches.add(inp.name)

        return list(fetches)
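
A sketch of how the returned list is typically consumed from inside the same class; this assumes a TF1-style session (tensor names are valid fetches for sess.run) and is not taken from the original code:

    fetches = self._get_fetches(sess)
    fetched_values = sess.run(fetches)
    name_to_value = dict(zip(fetches, fetched_values))
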
    def _subgraph_nodes_to_subgraph(self, subgraph_nodes, light_graph):
        """
        Params:
            subgraph_nodes: a list of nodes that form a subgraph of supported nodes
            light_graph: original light graph the subgraph_nodes were extracted from

        Returns:
            subgraph: a LightGraph object for the nodes in subgraph_nodes
        """
        subgraph_node_names = {n.name for n in subgraph_nodes}
        original_output_node_names = set(light_graph.output_node_names())
        light_graph_output_edges = light_graph.output_edges()
        # Create a list of edges that nodes outside the subgraph need.
        # This list may contain duplicates.
        inputs_for_other_nodes = []
        for node in light_graph.nodes():
            if node.name not in subgraph_node_names:
                inputs_for_other_nodes.extend(node.inputs)

        # Get the inputs and outputs of the subgraph
        input_edges = []
        output_edges = []
        output_node_names = []
        control_inputs = set()

        for node in subgraph_nodes:
            # If a node has an input edge that is not produced inside the
            # subgraph, that edge must be an input of the subgraph
            for e in node.inputs:
                if (e.name not in subgraph_node_names
                        and not self._edge_in_list(e, input_edges)):
                    input_edges.append(e)

            # If a node has a control input that is not found in the subgraph, it
            # must be a control input to the subgraph
            for inp_name in node.control_inputs:
                if inp_name not in subgraph_node_names:
                    control_inputs.add(inp_name)

            # If one of a node's output edges is an output of light_graph, or is
            # needed by a node outside the subgraph, then it must be an output
            # of the subgraph
            for e in node.outputs:
                if (self._edge_in_list(e, light_graph_output_edges)
                        or self._edge_in_list(e, inputs_for_other_nodes)):
                    if not self._edge_in_list(e, output_edges):
                        output_edges.append(e)

            # If a node was an output node in the original graph, then it must
            # also be an output node of the subgraph
            if node.name in original_output_node_names:
                output_node_names.append(node.name)

        subgraph = lgf_graph.LightGraph(subgraph_nodes,
                                        input_edges=input_edges,
                                        output_edges=output_edges,
                                        output_node_names=output_node_names)
        return subgraph, control_inputs
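
The _edge_in_list helper referenced above is not shown in this example; a plausible sketch, assuming two EdgeInfo protos denote the same edge when their name and port match (the real helper may compare more fields such as dtype or shape):

    def _edge_in_list(self, edge, edge_list):
        # Hypothetical implementation: match edges by (name, port)
        return any(e.name == edge.name and e.port == edge.port
                   for e in edge_list)
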
    def as_light_graph(self):
        self._max_ports = {}
        self._variable_values = {}
        self._tensor_shapes = {}
        self._tensor_dtypes = {}

        onnx_model = onnx.load(self._graph_path)
        sess = onnxruntime.InferenceSession(onnx_model.SerializeToString())
        onnx_model = self.change_onnx_names_to_lnf_standard(onnx_model)
        # Run shape inference on the graph so each edge name can be mapped to its shape
        inferred_model = shape_inference.infer_shapes(onnx_model)

        # Add the graph inputs' and outputs' shapes and dtypes to the dictionaries
        for value in onnx_model.graph.input:
            self.add_shape_dtype_from_given_valueinfo_tensor(value)

        for value in onnx_model.graph.output:
            self.add_shape_dtype_from_given_valueinfo_tensor(value)

        # Add the graph variables' and constants' shapes and dtypes to the dictionaries
        for value in inferred_model.graph.value_info:
            self.add_shape_dtype_from_given_valueinfo_tensor(value)

        graph_nodes = [
            self.onnx_node_to_lnf(onnx_node)
            for onnx_node in onnx_model.graph.node
        ]

        graph_inputs = [
            self.onnx_edge_to_edge_info(onnx_graph_input_edge.name)
            for onnx_graph_input_edge in sess.get_inputs()
        ]
        graph_outputs = [
            self.onnx_edge_to_edge_info(onnx_graph_output_edge.name)
            for onnx_graph_output_edge in sess.get_outputs()
        ]

        graph_output_node_names = [
            output_node.name for output_node in sess.get_outputs()
        ]
        return lgf_graph.LightGraph(graph_nodes,
                                    input_edges=graph_inputs,
                                    output_edges=graph_outputs,
                                    output_node_names=graph_output_node_names)
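
A standalone sketch of the ONNX shape-inference step relied on above, using only public onnx APIs ("model.onnx" is a placeholder path):

    import onnx
    from onnx import shape_inference

    model = onnx.load("model.onnx")
    inferred = shape_inference.infer_shapes(model)
    for value in inferred.graph.value_info:
        dims = [d.dim_value for d in value.type.tensor_type.shape.dim]
        print(value.name, dims)
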
Example #7
    def as_light_graph(self):
        # Nodes, inputs, outputs
        nodes = [
            self._tf_node_def_to_lnf(tf_node_def)
            for tf_node_def in self._graph_def.node
        ]
        input_edges = [
            self._tensor_name_to_edge_info(tensor_name)
            for tensor_name in self._input_tensor_names
        ]
        output_edges = [
            self._tensor_name_to_edge_info(tensor_name)
            for tensor_name in self._output_tensor_names
        ]
        output_node_names = sorted(self._output_node_names)
        meta_graph_info = self._tf_meta_graph_def_to_lgf_meta_graph_info(
            self._meta_graph_def)

        return lgf_graph.LightGraph(nodes,
                                    input_edges=input_edges,
                                    output_edges=output_edges,
                                    output_node_names=output_node_names,
                                    meta_graph_info=meta_graph_info)
    def matmul_graph(hw_spec,
                     sw_config,
                     sim_params,
                     weights,
                     inp_shape=(1, 4),
                     inp_dtype_t=dtypes_pb2.DT_BFLOAT,
                     inp_dtype_p=16,
                     add_activation=False):
        inp1 = lgf_pb2.EdgeInfo()
        inp1.name = "inp_ten1"
        inp1.port = 0
        inp1.dtype.t = inp_dtype_t
        inp1.dtype.p = inp_dtype_p
        inp1.shape.d.extend(list(inp_shape))

        weights_i = lgf_pb2.EdgeInfo()
        weights_i.name = "weights_ten"
        weights_i.port = 0
        weights_i.dtype.CopyFrom(inp1.dtype)
        weights_i.shape.d.extend(weights.shape)

        outp = lgf_pb2.EdgeInfo()
        outp.CopyFrom(inp1)
        outp.name = "out_ten"
        outp.shape.d[1] = weights.shape[1]
        outp.dtype.t = inp_dtype_t
        outp.dtype.p = inp_dtype_p

        wn = lgf_pb2.LNF()
        wn.name = "weights_ten"
        wn.const.SetInParent()
        wn.outputs.add().CopyFrom(weights_i)
        wn.const.value.CopyFrom(
            utils.array_to_tensor_pb(weights, weights_i.dtype))
        wn.const.const_type = lgf_pb2.ConstNode.GRAPH_CONST
        wn.supported = True

        mm_tx = matmul_transform.MatMulTransform(hw_spec, sw_config,
                                                 sim_params)
        mm_nodes = mm_tx.create_supported_nodes("out_ten", inp1, weights_i,
                                                outp, [])

        act_nodes = []
        if add_activation:
            bias = base_transform.BaseTransform.create_const_node(
                np.random.random(size=(1, inp_shape[1])), "add_bias",
                inp1.dtype, lgf_pb2.ConstNode.GRAPH_CONST)
            act_nodes.append(bias)

            act = lgf_pb2.LNF()
            act.name = "act"
            act.vv_add.SetInParent()
            act.supported = True
            act.inputs.add().CopyFrom(mm_nodes[0].outputs[0])
            act.inputs.add().CopyFrom(bias.outputs[0])
            act.outputs.add().CopyFrom(mm_nodes[0].outputs[0])
            act.outputs[0].name = "act"
            outp = act.outputs[0]
            act_nodes.append(act)

        lg = lgf_graph.LightGraph([wn] + mm_nodes + act_nodes,
                                  input_edges=[inp1],
                                  output_edges=[outp])

        folder = fold_phasify_constants.FoldPhasifyConstants(
            hw_spec, sw_config, sim_params)

        return folder.process_transforms(lg)
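
A usage sketch for this builder with a toy weight matrix; hw_spec, sw_config, and sim_params are assumed to come from the surrounding fixtures:

    weights = np.random.random(size=(4, 8))
    lg = matmul_graph(hw_spec, sw_config, sim_params, weights, add_activation=True)
    print(len(lg.nodes()), [e.name for e in lg.output_edges()])
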
Example #9
    def _get_dequant_bias(self, new_opu_node, light_graph, transform_result):
        # Create a dequant bias with all 0's
        _, num_y, _, j = new_opu_node.inputs[
            lgf_pb2.MatMulNode.DEQUANT_SCALES_INDEX].shape.d
        zero_dequant_bias_node = self.create_const_node(
            np.zeros([1, num_y, 1, j]), new_opu_node.name + "_dequant_bias",
            self._sw_config.float_type, lgf_pb2.ConstNode.DEQUANT_BIAS)

        # Add dequant bias to the new opu node
        new_opu_node.inputs.add().CopyFrom(zero_dequant_bias_node.outputs[0])

        # Get constant nodes from the transform result
        const_nodes = [
            t.node for t in list(transform_result.to_add) +
            list(transform_result.to_replace)
            if t.node.HasField(lgf_pb2.LNF.const.DESCRIPTOR.name)
        ]
        const_nodes.append(
            light_graph.get_node_by_name(new_opu_node.inputs[
                lgf_pb2.MatMulNode.QUANT_PARAMS_INDEX].name))

        # Create a subgraph to run the new opu node
        subgraph = lgf_graph.LightGraph(
            const_nodes + [new_opu_node, zero_dequant_bias_node],
            input_edges=[new_opu_node.inputs[lgf_pb2.MatMulNode.INPUT_INDEX]],
            output_edges=[new_opu_node.outputs[0]])

        # Create zero inputs
        input_edge = subgraph.input_edges()[0]
        batch_dilation_factor = (input_edge.shape.batch_dilation_factor
                                 if input_edge.shape.batch_dilation_factor > 0
                                 else 1)
        array = np.zeros([
            d if d != -1 else self._sim_params.compiled_batch_size *
            batch_dilation_factor for d in input_edge.shape.d
        ])
        zero_inputs = utils.create_inference_inputs([input_edge], [array])

        # Run zeros through the subgraph
        runner = graph_runner.GraphRunner(subgraph, self._hw_specs,
                                          self._sw_config, self._sim_params)
        out_inf = runner.run_single_batch(zero_inputs)

        # The new bias is chosen so that the output for an all-zero input is exactly zero
        dequant_bias = -1 * utils.tensor_pb_to_array(out_inf.results[0].data,
                                                     np.float32)

        # Convert to a [1, last_dim] vector
        dequant_bias = np.reshape(dequant_bias, [-1, dequant_bias.shape[-1]])
        dequant_bias = dequant_bias[0:1, :]

        # Pad and reshape so dequant_bias is [1, num_y, 1, j]
        pad = [[0, 0], [0, num_y * j - dequant_bias.shape[1]]]
        dequant_bias = np.pad(dequant_bias, pad, "constant", constant_values=0)
        dequant_bias = np.split(dequant_bias, num_y, axis=1)
        dequant_bias = np.stack(dequant_bias, axis=0)
        dequant_bias = np.reshape(dequant_bias, [1, num_y, 1, j])

        # Create a dequant bias node and add to the transform result
        dequant_bias_node = self.create_const_node(
            dequant_bias, zero_dequant_bias_node.name,
            zero_dequant_bias_node.outputs[0].dtype,
            zero_dequant_bias_node.const.const_type)

        transform_result.to_add.add().node.CopyFrom(dequant_bias_node)
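
A self-contained numpy sketch of the pad/split/stack/reshape sequence above, with toy sizes (num_y=2, j=3, four real bias columns), showing how a [1, K] row becomes the [1, num_y, 1, j] layout:

    import numpy as np

    num_y, j = 2, 3
    bias = np.arange(4, dtype=np.float32).reshape(1, 4)       # [1, 4]
    pad = [[0, 0], [0, num_y * j - bias.shape[1]]]            # pad out to [1, 6]
    bias = np.pad(bias, pad, "constant", constant_values=0)
    bias = np.stack(np.split(bias, num_y, axis=1), axis=0)    # [num_y, 1, j]
    bias = np.reshape(bias, [1, num_y, 1, j])                 # [[[[0, 1, 2]], [[3, 0, 0]]]]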