Example #1
    def preprocess_weights(self, weights_edge):
        # Unpack shapes
        filter_height, filter_width, in_channels, channel_multiplier = \
            weights_edge.shape.d

        # Transpose to [filter_height, filter_width, channel_multiplier, in_channels]
        transpose_edge = lgf_pb2.EdgeInfo()
        transpose_edge.CopyFrom(weights_edge)
        transpose_edge.name = weights_edge.name + "_transpose"
        transpose_edge.shape.d[:] = [
            filter_height, filter_width, channel_multiplier, in_channels
        ]
        transpose_node = transpose_transform.TransposeTransform.create_supported_nodes(
            self, transpose_edge.name, weights_edge, transpose_edge, [],
            [0, 1, 3, 2])[0]

        # Reshape to [filter_height * filter_width, channel_multiplier * in_channels]
        reshape_edge = lgf_pb2.EdgeInfo()
        reshape_edge.CopyFrom(transpose_edge)
        reshape_edge.name = transpose_edge.name + "_reshape"
        reshape_edge.shape.d[:] = [
            filter_height * filter_width, channel_multiplier * in_channels
        ]
        reshape_node = reshape_transform.ReshapeTransform.create_supported_nodes(
            self, reshape_edge.name, transpose_node.outputs[0], reshape_edge,
            [])[0]

        return True, [transpose_node, reshape_node], reshape_node.outputs[0]
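
A minimal NumPy sketch (not part of the transform above) of the same transpose-plus-reshape applied to depthwise-conv weights; the filter shape here is an arbitrary placeholder.

import numpy as np

fh, fw, in_ch, mult = 3, 3, 8, 2              # placeholder [filter_height, filter_width, in_channels, channel_multiplier]
w = np.random.randn(fh, fw, in_ch, mult)
w2 = w.transpose(0, 1, 3, 2)                  # perm [0, 1, 3, 2] -> [fh, fw, mult, in_ch]
w3 = w2.reshape(fh * fw, mult * in_ch)        # -> [fh * fw, mult * in_ch]
assert w3.shape == (fh * fw, mult * in_ch)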
Example #2
    def create_supported_nodes(self, sigmoid_name, input_edge, output_edge,
                               control_inputs):
        """
        Creates a supported sigmoid node in standard format

        Params:
            sigmoid_name: name of original node
            input_edge: edge of the input for the original node
            output_edge: edge of the output for the original node
            control_inputs: a list of node names for the control inputs
        """
        exp_output_edge = lgf_pb2.EdgeInfo()
        exp_output_edge.CopyFrom(output_edge)
        exp_output_edge.name = sigmoid_name + "_exp"

        exp_node = self.create_transform_obj(
            exp_transform.ExpTransform).create_supported_nodes(
                exp_output_edge.name, input_edge, exp_output_edge,
                control_inputs)[0]

        sv_add_output_edge = lgf_pb2.EdgeInfo()
        sv_add_output_edge.CopyFrom(output_edge)
        sv_add_output_edge.name = sigmoid_name + "_sv_add"

        sv_add_node = self.create_transform_obj(
            sv_transform.SVAddTransform).create_supported_nodes(
                sv_add_output_edge.name, exp_node.outputs[0],
                sv_add_output_edge, control_inputs, 1)[0]

        vv_div_node = self.create_transform_obj(
            vv_transform.VVDivTransform).create_supported_nodes(
                sigmoid_name, exp_node.outputs[0], sv_add_node.outputs[0],
                output_edge, control_inputs)[0]

        return [vv_div_node, sv_add_node, exp_node]
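
A small NumPy check (separate from the transform code) of the decomposition this builds, sigmoid(x) = exp(x) / (exp(x) + 1):

import numpy as np

x = np.linspace(-4.0, 4.0, 9)
e = np.exp(x)                     # ExpTransform
denom = e + 1.0                   # SVAddTransform with scalar 1
out = e / denom                   # VVDivTransform
assert np.allclose(out, 1.0 / (1.0 + np.exp(-x)))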
Example #3
    def sv_max_graph(inp_shape=(1, 100),
                     inp_dtype_t=dtypes_pb2.DT_QINT,
                     inp_dtype_p=8,
                     num_nodes=1):
        inp1 = lgf_pb2.EdgeInfo()
        inp1.name = "inp_ten1"
        inp1.port = 0
        inp1.dtype.t = inp_dtype_t
        inp1.dtype.p = inp_dtype_p
        inp1.shape.d.extend(inp_shape)

        nodes = []
        last_edge = inp1
        for i in range(num_nodes):
            outp = lgf_pb2.EdgeInfo()
            outp.CopyFrom(inp1)
            outp.name = "out_ten_{0}".format(i)

            n = lgf_pb2.LNF()
            n.name = outp.name
            n.sv_max.SetInParent()
            n.inputs.add().CopyFrom(last_edge)
            n.outputs.add().CopyFrom(outp)
            n.sv_max.scalar = 64
            n.supported = True
            last_edge = n.outputs[0]
            nodes.append(n)

        return lgf_graph.LightGraph(nodes,
                                    input_edges=[inp1],
                                    output_edges=[last_edge])
Example #4
    def create_supported_nodes(self, tanh_name, input_edge, output_edge,
                               control_inputs):
        """
        Creates a supported tanh node in standard format

        Params:
            tanh_name: name of original node
            input_edge: edge of the input for the original node
            output_edge: edge of the output for the original node
            control_inputs: a list of node names for the control inputs
        """
        sv_mul_output_edge = lgf_pb2.EdgeInfo()
        sv_mul_output_edge.CopyFrom(output_edge)
        sv_mul_output_edge.name = tanh_name + "_double"

        sv_mul_node = self.create_transform_obj(
            sv_transform.SVMulTransform).create_supported_nodes(
                sv_mul_output_edge.name, input_edge, sv_mul_output_edge,
                control_inputs, 2)[0]

        exp_output_edge = lgf_pb2.EdgeInfo()
        exp_output_edge.CopyFrom(output_edge)
        exp_output_edge.name = tanh_name + "_double_exp"

        exp_node = self.create_transform_obj(
            exp_transform.ExpTransform).create_supported_nodes(
                exp_output_edge.name, sv_mul_node.outputs[0], exp_output_edge,
                control_inputs)[0]

        numerator_output_edge = lgf_pb2.EdgeInfo()
        numerator_output_edge.CopyFrom(output_edge)
        numerator_output_edge.name = tanh_name + "_numerator"
        denominator_output_edge = lgf_pb2.EdgeInfo()
        denominator_output_edge.CopyFrom(output_edge)
        denominator_output_edge.name = tanh_name + "_denominator"

        numerator_node = self.create_transform_obj(
            sv_transform.SVAddTransform).create_supported_nodes(
                numerator_output_edge.name, exp_node.outputs[0],
                numerator_output_edge, control_inputs, -1)[0]
        denominator_node = self.create_transform_obj(
            sv_transform.SVAddTransform).create_supported_nodes(
                denominator_output_edge.name, exp_node.outputs[0],
                denominator_output_edge, control_inputs, 1)[0]

        vv_div_node = self.create_transform_obj(
            vv_transform.VVDivTransform).create_supported_nodes(
                tanh_name, numerator_node.outputs[0],
                denominator_node.outputs[0], output_edge, control_inputs)[0]

        return [
            vv_div_node, numerator_node, denominator_node, exp_node,
            sv_mul_node
        ]
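
The node chain above computes tanh(x) = (exp(2x) - 1) / (exp(2x) + 1); a quick NumPy check of that identity (not part of the transform code):

import numpy as np

x = np.linspace(-3.0, 3.0, 13)
e2x = np.exp(2.0 * x)             # SVMulTransform (scalar 2) followed by ExpTransform
num = e2x - 1.0                   # SVAddTransform with scalar -1
den = e2x + 1.0                   # SVAddTransform with scalar 1
assert np.allclose(num / den, np.tanh(x))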
Example #5
    def transform(self, swish_node, light_graph):
        """
        Converts a swish node to supported nodes in standard format
        """
        self.check_original_node(swish_node)

        sigmoid_output_edge = lgf_pb2.EdgeInfo()
        sigmoid_output_edge.CopyFrom(swish_node.outputs[0])
        sigmoid_output_edge.name = swish_node.name + "_sigmoid"

        sigmoid_nodes = self.create_transform_obj(
            sigmoid_transform.SigmoidTransform).create_supported_nodes(
                sigmoid_output_edge.name,
                swish_node.inputs[0],
                sigmoid_output_edge,
                swish_node.control_inputs)

        vv_mul_node = self.create_transform_obj(
            vv_transform.VVMulTransform).create_supported_nodes(
                swish_node.name,
                swish_node.inputs[0],
                sigmoid_nodes[0].outputs[0],
                swish_node.outputs[0],
                swish_node.control_inputs)[0]

        return self.create_transform_result(to_add=sigmoid_nodes,
                                            to_replace=[vv_mul_node])
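
Numerically, this rewrite computes swish(x) = x * sigmoid(x); a short NumPy sanity check, independent of the transform classes:

import numpy as np

x = np.linspace(-4.0, 4.0, 9)
sig = 1.0 / (1.0 + np.exp(-x))    # output of the inserted sigmoid subgraph
out = x * sig                     # VVMulTransform of the original input and the sigmoid output
assert np.allclose(out, x / (1.0 + np.exp(-x)))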
Example #6
    def _get_feed_dict(self, sess, unknown_dim_size):
        # Create feed dict with random data
        feed_dict = {}

        for inp in self._input_tensor_names:
            real_name, port, _ = self.get_node_name_and_output_index(inp)
            # This shape might have multiple unknown dimensions, so we
            # don't update batch_dim_indx here
            shape = self.tf_tensorshape_to_lgf_tensorshape(
                sess.graph.get_tensor_by_name(inp).shape,
                update_batch_dim_indx=False)

            # Correct for batch size if necessary
            corrected_edge_info = lgf_pb2.EdgeInfo()
            corrected_edge_info.CopyFrom(self._input_edges[(real_name, port)])
            batch_dim_indx = corrected_edge_info.shape.batch_dim_indx
            if (batch_dim_indx >= 0 and len(shape.d) > 0
                    and shape.d[batch_dim_indx] > 0):
                corrected_edge_info.shape.d[batch_dim_indx] = shape.d[
                    batch_dim_indx]

            data = utils.generate_random_inference_inputs(
                [corrected_edge_info], unknown_dim_size=unknown_dim_size)
            named_tensor = data.inputs[0]
            feed_dict[sess.graph.get_tensor_by_name(
                inp)] = utils.tensor_pb_to_array(
                    named_tensor.data,
                    utils.dtype_pb_to_np_dtype(named_tensor.data.dtype))

        return feed_dict
Example #7
    def _pad_and_reshape(weights, spec):
        # Pad to even multiple of opu dimension.
        weights_edge = lgf_pb2.EdgeInfo()
        weights_edge.shape.d.extend(weights.shape)
        weights_edge.shape.batch_dim_indx = -1
        weights_edge.dtype.CopyFrom(utils.np_dtype_to_lgf_dtype(weights.dtype))

        num_x, num_y, k, j = opu_op_transform.OPUOpTransform.get_tiled_shape(
            weights_edge, False, spec)
        W_dim = weights.shape

        pad_x = (k * num_x) - W_dim[0]
        pad_y = (j * num_y) - W_dim[1]
        pad = [[0, pad_x], [0, pad_y]]
        W_padded = np.pad(weights, pad, "constant", constant_values=0)

        # Convert [num_x * k, num_y * j] W_padded to full_matrix of size
        # [num_x, num_y, k, j]
        full_matrix = np.split(W_padded, num_x, axis=0)
        full_matrix = np.stack(full_matrix, axis=0)
        full_matrix = np.split(full_matrix, num_y, axis=2)
        full_matrix = np.stack(full_matrix, axis=1)

        # Convert to [num_x, num_y, num_dps, k, k]
        full_matrix = full_matrix.reshape(num_x, num_y, k, j // k, k)
        full_matrix = np.transpose(full_matrix, axes=[0, 1, 3, 2, 4])

        return full_matrix
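
A standalone NumPy sketch of the same pad-and-tile steps; k, j, num_x, num_y below are arbitrary placeholders standing in for the values returned by get_tiled_shape:

import numpy as np

k, j, num_x, num_y = 4, 8, 2, 3                               # placeholder tile sizes
W = np.random.randn(5, 20)                                    # weights smaller than the tiled grid

pad_x = k * num_x - W.shape[0]
pad_y = j * num_y - W.shape[1]
W_padded = np.pad(W, [[0, pad_x], [0, pad_y]], "constant", constant_values=0)

tiles = np.stack(np.split(W_padded, num_x, axis=0), axis=0)   # [num_x, k, num_y * j]
tiles = np.stack(np.split(tiles, num_y, axis=2), axis=1)      # [num_x, num_y, k, j]
tiles = tiles.reshape(num_x, num_y, k, j // k, k).transpose(0, 1, 3, 2, 4)
assert tiles.shape == (num_x, num_y, j // k, k, k)            # [num_x, num_y, num_dps, k, k]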
Example #8
    def _stack_edges(self, edges, out_edge):
        """
        Stack output edges of a group of matmul nodes and reshape the result to match the
        given out_edge.
        """
        stack_edge = lgf_pb2.EdgeInfo()
        stack_edge.name = out_edge.name + "_stack"
        stack_edge.port = 0
        stack_edge.dtype.CopyFrom(edges[0].dtype)
        stack_edge.shape.d.append(len(edges))
        stack_edge.shape.d.extend(edges[0].shape.d)
        if edges[0].shape.batch_dim_indx == -1:
            stack_edge.shape.batch_dim_indx = -1
        else:
            stack_edge.shape.batch_dim_indx = edges[0].shape.batch_dim_indx + 1
            stack_edge.shape.batch_dilation_factor = edges[
                0].shape.batch_dilation_factor

        common_args = self._common_args()
        stack_node = stack_transform.StackTransform(
            *common_args).create_supported_nodes(stack_edge.name, edges,
                                                 stack_edge, [], 0)[0]

        if len(out_edge.shape.d) > 3:
            out_node = reshape_transform.ReshapeTransform(
                *common_args).create_supported_nodes(out_edge.name, stack_edge,
                                                     out_edge, [])[0]
        else:
            out_node = identity_transform.IdentityTransform(
                *common_args).create_supported_nodes(out_edge.name, stack_edge,
                                                     out_edge, [])[0]

        return [out_node, stack_node]
Example #9
def np_to_edge_info(np_array, name=LT_UNSET):
    """Use shape and dtype from an np array to get edge info."""
    ret = lgf_pb2.EdgeInfo()
    ret.dtype.CopyFrom(np_dtype_to_lgf_dtype(np_array.dtype))
    ret.shape.d.extend(np_array.shape)
    ret.name = name
    return ret
Example #10
    def _tensor_name_to_edge_info(self, tensor_name):
        edge_info = lgf_pb2.EdgeInfo()
        name, port, _ = self.get_node_name_and_output_index(tensor_name)
        edge_info.name = name
        edge_info.port = port
        edge_info.dtype.CopyFrom(self._tensor_dtypes[tensor_name])
        edge_info.shape.CopyFrom(self._tensor_shapes[tensor_name])
        return edge_info
Example #11
    def _update_input_edges(nodes, input_edges):
        node_names = {n.name for n in nodes}
        for node in nodes:
            for e_in in node.inputs:
                if (e_in.name not in node_names and
                        not (GraphTransform._edge_in_list(e_in, input_edges))):
                    new_inp = lgf_pb2.EdgeInfo()
                    new_inp.CopyFrom(e_in)
                    input_edges.append(new_inp)
Example #12
    def onnx_edge_to_edge_info(self, onnx_edge_name):
        edge_info = lgf_pb2.EdgeInfo()
        name_and_port_str = self.get_name_and_port(onnx_edge_name)
        name, port = name_and_port_str.split(":")
        edge_info.name = name
        edge_info.port = int(port)
        edge_info.dtype.CopyFrom(
            self._tensor_dtypes[self.get_name_and_port(onnx_edge_name)])
        edge_info.shape.CopyFrom(
            self._tensor_shapes[self.get_name_and_port(onnx_edge_name)])
        return edge_info
Example #13
    def get_transforms(self, light_graph):
        """
        Returns the transforms to collapse supported subgraphs in
        light_graph into single nodes.
        """
        subgraphs = self._get_supported_subgraph_lists(light_graph)

        # Node transformations converting each subgraph into a single node
        to_add = []
        to_reroute = []
        to_output_swap = []
        subgraph_index = self._get_next_subgraph_index(light_graph)
        for subgraph, control_inputs in subgraphs:
            subgraph_node = lgf_pb2.LNF()
            subgraph_node.name = self.get_subgraph_node_name(subgraph_index)
            subgraph_node.supported = False
            subgraph_node.subgraph.SetInParent()
            subgraph_node.subgraph.graph.CopyFrom(subgraph.as_lgf_pb())
            subgraph_node.inputs.extend(subgraph.input_edges())
            subgraph_node.control_inputs.extend(control_inputs)

            for j, old_edge in enumerate(subgraph.output_edges()):
                new_edge = lgf_pb2.EdgeInfo()
                new_edge.CopyFrom(old_edge)
                new_edge.name = subgraph_node.name
                new_edge.port = j
                subgraph_node.outputs.add().CopyFrom(new_edge)

                to_reroute.append((transform_result_pb2.ToReroute.edge_reroute.
                                   DESCRIPTOR.name, [], old_edge, new_edge))

            for old_node in subgraph.nodes():
                to_reroute.append((transform_result_pb2.ToReroute.
                                   control_input_reroute.DESCRIPTOR.name, [],
                                   [old_node.name], [subgraph_node.name]))

            if len(subgraph.output_node_names()):
                to_output_swap.append(
                    (subgraph.output_node_names(), [subgraph_node.name]))

            to_add.append(subgraph_node)

            subgraph_index += 1

        return [
            base_transform.BaseTransform.create_transform_result(
                to_add=to_add,
                to_reroute=to_reroute,
                to_output_swap=to_output_swap)
        ]
Example #14
def extract_edge_from_data(calibration_data):
    """Return an EdgeInfo for the input data, in case it isn't specified in the graph."""
    input_edges = []
    for named_tensor in calibration_data.batches[0].inputs:
        e = lgf_pb2.EdgeInfo()
        e.CopyFrom(named_tensor.edge_info)
        if e.shape.batch_dim_indx < 0:
            # NOTE: Assumes dim 0 is batch dim
            e.shape.batch_dim_indx = 0
        e.shape.d[e.shape.batch_dim_indx] = -1

        input_edges.append(e)

    return input_edges
Example #15
    def create_supported_nodes(self, softmax_name, input_edge, output_edge,
                               control_inputs, axis):
        """
        Creates a supported softmax node in standard format

        Params:
            softmax_name: name of original node
            input_edge: edge of the input for the original node
            output_edge: edge of the output for the original node
            control_inputs: a list of node names for the control inputs
            axis: integer for which dimension to do softmax over
        """
        exp_output_edge = lgf_pb2.EdgeInfo()
        exp_output_edge.CopyFrom(output_edge)
        exp_output_edge.name = softmax_name + "_exp"

        exp_node = self.create_transform_obj(
            exp_transform.ExpTransform).create_supported_nodes(
                exp_output_edge.name, input_edge, exp_output_edge,
                control_inputs)[0]

        reduce_sum_output_edge = lgf_pb2.EdgeInfo()
        reduce_sum_output_edge.CopyFrom(output_edge)
        reduce_sum_output_edge.name = softmax_name + "_reduce_sum"
        reduce_sum_output_edge.shape.d[axis] = 1

        reduce_sum_node = self.create_transform_obj(
            reduce_sum_transform.ReduceSumTransform).create_supported_nodes(
                reduce_sum_output_edge.name, exp_node.outputs[0],
                reduce_sum_output_edge, control_inputs, [axis], True)[0]

        vv_div_node = self.create_transform_obj(
            vv_transform.VVDivTransform).create_supported_nodes(
                softmax_name, exp_node.outputs[0], reduce_sum_node.outputs[0],
                output_edge, control_inputs)[0]

        return [vv_div_node, reduce_sum_node, exp_node]
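
The same composition in NumPy: exp, a keep-dims reduce-sum over the axis, then an elementwise divide (like the transform, this sketch omits the usual max-subtraction stabilization):

import numpy as np

x = np.random.randn(2, 5)
axis = 1
e = np.exp(x)                              # ExpTransform
s = np.sum(e, axis=axis, keepdims=True)    # ReduceSumTransform over [axis] with keep_dims=True
out = e / s                                # VVDivTransform
assert np.allclose(out.sum(axis=axis), 1.0)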
Example #16
    def edges(self):
        """Returns input edge infos without shape."""
        edges = {}
        dtypes = self.dtypes
        ports = self.ports
        batch_dim_indices = self.batch_dim_indices
        for name in self.names:
            edge = lgf_pb2.EdgeInfo()
            edge.name = name
            edge.port = ports[name]
            edge.shape.batch_dim_indx = batch_dim_indices[name]
            edge.dtype.CopyFrom(dtypes[name])
            edges[name] = edge

        return edges
Example #17
    def create_supported_nodes(self, batch_matmul_name, input_edge,
                               weight_edge, output_edge, control_inputs,
                               transpose_inputs, transpose_weights, num):
        input_trans = self._unstack_edges(input_edge, num)
        weight_trans = self._unstack_edges(weight_edge, num)

        common_args = self._common_args()
        matmul_obj = matmul_transform.MatMulTransform(*common_args)

        new_matmul_nodes = []
        new_matmul_edges = []
        for i, (new_input, new_weight) in enumerate(
                zip(input_trans[-1].outputs, weight_trans[-1].outputs)):
            matmul_edge = lgf_pb2.EdgeInfo()
            matmul_edge.name = self._get_unstacked_matmul_node_name(
                batch_matmul_name, i)
            matmul_edge.dtype.CopyFrom(output_edge.dtype)

            # Regular matmul's input edge should not have its batch dim
            # equal to the contracted dim
            if new_input.shape.batch_dim_indx != 1:
                matmul_edge.shape.batch_dim_indx = new_input.shape.batch_dim_indx
                matmul_edge.shape.batch_dilation_factor = (
                    new_input.shape.batch_dilation_factor)
            else:
                matmul_edge.shape.batch_dim_indx = -1

            matmul_edge.shape.d.append(new_input.shape.d[1] if transpose_inputs
                                       else new_input.shape.d[0])
            matmul_edge.shape.d.append(
                new_weight.shape.d[0] if transpose_weights else new_weight.
                shape.d[1])
            matmul_node = matmul_obj.create_supported_nodes(
                matmul_edge.name, new_input, new_weight, matmul_edge,
                control_inputs, transpose_inputs, transpose_weights)
            matmul_node[0].matmul.from_batch_matmul = True
            new_matmul_nodes.extend(matmul_node)
            new_matmul_edges.append(matmul_edge)

        all_nodes = self._stack_edges(new_matmul_edges, output_edge)
        all_nodes += input_trans + weight_trans + new_matmul_nodes

        return all_nodes
Example #18
    def transform(self, relu6_node, light_graph):
        """
        Relu6Transform: converts a relu6 node to supported nodes (max with 0, then min with 6)
        """
        self.check_original_node(relu6_node)

        intermediate_edge = lgf_pb2.EdgeInfo()
        intermediate_edge.CopyFrom(relu6_node.outputs[0])
        intermediate_edge.name = relu6_node.name + "_sv_max"

        sv_max_node = self.create_transform_obj(
            sv_transform.SVMaxTransform).create_supported_nodes(
                intermediate_edge.name, relu6_node.inputs[0],
                intermediate_edge, relu6_node.control_inputs, 0)[0]

        sv_min_node = self.create_transform_obj(
            sv_transform.SVMinTransform).create_supported_nodes(
                relu6_node.name, intermediate_edge, relu6_node.outputs[0],
                relu6_node.control_inputs, 6)[0]

        return self.create_transform_result(to_add=[sv_max_node],
                                            to_replace=[sv_min_node])
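
Numerically, the two inserted nodes compute relu6(x) = min(max(x, 0), 6); a one-line NumPy check:

import numpy as np

x = np.array([-3.0, 0.5, 5.0, 8.0])
out = np.minimum(np.maximum(x, 0.0), 6.0)  # SVMaxTransform(0) then SVMinTransform(6)
assert np.allclose(out, np.clip(x, 0.0, 6.0))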
Example #19
    def create_supported_nodes(self,
                               sq_diff_name,
                               input0_edge,
                               input1_edge,
                               output_edge,
                               control_inputs):
        """
        Creates supported squared difference nodes in standard format

        Params:
            sq_diff_name: name of original node
            input0_edge: edge of the first input for the original node
            input1_edge: edge of the second input for the original node
            output_edge: edge of the output for the original node
            control_inputs: a list of node names for the control inputs
        """

        common_args = self._common_args()

        vv_sub_output_edge = lgf_pb2.EdgeInfo()
        vv_sub_output_edge.CopyFrom(output_edge)
        vv_sub_output_edge.name = sq_diff_name + "_sub"

        vv_sub_node = vv_transform.VVSubTransform(*common_args).create_supported_nodes(
            vv_sub_output_edge.name,
            input0_edge,
            input1_edge,
            vv_sub_output_edge,
            control_inputs)[0]

        sv_pow_node = sv_transform.SVPowTransform(*common_args).create_supported_nodes(
            sq_diff_name,
            vv_sub_node.outputs[0],
            output_edge,
            control_inputs,
            2)[0]

        return [sv_pow_node, vv_sub_node]
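
The pair of nodes above computes the squared difference (a - b) ** 2; in plain NumPy (independent of the transform classes):

import numpy as np

a = np.array([1.0, 2.0, 3.0])
b = np.array([0.5, 4.0, 3.0])
diff = a - b                               # VVSubTransform
out = diff ** 2                            # SVPowTransform with exponent 2
assert np.allclose(out, np.square(a - b))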
Example #20
    def _unstack_edges(self, edge, num):
        """
        Unstack input edges of a BatchMatMulV2 node to a list of 2D edges which can
        then be fed into regular matmul nodes.

        A reshape node is added if the original input edge has a rank larger than 3.
        """
        common_args = self._common_args()
        new_nodes = []

        batch_prod = 1
        for i, v in enumerate(edge.shape.d[:-2]):
            if v != -1:
                batch_prod *= v
            else:
                assert i == edge.shape.batch_dim_indx

        do_reshape = len(edge.shape.d) > 3
        if do_reshape:
            # Create a reshape node to flatten all but the lowest two dimensions.
            reshape_edge = lgf_pb2.EdgeInfo()
            reshape_edge.name = edge.name + "_reshape"
            reshape_edge.port = 0
            reshape_edge.dtype.CopyFrom(edge.dtype)
            reshape_edge.shape.d[:] = edge.shape.d[-3:]
            if edge.shape.batch_dim_indx == -1:
                reshape_edge.shape.d[0] = batch_prod
                reshape_edge.shape.batch_dim_indx = -1
            elif edge.shape.batch_dim_indx < len(edge.shape.d) - 2:
                reshape_edge.shape.d[0] = -1
                reshape_edge.shape.batch_dim_indx = 0
                reshape_edge.shape.batch_dilation_factor = (
                    edge.shape.batch_dilation_factor)
                reshape_edge.shape.batch_dilation_factor *= batch_prod
            else:
                reshape_edge.shape.d[0] = batch_prod
                reshape_edge.shape.batch_dim_indx = edge.shape.batch_dim_indx - (
                    len(edge.shape.d) - 3)
                reshape_edge.shape.batch_dilation_factor = (
                    edge.shape.batch_dilation_factor)

            reshape_node = reshape_transform.ReshapeTransform(
                *common_args).create_supported_nodes(reshape_edge.name, edge,
                                                     reshape_edge, [])[0]

            new_nodes.append(reshape_node)
        else:
            reshape_edge = edge

        unstacked_edges = []
        for i in range(num):
            new_edge = lgf_pb2.EdgeInfo()
            new_edge.name = edge.name + "_unstack"
            new_edge.port = i
            new_edge.dtype.CopyFrom(edge.dtype)
            new_edge.shape.d[:] = reshape_edge.shape.d[-2:]
            if reshape_edge.shape.batch_dim_indx >= 1:
                new_edge.shape.batch_dim_indx = reshape_edge.shape.batch_dim_indx - 1
                new_edge.shape.batch_dilation_factor = (
                    reshape_edge.shape.batch_dilation_factor)
            else:
                new_edge.shape.batch_dim_indx = -1
            unstacked_edges.append(new_edge)

        unstack_node = unstack_transform.UnstackTransform(
            *common_args).create_supported_nodes(unstacked_edges[0].name,
                                                 reshape_edge, unstacked_edges,
                                                 [], 0)[0]
        new_nodes.append(unstack_node)

        return new_nodes
Example #21
    def create_supported_nodes(self, bn_name, input_edge, output_edge,
                               control_inputs, mean, variance, scale, bias,
                               epsilon):
        """
        Creates a supported batchnorm node in standard format

        Params:
            bn_name: name of original node
            input_edge: edge of the input for the original node
            output_edge: edge of the output for the original node
            control_inputs: a list of node names for the control inputs
            mean: list or numpy array for the mean
            variance: list or numpy array for the variance
            scale: list or numpy array for the scale
            bias: list or numpy array for the bias
            epsilon: float for epsilon
        """
        if self.decompose():
            eff_scale = np.array(scale).flatten() / (
                np.sqrt(np.array(variance).flatten()) + epsilon)
            eff_bias = (np.array(bias).flatten() /
                        eff_scale) - np.array(mean).flatten()

            eff_scale_node = self.create_const_node(
                eff_scale, bn_name + "_eff_scale", self._sw_config.float_type,
                lgf_pb2.ConstNode.GRAPH_CONST)
            eff_bias_node = self.create_const_node(
                eff_bias, bn_name + "_eff_bias", self._sw_config.float_type,
                lgf_pb2.ConstNode.GRAPH_CONST)

            vv_add_output_edge = lgf_pb2.EdgeInfo()
            vv_add_output_edge.CopyFrom(output_edge)
            vv_add_output_edge.name = output_edge.name + "_add_eff_bias"

            vv_add_node = self.create_transform_obj(
                vv_transform.VVAddTransform).create_supported_nodes(
                    bn_name + "_add_eff_bias",
                    input_edge,
                    eff_bias_node.outputs[0],
                    vv_add_output_edge,
                    control_inputs,
                )[0]

            vv_mul_node = self.create_transform_obj(
                vv_transform.VVMulTransform).create_supported_nodes(
                    bn_name,
                    vv_add_node.outputs[0],
                    eff_scale_node.outputs[0],
                    output_edge,
                    control_inputs,
                )[0]

            return [vv_mul_node, vv_add_node, eff_bias_node, eff_scale_node]
        else:
            # Create constant nodes
            mean_node = self.create_const_node(
                np.array(mean).flatten(), bn_name + "_mean",
                self._sw_config.float_type, lgf_pb2.ConstNode.GRAPH_CONST)
            variance_node = self.create_const_node(
                np.array(variance).flatten(), bn_name + "_variance",
                self._sw_config.float_type, lgf_pb2.ConstNode.GRAPH_CONST)
            scale_node = self.create_const_node(
                np.array(scale).flatten(), bn_name + "_scale",
                self._sw_config.float_type, lgf_pb2.ConstNode.GRAPH_CONST)
            bias_node = self.create_const_node(
                np.array(bias).flatten(), bn_name + "_bias",
                self._sw_config.float_type, lgf_pb2.ConstNode.GRAPH_CONST)

            # Create list of input edges
            inputs = [None] * self.NUM_INPUTS
            inputs[lgf_pb2.FusedBatchNormNode.INPUT_INDEX] = input_edge
            inputs[
                lgf_pb2.FusedBatchNormNode.MEAN_INDEX] = mean_node.outputs[0]
            inputs[lgf_pb2.FusedBatchNormNode.
                   VARIANCE_INDEX] = variance_node.outputs[0]
            inputs[
                lgf_pb2.FusedBatchNormNode.SCALE_INDEX] = scale_node.outputs[0]
            inputs[
                lgf_pb2.FusedBatchNormNode.BIAS_INDEX] = bias_node.outputs[0]

            # Create batch norm node
            bn_node = self.create_simple_node(
                bn_name, lgf_pb2.LNF.batchnorm.DESCRIPTOR.name, inputs,
                [output_edge], control_inputs)
            bn_node.batchnorm.epsilon = epsilon

            return [bn_node, mean_node, variance_node, scale_node, bias_node]
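
A NumPy sketch of the decompose() branch above, checking that the eff_scale / eff_bias rewrite reproduces scale * (x - mean) / (sqrt(variance) + epsilon) + bias (epsilon kept outside the square root, exactly as in the code):

import numpy as np

x = np.random.randn(4, 3)
mean = np.array([0.1, -0.2, 0.3])
variance = np.array([1.0, 0.5, 2.0])
scale = np.array([1.5, 0.8, 1.2])
bias = np.array([0.0, 0.1, -0.1])
epsilon = 1e-3

eff_scale = scale / (np.sqrt(variance) + epsilon)
eff_bias = bias / eff_scale - mean
out = (x + eff_bias) * eff_scale           # VVAddTransform followed by VVMulTransform
ref = scale * (x - mean) / (np.sqrt(variance) + epsilon) + bias
assert np.allclose(out, ref)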
Example #22
    def matmul_graph(hw_spec,
                     sw_config,
                     sim_params,
                     weights,
                     inp_shape=(1, 4),
                     inp_dtype_t=dtypes_pb2.DT_BFLOAT,
                     inp_dtype_p=16,
                     add_activation=False):
        inp1 = lgf_pb2.EdgeInfo()
        inp1.name = "inp_ten1"
        inp1.port = 0
        inp1.dtype.t = inp_dtype_t
        inp1.dtype.p = inp_dtype_p
        inp1.shape.d.extend(list(inp_shape))

        weights_i = lgf_pb2.EdgeInfo()
        weights_i.name = "weights_ten"
        weights_i.port = 0
        weights_i.dtype.CopyFrom(inp1.dtype)
        weights_i.shape.d.extend(weights.shape)

        outp = lgf_pb2.EdgeInfo()
        outp.CopyFrom(inp1)
        outp.name = "out_ten"
        outp.shape.d[1] = weights.shape[1]
        outp.dtype.t = inp_dtype_t
        outp.dtype.p = inp_dtype_p

        wn = lgf_pb2.LNF()
        wn.name = "weights_ten"
        wn.const.SetInParent()
        wn.outputs.add().CopyFrom(weights_i)
        wn.const.value.CopyFrom(
            utils.array_to_tensor_pb(weights, weights_i.dtype))
        wn.const.const_type = lgf_pb2.ConstNode.GRAPH_CONST
        wn.supported = True

        mm_tx = matmul_transform.MatMulTransform(hw_spec, sw_config,
                                                 sim_params)
        mm_nodes = mm_tx.create_supported_nodes("out_ten", inp1, weights_i,
                                                outp, [])

        act_nodes = []
        if add_activation:
            bias = base_transform.BaseTransform.create_const_node(
                np.random.random(size=(1, inp_shape[1])), "add_bias",
                inp1.dtype, lgf_pb2.ConstNode.GRAPH_CONST)
            act_nodes.append(bias)

            act = lgf_pb2.LNF()
            act.name = "act"
            act.vv_add.SetInParent()
            act.supported = True
            act.inputs.add().CopyFrom(mm_nodes[0].outputs[0])
            act.inputs.add().CopyFrom(bias.outputs[0])
            act.outputs.add().CopyFrom(mm_nodes[0].outputs[0])
            act.outputs[0].name = "act"
            outp = act.outputs[0]
            act_nodes.append(act)

        lg = lgf_graph.LightGraph([wn] + mm_nodes + act_nodes,
                                  input_edges=[inp1],
                                  output_edges=[outp])

        folder = fold_phasify_constants.FoldPhasifyConstants(
            hw_spec, sw_config, sim_params)

        return folder.process_transforms(lg)
Example #23
    def _copy_edge_info(self, edge_info):
        edge_info_copy = lgf_pb2.EdgeInfo()
        edge_info_copy.CopyFrom(edge_info)
        return edge_info_copy
Example #24
    def insert_collect_hist_node(edge, sw_config, hist_key, num_bins, hist_coll):
        to_add = []
        to_reroute = []
        edge_reroute = transform_result_pb2.ToReroute.edge_reroute.DESCRIPTOR.name

        # Cast to float if necessary
        insert_cast = (edge.dtype.p > sw_config.float_type.p)
        if insert_cast:
            # Cast to float
            cast_node = lgf_pb2.LNF()
            cast_node.name = "{}_{}_cast".format(edge.name, edge.port)
            cast_node.supported = True
            cast_node.cast.SetInParent()

            # Cast inputs and outputs
            cast_node.inputs.add().CopyFrom(edge)
            cast_output_edge = lgf_pb2.EdgeInfo()
            cast_output_edge.name = cast_node.name
            cast_output_edge.port = 0
            cast_output_edge.dtype.CopyFrom(sw_config.float_type)
            cast_output_edge.shape.CopyFrom(edge.shape)
            cast_node.outputs.add().CopyFrom(cast_output_edge)

            to_add.append(cast_node)
            to_reroute.append((edge_reroute, [], edge, cast_output_edge))
        else:
            cast_output_edge = edge

        # Collect hist node
        collect_hist_node = lgf_pb2.LNF()
        collect_hist_node.name = "{}_{}_collect_hist".format(edge.name, edge.port)
        collect_hist_node.supported = True
        collect_hist_node.collect_hist.SetInParent()
        collect_hist_node.collect_hist.hist_keys.keys.append(hist_key)
        collect_hist_node.collect_hist.hist_keys.quant_type = common_pb2.QT_SINGLE
        hist_coll.initialize_empty_histogram(hist_key, num_bins)

        # Calibration inputs and outputs
        collect_hist_node.inputs.add().CopyFrom(cast_output_edge)
        collect_hist_output_edge = lgf_pb2.EdgeInfo()
        collect_hist_output_edge.CopyFrom(cast_output_edge)
        collect_hist_output_edge.name = collect_hist_node.name
        collect_hist_output_edge.port = 0
        collect_hist_node.outputs.add().CopyFrom(collect_hist_output_edge)

        to_add.append(collect_hist_node)
        to_reroute.append((edge_reroute, [], cast_output_edge, collect_hist_output_edge))

        # Reverse cast if necessary
        if insert_cast:
            # Reverse cast
            reverse_cast_node = lgf_pb2.LNF()
            reverse_cast_node.name = "{}_{}_reverse_cast".format(edge.name, edge.port)
            reverse_cast_node.supported = True
            reverse_cast_node.cast.SetInParent()

            # Reverse cast inputs and outputs
            reverse_cast_node.inputs.add().CopyFrom(collect_hist_output_edge)
            reverse_cast_output_edge = lgf_pb2.EdgeInfo()
            reverse_cast_output_edge.CopyFrom(collect_hist_output_edge)
            reverse_cast_output_edge.name = reverse_cast_node.name
            reverse_cast_output_edge.dtype.CopyFrom(edge.dtype)
            reverse_cast_node.outputs.add().CopyFrom(reverse_cast_output_edge)

            to_add.append(reverse_cast_node)
            to_reroute.append((edge_reroute,
                               [],
                               collect_hist_output_edge,
                               reverse_cast_output_edge))

        return base_transform.BaseTransform.create_transform_result(
            to_add=to_add,
            to_reroute=to_reroute)