Example #1
    def version_10(cls, ctx, node, **kwargs):
        # map to onnx as:
        # not (isinf(x) or isnan(x))

        shapes = node.output_shapes
        dtypes = [onnx_pb.TensorProto.BOOL] * len(node.output_dtypes)

        ctx.remove_node(node.name)

        inf_node = ctx.make_node("IsInf",
                                 inputs=node.input,
                                 name=utils.make_name(node.name),
                                 shapes=shapes,
                                 dtypes=dtypes)
        nan_node = ctx.make_node("IsNaN",
                                 inputs=node.input,
                                 name=utils.make_name(node.name),
                                 shapes=shapes,
                                 dtypes=dtypes)
        or_node = ctx.make_node(
            "Or",
            inputs=[inf_node.output[0], nan_node.output[0]],
            name=utils.make_name(node.name),
            shapes=shapes,
            dtypes=dtypes)
        _ = ctx.make_node("Not",
                          inputs=or_node.output,
                          name=node.name,
                          shapes=shapes,
                          dtypes=dtypes)
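
A quick numpy sketch (illustrative only, not part of the converter) showing that the mapping described in the comment above reproduces IsFinite:

import numpy as np

x = np.array([1.0, np.inf, -np.inf, np.nan, 0.0], dtype=np.float32)
# not (isinf(x) or isnan(x)) is exactly isfinite(x)
mapped = np.logical_not(np.isinf(x) | np.isnan(x))
assert np.array_equal(mapped, np.isfinite(x))
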
Example #2
    def version_11(cls, ctx, node, **kwargs):

        supported_dtypes = [
            onnx_pb.TensorProto.INT32, onnx_pb.TensorProto.INT64
        ]
        onnx_dtype = ctx.get_dtype(node.input[0])
        utils.make_sure(onnx_dtype in supported_dtypes,
                        "InvertPermutation only applies on INT32, INT64.")

        shape = ctx.get_shape(node.input[0])

        shape_node = ctx.make_node("Shape",
                                   inputs=node.input,
                                   name=utils.make_name(node.name + '_shape'))

        neg_node = ctx.make_node("Neg",
                                 inputs=node.input,
                                 name=utils.make_name(node.name + '_neg'))

        topk_node = ctx.make_node(
            "TopK",
            inputs=[neg_node.output[0], shape_node.output[0]],
            name=utils.make_name(node.name + '_topk'),
            output_count=2)

        ctx.remove_node(node.name)

        last_node = ctx.make_node("Identity",
                                  inputs=topk_node.output[1:],
                                  name=utils.make_name(node.name + '_indices'),
                                  shapes=[shape],
                                  dtypes=[onnx_dtype])

        ctx.replace_all_inputs(node.output[0],
                               last_node.output[0])  # ops=ctx.get_nodes()
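
The TopK trick above works because taking the top-k of the negated permutation visits its elements in ascending order, so the returned indices are argsort(p), i.e. the inverse permutation. A small numpy sketch with toy data (not converter code):

import numpy as np

p = np.array([2, 0, 3, 1], dtype=np.int64)
inv = np.argsort(p)  # what TopK(-p, k=len(p)) returns as its indices output
assert np.array_equal(inv[p], np.arange(len(p)))
assert np.array_equal(p[inv], np.arange(len(p)))
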
Example #3
    def _create_loop_node(self, context, loop_props, init_cond_output, branches=None):
        loop_outputs = []
        loop_output_shapes = []
        loop_output_dtypes = []
        for tensor_value_info in loop_props.state_outputs_exits + loop_props.scan_outputs_exits:
            if tensor_value_info.id:
                loop_outputs.append(tensor_value_info.id)
                loop_output_shapes.append(tensor_value_info.shape)
                loop_output_dtypes.append(tensor_value_info.dtype)
                n = self.g.get_node_by_output(tensor_value_info.id)
                self.g.remove_node(n.name)
            else:
                loop_outputs.append(utils.make_name("unused_loop_output_"))
                loop_output_shapes.append([-1])
                loop_output_dtypes.append(None)

        # trip count and cond are not used; give them values only to work around an onnxruntime bug
        # (https://github.com/Microsoft/onnxruntime/issues/255).
        trip_cnt = self.g.make_const(utils.make_name("trip_count"), np.array(sys.maxsize, dtype=np.int64))
        loop_node = self.g.make_node("Loop", [trip_cnt.output[0]] + [init_cond_output] +
                                     loop_props.state_inputs_initial_values,  # ONNX Loop supports state inputs only
                                     outputs=loop_outputs, op_name_scope="generic_loop",
                                     shapes=loop_output_shapes, dtypes=loop_output_dtypes,
                                     skip_conversion=False, branches=branches)

        return loop_node
Example #4
 def any_version(cls, opset, ctx, node, **kwargs):
     node.domain = constants.CONTRIB_OPS_DOMAIN
     separator = node.get_attr_value("separator")
     if separator is None:
         separator = b''
     separator = separator.decode('UTF-8')
     separator_node = ctx.make_const(utils.make_name("separator"),
                                     np.array([separator], np.object))
     axis_node = ctx.make_const(utils.make_name("axis"),
                                np.array([0], np.int64))
     inps_with_shapes = [i for i in node.input if ctx.get_shape(i) != []]
     shape_node = None
     if 0 < len(inps_with_shapes) < len(node.input):
         shape_node = ctx.make_node("Shape", [inps_with_shapes[0]])
     unsqueezes = []
     for inp in node.input:
         if ctx.get_shape(inp) == [] and shape_node is not None:
             expand_node = ctx.make_node("Expand",
                                         [inp, shape_node.output[0]])
             inp = expand_node.output[0]
         unsqueeze_node = GraphBuilder(ctx).make_unsqueeze({
             'data': inp,
             'axes': [0]
         })
         unsqueezes.append(unsqueeze_node)
     stack_node = ctx.make_node("Concat", unsqueezes, attr={'axis': 0})
     ctx.replace_inputs(node, [
         stack_node.output[0], separator_node.output[0], axis_node.output[0]
     ])
Example #5
 def version_1(cls, ctx, node, **kwargs):
     if 'scale' not in node.attr:
         # Sometimes tflite uses a Dequantize to go from fp16 to fp32
         node.type = "Cast"
         node.set_attr('to', ctx.get_dtype(node.output[0]))
         return
     scale = np.array(node.get_attr_value('scale'), dtype=np.float32)
     zero_point = np.array(node.get_attr_value('zero_point'), dtype=np.float32)
     axis = node.get_attr_value('quantized_dimension')
     in_rank = ctx.get_rank(node.input[0])
     def expand_tensor(t):
         if t.shape == (1,):
             return t[0]
         utils.make_sure(in_rank is not None, "Cannot dequantize node %s with unknown input rank", node.name)
         new_shape = [1] * in_rank
         new_shape[axis] = t.shape[0]
         return t.reshape(new_shape)
     scale = expand_tensor(scale)
     zero_point = expand_tensor(zero_point)
     if node.inputs[0].is_const():
         x_val = node.inputs[0].get_tensor_value(as_list=False).astype(np.float32)
         new_val = (x_val - zero_point) * scale
         dequant_const = ctx.make_const(utils.make_name(node.name), new_val)
         ctx.replace_all_inputs(node.output[0], dequant_const.output[0])
         ctx.remove_node(node.name)
     else:
         scale_const = ctx.make_const(utils.make_name(node.name + "_scale"), scale).output[0]
         zero_point_const = ctx.make_const(utils.make_name(node.name + "_zero_point"), zero_point).output[0]
         cast_node = ctx.make_node("Cast", [node.input[0]], attr={'to': TensorProto.FLOAT},
                                   op_name_scope=node.name).output[0]
         sub_node = ctx.make_node("Sub", [cast_node, zero_point_const], op_name_scope=node.name).output[0]
         mul_node = ctx.make_node("Mul", [sub_node, scale_const], op_name_scope=node.name).output[0]
         ctx.replace_all_inputs(node.output[0], mul_node)
         ctx.remove_node(node.name)
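
The non-const branch above builds (x - zero_point) * scale; a tiny numpy illustration with made-up values:

import numpy as np

x = np.array([0, 128, 255], dtype=np.uint8).astype(np.float32)
scale, zero_point = np.float32(0.5), np.float32(128.0)
dequantized = (x - zero_point) * scale  # -> [-64.0, 0.0, 63.5]
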
Example #6
    def process_weights_and_bias_per_layer(self, context, i):
        weights = context.weights[i]
        w_r_icfo = weights["weight"]
        w_dtype = weights["weight"].dtype
        b_r_icfo = weights["bias"]
        b_dtype = weights["bias"].dtype
        ft_bias_scalar = weights["ft_bias"]

        # split bias for each hidden unit
        # b_r_icfo: (4 * num_units,)
        bias_dim = b_r_icfo.shape[0]
        hidden_size = int(bias_dim / 4)
        b_r_icfo = np.reshape(b_r_icfo, (1, bias_dim))
        bias_gates = np.split(b_r_icfo, 4, axis=1)
        ft_bias = np.add(bias_gates[2], ft_bias_scalar)
        wb_bias_iofc = np.concatenate(
            (bias_gates[0], bias_gates[3], ft_bias, bias_gates[1]), axis=1)

        # fill Rb with zeros since TF has only one bias.
        rb_bias_iofc = np.zeros((1, bias_dim), dtype=b_dtype)
        B = np.concatenate((wb_bias_iofc, rb_bias_iofc), axis=1)
        assert B.shape == (1, 2 * bias_dim)

        [wx, wh] = np.split(w_r_icfo, [-1 * hidden_size])
        input_size = wx.shape[0]
        assert wx.shape[0] == input_size
        assert int(wx.shape[1] / 4) == hidden_size

        # split weight for gates
        w_gates = np.split(wx, 4, axis=1)
        new_wx = np.concatenate(
            (w_gates[0], w_gates[3], w_gates[2], w_gates[1]), axis=1)

        h_gates = np.split(wh, 4, axis=1)
        new_wh = np.concatenate(
            (h_gates[0], h_gates[3], h_gates[2], h_gates[1]), axis=1)
        W_iofc = np.transpose(new_wx)
        R_iofc = np.transpose(new_wh)

        W = np.array([W_iofc], w_dtype)
        R = np.array([R_iofc], w_dtype)

        # create node
        w_name = utils.make_name("W" + str(i))
        w_node = self.g.make_const(w_name, W, skip_conversion=True)

        r_name = utils.make_name("R" + str(i))
        r_node = self.g.make_const(r_name, R, skip_conversion=True)

        b_name = utils.make_name("B" + str(i))
        b_node = self.g.make_const(b_name, B, skip_conversion=True)

        context.input_size[i] = input_size
        context.hidden_size[i] = hidden_size
        context.onnx_input_ids[i]["W"] = w_node.output[0]
        context.onnx_input_ids[i]["R"] = r_node.output[0]
        context.onnx_input_ids[i]["B"] = b_node.output[0]
Example #7
def wire_tfl_while_body(g, loop_node_inputs, output_shapes, output_dtypes,
                        cond_graph, scan_outputs):
    """Wire subgraph graph into main."""

    g = copy.deepcopy(g)
    graph_inputs = g.inputs.copy()

    # onnx will pass in cond as argument
    iter_node = g.make_node("Placeholder", [],
                            name=utils.make_name("iteration_num"),
                            output_count=1,
                            dtypes=[TensorProto.INT64],
                            shapes=[[]])
    cond_node = g.make_node("Placeholder", [],
                            name=utils.make_name("cond"),
                            output_count=1,
                            dtypes=[TensorProto.BOOL],
                            shapes=[[]])
    cond_binding = parameter_binding(cond_graph, g.outputs)

    to_remove = set()
    for idx, scan_output in scan_outputs:
        inp = graph_inputs[idx]

        # Remove consumers of scan input
        stack = [inp]
        while stack:
            node = stack.pop()
            if node not in to_remove:
                to_remove.add(node)
                for out in node.output:
                    stack += g.find_output_consumers(out)

        # Remove scan input from cond graph
        cond_binding = {
            k: "@@ALLOC" if v == g.outputs[idx] else v
            for k, v in cond_binding.items()
        }
        del g.inputs[idx]
        del g.outputs[idx]
        g.outputs.append(scan_output)

    for node in to_remove:
        g.remove_node(node.name)

    # in onnx the body inputs are: index, cond, [loop_vars]
    g.inputs = [iter_node, cond_node] + g.inputs

    for p, c in zip(loop_node_inputs, g.input_names):
        shape = p.output_shapes[0]
        g.set_shape(c, shape)

    cond_outputs = inline_subgraph(g, cond_graph, "cond__", cond_binding)

    g.outputs = [cond_outputs[0]] + g.outputs
    return g
Example #8
 def version_11(cls, ctx, node, **kwargs):
     # add min and max as inputs
     node.type = "Clip"
     onnx_dtype = ctx.get_dtype(node.input[0])
     np_dtype = utils.ONNX_TO_NUMPY_DTYPE[onnx_dtype]
     clip_min = ctx.make_const(utils.make_name("{}_min".format(node.name)),
                               np.array(0.0, dtype=np_dtype))
     clip_max = ctx.make_const(utils.make_name("{}_max".format(node.name)),
                               np.array(6.0, dtype=np_dtype))
     node.input.append(clip_min.output[0])
     node.input.append(clip_max.output[0])
Example #9
    def any_version(cls, opset, ctx, node, **kwargs):
        """
        Computes the modules of a complex.
        If the matrix dtype is not complex64 or complex128,
        it assumes the first dimension means real part (0)
        and imaginary part (1, :, :...).
        """
        supported_dtypes = [
            onnx_pb.TensorProto.FLOAT,
            onnx_pb.TensorProto.FLOAT16,
            onnx_pb.TensorProto.DOUBLE,
            onnx_pb.TensorProto.COMPLEX64,
            onnx_pb.TensorProto.COMPLEX128,
        ]
        onnx_dtype = ctx.get_dtype(node.input[0])
        utils.make_sure(onnx_dtype in supported_dtypes, "Unsupported input type.")
        shape = ctx.get_shape(node.input[0])
        np_dtype = utils.map_onnx_to_numpy_type(onnx_dtype)
        utils.make_sure(shape[0] == 2, "ComplexAbs expected the first dimension to be 2 but shape is %r", shape)

        ind0 = ctx.make_const(name=utils.make_name('cst0'), np_val=np.array([0], dtype=np.int64))
        ind1 = ctx.make_const(name=utils.make_name('cst1'), np_val=np.array([1], dtype=np.int64))
        p2 = ctx.make_const(name=utils.make_name('p2'), np_val=np.array([2], dtype=np_dtype))

        real_part = ctx.make_node(
            'Gather', inputs=[node.input[0], ind0.name], attr=dict(axis=0),
            name=utils.make_name('Real_' + node.name))
        imag_part = ctx.make_node(
            'Gather', inputs=[node.input[0], ind1.name], attr=dict(axis=0),
            name=utils.make_name('Imag_' + node.name))

        real_part2 = ctx.make_node(
            'Pow', inputs=[real_part.output[0], p2.name],
            name=utils.make_name(real_part.name + 'p2p'))

        imag_part2 = ctx.make_node(
            'Pow', inputs=[imag_part.output[0], p2.name],
            name=utils.make_name(imag_part.name + 'p2p'))

        ctx.remove_node(node.name)
        add = ctx.make_node(
            "Add", inputs=[real_part2.output[0], imag_part2.output[0]],
            name=utils.make_name('ComplexAbs_' + node.name))

        squeezed = GraphBuilder(ctx).make_squeeze(
            {'data': add.output[0], 'axes': [0]}, name=utils.make_name('ComplexAbs' + node.name), return_node=True)

        last_node = ctx.make_node(
            "Sqrt", inputs=squeezed.output[:1],
            name=utils.make_name('ComplexAbs' + node.name),
            shapes=[shape[1:]], dtypes=[onnx_dtype])

        ctx.replace_all_inputs(node.output[0], last_node.output[0])  # ops=ctx.get_nodes()
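
Numerically, the subgraph assembled above computes sqrt(real**2 + imag**2) over a tensor whose first dimension stacks the real and imaginary parts; an illustrative numpy check:

import numpy as np

x = np.array([[3.0, 0.0], [4.0, 1.0]], dtype=np.float32)  # x[0]=real, x[1]=imag
modulus = np.sqrt(x[0] ** 2 + x[1] ** 2)                  # -> [5.0, 1.0]
assert np.allclose(modulus, np.abs(x[0] + 1j * x[1]))
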
Example #10
    def version_8(cls, ctx, node, **kwargs):
        supported = [
            onnx_pb.TensorProto.FLOAT16, onnx_pb.TensorProto.FLOAT,
            onnx_pb.TensorProto.DOUBLE
        ]
        # fetch those upfront since they are not accessible once we remove 'node'
        shapes = node.output_shapes
        dtypes = node.output_dtypes
        input_dtype = ctx.get_dtype(node.input[0])
        name = node.name
        min_node = node.input[1]
        if ctx.get_dtype(min_node) not in supported:
            # cast min if needed
            min_node = ctx.insert_new_node_on_input(
                node, "Cast", min_node, to=onnx_pb.TensorProto.FLOAT).output[0]
        max_node = node.input[2]
        if ctx.get_dtype(max_node) not in supported:
            # cast max if needed
            max_node = ctx.insert_new_node_on_input(
                node, "Cast", max_node, to=onnx_pb.TensorProto.FLOAT).output[0]
        ctx.remove_node(name)
        new_node = ctx.make_node("Max", [node.input[0], min_node],
                                 outputs=[node.output[0]],
                                 shapes=shapes,
                                 dtypes=dtypes)
        if input_dtype not in supported:
            # cast the data tensor if needed
            ctx.insert_new_node_on_input(new_node,
                                         "Cast",
                                         new_node.input[0],
                                         to=onnx_pb.TensorProto.FLOAT)

        new_node = ctx.insert_new_node_on_output("Min",
                                                 new_node.output[0],
                                                 name=utils.make_name(name))
        new_node.input.append(max_node)
        # copy shape and type
        ctx.set_dtype(new_node.output[0], dtypes[0])
        ctx.set_shape(new_node.output[0], shapes[0])
        if dtypes[0] not in supported:
            # cast output if needed
            new_node = ctx.insert_new_node_on_output(
                "Cast",
                new_node.output[0],
                name=utils.make_name(name),
                to=dtypes[0])
            # copy shape and type
            ctx.set_dtype(new_node.output[0], dtypes[0])
            ctx.set_shape(new_node.output[0], shapes[0])
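
The Max-then-Min composition above is the usual clip identity, clip(x, lo, hi) == min(max(x, lo), hi); a small numpy sketch:

import numpy as np

x = np.array([-2.0, 0.5, 9.0], dtype=np.float32)
lo, hi = np.float32(0.0), np.float32(1.0)
clipped = np.minimum(np.maximum(x, lo), hi)  # -> [0.0, 0.5, 1.0]
assert np.array_equal(clipped, np.clip(x, lo, hi))
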
Example #11
    def version_11(cls, ctx, node, **kwargs):
        dir_map = {"LeftShift": "LEFT", "RightShift": "RIGHT"}
        direction = dir_map[node.type]
        supported = [
            onnx_pb.TensorProto.UINT8, onnx_pb.TensorProto.UINT16,
            onnx_pb.TensorProto.UINT32, onnx_pb.TensorProto.UINT64
        ]
        type_map = {
            onnx_pb.TensorProto.INT8: onnx_pb.TensorProto.UINT8,
            onnx_pb.TensorProto.INT16: onnx_pb.TensorProto.UINT32,
            onnx_pb.TensorProto.INT32: onnx_pb.TensorProto.UINT64
        }
        shapes = node.output_shapes
        dtypes = node.output_dtypes
        ctx.remove_node(node.name)

        node = ctx.make_node("BitShift",
                             inputs=node.input,
                             outputs=node.output,
                             name=node.name,
                             shapes=shapes,
                             dtypes=dtypes,
                             domain=constants.ONNX_DOMAIN,
                             attr={'direction': direction})

        if node.maybe_cast_input([supported, supported], type_map):
            cast_back_node = ctx.insert_new_node_on_output(
                "Cast",
                node.output[0],
                name=utils.make_name(node.name) + "_castback",
                to=dtypes[0])
            ctx.set_dtype(cast_back_node.output[0], dtypes[0])
            ctx.copy_shape(node.name, cast_back_node.output[0])
Example #12
    def version_6(cls, ctx, node, **kwargs):
        # T output = All(T x, list(int) reduce_indices, @bool keepdims)
        # T output = Any(T x, list(int) reduce_indices, @bool keepdims)
        reduce_dim = node.inputs[1].get_tensor_value()

        # for Any, the reduce_indices can be scalar as observed.
        if np.isscalar(reduce_dim):
            reduce_dim = [reduce_dim]

        if ctx.opset < 11:
            utils.make_sure(all(i >= 0 for i in reduce_dim), "negative reduce axis is not supported in onnx for now")

        cast = ctx.make_node(op_type="Cast", inputs=[node.input[0]], attr={"to": onnx_pb.TensorProto.FLOAT})
        keepdims = helper.get_attribute_value(node.get_attr("keep_dims"))
        op_type = "ReduceMin" if node.type == "All" else "ReduceSum"

        if op_type == "ReduceSum":
            reduce_node_output = GraphBuilder(ctx).make_reduce_sum(
                {"data": cast.output[0], "axes": reduce_dim, "keepdims": keepdims, "noop_with_empty_axes": 1})
        else:
            reduce_node_output = ctx.make_node(op_type=op_type, inputs=cast.output,
                                               attr={"axes": reduce_dim, "keepdims": keepdims}).output[0]

        zero_node = ctx.make_const(utils.make_name("zero_reduce"), np.array(0, dtype=np.float32))

        shapes = node.output_shapes
        dtypes = node.output_dtypes
        ctx.remove_node(node.name)
        ctx.make_node(op_type="Greater", inputs=[reduce_node_output, zero_node.output[0]],
                      name=node.name, outputs=node.output, shapes=shapes, dtypes=dtypes)
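
The reduction above encodes All as ReduceMin and Any as ReduceSum over the bools cast to float, followed by a comparison against zero; a numpy illustration (toy data only):

import numpy as np

x = np.array([[True, False], [True, True]])
as_float = x.astype(np.float32)
any_ = as_float.sum(axis=1) > 0  # ReduceSum path -> [ True,  True]
all_ = as_float.min(axis=1) > 0  # ReduceMin path -> [False,  True]
assert np.array_equal(any_, np.any(x, axis=1))
assert np.array_equal(all_, np.all(x, axis=1))
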
Example #13
 def test_rewrite_subgraph(self):
     graph_proto = self.sample_net()
     g = GraphUtil.create_graph_from_onnx_graph(graph_proto)
     pattern = \
         OpTypePattern('Abs', name='output', inputs=[
             OpTypePattern('Add', name='input')
         ])
     ops = g.get_nodes()
     matcher = GraphMatcher(pattern)
     match_results = list(matcher.match_ops(ops))
     for match in match_results:
         input_node = match.get_op('input')
         output_node = match.get_op('output')
         op_name = utils.make_name("ReplacedOp")
         out_name = utils.port_name(op_name)
         new_node = g.make_node("Sub",
                                inputs=input_node.input,
                                outputs=[out_name],
                                name=op_name)
         g.replace_all_inputs(output_node.output[0],
                              new_node.output[0])  # ops=ops
         for n in set(match.get_nodes()):
             g.remove_node(n.name)
     g.topological_sort(ops)
     result = onnx_to_graphviz(g)
     expected = 'digraph { Placeholder__5 [op_type=Placeholder] n1 [op_type=Abs] ' \
                'n3 [op_type=Abs] n2 [op_type=Abs] ReplacedOp__6 [op_type=Sub] ' \
                'n6 [op_type=Identity] n5_graph_outputs_Identity__4 [op_type=Identity] ' \
                'input -> n1 n1:0 -> n3 n1:0 -> n2 n2:0 -> ReplacedOp__6 n3:0 -> ReplacedOp__6 ' \
                'ReplacedOp__6:0 -> n6 ReplacedOp__6:0 -> n5_graph_outputs_Identity__4 }'
     self.assertEqual(expected, result)
Example #14
    def version_9(cls, ctx, node, **kwargs):
        # T output = Select(bool condition, T x, T y)
        # T1 output = Where(bool condition, T1 x, T1 y)
        # NOTE: condition can be 1-dimensional in tensorflow, while in onnx
        # it must be broadcastable with the other two inputs
        if ctx.get_dtype(node.output[0]) != TensorProto.STRING:
            # Due to bad ORT implementation, Mul/Add ops are faster than Where op
            cls.version_7(ctx, node, **kwargs)
            return

        cond_shape = ctx.get_shape(node.input[0])
        input_shape = ctx.get_shape(node.input[1])
        if input_shape is None:
            input_shape = ctx.get_shape(node.input[2])
        input_rank = len(input_shape) if input_shape is not None else None
        cond_rank = len(cond_shape) if cond_shape is not None else None
        # if cond is 1-dimensional while the input has a higher rank, it needs to be reshaped to broadcast
        if node.type == "Select" and cond_rank == 1 and input_rank != 1:
            utils.make_sure(input_rank is not None,
                            "input_rank unknown and cond_rank == 1")
            broadcast_shape = [cond_shape[0]] + [1] * (input_rank - 1)
            shape_const = ctx.make_const(
                utils.make_name(node.name),
                np.array(broadcast_shape, dtype=np.int64))
            reshape = ctx.make_node("Reshape",
                                    [node.input[0], shape_const.output[0]])
            ctx.replace_input(node, node.input[0], reshape.output[0], 0)
        node.type = "Where"
Example #15
    def version_1(cls, ctx, node, **kwargs):
        # output_type output = ArgMin(T input, Tidx dimension, @type Tidx, @type output_type)
        # tensor(int32) reduced = ArgMin(T data, @INT axis, @INT keepdims)
        axis_node = node.inputs[1]
        axis = axis_node.get_tensor_value()
        if axis < 0:
            # ONNX ArgMax/ArgMin don't necessarily support a negative axis (the docs don't state it explicitly)
            input_shape = ctx.get_shape(node.input[0])
            dim_count = len(input_shape) if input_shape else 0
            axis = dim_count + axis

        # TF ArgMin/ArgMax may return int32 or int64
        # Onnx ArgMin/ArgMax only supports int64 output, add cast if needed
        if node.get_attr_int("output_type") == onnx_pb.TensorProto.INT32:
            # after conversion the current node will return int64, which differs from the dtype coming from tf
            ctx.set_dtype(node.output[0], onnx_pb.TensorProto.INT64)
            op_name = utils.make_name("Cast")
            cast_node = ctx.insert_new_node_on_output("Cast", node.output[0], name=op_name,
                                                      to=onnx_pb.TensorProto.INT32)
            ctx.set_dtype(cast_node.output[0], onnx_pb.TensorProto.INT32)
            ctx.copy_shape(node.output[0], cast_node.output[0])

        node.set_attr("axis", axis)
        node.set_attr("keepdims", 0)
        ctx.remove_input(node, node.input[1], 1)
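
A quick illustration (not converter code) of the axis normalization above, axis = rank + axis for negative values:

import numpy as np

x = np.zeros((4, 5, 6), dtype=np.float32)
axis = -1
normalized = x.ndim + axis  # -> 2, the explicit axis ONNX receives
assert np.array_equal(np.argmin(x, axis=axis), np.argmin(x, axis=normalized))
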
Example #16
 def version_1(cls, ctx, node, **kwargs):
     node.type = "Sqrt"
     op_name = utils.make_name(node.name)
     reciprocal = ctx.insert_new_node_on_output("Reciprocal",
                                                node.output[0],
                                                name=op_name)
     ctx.copy_shape(node.output[0], reciprocal.output[0])
Example #17
    def version_1(cls, ctx, node, **kwargs):
        # ONNX: Each input value is divided by (bias+(alpha/size)*sum(xi^2 for every xi in the local region))^beta
        # TF: sqr_sum[a, b, c, d] = sum(input[a, b, c, d - depth_radius : d + depth_radius + 1] ** 2)
        #     output = input / (bias + alpha * sqr_sum) ** beta

        # by default, depth_radius is 5 in tensorflow
        size = node.get_attr_value("depth_radius", 5) * 2 + 1

        node.set_attr("size", size)
        node.set_attr("alpha", size * node.get_attr("alpha").f)

        shapes = node.output_shapes[0]
        dtypes = node.output_dtypes[0]

        ctx.insert_new_node_on_input(node,
                                     "Transpose",
                                     node.input[0],
                                     perm=constants.NHWC_TO_NCHW)
        ctx.update_node_shape_dtype(node, override=True)
        op_name = utils.make_name(node.name)
        ctx.insert_new_node_on_output("Transpose",
                                      node.output[0],
                                      perm=constants.NCHW_TO_NHWC,
                                      name=op_name,
                                      shapes=shapes,
                                      dtypes=dtypes)
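
The alpha rescaling above compensates for ONNX dividing alpha by size: passing size * alpha makes ONNX's (alpha / size) * sqr_sum equal TF's alpha * sqr_sum. A quick arithmetic check with the TF default depth_radius (assumed values otherwise):

depth_radius, tf_alpha = 5, 1e-4
size = 2 * depth_radius + 1              # 11 values in the local region
onnx_alpha = size * tf_alpha
assert abs(onnx_alpha / size - tf_alpha) < 1e-12
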
Example #18
 def version_1(cls, ctx, node, **kwargs):
     node.type = "Sub"
     op_name = utils.make_name(node.name)
     mul = ctx.insert_new_node_on_output("Mul",
                                         node.output[0],
                                         name=op_name)
     mul.input.append(node.output[0])
Example #19
 def version_1(cls, ctx, node, **kwargs):
     if ctx.is_target(constants.TARGET_CAFFE2):
         # work around a bug in caffe2 (pre Feb 2018): pow(a, b) becomes exp(log(a) * b)
         node.type = "Log"
         b = node.input[1]
         ctx.remove_input(node, node.input[1], 1)
         op_name = utils.make_name(node.name)
         mul_op = ctx.insert_new_node_on_output("Mul",
                                                node.output[0],
                                                name=op_name)
         mul_op.input.append(b)
         op_name = utils.make_name(node.name)
         exp_op = ctx.insert_new_node_on_output("Exp",
                                                mul_op.output[0],
                                                name=op_name)
         ctx.copy_shape(node.output[0], exp_op.output[0])
         BroadcastOp.version_1(ctx, mul_op, **kwargs)
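
The caffe2 workaround above rewrites pow(a, b) as exp(log(a) * b), which is valid for positive bases; an illustrative numpy check:

import numpy as np

a = np.array([1.0, 2.0, 4.0], dtype=np.float64)
b = np.array([3.0, 0.5, 2.0], dtype=np.float64)
assert np.allclose(np.exp(np.log(a) * b), np.power(a, b))
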
Example #20
 def version_1(cls, ctx, node, **kwargs):
     node.domain = constants.CONTRIB_OPS_DOMAIN
     num_buckets = node.get_attr_int('num_buckets')
     num_buckets_const = ctx.make_const(
         utils.make_name('num_buckets'),
         np.array([num_buckets], dtype=np.int64))
     ctx.replace_inputs(node, [node.input[0], num_buckets_const.output[0]])
     del node.attr['num_buckets']
Example #21
 def version_1(cls, ctx, node, **kwargs):
     node.domain = constants.CONTRIB_OPS_DOMAIN
     node.type = "StringRegexReplace"
     pattern = node.get_attr_str("pattern")
     rewrite = node.get_attr_str("rewrite")
     utils.make_sure(
         node.get_attr_value("replace_global") != 0,
         "Can not convert StaticRegexReplace if replace_global is False")
     pattern_node = ctx.make_const(utils.make_name("pattern"),
                                   np.array([pattern], np.object))
     rewrite_node = ctx.make_const(utils.make_name("rewrite"),
                                   np.array([rewrite], np.object))
     del node.attr["pattern"]
     del node.attr["rewrite"]
     del node.attr["replace_global"]
     ctx.replace_inputs(
         node,
         [node.input[0], pattern_node.output[0], rewrite_node.output[0]])
Example #22
 def version_10(cls, ctx, node, **kwargs):
     scale = node.get_attr_value('scale')
     zero_point = node.get_attr_value('zero_point')
     axis = node.get_attr_value('quantized_dimension')
     np_q_type = utils.map_onnx_to_numpy_type(ctx.get_dtype(node.output[0]))
     if len(scale) > 1 or len(zero_point) > 1:
         utils.make_sure(ctx.opset >= 13, "Opset 13 is required for per-axis quantization for node %s", node.name)
         node.set_attr("axis", axis)
     scale_node = ctx.make_const(utils.make_name("scale"), np.array(scale[0], dtype=np.float32))
     zero_point_node = ctx.make_const(utils.make_name("zero_point"), np.array(zero_point[0], dtype=np_q_type))
     ctx.replace_inputs(node, [node.input[0], scale_node.output[0], zero_point_node.output[0]])
     del node.attr["scale"]
     del node.attr["zero_point"]
     del node.attr["quantized_dimension"]
     if "min" in node.attr:
         del node.attr["min"]
     if "max" in node.attr:
         del node.attr["max"]
Example #23
 def version_7(cls, ctx, node, **kwargs):
     GreaterLess.version_7(ctx, node, **kwargs)
     output_name = node.output[0]
     node.op.op_type = "Less" if node.op.op_type == "GreaterEqual" else "Greater"
     new_node = ctx.insert_new_node_on_output("Not",
                                              output_name,
                                              name=utils.make_name(
                                                  node.name))
     ctx.copy_shape(output_name, new_node.output[0])
     ctx.set_dtype(new_node.output[0], ctx.get_dtype(output_name))
Example #24
 def version_1(cls, ctx, node, **kwargs):
     need_not = node.type == "NotEqual"
     common.BroadcastOp.version_1(ctx, node, **kwargs)
     if need_not:
         node.type = "Equal"
         output_name = node.output[0]
         not_node = ctx.insert_new_node_on_output("Not",
                                                  output_name,
                                                  name=utils.make_name(
                                                      node.name))
         ctx.copy_shape(output_name, not_node.output[0])
         ctx.copy_dtype(output_name, not_node.output[0])
Example #25
 def to_tf(cls, ctx, node, **kwargs):
     if len(node.input) == 1 or ctx.get_rank(node.input[1]) != 1:
         new_shape = node.get_attr_value('new_shape')
         if new_shape == [0]:
             # Legacy tflite models use a shape parameter of [0] to indicate scalars
             new_shape = []
         new_shape_const = ctx.make_const(utils.make_name("new_shape"),
                                          np.array(new_shape, np.int64))
         ctx.replace_inputs(node,
                            [node.input[0], new_shape_const.output[0]])
     if 'new_shape' in node.attr:
         del node.attr['new_shape']
Example #26
 def version_9(cls, ctx, node, **kwargs):
     # T_y output = Where(T_x condition): returns the indices of elements whose value is True
     node.type = "NonZero"
     # in onnx, indices are returned in this way [[ind_a_0, ind_b_0, ...], [ind_a_1, ind_b_1,...]];
     # while in tf, the result will be [[ind_a_0, ind_a_1, ...], [ind_b_0, ind_b_1, ...], ...]
     # this is the reason a transpose node is inserted here.
     transpose_node = ctx.insert_new_node_on_output(
         "Transpose",
         node.output[0],
         name=utils.make_name("where_op_added"))
     ctx.copy_shape(node.output[0], transpose_node.output[0])
     ctx.copy_dtype(node.output[0], transpose_node.output[0])
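
The appended Transpose bridges the layout difference the comment describes: ONNX NonZero yields one row per dimension, while tf.where yields one row per matching element. A numpy comparison (illustrative only):

import numpy as np

cond = np.array([[True, False], [False, True]])
onnx_style = np.nonzero(cond)   # one array per dimension: ([0, 1], [0, 1])
tf_style = np.argwhere(cond)    # one row per element: [[0, 0], [1, 1]]
assert np.array_equal(np.transpose(onnx_style), tf_style)
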
Example #27
def process_single_init_node(g, fw_init_input_id, bw_init_input_id, to_append):
    fw_init_is_const, init_fw_val = check_const(g, fw_init_input_id)
    bw_init_is_const, init_bw_val = check_const(g, bw_init_input_id)
    if fw_init_is_const and bw_init_is_const:
        initial_val = np.concatenate((init_fw_val, init_bw_val), axis=0)
        init_name = utils.make_name("initial")
        init_node = g.make_const(init_name, initial_val, skip_conversion=True)
    else:
        init_node = g.make_node("Concat", [fw_init_input_id, bw_init_input_id],
                                attr={"axis": 0})

    to_append.append(init_node)
    return init_node
Example #28
 def version_11(cls, ctx, node, **kwargs):
     # starting with opset-11, equal supports all types (but both operands must be of the same type)
     _add_cast_to_same_type_to_inputs(ctx, node)
     need_not = node.type == "NotEqual"
     if need_not:
         node.type = "Equal"
         output_name = node.output[0]
         not_node = ctx.insert_new_node_on_output("Not",
                                                  output_name,
                                                  name=utils.make_name(
                                                      node.name))
         ctx.copy_shape(output_name, not_node.output[0])
         ctx.copy_dtype(output_name, not_node.output[0])
Example #29
def make_range_const(ctx, start, limit, delta, output, scope_name, shape,
                     dtype):
    """make Range subgraph if all inputs are const."""
    # T range = Range(T start, T limit, T delta)
    # V v_final_and_scan_outputs = Loop(int64 M, B cond, V v_initial)
    base_name = utils.make_name(scope_name)
    start = ctx.get_node_by_output(start).get_tensor_value(as_list=False)
    limit = ctx.get_node_by_output(limit).get_tensor_value(as_list=False)
    delta = ctx.get_node_by_output(delta).get_tensor_value(as_list=False)
    val = np.arange(start, limit, delta, dtype=start.dtype)
    const_range = ctx.make_const(base_name, val)
    ctx.make_node("Identity", [const_range.output[0]],
                  shapes=[shape],
                  dtypes=[dtype],
                  outputs=[output])
Example #30
 def version_6(cls, ctx, node, **kwargs):
     # T output = FloorDiv(T x, T y)
     node.type = "Div"
     dtype = ctx.get_dtype(node.input[0])
     if dtype in [
             onnx_pb.TensorProto.FLOAT, onnx_pb.TensorProto.FLOAT16,
             onnx_pb.TensorProto.DOUBLE
     ]:
         new_node_name = utils.make_name("floor_div_res")
         floor_res = ctx.insert_new_node_on_output(
             op_type="Floor",
             output_name=node.output[0],
             name=new_node_name)
         ctx.copy_dtype(node.output[0], floor_res.output[0])
         ctx.copy_shape(node.output[0], floor_res.output[0])
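
The Floor appended above turns the plain Div into floor division for floating-point inputs; a numpy sketch of the equivalence:

import numpy as np

x = np.array([7.0, -7.0], dtype=np.float32)
y = np.array([2.0, 2.0], dtype=np.float32)
assert np.array_equal(np.floor(x / y), np.floor_divide(x, y))  # [3.0, -4.0]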