Example #1
 def before(x):
     inj = relay.squeeze(x)
     y1 = relay.add(inj, relay.const(1, "float32"))
     tmp = relay.squeeze(inj)
     tmp = relay.add(tmp, relay.const(1, "float32"))
     y2 = relay.add(tmp, relay.const(1, "float32"))
     y3 = relay.add(inj, relay.const(1, "float32"))
     concat = relay.concatenate((y1, y2, y3), axis=1)
     out_inj = relay.squeeze(concat)
     out = relay.add(out_inj, relay.const(1, "float32"))
     return relay.Function(relay.ir_pass.free_vars(out), out)
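The `before` functions in these first examples come from TVM's operator-fusion tests: `squeeze` is an injective op, so chains of such ops get grouped into fused functions. A minimal sketch of how a harness might exercise the function above (the input shape and the use of the newer `relay.transform` API are assumptions; the example itself uses the older `relay.ir_pass` namespace):

import tvm
from tvm import relay

x = relay.var("x", shape=(1, 16, 16, 16))  # hypothetical input shape
mod = tvm.IRModule.from_expr(before(x))
mod = relay.transform.InferType()(mod)
mod = relay.transform.FuseOps(fuse_opt_level=2)(mod)
print(mod)  # injective ops (squeeze, add, concatenate) end up in fused groups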
Example #2
 def before():
     x = relay.var("x", shape=(10, 20))
     y = relay.add(x, relay.const(1, "float32"))
     z = relay.squeeze(y)
     u = relay.transpose(y, axes=[0, 1])
     w = relay.left_shift(z, u)
     return relay.Function([x], w)
Example #3
def test_squeeze_infer_type():
    n, t, d = 1, 4, 1
    x = relay.var("x", relay.TensorType((n, t, d), "float32"))
    y = relay.squeeze(x, axis=(2,))
    assert "axis=" in y.astext()
    yy = relay.ir_pass.infer_type(y)
    assert yy.checked_type == relay.TensorType(
        (1, 4), "float32")

    n, t, d = 1, 4, 1
    x = relay.var("x", relay.TensorType((n, t, d), "float32"))
    y = relay.squeeze(x)
    assert "axis=" not in y.astext()
    yy = relay.ir_pass.infer_type(y)
    assert yy.checked_type == relay.TensorType(
        (4,), "float32")
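For reference, NumPy's `squeeze` has the same semantics the two assertions above check: a listed axis must have extent 1 and is removed; with no axis, every unit axis is removed. A quick sketch:

import numpy as np

a = np.zeros((1, 4, 1))
assert np.squeeze(a, axis=(2,)).shape == (1, 4)  # only the listed unit axis is dropped
assert np.squeeze(a).shape == (4,)               # no axis: all unit axes are dropped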
Example #4
 def expected():
     x = relay.var("p", shape=(10, 20))
     y = relay.add(x, relay.const(1, "float32"))
     z = relay.exp(y)
     w = relay.squeeze(z)
     f1 = relay.Function([x], w)
     x = relay.var("x", shape=(10, 20))
     y = relay.Call(f1, [x])
     return relay.Function([x], y)
Example #5
 def expected():
     x = relay.var("p", shape=(10, 20))
     y = relay.add(x, relay.const(1, "float32"))
     z = relay.squeeze(y)
     u = relay.transpose(y, axes=[0, 1])
     w = relay.left_shift(z, u)
     f1 = relay.Function([x], w)
     x = relay.var("x", shape=(10, 20))
     y = relay.Call(f1, [x])
     return relay.Function([x], y)
Example #6
 def expected(x, conv_weight, out_scale, channels):
     # use a fixed order of args so alpha equal check can pass
     args = [x, conv_weight]
     squeezed_scale = relay.squeeze(out_scale, axis=[1, 2])
     conv_weight = relay.multiply(
         conv_weight, relay.expand_dims(squeezed_scale, axis=1, num_newaxis=3))
     y = relay.nn.conv2d(x, conv_weight,
                         channels=channels,
                         kernel_size=(3, 3),
                         padding=(1, 1))
     return relay.Function(args, y)
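This `expected` reflects a FoldScaleAxis-style rewrite: a per-output-channel multiply after `conv2d` is folded into the OIHW kernel, with `expand_dims(squeezed_scale, axis=1, num_newaxis=3)` turning a `(C,)` scale into `(C, 1, 1, 1)` so it broadcasts along the O axis. A NumPy sketch of the underlying identity (1x1 conv for brevity; all shapes here are assumptions):

import numpy as np

O, I, H, W = 4, 3, 8, 8
x = np.random.rand(1, I, H, W).astype("float32")
w = np.random.rand(O, I).astype("float32")      # the OI part of a 1x1 OIHW kernel
s = np.random.rand(O).astype("float32")         # per-output-channel scale

def conv1x1(x, w):
    # a 1x1 NCHW conv with an (O, I) kernel is a per-pixel channel matmul
    return np.einsum("nchw,oc->nohw", x, w)

lhs = conv1x1(x, w) * s[None, :, None, None]    # scale applied after the conv
rhs = conv1x1(x, w * s[:, None])                # scale folded into the kernel
np.testing.assert_allclose(lhs, rhs, rtol=1e-5)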
Example #7
    def verify_squeeze(shape, dtype, axis):
        x = relay.var("x", relay.TensorType(shape, dtype))
        squeeze = relay.squeeze(x, axis=axis)

        np_axis = tuple(axis) if axis is not None else None

        data = np.random.random_sample(shape).astype(dtype)
        intrp = create_executor()
        op_res = intrp.evaluate(squeeze, {x: relay.const(data)})
        ref_res = np.squeeze(data, axis=np_axis)
        np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=0.01)
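Hypothetical invocations matching the helper's signature (these shapes are assumptions, not taken from the original test file):

verify_squeeze((1, 3, 2, 5), "float32", None)
verify_squeeze((1, 3, 1), "float32", [2])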
Example #8
 def expected(x, conv_weight, in_bias, in_scale, channels):
     # use a fixed order of args so alpha equal check can pass
     args = [x, conv_weight, in_bias]
     in_bias = relay.expand_dims(in_bias, axis=1, num_newaxis=2)
     squeezed_scale = relay.squeeze(in_scale, axis=[1, 2])
     x = relay.nn.relu(x)
     in_bias = relay.divide(in_bias, relay.expand_dims(squeezed_scale, axis=1, num_newaxis=2))
     x = relay.add(x, in_bias)
     conv_weight = relay.multiply(
         conv_weight, relay.expand_dims(squeezed_scale, axis=1, num_newaxis=2))
     y = relay.nn.conv2d(x, conv_weight,
                         channels=channels,
                         kernel_size=(3, 3),
                         padding=(1, 1))
     return relay.Function(args, y)
Example #9
 def expected(x, conv_weight, out_scale, channels, blocking):
     # use a fixed order of args so alpha equal check can pass
     args = [x, conv_weight]
     if blocking:
         squeezed_scale = relay.squeeze(out_scale, axis=[0, 2, 3])
         conv_weight = relay.multiply(
             conv_weight,
             relay.reshape(squeezed_scale, (channels // blocking[1], 1, 1, 1, 1, blocking[1])),
         )
     else:
         squeezed_scale = relay.squeeze(out_scale, axis=[1, 2])
         conv_weight = relay.multiply(
             conv_weight, relay.expand_dims(squeezed_scale, axis=1, num_newaxis=3)
         )
     y = relay.nn.conv2d(
         x,
         conv_weight,
         channels=channels,
         kernel_size=(3, 3),
         padding=(1, 1),
         data_layout="NCHW{}c".format(blocking[0]) if blocking else "NCHW",
         kernel_layout="OIHW1i{}o".format(blocking[1]) if blocking else "OIHW",
     )
     return relay.Function(args, y)
Example #10
 def expected(x, conv_weight, in_bias, in_scale, channels):
     # use a fixed order of args so alpha equal check can pass
     args = [x, conv_weight, in_bias, in_scale]
     in_scale = relay.expand_dims(in_scale, axis=1, num_newaxis=2)
     in_bias = relay.expand_dims(in_bias, axis=1, num_newaxis=2)
     squeezed_scale = relay.squeeze(in_scale, axis=[1, 2])
     x = relay.nn.relu(x)
     in_bias = relay.divide(in_bias, relay.expand_dims(squeezed_scale, axis=1, num_newaxis=2))
     x = relay.add(x, in_bias)
     conv_weight = relay.multiply(
         conv_weight, relay.expand_dims(squeezed_scale, axis=1, num_newaxis=2))
     y = relay.nn.conv2d(x, conv_weight,
                         channels=channels,
                         kernel_size=(3, 3),
                         padding=(1, 1))
     return relay.Function(args, y)
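Here the scale is folded into the input channels: `expand_dims(squeezed_scale, axis=1, num_newaxis=2)` makes a `(C,)` scale broadcast as `(C, 1, 1)` along the I axis of the OIHW weight (and over NCHW activations). A NumPy sketch of the identity, with the 1x1 conv again as a channel matmul (shapes assumed):

import numpy as np

I_ch, O_ch = 3, 4
x = np.random.rand(1, I_ch, 5, 5).astype("float32")
w = np.random.rand(O_ch, I_ch).astype("float32")  # OI part of a 1x1 OIHW kernel
s = np.random.rand(I_ch).astype("float32")        # per-input-channel scale

def conv1x1(x, w):
    # 1x1 NCHW conv: multiply-accumulate over the channel axis
    return np.einsum("nchw,oc->nohw", x, w)

lhs = conv1x1(x * s[None, :, None, None], w)      # scale applied to the input
rhs = conv1x1(x, w * s[None, :])                  # scale folded into the kernel
np.testing.assert_allclose(lhs, rhs, rtol=1e-5)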
Example #11
    def expected(x, conv_weight, out_bias, out_scale, in_channels, channels,
                 blocking):
        # use a fixed order of args so alpha equal check can pass
        args = [x, conv_weight, out_bias]
        if not blocking:
            out_bias = relay.expand_dims(out_bias, axis=1, num_newaxis=2)
        squeezed_scale = relay.squeeze(out_scale, axis=[1, 2])

        def fold_conv_weight():
            if blocking:
                return relay.multiply(
                    conv_weight,
                    relay.reshape(
                        squeezed_scale,
                        (channels // blocking[1], 1, 1, 1, 1, blocking[1])),
                )
            else:
                return relay.multiply(
                    conv_weight,
                    relay.expand_dims(squeezed_scale, axis=1, num_newaxis=3))

        y1 = relay.nn.conv2d(
            x,
            fold_conv_weight(),
            channels=channels,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NCHW{}c".format(blocking[0]) if blocking else "NCHW",
            kernel_layout="OIHW1i{}o".format(blocking[1])
            if blocking else "OIHW",
        )
        y1 = relay.nn.relu(y1)
        y2 = relay.nn.conv2d(
            x,
            fold_conv_weight(),
            channels=channels,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NCHW{}c".format(blocking[0]) if blocking else "NCHW",
            kernel_layout="OIHW1i{}o".format(blocking[1])
            if blocking else "OIHW",
        )
        y2 = relay.nn.relu(y2)
        y = relay.add(y1, y2)
        return relay.Function(args, y)
Example #12
def build_relay_module(batch_size, input_size, hidden_size, time_steps, dense_dim):
    mod = tvm.IRModule()
    mod["lstm_layer"] = lstm_definition(batch_size, input_size, hidden_size, time_steps)
    mod["linear_layer"] = linear_layer_definition(batch_size, hidden_size, dense_dim)
    lstm_var = mod.get_global_var("lstm_layer")
    linear_var = mod.get_global_var("linear_layer")

    # now we build up our main function
    input_var = relay.var("input", shape=(batch_size, time_steps, input_size))
    init_hidden_var = relay.var("init_hidden", shape=(batch_size, hidden_size))
    init_cell_var = relay.var("init_cell", shape=(batch_size, hidden_size))
    i2h_weight_var = relay.var("i2h_weight", shape=(4*hidden_size, input_size))
    h2h_weight_var = relay.var("h2h_weight", shape=(4*hidden_size, hidden_size))
    lstm_bias_var = relay.var("lstm_bias", shape=(4*hidden_size,))
    linear_weight_var = relay.var("linear_weight", shape=(dense_dim, hidden_size))
    linear_bias_var = relay.var("linear_bias", shape=(dense_dim,))

    builder = relay.ScopeBuilder()
    state_var = builder.let("state", relay.Tuple([init_hidden_var, init_cell_var]))
    lstm_res = builder.let("lstm_res",
                           lstm_var(input_var, state_var,
                                    i2h_weight_var, h2h_weight_var,
                                    lstm_bias_var,
                                    # the keras model only gave one bias,
                                    # so set the other to zero
                                    # (hopefully this is correct)
                                    relay.zeros_like(lstm_bias_var)))
    final_hidden = builder.let("final_hidden",
                               relay.TupleGetItem(lstm_res, 1))
    # to match PT's semantics, we're undoing the reshape in LSTM :)
    reshape_hidden = builder.let("reshape_hidden",
                                 relay.squeeze(final_hidden, axis=[0]))
    linear_result = builder.let("linear_result",
                                linear_var(reshape_hidden,
                                           linear_weight_var, linear_bias_var))
    # finally do a softmax
    builder.ret(relay.nn.softmax(linear_result))
    main_func = relay.Function([input_var, init_hidden_var, init_cell_var,
                                i2h_weight_var, h2h_weight_var, lstm_bias_var,
                                linear_weight_var, linear_bias_var],
                               builder.get())
    mod["main"] = main_func
    return mod
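A hypothetical way to drive this builder (all dimensions assumed; `lstm_definition` appears in Example #22 and `linear_layer_definition` is analogous):

mod = build_relay_module(batch_size=1, input_size=16, hidden_size=32,
                         time_steps=4, dense_dim=10)
print(mod["main"])  # the composed LSTM -> linear -> softmax pipeline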
Example #13
    def verify_squeeze(shape, axis, oshape):
        x = relay.var("x", relay.TensorType(shape, "float32"))
        y = relay.var("y", relay.TensorType(axis, "float32"))
        z = relay.squeeze(x, relay.shape_of(y))
        func = run_infer_type(relay.Function([x, y], z))
        func2 = run_opt_pass(run_opt_pass(func, transform.DynamicToStatic()),
                             transform.InferType())

        zz = func2.body
        assert isinstance(zz, relay.Call)
        assert zz.op == relay.op.get("squeeze")
        assert "axis=" in zz.astext()
        assert zz.checked_type == relay.ty.TensorType(oshape, "float32")

        x_data = np.random.uniform(low=-1, high=1,
                                   size=shape).astype("float32")
        y_data = np.random.uniform(low=-1, high=1, size=axis).astype("float32")
        ref_res = np.squeeze(x_data, axis)
        verify_func(func2, [x_data, y_data], ref_res)
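Note that `y` exists only so `shape_of(y)` can carry the axes: the values of `y`'s shape become the squeeze axes, and DynamicToStatic then bakes them into a static `squeeze`. A hypothetical invocation (shapes assumed, not from the original test):

verify_squeeze((1, 3, 1), (2,), (1, 3))  # y has shape (2,), so axis 2 of x is squeezed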
Example #14
 def expected(x, conv_weight, out_bias, out_scale, channels):
     # use a fixed order of args so alpha equal check can pass
     args = [x, conv_weight, out_bias]
     out_bias = relay.expand_dims(out_bias, axis=1, num_newaxis=2)
     squeezed_scale = relay.squeeze(out_scale, axis=[1, 2])
     def fold_conv_weight():
         return relay.multiply(
             conv_weight,
             relay.expand_dims(squeezed_scale, axis=1, num_newaxis=3))
     y1 = relay.nn.conv2d(x, fold_conv_weight(),
                         channels=channels,
                         kernel_size=(3, 3),
                         padding=(1, 1))
     y1 = relay.nn.relu(y1)
     y2 = relay.nn.conv2d(x, fold_conv_weight(),
                         channels=channels,
                         kernel_size=(3, 3),
                         padding=(1, 1))
     y2 = relay.nn.relu(y2)
     y = relay.add(y1, y2)
     return relay.Function(args, y)
Example #16
    def expected(x, conv_weight, out_bias, out_scale, channels):
        # use a fixed order of args so alpha equal check can pass
        args = [x, conv_weight, out_bias]
        squeezed_scale = relay.squeeze(out_scale, axis=[1, 2])
        conv_weight = relay.multiply(
            conv_weight,
            relay.expand_dims(squeezed_scale, axis=1, num_newaxis=3))

        y = relay.nn.conv2d(
            x,
            conv_weight,
            channels=channels,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NCHW",
            kernel_layout="OIHW",
        )

        out_bias = relay.multiply(out_bias, squeezed_scale)
        y = relay.nn.bias_add(y, out_bias)
        y = relay.nn.relu(y)
        return relay.Function(args, y)
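This variant also pushes the scale through `bias_add` and `relu`, which is only valid because the scale is positive. A scalar NumPy sketch of that algebra (the positivity assumption is what FoldScaleAxis-style rewrites rely on here):

import numpy as np

y = np.random.randn(4).astype("float32")        # stand-in for the conv output
b = np.random.randn(4).astype("float32")        # per-channel bias
s = np.random.rand(4).astype("float32") + 0.1   # strictly positive scale
lhs = np.maximum(y + b, 0) * s                  # bias, relu, then scale
rhs = np.maximum(y * s + b * s, 0)              # scale folded into weight and bias
np.testing.assert_allclose(lhs, rhs, rtol=1e-5, atol=1e-6)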
Example #17
 def before():
     x = relay.var("x", shape=(10, 20))
     y = relay.add(x, relay.const(1, "float32"))
     z = relay.exp(y)
     w = relay.squeeze(z)
     return relay.Function([x], w)
Example #18
    def _execute(self):
        self.node_dict = {}
        # self.node_dict['1'] = relay.const(np.zeros((1, 128)), dtype='int32')
        gelu_a = relay.var('gelu_a', shape=())
        gelu_b = relay.var('gelu_b', shape=())
        gelu_c = relay.var('gelu_c', shape=())
        gelu_d = relay.var('gelu_d', shape=())
        gelu_e = relay.var('gelu_e', shape=())

        self.node_dict['1'] = relay.var('input.1', shape=(1,128), dtype='int32')
        self.node_dict['2'] = relay.var('input.2', shape=(1,128), dtype='int32')
        for gnode in self.graph:
            name = gnode['name']
            op_type = gnode['op_type']
            attrs = gnode['attrs']
            del attrs['A_shape']
            del attrs['O_shape']

            inputs = gnode['inputs']

            if op_type == 'Const':
                arr = np.zeros(attrs['shape'], dtype=np.int32)
                y =  relay.const(arr, dtype='int32')

            elif op_type == 'expand_dims':
                x = get_input(self.node_dict, self.params, inputs[0])
                y = relay.expand_dims(x, attrs['axis'], attrs['num_newaxis'])

            elif op_type == 'reshape':
                x = get_input(self.node_dict, self.params, inputs[0])
                y = relay.reshape(x, attrs['newshape'])
            elif op_type == 'take':
                data = get_input(self.node_dict, self.params, inputs[0])
                indices = get_input(self.node_dict, self.params, inputs[1])
                y = relay.take(data, indices, axis=attrs['axis'][0], mode=attrs['mode'])
            elif op_type == 'one_hot':
                x = get_input(self.node_dict, self.params, inputs[0])
                cc1 = get_input(self.node_dict, self.params, inputs[1])
                cc2 = get_input(self.node_dict, self.params, inputs[2])
                y = relay.one_hot(x, cc1, cc2, **attrs)
            elif op_type == 'strided_slice':
                x = get_input(self.node_dict, self.params, inputs[0])
                y = relay.strided_slice(x, **attrs)
            elif op_type == 'mean':
                x = get_input(self.node_dict, self.params, inputs[0])
                y = relay.mean(x, axis=attrs['axis'],
                        exclude=attrs['exclude'],
                        keepdims=attrs['keepdims'])
            elif op_type == 'nn.dense':
                x = get_input(self.node_dict, self.params, inputs[0])
                weight = get_input(self.node_dict, self.params, inputs[1])
                y = relay.nn.dense(x, weight, units=attrs['units'][0])
            elif op_type == 'add':
                x1 = get_input(self.node_dict, self.params, inputs[0])
                x2 = get_input(self.node_dict, self.params, inputs[1])
                y = relay.add(x1, x2)
            elif op_type == 'subtract':
                x1 = get_input(self.node_dict, self.params, inputs[0])
                x2 = get_input(self.node_dict, self.params, inputs[1])
                y = relay.subtract(x1, x2)
            elif op_type == 'multiply':
                x1 = get_input(self.node_dict, self.params, inputs[0])
                x2 = get_input(self.node_dict, self.params, inputs[1])
                y = relay.multiply(x1, x2)
            elif op_type == 'power':
                x1 = get_input(self.node_dict, self.params, inputs[0])
                x2 = get_input(self.node_dict, self.params, inputs[1])
                y = relay.power(x1, x2)
            elif op_type == 'transpose':
                x = get_input(self.node_dict, self.params, inputs[0])
                y = relay.transpose(x, **attrs)
            elif op_type == 'tanh':
                x = get_input(self.node_dict, self.params, inputs[0])
                y = relay.tanh(x)
            elif op_type == 'squeeze':
                x = get_input(self.node_dict, self.params, inputs[0])
                y = relay.squeeze(x, **attrs)
            elif op_type == 'nn.batch_matmul':
                x1 = get_input(self.node_dict, self.params, inputs[0])
                x2 = get_input(self.node_dict, self.params, inputs[1])
                y = relay.nn.batch_matmul(x1, x2)
            elif op_type == 'nn.softmax':
                x = get_input(self.node_dict, self.params, inputs[0])
                y = relay.nn.softmax(x, **attrs)
            elif op_type == 'gelu':
                x = get_input(self.node_dict, self.params, inputs[0])
                y = x * gelu_a * (gelu_b + relay.tanh(
                    gelu_c * (x + gelu_d * relay.power(x, gelu_e))))
            else:
                import pdb; pdb.set_trace()
                print( 'not supported op %s ' % op_type)
            self.node_dict[name] = y

        output_name = self.output_node_ids[0]
        output = self.node_dict[output_name]

        inputs = relay.analysis.free_vars(output)
        # inputs = [self.node_dict['1'], self.node_dict['2']]
        func = relay.Function(inputs, output)
        mod = tvm.IRModule()
        mod['main'] = func

        with relay.build_config(opt_level=0):
            graph, lib, params = relay.build(mod, 'llvm', params={})
        self.m = graph_runtime.create(graph, lib, tvm.cpu())
Example #19
 def get_graph(x_shape, axis):
     x = relay.var("x", shape=(x_shape), dtype="float32")
     out = relay.squeeze(x, axis=axis)
     f = relay.Function([x], out)
     return f, {"x": x_shape}, []
 def fold_conv_weight():
     squeezed_scale = relay.squeeze(out_scale, axis=[1, 2])
     return relay.multiply(
         conv_weight,
         relay.expand_dims(squeezed_scale, axis=1, num_newaxis=3))
Example #21
def test_squeeze_grad():
    data = relay.var("data", shape=(2, 1, 1, 3, 4, 1), dtype="float64")
    fwd_func = relay.Function([data], relay.squeeze(data))
    fwd_func_subset = relay.Function([data], relay.squeeze(data, axis=[1, -1]))
    check_grad(fwd_func)
    check_grad(fwd_func_subset)
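The gradient of `squeeze` is just the upstream gradient reshaped back to the input shape, which is what `check_grad` verifies numerically here. A NumPy sketch of that identity:

import numpy as np

x = np.random.rand(2, 1, 1, 3, 4, 1)
dy = np.ones(np.squeeze(x).shape)  # upstream gradient, shape (2, 3, 4)
dx = dy.reshape(x.shape)           # gradient w.r.t. x has the input's shape
assert dx.shape == x.shape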
Example #22
def lstm_definition(batch_size,
                    input_size,
                    hidden_size,
                    time_steps,
                    time_axis=1):
    state_tensor_type = relay.TensorType((batch_size, hidden_size))
    state_tuple_type = relay.TupleType([state_tensor_type, state_tensor_type])

    input_var = relay.var("input", shape=(batch_size, time_steps, input_size))
    state_var = relay.var("state", type_annotation=state_tuple_type)
    i2h_weight_var = relay.var("i2h_weight",
                               shape=(4 * hidden_size, input_size))
    h2h_weight_var = relay.var("h2h_weight",
                               shape=(4 * hidden_size, hidden_size))
    i2h_bias_var = relay.var("i2h_bias", shape=(4 * hidden_size, ))
    h2h_bias_var = relay.var("h2h_bias", shape=(4 * hidden_size, ))

    # in this case, we are ignoring the state outputs
    builder = relay.ScopeBuilder()
    cell_var = builder.let(
        "lstm_cell", relay_lstm_cell(batch_size, input_size, hidden_size))
    splits = builder.let(
        "splits",
        relay.split(input_var, time_steps, time_axis).astuple())
    last_state = state_var
    seq_outs = []
    for i in range(time_steps):
        squeezed = builder.let(
            f"squeezed_{i}",
            relay.squeeze(relay.TupleGetItem(splits, i), axis=[time_axis]))
        cell_out = builder.let(
            f"cell_out_{i}",
            cell_var(squeezed, last_state, i2h_weight_var, h2h_weight_var,
                     i2h_bias_var, h2h_bias_var))
        new_seq_out = builder.let(f"seq_out_{i}",
                                  relay.TupleGetItem(cell_out, 0))
        seq_outs.append(new_seq_out)
        new_hidden = builder.let(f"state_update_{i}",
                                 relay.TupleGetItem(cell_out, 1))
        last_state = new_hidden

    stacked = builder.let("stacked", relay.stack(seq_outs, axis=time_axis))
    # finally reshape to match pytorch's semantics (one layer)
    reshape_hidden = builder.let(
        "final_hidden",
        relay.reshape(relay.TupleGetItem(last_state, 0),
                      (1, batch_size, hidden_size)))
    reshape_cell = builder.let(
        "final_cell",
        relay.reshape(relay.TupleGetItem(last_state, 1),
                      (1, batch_size, hidden_size)))
    builder.ret(relay.Tuple([stacked, reshape_hidden, reshape_cell]))

    ret_type = relay.TupleType([
        relay.TensorType((batch_size, time_steps, hidden_size)),
        relay.TensorType((1, batch_size, hidden_size)),
        relay.TensorType((1, batch_size, hidden_size))
    ])

    return relay.Function([
        input_var, state_var, i2h_weight_var, h2h_weight_var, i2h_bias_var,
        h2h_bias_var
    ],
                          builder.get(),
                          ret_type=ret_type)
Example #23
def test_squeeze_bad_axes_infer_type():
    n, t, d = 1, 4, 1
    x = relay.var("x", relay.TensorType((n, t, d), "float32"))
    y = relay.squeeze(x, axis=(1,))
    yy = relay.ir_pass.infer_type(y)
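Axis 1 has extent 4, so type inference should reject this squeeze; a harness would typically assert the failure along these lines (a sketch; the exact exception type for this old TVM API is an assumption):

import pytest
import tvm

with pytest.raises(tvm.TVMError):
    relay.ir_pass.infer_type(relay.squeeze(x, axis=(1,)))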
Example #24
 def fold_conv_weight():
     squeezed_scale = relay.squeeze(out_scale, axis=[1, 2])
     return relay.multiply(
         conv_weight,
         relay.expand_dims(squeezed_scale, axis=1, num_newaxis=3))
Example #25
def test_squeeze_bad_axes_infer_type():
    (n, t, d) = (1, 4, 1)
    x = relay.var('x', relay.TensorType((n, t, d), 'float32'))
    y = relay.squeeze(x, axis=(1,))
    yy = run_infer_type(y)
Example #27
 def verify_squeeze(shape, dtype, axis):
     x = relay.var("x", relay.TensorType(shape, dtype))
     z = relay.squeeze(x, axis=axis)
     func = relay.Function([x], z)
     x_data = np.random.random_sample(shape).astype(dtype)
     verify_results(func, [x_data], "test_squeeze", rtol=1e-5, atol=1e-5)
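Hypothetical invocations (shapes assumed; `verify_results` comes from the surrounding test utilities, not this snippet):

verify_squeeze((1, 3, 1, 5), "float32", (2,))
verify_squeeze((1, 3, 2, 5), "float32", None)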
Example #29
 def test_squeeze(x_shape, axis):
     x = relay.var('x', shape=x_shape, dtype='float32')
     out = relay.squeeze(x, axis=axis)
     f = relay.Function([x], out)
     return f, {'x': x_shape}