Example #1
import numpy as np
from tvm import relay
from tvm.relay import op


def test_mul_param():
    # Elementwise multiply with broadcasting: the (1, 10) operand
    # broadcasts against (10, 10) along the first axis.
    x = relay.var('x', shape=(10, 10))
    y = relay.var('y', shape=(1, 10))
    func = relay.Function([x, y], op.multiply(x, y))
    x_data = np.random.rand(10, 10).astype('float32')
    y_data = np.random.rand(1, 10).astype('float32')
    check_eval(func, [x_data, y_data], x_data * y_data)
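check_eval is a test helper defined elsewhere in the file; a minimal sketch of what it plausibly does, assuming the Relay debug interpreter and a NumPy tolerance check (the helper and its tolerance are assumptions, not TVM API):

import numpy as np
from tvm import relay

def check_eval(func, args, expected, rtol=1e-5):
    # Evaluate the function on the Relay debug interpreter and compare
    # the result with the expected NumPy array.
    result = relay.create_executor(kind="debug").evaluate(func)(*args)
    np.testing.assert_allclose(result.numpy(), expected, rtol=rtol)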
Example #2
def _multiply(children, attrs, odtype='float32'):
    # A single child means the second operand arrived as a scalar
    # attribute; two children is an ordinary elementwise multiply.
    if len(children) == 1:
        left = children[0]
        scalar = attrs.get_float('scalar')
        right = relay.const(scalar, dtype=odtype)
    else:
        assert len(children) == 2
        left = children[0]
        right = children[1]

    return op.multiply(left, right)
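A hypothetical call exercising the single-child path above; the Attrs stub stands in for the frontend's attribute object and is purely illustrative:

from tvm import relay

class Attrs:
    def get_float(self, key):
        return 2.0  # pretend the node carried scalar=2.0

x = relay.var('x', shape=(4,))
expr = _multiply([x], Attrs())  # => multiply(x, const(2.0))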
Example #3
    def compute(self, input_size, hidden_size, output_size):
        # Build the forward step of a character-level RNN: one-hot the
        # input index, concatenate it with the category and hidden
        # state, and return (new hidden, predicted index, stop flag).
        self.category_var = category = relay.var('category',
                                                 shape=(1, data.N_CATEGORIES))
        self.inp_topi_var = inp_topi = relay.var('input',
                                                 shape=(),
                                                 dtype='int32')
        self.hidden_var = hidden = relay.var('hidden', shape=(1, hidden_size))
        self.hidden = initialize(self.hidden_var)
        n_letter = relay.const(data.N_LETTERS)
        one_diag = relay.const(np.diag(np.ones(58)).astype('float32'))
        boxed_one = relay.const(np.array([1]).astype('int32'))
        inp = op.take(one_diag, op.multiply(boxed_one, inp_topi), axis=0)
        combined = op.concatenate(
            [op.concatenate([category, inp], axis=1), hidden], axis=1)
        hidden = self.linear(data.N_CATEGORIES + input_size + hidden_size,
                             hidden_size,
                             combined,
                             name='i2h')
        output = self.linear(data.N_CATEGORIES + input_size + hidden_size,
                             output_size,
                             combined,
                             name='i2o')
        output_combined = op.concatenate([hidden, output], axis=1)
        output = self.linear(hidden_size + output_size,
                             output_size,
                             output_combined,
                             name='o2o')
        # output = op.nn.dropout(output, 0.1)  # dropout attributes have not been registered
        output = op.nn.log_softmax(output, axis=1)
        topi = op.argmax(output)
        body = relay.Tuple([
            hidden, topi,
            op.equal(topi, op.subtract(n_letter, relay.const(1)))
        ])
        fwd_para = [self.category_var, self.inp_topi_var, self.hidden_var]
        fwd_func = relay.Function(fwd_para, body)
        self.fwd = relay.Var('fwd')

        max = relay.var('max', shape=(), dtype='int32')
        inp_para = [max] + [copy_var(v) for v in fwd_para]
        fwd_res = self.fwd(*inp_para[1:])
        fwd_res_0 = relay.TupleGetItem(fwd_res, 0)
        fwd_res_1 = relay.TupleGetItem(fwd_res, 1)
        fwd_res_2 = relay.TupleGetItem(fwd_res, 2)
        else_else_branch = self.prelude.cons(
            fwd_res_1,
            self.recurse(op.subtract(max, relay.const(1)), inp_para[1],
                         fwd_res_1, fwd_res_0))
        else_branch = relay.If(fwd_res_2, self.prelude.nil(), else_else_branch)
        body = relay.If(op.equal(max, relay.const(0)), self.prelude.nil(),
                        else_branch)
        return inp_para, relay.Let(self.fwd, fwd_func, body), None
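The closing relay.Let(self.fwd, fwd_func, body) is the standard Relay idiom for making a locally defined function callable from a following expression; a minimal self-contained sketch of the same pattern:

from tvm import relay
from tvm.relay import op

# Bind a function to a Var with Let so the body can call it by name.
f = relay.Var('f')
n = relay.var('n', shape=(), dtype='int32')
double = relay.Function([n], op.add(n, n))
expr = relay.Let(f, double, f(relay.const(21)))  # binds f, then calls it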
Example #4
def aten_mul(inputs, attributes, scope):
    # Dispatch PyTorch's aten::mul to whichever backend is active:
    # TensorRT, TVM/Relay, or plain eager evaluation.
    # print_inputs(inputs)
    lfs, rfs = inputs
    ctx = current_context()
    net = ctx.network
    if ctx.is_tensorrt and has_trt_tensor(inputs):
        output = _scale_or_elementwise(net, lfs, rfs, "mul", scope)
        output.name = scope
        return [output]
    elif ctx.is_tvm and has_tvm_tensor(inputs):
        lfs, rfs = _tvm_to_const([lfs, rfs])
        return [_op.multiply(lfs, rfs)]
    # No tensor backend is active: fall back to eager multiplication.
    return [lfs * rfs]
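has_trt_tensor, has_tvm_tensor, _tvm_to_const, and _scale_or_elementwise are project-local helpers; a plausible sketch of the TVM-side predicate, assuming a "TVM tensor" simply means a Relay expression:

from tvm import relay

def has_tvm_tensor(inputs):
    # True if any input is already a Relay expression rather than a
    # plain Python number or NumPy array.
    return any(isinstance(i, relay.expr.Expr) for i in inputs)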
Example #5
def _mx_masked_softmax(inputs, attrs):
    # MXNet's masked_softmax: one input is a plain temperature softmax;
    # with a mask, masked-out scores are filled with a large negative
    # value before the softmax and the probabilities are re-masked.
    assert len(inputs) == 1 or len(inputs) == 2
    axis = attrs.get_int("axis")
    temperature = attrs.get_float("temperature")
    if len(inputs) == 1:
        result = _op.nn.softmax(inputs[0] / _expr.const(temperature),
                                axis=axis)
    else:
        neg = -1e18
        att_score, mask = inputs
        att_score_dtype = _infer_type(att_score).checked_type.dtype
        if att_score_dtype == "float16":
            # -1e18 is not representable in float16; use -1e4 instead.
            neg = -1e4
        temp = _op.where(mask, att_score, _expr.const(neg))
        result = _op.multiply(
            _op.nn.softmax(temp / _expr.const(temperature), axis=axis),
            mask.astype("float32"))
    return result
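For reference, the masked branch is numerically equivalent to this NumPy sketch (an illustration of the semantics, not frontend code):

import numpy as np

def masked_softmax_np(att_score, mask, axis=-1, temperature=1.0, neg=-1e18):
    # Fill masked positions with a large negative score, softmax,
    # then zero the masked probabilities out again.
    scores = np.where(mask, att_score, neg) / temperature
    e = np.exp(scores - scores.max(axis=axis, keepdims=True))
    return e / e.sum(axis=axis, keepdims=True) * mask.astype('float32')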
Example #6
def test_pass_profiler():
    x, y, z = [tvm.relay.var(c, shape=(3, 4), dtype="float32") for c in "xyz"]
    e1 = op.add(x, y)
    e2 = op.subtract(x, z)
    e3 = op.multiply(e1, e1 / e2)
    mod = tvm.IRModule.from_expr(e3 + e2)

    tvm.transform.enable_pass_profiling()

    mod = tvm.relay.transform.AnnotateSpans()(mod)
    mod = tvm.relay.transform.ToANormalForm()(mod)
    mod = tvm.relay.transform.InferType()(mod)

    profiles = tvm.transform.render_pass_profiles()
    assert "AnnotateSpans" in profiles
    assert "ToANormalForm" in profiles
    assert "InferType" in profiles

    tvm.transform.clear_pass_profiles()
    tvm.transform.disable_pass_profiling()
Example #7
def get_test_model():
    x, y, z = [tvm.relay.var(c, shape=(3, 4), dtype="float32") for c in "xyz"]
    e1 = op.add(x, y)
    e2 = op.subtract(x, z)
    e3 = op.multiply(e1, e1 / e2)
    return tvm.IRModule.from_expr(e3 + e2)
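A hypothetical way to compile and run the returned module on CPU, using the standard relay.build / GraphModule flow (only get_test_model comes from this example; the rest is stock TVM, and NDArray.numpy() assumes a reasonably recent version):

import numpy as np
import tvm
from tvm import relay
from tvm.contrib import graph_executor

lib = relay.build(get_test_model(), target="llvm")
gmod = graph_executor.GraphModule(lib["default"](tvm.cpu()))
for name in "xyz":
    gmod.set_input(name, np.random.rand(3, 4).astype("float32"))
gmod.run()
out = gmod.get_output(0).numpy()  # (x + y) * ((x + y) / (x - z)) + (x - z)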