Example #1
    def expected(x, w1, w2, w3, w4):
        # use a fixed order of args so the alpha-equal check can pass
        args = [x, w1, w2, w3, w4]
        x_stacked = relay.stack((x, x, x), axis=0)
        w = relay.stack((w1, w2, w4), axis=0)
        y = relay.nn.batch_matmul(x_stacked, w)
        (y1, y2, y4) = relay.split(y, 3)
        y1 = relay.squeeze(y1, [0])
        y2 = relay.squeeze(y2, [0])
        y4 = relay.squeeze(y4, [0])

        # y3 cannot be combined
        y3 = relay.nn.dense(x, w3)

        y = relay.Tuple((y1, y2, y3, y4))
        return relay.Function(args, y)
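The expected() functions in these examples describe the graph after TVM's CombineParallelDense pass has merged parallel dense branches into a single batch_matmul. Below is a minimal sketch, not taken from the original test, of a matching "before" graph and one way the pass might be invoked; the shapes and the min_num_branches value are assumptions for illustration.

import tvm
from tvm import relay

def before(x, w1, w2, w3, w4):
    # four dense ops share the same input; three have identical weight shapes
    args = [x, w1, w2, w3, w4]
    y1 = relay.nn.dense(x, w1)
    y2 = relay.nn.dense(x, w2)
    y3 = relay.nn.dense(x, w3)  # different output units, so it is not combined
    y4 = relay.nn.dense(x, w4)
    return relay.Function(args, relay.Tuple((y1, y2, y3, y4)))

x = relay.var("x", shape=(3, 4))
w1 = relay.var("w1", shape=(5, 4))
w2 = relay.var("w2", shape=(5, 4))
w3 = relay.var("w3", shape=(6, 4))  # mismatched shape keeps y3 separate
w4 = relay.var("w4", shape=(5, 4))

mod = tvm.IRModule.from_expr(before(x, w1, w2, w3, w4))
mod = relay.transform.InferType()(mod)
mod = relay.transform.CombineParallelDense(min_num_branches=2)(mod)
print(mod)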
Example #2
    def expected(x, w1, w2, b1, b2, is_2d_bias):
        args = [x, w1, w2, b1, b2]
        x_stacked = relay.stack((x, x), axis=0)
        w = relay.stack((w1, w2), axis=0)
        y = relay.nn.batch_matmul(x_stacked, w)

        if not is_2d_bias:
            b1 = relay.expand_dims(b1, 0)
            b2 = relay.expand_dims(b2, 0)

        b = relay.stack((b1, b2), axis=0)
        y = relay.add(y, b)
        (y1, y2) = relay.split(y, 2)
        y1 = relay.squeeze(y1, [0])
        y2 = relay.squeeze(y2, [0])
        y = relay.Tuple((y1, y2))
        return relay.Function(args, y)
Example #3
    def verify_stack(dshapes, axis):
        x_data = [np.random.normal(size=shape).astype('int32') for shape in dshapes]
        ref_res = np.stack(x_data, axis=axis)

        args = []
        for data in x_data:
            args.append(relay.const(data))
        call = relay.stack(relay.Tuple(args), axis)
        call_val = run_as_python(call)
        assert_tensor_value(call_val, ref_res)
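A hedged sketch of how the enclosing test might drive the helper above; the specific shape/axis combinations are assumptions:

    verify_stack([(2,), (2,), (2,)], -1)
    verify_stack([(2,), (2,), (2,)], 0)
    verify_stack([(2, 2, 4), (2, 2, 4), (2, 2, 4)], -1)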
Example #4
    def expected(x, w1, w2, b1, b2, scale1, scale2, newshape):
        args = [x, w1, w2, b1, b2, scale1, scale2]
        x_stacked = relay.stack((x, x), axis=0)
        w = relay.stack((w1, w2), axis=0)
        y = relay.nn.batch_matmul(x_stacked, w)
        b1 = relay.expand_dims(b1, 0)
        b2 = relay.expand_dims(b2, 0)
        b = relay.stack((b1, b2), axis=0)
        y = relay.add(y, b)
        scale1 = relay.expand_dims(scale1, 0)
        scale2 = relay.expand_dims(scale2, 0)
        scale = relay.stack((scale1, scale2), axis=0)
        y = relay.multiply(y, scale)
        (y1, y2) = relay.split(y, 2)
        y1 = relay.squeeze(y1, [0])
        y2 = relay.squeeze(y2, [0])
        y1 = relay.reshape(y1, newshape=newshape)
        y2 = relay.reshape(y2, newshape=newshape)
        y = relay.Tuple((y1, y2))
        return relay.Function(args, y)
Example #5
def test_let_bound_constants():
    """This tests for an ICHECK failure for ill-formed IR with let-bound constants"""

    x = relay.var("x", shape=(3,), dtype="int32")
    y = relay.take(x, relay.const(0))
    z = relay.const(1)

    f = relay.Function([x], relay.stack((z, y), axis=0))
    mod = IRModule.from_expr(f)

    compiler = VMCompiler()
    compiler.optimize(mod, target="llvm")
Example #6
def verify_any_stack(data_shape, np_dshape, num_data, axis):
    mod = tvm.IRModule()
    dtype = "float32"
    inputs = []
    for i in range(num_data):
        inputs.append(relay.var("data{}".format(i), shape=data_shape, dtype=dtype))
    y = relay.stack(inputs, axis)
    mod["main"] = relay.Function(inputs, y)
    np_inputs = []
    for _ in range(num_data):
        np_inputs.append(np.random.uniform(size=np_dshape).astype(dtype))
    ref_res = np.stack(np_inputs, axis)
    check_result(np_inputs, mod, ref_res)
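A hedged sketch of calling the helper above with symbolic dimensions; relay.Any() stands in for unknown sizes, and the concrete shapes, counts, and axes are assumptions:

any_dims = (relay.Any(), relay.Any())
verify_any_stack(any_dims, (1, 2), 3, 0)
verify_any_stack(any_dims, (1, 2), 4, -1)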
Example #7
    def verify_stack(dshapes, axis):
        y = []
        for shape in dshapes:
            y.append(relay.var('input', relay.TensorType(shape, 'float32')))
        x = relay.Tuple(y)
        z = relay.stack(x, axis=axis)
        func = relay.Function(y, z)
        x_data = [np.random.normal(size=shape).astype('float32') for shape in dshapes]
        ref_res = np.stack(x_data, axis=axis)
        for (target, ctx) in tvm.testing.enabled_targets():
            for kind in ['graph', 'debug']:
                intrp = relay.create_executor(kind, ctx=ctx, target=target)
                op_res = intrp.evaluate(func)(*x_data)
                tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-05)
Example #8
def lstm_definition(batch_size, input_size, hidden_size, time_steps,
                    time_axis=1):
    state_tensor_type = relay.TensorType((batch_size, hidden_size))
    state_tuple_type = relay.TupleType([state_tensor_type, state_tensor_type])

    input_var = relay.var("input", shape=(batch_size, time_steps, input_size))
    state_var = relay.var("state", type_annotation=state_tuple_type)
    i2h_weight_var = relay.var("i2h_weight", shape=(4*hidden_size, input_size))
    h2h_weight_var = relay.var("h2h_weight", shape=(4*hidden_size, hidden_size))
    i2h_bias_var = relay.var("i2h_bias", shape=(4*hidden_size,))
    h2h_bias_var = relay.var("h2h_bias", shape=(4*hidden_size,))

    # in this case, we are ignoring the state outputs
    builder = relay.ScopeBuilder()
    cell_var = builder.let("lstm_cell", relay_lstm_cell(batch_size, input_size, hidden_size))
    splits = builder.let("splits", relay.split(input_var, time_steps, time_axis).astuple())
    last_state = state_var
    seq_outs = []
    for i in range(time_steps):
        squeezed = builder.let(f"squeezed_{i}", relay.squeeze(relay.TupleGetItem(splits, i), axis=[time_axis]))
        cell_out = builder.let(f"cell_out_{i}",
                               cell_var(squeezed, last_state,
                                        i2h_weight_var, h2h_weight_var,
                                        i2h_bias_var, h2h_bias_var))
        new_seq_out = builder.let(f"seq_out_{i}", relay.TupleGetItem(cell_out, 0))
        seq_outs.append(new_seq_out)
        new_hidden = builder.let(f"state_update_{i}", relay.TupleGetItem(cell_out, 1))
        last_state = new_hidden

    stacked = builder.let("stacked", relay.stack(seq_outs, axis=time_axis))
    # finally reshape to match pytorch's semantics (one layer)
    reshape_hidden = builder.let("final_hidden",
                                 relay.reshape(relay.TupleGetItem(last_state, 0),
                                               (1, batch_size, hidden_size)))
    reshape_cell = builder.let("final_cell",
                               relay.reshape(relay.TupleGetItem(last_state, 1),
                                             (1, batch_size, hidden_size)))
    builder.ret(relay.Tuple([stacked, reshape_hidden, reshape_cell]))

    ret_type = relay.TupleType([
        relay.TensorType((batch_size, time_steps, hidden_size)),
        relay.TensorType((1, batch_size, hidden_size)),
        relay.TensorType((1, batch_size, hidden_size))
    ])

    return relay.Function([input_var, state_var, i2h_weight_var, h2h_weight_var,
                           i2h_bias_var, h2h_bias_var],
                          builder.get(),
                          ret_type=ret_type)
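A hedged sketch of wrapping the definition above in a module and checking its inferred type; relay_lstm_cell is assumed to be defined in the same file, `import tvm` and `from tvm import relay` are assumed, and the sizes below are illustrative:

func = lstm_definition(batch_size=1, input_size=8, hidden_size=16, time_steps=4)
mod = tvm.IRModule.from_expr(func)
mod = relay.transform.InferType()(mod)
print(mod["main"].checked_type)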
Example #9
    def verify_stack(dshapes, axis):
        y = []
        for shape in dshapes:
            y.append(relay.var("input", relay.TensorType(shape, "float32")))
        x = relay.Tuple(y)
        z = relay.stack(x, axis=axis)

        func = relay.Function(y, z)
        x_data = [np.random.normal(size=shape).astype("float32") for shape in dshapes]
        ref_res = np.stack(x_data, axis=axis)

        for target, ctx in ctx_list():
            for kind in ["graph", "debug"]:
                intrp = relay.create_executor(kind, ctx=ctx, target=target)
                op_res = intrp.evaluate(func)(*x_data)
                tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)
Example #11
def test_stack_grad():
    args = [relay.var(c, shape=(2, 3, 4), dtype="float64") for c in "xyz"]
    fwd_func = relay.Function(args, relay.stack(args, axis=0))
    check_grad(fwd_func)
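For context, a hedged sketch of the imports this gradient test presumably relies on; check_grad numerically compares the analytical gradient of fwd_func against finite-difference estimates:

from tvm import relay
from tvm.relay.testing import check_grad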
Example #12
def get_net(batch_size, image_shape, num_classes, dtype):
    height = image_shape[1]
    width = image_shape[2]
    data_shape = (batch_size,) + image_shape
    net = relay.var("data", shape=data_shape, dtype=dtype)

    net = conv_3x3(net, 3, 64, "conv1_1")
    net = conv_3x3(net, 64, 64, "conv1_2")
    net = relay.nn.max_pool2d(net, pool_size=(2, 2), strides=(2, 2), ceil_mode=True)

    net = conv_3x3(net, 64, 128, "conv2_1")
    net = conv_3x3(net, 128, 128, "conv2_2")
    net = relay.nn.max_pool2d(net, pool_size=(2, 2), strides=(2, 2), ceil_mode=True)

    net = conv_3x3(net, 128, 256, "conv3_1")
    net = conv_3x3(net, 256, 256, "conv3_2")
    net = conv_3x3(net, 256, 256, "conv3_3")
    net = relay.nn.max_pool2d(net, pool_size=(2, 2), strides=(2, 2), ceil_mode=True)

    net = conv_3x3(net, 256, 512, "conv4_1")
    net = conv_3x3(net, 512, 512, "conv4_2")
    net = conv_3x3(net, 512, 512, "conv4_3")
    net = relay.nn.max_pool2d(net, pool_size=(2, 2), strides=(2, 2), ceil_mode=True)

    net = conv_3x3(net, 512, 512, "conv5_1")
    net = conv_3x3(net, 512, 512, "conv5_2")
    net = conv_3x3(net, 512, 512, "conv5_3")

    net = relay.nn.dropout(net, rate=0.5)

    net = conv_3x3(net, 512, 72, "conv6", activation=False)
    net = relay.transpose(net, (0, 2, 3, 1))

    num_class_probs = 9 * 3
    num_confidence_scores = 9
    #num_box_delta = 9 * 4
    pred_class_probs, pred_conf, pred_box_delta = relay.split(net,
            (num_class_probs, num_class_probs + num_confidence_scores),
            axis=-1)

    # Probability
    pred_class_probs = relay.reshape(pred_class_probs, (-1, 3))
    pred_class_probs = relay.nn.softmax(pred_class_probs)
    pred_class_probs = relay.reshape(pred_class_probs, (batch_size, -1, 3))

    # Confidence
    pred_conf = relay.sigmoid(pred_conf)
    pred_conf = relay.reshape(pred_conf, (batch_size, -1, 1))

    # Bbox_delta
    pred_box_delta = relay.reshape(pred_box_delta, (batch_size, -1, 4))
    delta_x, delta_y, delta_w, delta_h = relay.split(pred_box_delta, (1, 2, 3), axis=2)
    delta_x = relay.reshape(delta_x, (batch_size, -1))
    delta_y = relay.reshape(delta_y, (batch_size, -1))
    delta_w = relay.reshape(delta_w, (batch_size, -1))
    delta_h = relay.reshape(delta_h, (batch_size, -1))

    anchor_box = set_anchors(height, width)
    anchor_x = relay.Constant(tvm.nd.array(anchor_box[:, 0]))
    anchor_y = relay.Constant(tvm.nd.array(anchor_box[:, 1]))
    anchor_w = relay.Constant(tvm.nd.array(anchor_box[:, 2]))
    anchor_h = relay.Constant(tvm.nd.array(anchor_box[:, 3]))

    box_center_x = anchor_x + delta_x * anchor_w
    box_center_y = anchor_y + delta_y * anchor_h
    # box_width  = anchor_w * relay.exp(delta_w)
    # box_height = anchor_h * relay.exp(delta_h)
    box_width    = anchor_w + safe_exp(delta_w)
    box_height   = anchor_h + safe_exp(delta_h)

    xmins, ymins, xmaxs, ymaxs = bbox_transform(box_center_x, box_center_y, box_width, box_height)
    xmins = relay.minimum(relay.maximum(relay.const(0.0), xmins), relay.const(width - 1.0))
    ymins = relay.minimum(relay.maximum(relay.const(0.0), ymins), relay.const(height - 1.0))
    xmaxs = relay.maximum(relay.minimum(relay.const(width - 1.0), xmaxs), relay.const(0.0))
    ymaxs = relay.maximum(relay.minimum(relay.const(height - 1.0), ymaxs), relay.const(0.0))

    det_boxes = relay.stack(bbox_transform_inv(xmins, ymins, xmaxs, ymaxs), axis=-1)

    probs = relay.multiply(pred_class_probs, pred_conf)
    det_probs = relay.max(probs, axis=2)
    det_class = relay.argmax(probs, axis=2)

    out = relay.Tuple([det_boxes, det_probs, det_class])
    args = relay.analysis.free_vars(out)
    
    return relay.Function(args, out)
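A hedged sketch of building and compiling the network above; conv_3x3, set_anchors, safe_exp, bbox_transform, and bbox_transform_inv are assumed to be helpers defined elsewhere in the same file, and the input shape below is an assumption:

func = get_net(batch_size=1, image_shape=(3, 384, 1248), num_classes=3, dtype="float32")
mod = tvm.IRModule.from_expr(func)
with tvm.transform.PassContext(opt_level=3):
    lib = relay.build(mod, target="llvm")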