Example #1
def test_load_linear_regressor(m_):
    shape_dict = {"m": m_}
    m = pm.parameter("m")
    mu = pm.parameter(name="mu", default=1.0)
    x = pm.input("x", shape=(m))
    y = pm.input("y")
    w = pm.state("w", shape=(m))

    graph = pm.linear_regressor_train(x, w, y, mu, m)
    test_graph, input_info, out_info, keys = linear(m=m_, coarse=True)
    assert len(test_graph.nodes.keys()) == len(graph.nodes.keys())
    assert op_counts(test_graph) == op_counts(graph)

    shape_val_pass = pm.NormalizeGraph(shape_dict)
    new_graph = shape_val_pass(graph)
    test_res = new_graph(keys, input_info)
    np.testing.assert_allclose(test_res, out_info["w"])

    test_graph_lowered, input_info, new_out_info, keys = linear(m=m_)
    flatten_pass = pm.Lower({})
    test_flatten_pass = pm.Lower({})
    flattened_g = flatten_pass(new_graph)
    ref_lowered = test_flatten_pass(test_graph_lowered, {})
    assert len(ref_lowered.nodes.keys()) == len(flattened_g.nodes.keys())
    assert op_counts(ref_lowered) == op_counts(flattened_g)

    all_vals = flattened_g(keys, input_info)
    np.testing.assert_allclose(new_out_info["w"], all_vals)
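The helper op_counts used above is not part of this listing; a minimal sketch consistent with how it is called (comparing two graphs structurally) could tally node types, assuming graph.nodes maps names to node objects:

from collections import Counter

def op_counts(graph):
    # Count how many nodes of each type the graph contains so two graphs
    # can be compared structurally, independent of individual node names.
    return Counter(type(node).__name__ for node in graph.nodes.values())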
Example #2
def test_translate_elem_mul(x_shape):
    a = np.random.randint(-3, 3, x_shape)
    b = np.random.randint(-3, 3, x_shape)
    np_res = a * b
    graph = pm.Node("elem_mul")

    pm_a = pm.input(name="a", shape=x_shape, graph=graph)
    pm_b = pm.input(name="b", shape=x_shape, graph=graph)
    pm_o = pm.output(name="out", shape=x_shape, graph=graph)
    with graph:
        pm.elem_mul(pm_a, pm_b, pm_o)
    pm_res = graph("out", {"a": a, "b": b})
    np.testing.assert_allclose(pm_res, np_res)
Example #3
def test_gather1():
    axis = 1
    x = np.random.randn(5, 4, 3, 2).astype(np.float32)
    idx = np.array([0, 1, 3])

    with pm.Node(name="gather_op") as graph:
        data = pm.input(name="input", shape=x.shape)
        indices = pm.input(name="indices", shape=idx.shape)
        out = pm.gather(data, indices, axis=axis, name="res")

    pm_y = graph("res", {"input": x, "indices": idx})
    np_y = np.take(x, idx, axis=axis)
    np.testing.assert_allclose(np_y, pm_y)
Example #4
def test_translate_vmul(x_shape):
    a = np.random.randint(-3, 3, x_shape)
    b = np.random.randint(-3, 3, x_shape)
    np_res = a.dot(b)
    with pm.Node("vmul") as pm_graph:
        pm_a = pm.input(name="a", shape=x_shape)
        pm_b = pm.input(name="b", shape=x_shape)
        pm_o = pm.output(name="o", shape=x_shape)
        pm_s = pm.output(name="out")
        pm.elem_mul(pm_a, pm_b, pm_o)
        pm.reduce_sum(pm_o, pm_s, axes=(0,), keepdims=0)

    pm_res = pm_graph("out", {"a": a, "b": b})
    np.testing.assert_allclose(pm_res, np_res)
Example #5
def test_single_dim_op_slice():
    with pm.Node(name="elem3") as graph:
        m = pm.parameter(name="m")
        x = pm.input("x", shape=m)
        w = pm.state("w", shape=m)
        i = pm.index(0, m - 1, name="i")
        out = (w[i] * x[i])
        w[i] = (out[i] - w[i])

    m_ = 3
    x_ = np.random.randint(0, 10, m_)
    w_ = np.random.randint(0, 10, m_)

    coarse_eval = graph("w", x=x_, w=w_)
    np_result = x_ * w_ - w_
    np.testing.assert_allclose(coarse_eval, np_result)

    shape_pass = NormalizeGraph({"m": 3})
    graph_shapes = shape_pass(graph)
    shape_res = graph_shapes("w", x=x_, w=w_)

    np.testing.assert_allclose(shape_res, np_result)
    lower_pass = Lower({})
    lowered_graph = lower_pass(graph_shapes)
    input_info = {f"w/w({i},)": w_[i] for i in range(len(w_))}
    input_info.update({f"x/x({i},)": x_[i] for i in range(len(x_))})
    fine_grained_eval = lowered_graph("w/w(2,)", input_info)
    assert fine_grained_eval == np_result[2]
Example #6
def test_multi_dim():
    with pm.Node(name="elem4") as graph:
        m = pm.parameter(name="m")
        n = pm.parameter(name="n")
        x = pm.input("x", shape=(m, n))
        w = pm.state("w", shape=(m, n))
        i = pm.index(0, m - 1, name="i")
        j = pm.index(0, n - 1, name="j")
        w[i, j] = (w[i, j] * x[i, j])
    m_ = 3
    n_ = 4
    x_ = np.random.randint(0, 10, m_ * n_).reshape((m_, n_))
    w_ = np.random.randint(0, 10, m_ * n_).reshape((m_, n_))
    coarse_eval = graph("w", x=x_, w=w_)
    np_result = x_ * w_
    np.testing.assert_allclose(coarse_eval, np_result)
    shape_pass = NormalizeGraph({"m": m_, "n": n_})
    graph_shapes = shape_pass(graph)
    shape_res = graph_shapes("w", x=x_, w=w_)
    np.testing.assert_allclose(shape_res, np_result)
    lower_pass = Lower({})
    lowered_graph = lower_pass(graph_shapes)
    input_info = {}
    for i in range(m_):
        for j in range(n_):
            input_info[f"w/w({i}, {j})"] = w_[i, j]
            input_info[f"x/x({i}, {j})"] = x_[i, j]

    fine_grained_eval = lowered_graph("w/w(2, 3)", input_info)
    assert fine_grained_eval == np_result[2, 3]
Example #7
def test_multidim_sigmoid(m_):

    with pm.Node(name="logistic") as graph:
        m = pm.parameter(name="m")
        n = pm.parameter(name="n")
        x = pm.input("x", shape=(m))
        w = pm.state("w", shape=(m))
        i = pm.index(0, m - 1, name="i")
        o = pm.sigmoid(w[i] * x[i], name="out")
    x_ = np.random.randint(0, 10, m_).astype(np.float64)
    w_ = np.random.randint(0, 10, m_).astype(np.float64)
    shape_dict = {"m": m_}
    input_dict = {"x": x_, "w": w_}
    np_res = sigmoid((x_ * w_))

    coarse_eval = graph("out", input_dict)
    np.testing.assert_allclose(np_res, coarse_eval)
    lowered = set_shape_and_lower(graph, shape_dict)
    keys = [f"out/out({i},)" for i in range(m_)]

    x_ = np.random.randint(0, 10, m_).astype(np.float64)
    w_ = np.random.randint(0, 10, m_).astype(np.float64)
    input_dict = {}
    for i in range(m_):
        input_dict[f"x/x({i},)"] = x_[i]
        input_dict[f"w/w({i},)"] = w_[i]
    np_res = sigmoid((x_ * w_))

    lower_res = np.asarray(lowered(keys, input_dict)).reshape(np_res.shape)
    np.testing.assert_allclose(lower_res, np_res)
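The NumPy reference sigmoid used by this test (and by test_sigmoid further down) is not shown in this listing; a minimal element-wise sketch would be:

import numpy as np

def sigmoid(x):
    # Element-wise logistic function used as the NumPy reference.
    return 1.0 / (1.0 + np.exp(-x))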
Example #8
def test_translate_conv(x_shape, w_shape, params):
    shape_dict = {"n": x_shape[0], "c": x_shape[1], "ih": x_shape[2], "iw": x_shape[3],
                  "nf": w_shape[0], "kh": w_shape[2], "kw": w_shape[3],
                  "stride": params["stride"], "pad": params["pad"]}

    _, input_info, out_info, keys = conv(x_shape, w_shape, params, coarse=True, debug_matrix=False)

    n = pm.parameter(name="n")
    c = pm.parameter(name="ic")
    ih = pm.parameter(name="ih")
    iw = pm.parameter(name="iw")
    nf = pm.parameter(name="nf")
    kh = pm.parameter(name="kh")
    kw = pm.parameter(name="kw")
    x = pm.input(name="data", shape=(n, c, ih, iw))
    w = pm.state(name="w", shape=(nf, c, kh, kw))
    b = pm.state(name="bias", shape=(nf))
    stride = pm.parameter(name="stride")
    pad = pm.parameter(name="pad")
    out = pm.output(name="out")
    graph = pm.conv_bias(x, w, b, out, stride, pad)
    tinput_info = copy.deepcopy(input_info)
    res0 = graph("out", tinput_info)

    np.testing.assert_allclose(res0, out_info["out"])
Example #9
def test_multi_dim_op_slice():
    with pm.Node(name="elem2") as graph:
        m = pm.parameter(name="m")
        n = pm.parameter(name="n")
        mu = pm.parameter(name="mu", default=2.0)
        x = pm.input(name="x", shape=(m, n))
        w = pm.state(name="w", shape=(m, n))
        i = pm.index(0, m - 1, name="i")
        j = pm.index(0, n - 1, name="j")
        out = (x[i, j] * w[i, j]).set_name("w_out")
        w[i, j] = (mu * (out[i, j] - w[i, j]))
    m_ = 3
    n_ = 2
    x_ = np.random.randint(0, 10, m_ * n_).reshape((m_, n_))
    w_ = np.random.randint(0, 10, m_ * n_).reshape((m_, n_))
    coarse_eval = graph("w", x=x_, w=w_)
    np_result = (x_ * w_ - w_) * 2.0
    np.testing.assert_allclose(coarse_eval, np_result)
    shape_pass = NormalizeGraph({"m": m_, "n": n_})
    graph_shapes = shape_pass(graph)
    shape_res = graph_shapes("w", x=x_, w=w_)
    np.testing.assert_allclose(shape_res, np_result)
    lower_pass = Lower({})
    lowered_graph = lower_pass(graph_shapes)
    input_info = {}
    for i in range(m_):
        for j in range(n_):
            input_info[f"w/w({i}, {j})"] = w_[i, j]
            input_info[f"x/x({i}, {j})"] = x_[i, j]
    fine_grained_eval = lowered_graph("w/w(2, 1)", input_info)
    assert fine_grained_eval == np_result[2, 1]
Example #10
def test_conv2d_transpose_shapes(inp_shape, wgt_shape, stride, pad):
    groups = 1
    dilation = 1
    out_pad = 0
    inp = np.random.randint(-15, 15, np.prod(inp_shape)).reshape(inp_shape)
    wgt = np.random.randint(-15, 15, np.prod(wgt_shape)).reshape(wgt_shape)
    torch_res = conv2d_transpose(torch.from_numpy(inp), torch.from_numpy(wgt), stride, pad)
    info = {
        'data': inp,
        'w': wgt,
    }
    N, C, H, W = inp.shape

    x = pm.input(name="data", shape=inp_shape)
    w = pm.state(name="w", shape=wgt_shape)
    out = pm.output(name="out")

    graph = pm.conv_transpose(x, w, out, stride, pad)
    tres = graph("out", info)

    np.testing.assert_allclose(tres, torch_res.numpy())
Example #11
def test_single_dim_norm():
    with pm.Node(name="elem1") as graph:
        m = pm.parameter("m")
        x = pm.input("x", shape=m)
        w = pm.state("w", shape=m)
        i = pm.index(0, m - 1, name="i")
        w[i] = (w[i] * x[i])
    x_ = np.random.randint(0, 10, 3)
    w_ = np.random.randint(0, 10, 3)
    coarse_eval = graph("w", x=x_, w=w_)

    np_result = x_ * w_
    np.testing.assert_allclose(coarse_eval, np_result)
    shape_pass = NormalizeGraph({"m": 3})
    graph_shapes = shape_pass(graph)

    shape_res = graph_shapes("w", x=x_, w=w_)
    np.testing.assert_allclose(shape_res, np_result)
    lower_pass = Lower({})
    lowered_graph = lower_pass(graph_shapes)
    input_info = {f"w/w({i},)": w_[i] for i in range(len(w_))}
    input_info.update({f"x/x({i},)": x_[i] for i in range(len(x_))})
    fine_grained_eval = lowered_graph("w/w(1,)", input_info)

    assert fine_grained_eval == np_result[1]

    pb_path = f"{OUTPATH}/{graph.name}.srdfg"
    pm.pb_store(lowered_graph, OUTPATH)
    loaded_node = pm.pb_load(pb_path)
    input_info = {f"w/w({i},)": w_[i] for i in range(len(w_))}
    input_info.update({f"x/x({i},)": x_[i] for i in range(len(x_))})
    fine_grained_eval = loaded_node("w/w(1,)", input_info)
    assert fine_grained_eval == np_result[1]
Example #12
def create_svm_wifi(features, locations, lr=0.0001, deltav=1, train_size=7703):
    with pm.Node(name="svm_wifi") as graph:
        learning_rate = pm.parameter("learning_rate", default=lr)
        delta = pm.parameter("delta", default=deltav)
        n_features = pm.parameter("n_features", default=features)
        n_locations = pm.parameter("n_locations", default=locations)
        x_train = pm.input("x_train", shape=(n_features, ))
        y_train = pm.input("y_train", shape=(n_locations, ))
        y_train_inv = pm.input("y_train_inv", shape=(n_locations, ))
        weights = pm.state("weights", shape=(n_features, n_locations))

        i = pm.index(0, n_features - 1, name="i")
        j = pm.index(0, n_locations - 1, name="j")

        scores = pm.sum([i], (weights[i, j] * x_train[i]), name="scores")
        correct_class_score = pm.sum([j], (scores[j] * y_train[j]),
                                     name="correct_class_score")

        h = ((scores[j] - correct_class_score + delta).set_name("h") > 0)

        # margin = (pm.cast(np.float32, h[j]) * y_train_inv[j]).set_name("margin")
        margin = (h[j] * y_train_inv[j]).set_name("margin")
        valid_margin_count = pm.sum([j], margin[j], name="valid_margin_count")
        partial = (y_train[j] * valid_margin_count).set_name("partial")
        updated_margin = (margin[j] - partial[j]).set_name("updated_margin")
        dW = (x_train[i] * updated_margin[j]).set_name("dW")
        weights[i, j] = (weights[i, j] -
                         learning_rate * dW[i, j]).set_name("weights_update")

    shape_dict = {"n_features": features, "n_locations": locations}
    input_info, keys, out_info = svm_wifi_datagen(features,
                                                  locations,
                                                  lr,
                                                  deltav,
                                                  lowered=True)

    cwd = Path(f"{__file__}").parent
    full_path = f"{cwd}/outputs"
    tabla_path = f"{full_path}/{graph.name}_{locations}_{features}_tabla.json"

    tabla_ir, tabla_graph = pm.generate_tabla(graph,
                                              shape_dict,
                                              tabla_path,
                                              context_dict=input_info,
                                              add_kwargs=True)
Example #13
def test_translate_softmax(x_shape, axis):
    x = np.random.randint(0, 5, x_shape).astype(np.float64)
    data = pm.input("x", shape=x.shape)
    out = pm.output("out")
    g = pm.softmax(data, out, axis=axis)
    res = g("out", {"x": x})
    np_res = np_softmax(x, axis=axis)
    np.testing.assert_allclose(np_res, res)
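np_softmax is an external reference helper here; a standard, numerically stable NumPy version would be:

import numpy as np

def np_softmax(x, axis=1):
    # Shift by the per-axis max before exponentiating for numerical stability.
    e = np.exp(x - np.max(x, axis=axis, keepdims=True))
    return e / np.sum(e, axis=axis, keepdims=True)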
Example #14
def test_bnorm():
    shape = (1, 16, 32, 32)
    grad = torch.rand(shape)
    x = torch.rand(shape)
    scale = torch.rand((shape[1], ))
    bias = torch.rand((shape[1], ))
    mean = torch.rand((shape[1], ))
    var = torch.rand((shape[1], ))
    torch_res = batchnorm2d_backward(grad, x, scale, bias)

    grad = grad.numpy()
    x = x.numpy()
    scale = scale.numpy()
    bias = bias.numpy()
    mean = mean.numpy()
    var = var.numpy()
    optimizer = "sgd"
    optimizer_kwargs = {"lr": 0.01}
    pm_x = pm.input(name="x", shape=shape)
    pm_grad = pm.input(name="grad", shape=shape)
    pm_scale = pm.state(name="scale", shape=scale.shape)
    pm_bias = pm.state(name="bias", shape=scale.shape)
    pm_mean = pm.state(name="mean", shape=scale.shape)
    pm_var = pm.state(name="var", shape=scale.shape)
    pm_x_grad = pm.output(name="x_grad", shape=shape)
    pm_scale_grad = pm.output(name="scale_grad", shape=scale.shape)
    pm_b_grad = pm.output(name="bias_grad", shape=bias.shape)

    inp_map = {
        'x': x,
        'grad': grad,
        'scale': scale,
        'bias': bias,
        'mean': mean,
        'var': var,
    }
    graph = pm.batchnorm_grad(pm_x, pm_scale, pm_bias, pm_mean, pm_var,
                              pm_grad, pm_x_grad, pm_scale_grad, pm_b_grad,
                              optimizer, optimizer_kwargs)
    rtol, atol = 1.3e-3, 1e-3
    gout = graph("bias_grad", inp_map)
    np.testing.assert_allclose(gout,
                               torch_res.numpy().reshape(gout.shape),
                               rtol=rtol,
                               atol=atol)
Example #15
    def populate_input(self, node):

        if node.shape != pm.DEFAULT_SHAPES[0]:
            indices = list(product(*tuple([np.arange(i) for i in node.shape])))
            for i in indices:
                x = pm.input(graph=node,
                             name=f"{node.name}{i}",
                             root_name=node.name,
                             shape=(1, ))
                self.stored_objects[id(x)] = x
Example #16
def test_flip(in_shape, axis):
    x = np.random.randn(*in_shape).astype(np.float32)

    with pm.Node(name="flip_op") as graph:
        data = pm.input(name="input", shape=x.shape)
        out = pm.flip(data, axis, name="res")

    np_y = np.flip(x, axis)
    pm_y = graph("res", {"input": x})
    np.testing.assert_allclose(np_y, pm_y)
Example #17
def test_transpose(in_shape):
    x = np.random.randint(0, 30, np.prod(in_shape)).reshape(in_shape)

    with pm.Node(name="tpose") as graph:
        x_pm = pm.input(name="x", shape=in_shape)
        pm.transpose(x_pm, (1,0), name="o")

    in_dict = {"x": x}
    res = graph("o", in_dict)
    np.testing.assert_allclose(x.T, res)
Example #18
def test_reshape(in_shape, out_shape):
    x = np.zeros(in_shape).astype(np.float32)

    with pm.Node(name="reshape_op") as graph:
        data = pm.input(name="input", shape=x.shape)
        out = pm.reshape(data, out_shape, name="res")

    pm_y = graph("res", {"input": x})
    np_y = np.reshape(x, out_shape)
    np.testing.assert_allclose(np_y, pm_y)
    assert np_y.shape == pm_y.shape
Example #19
def test_translate_flatten(x_shape):
    x = np.random.randint(0, 5, x_shape)
    data = pm.input("x", shape=x.shape)
    out = pm.output("out")

    g = pm.batch_flatten(data, out)

    res = g("out", {"x": x})
    print(res)
    print(x.reshape(-1))
    np.testing.assert_allclose(res, x.reshape(-1))
Example #20
def gen_from_shape(graph_type, input_shape, params=None):
    if graph_type == "linear":
        x = pm.input(name="x", shape=input_shape)
        w = pm.state(name="w", shape=input_shape)
        y = pm.input(name="y")
        mu = pm.parameter(name="mu", default=1.0)
        m = pm.parameter(name="m", default=input_shape)
        return pm.linear_regressor_train(x,
                                         w,
                                         y,
                                         mu,
                                         m,
                                         name="linear_regressor")
    elif graph_type == "logistic":
        x = pm.input(name="x", shape=input_shape)
        w = pm.state(name="w", shape=input_shape)
        y = pm.input(name="y")
        mu = pm.parameter(name="mu", default=1.0)
        m = pm.parameter(name="m", default=input_shape)
        return pm.logistic_regressor_train(x,
                                           w,
                                           y,
                                           mu,
                                           m,
                                           name="logistic_regressor")
    elif graph_type == "svm":
        x = pm.input(name="x", shape=input_shape)
        w = pm.state(name="w", shape=input_shape)
        y = pm.input(name="y")
        mu = pm.parameter(name="mu", default=1.0)
        m = pm.parameter(name="m", default=input_shape)
        return pm.svm_classifier_train(x, w, y, mu, m, name="svm_classifier")
Example #21
def test_lower_group_op():
    with pm.Node(name="linear_reg1") as graph:
        m = pm.parameter(name="m")
        x = pm.input("x", shape=(m))
        y = pm.input("y")
        w = pm.state("w", shape=(m))
        i = pm.index(0, m - 1, name="i")
        h = pm.sum([i], w[i] * x[i], name="h")
    m_ = 3
    n_ = 3
    x_ = np.random.randint(0, 10, m_)
    w_ = np.random.randint(0, 10, (m_))
    np_result = np.sum(x_ * w_)
    np.testing.assert_allclose(graph("h", {"w": w_, "x": x_}), np_result)
    np.testing.assert_allclose(graph("h", w=w_, x=x_), np_result)
    shape_pass = NormalizeGraph({"m": m_, "n": n_})
    graph_shapes = shape_pass(graph)
    shape_res = graph_shapes("h", x=x_, w=w_)
    np.testing.assert_allclose(shape_res, np_result)

    lower_pass = Lower({})
    lowered_graph = lower_pass(graph_shapes)
    input_info = {f"w/w({i},)": w_[i] for i in range(len(w_))}
    input_info.update({f"x/x({i},)": x_[i] for i in range(len(x_))})
    fine_grained_eval = lowered_graph("h/h(4,)", input_info)
    assert fine_grained_eval == np_result

    pb_path = f"{OUTPATH}/linear_reg1.srdfg"

    pm.pb_store(lowered_graph, OUTPATH)
    loaded_node = pm.pb_load(pb_path)
    input_info = {f"w/w({i},)": w_[i] for i in range(len(w_))}
    input_info.update({f"x/x({i},)": x_[i] for i in range(len(x_))})

    loaded_res = loaded_node("h/h(4,)", input_info)

    assert loaded_node.func_hash() == lowered_graph.func_hash()
    assert loaded_res == np_result
Example #22
def test_translate_reduce_sum(x_shape):
    data = np.random.randint(-3, 3, x_shape)
    np_res = np.sum(data)
    graph = pm.Node("reduce")
    pm_data = pm.input(name="a", shape=x_shape, graph=graph)
    out = pm.output(name="out", graph=graph)
    axis = (0,)
    keepdims = 0

    with graph:
        pm.reduce_sum(pm_data, out, axes=axis, keepdims=keepdims)
    pm_res = graph("out", {"a": data})
    np.testing.assert_allclose(pm_res, np_res)
Example #23
def test_broadcast(a_shape, b_shape, c_shape):

    from einops import repeat
    with pm.Node(name="broadcast") as graph:
        a = pm.input("a", shape=a_shape)
        b = pm.input("b", shape=b_shape)
        c = pm.output("c", shape=c_shape)
        a_idx, b_idx, c_idx = _get_elem_indices(a, b, c)

        c[c_idx] = a[a_idx] + b[b_idx]

    a_np = np.random.randint(0, 32, np.prod(a_shape)).reshape(a_shape)
    b_np = np.random.randint(0, 32, np.prod(b_shape)).reshape(b_shape)
    if len(c_shape) > 2:
        c_np_out = np.zeros(c_shape)
    else:
        c_np_out = np.zeros((c_shape[0], 1, c_shape[1]))

    a_np_t = repeat(a_np, 'i k -> i k j', j=b_shape[1])
    b_np_t = repeat(b_np, 'i k -> j i k', j=a_shape[0])
    actual_res = (a_np_t + b_np_t).squeeze()
    graph_res = graph("c", {"a": a_np, "b": b_np})

    np.testing.assert_allclose(graph_res, actual_res)
Example #24
def test_log_softmax(shape):
    inp = np.random.uniform(-15, 15, np.prod(shape)).reshape(shape)
    torch_res = F.log_softmax(torch.from_numpy(inp))
    info = {
        'data': inp,
    }
    np_res = log_softmax(inp)
    np.testing.assert_allclose(np_res, torch_res.numpy())
    x = pm.input(name="data", shape=shape)
    lsmx = pm.output(name="lsmx")

    graph = pm.log_softmax(x, lsmx, axis=1)
    tres = graph("lsmx", info)

    np.testing.assert_allclose(tres, torch_res.numpy())
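The log_softmax reference is likewise not shown; a stable NumPy sketch (assuming the last axis, which matches the 2D inputs used here) would be:

import numpy as np

def log_softmax(x, axis=-1):
    # log(softmax(x)) computed as x - max - log(sum(exp(x - max))).
    shifted = x - np.max(x, axis=axis, keepdims=True)
    return shifted - np.log(np.sum(np.exp(shifted), axis=axis, keepdims=True))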
Example #25
def test_load_nested_linear_regressor(m_):
    shape_dict = {"m": m_}
    with pm.Node(name="nested_linear") as graph:
        m = pm.parameter(name="m")
        mu = pm.parameter(name="mu", default=1.0)
        x = pm.input("x", shape=(m))
        y = pm.input("y")
        w = pm.state("w", shape=(m))
        pm.linear_regressor_train(x, w, y, mu, m, name="linear_regressor")
        j = pm.index(0, m-1, name="j")
        tw = (w[j] - 4).set_name("tw")

    test_graph, input_info, out_info, keys = linear(m=m_, coarse=True)
    shape_val_pass = pm.NormalizeGraph(shape_dict)
    new_graph = shape_val_pass(graph)
    test_res = new_graph("tw", input_info)
    np.testing.assert_allclose(test_res, (out_info["w"] - 4))

    ref_graph, input_info, new_out_info, keys = linear(m=m_)
    flatten_pass = pm.Lower({})
    keys = [f"tw/tw({i},)" for i in range(m_)]

    flattened_g = flatten_pass(new_graph)
    all_vals = flattened_g(keys, input_info)
Example #26
def test_pad(in_shape, pad_start, pad_end):
    x = np.random.randn(*in_shape).astype(np.float32)

    with pm.Node(name="pad_op") as graph:
        data = pm.input(name="input", shape=x.shape)
        out = pm.pad(data, pad_start, pad_end=pad_end, name="res")

    if pad_end is None:
        padding_val = tuple((pad_start[i], pad_start[i]) for i in range(len(pad_start)))
    else:
        padding_val = tuple((pad_start[i], pad_end[i]) for i in range(len(pad_start)))
    np_y = np.pad(x, padding_val)
    pm_y = graph("res", {"input": x})
    assert np_y.shape == pm_y.shape
    np.testing.assert_allclose(np_y, pm_y)
Example #27
def test_sigmoid(m_):

    with pm.Node(name="logistic1") as graph:
        m = pm.parameter(name="m")
        n = pm.parameter(name="n")
        x = pm.input("x", shape=(m))
        w = pm.state("w", shape=(m))
        i = pm.index(0, m - 1, name="i")
        o = pm.sigmoid(pm.sum([i], w[i] * x[i]), name="out")
    x_ = np.random.randint(0, 10, m_)
    w_ = np.random.randint(0, 10, m_)
    input_dict = {"x": x_, "w": w_}
    np_res = int(sigmoid(np.sum(x_ * w_)))
    shape_dict = {"m": m_}

    coarse_eval = graph("out", x=x_, w=w_)
    np.testing.assert_allclose(np_res, coarse_eval)
    lowered = set_shape_and_lower(graph, shape_dict)
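set_shape_and_lower is not defined in this listing; judging from the other examples it simply chains the shape-normalization and lowering passes, roughly:

def set_shape_and_lower(graph, shape_dict):
    # Bind concrete shapes, then flatten the graph to per-element nodes,
    # mirroring the NormalizeGraph/Lower sequence used in the other tests.
    shaped = pm.NormalizeGraph(shape_dict)(graph)
    return pm.Lower({})(shaped)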
Example #28
def test_matmul(in_shape, w_shape):

    x = np.random.randint(0, 30, np.prod(in_shape)).reshape(in_shape)
    w = np.random.randint(0, 30, np.prod(w_shape)).reshape(w_shape)
    if in_shape[-1] == w_shape[-1]:
        o_np = x @ w.T
    else:
        assert in_shape[-1] == w_shape[0]
        o_np = x @ w
    with pm.Node(name="mmul") as graph:
        x_pm = pm.input(name="x", shape=in_shape)
        w_pm = pm.state(name="w", shape=w_shape)
        o_pm = pm.output(name="o", shape=o_np.shape)
        pm.matmul(x_pm, w_pm, o_pm)


    in_dict = {"x": x, "w": w}
    res = graph("o", in_dict)
    np.testing.assert_allclose(o_np, res)
Example #29
def test_loss(shape):
    inp = np.random.uniform(-15, 15, np.prod(shape)).reshape(shape)
    tgt = np.random.randint(0, 15, np.prod(shape[0]))

    torch_res = F.cross_entropy(torch.from_numpy(inp), torch.from_numpy(tgt))
    info = {
        'data': inp,
        'tgt': tgt,
    }
    np_res = torch_ce_loss(inp, tgt)
    np.testing.assert_allclose(np_res, torch_res.numpy())
    x = pm.input(name="data", shape=shape)
    tgt_ = pm.state(name="tgt", shape=(shape[0], ))

    loss = pm.output(name="loss")

    graph = pm.cross_entropy_loss(x, tgt_, loss)
    tres = graph("loss", info)

    np.testing.assert_allclose(tres, np_res)
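torch_ce_loss is a NumPy stand-in for F.cross_entropy; assuming the default 'mean' reduction over the batch, it could be written as:

import numpy as np

def torch_ce_loss(inp, tgt):
    # Mean negative log-likelihood of the true class, computed from logits.
    shifted = inp - np.max(inp, axis=1, keepdims=True)
    logp = shifted - np.log(np.sum(np.exp(shifted), axis=1, keepdims=True))
    return -np.mean(logp[np.arange(inp.shape[0]), tgt])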
Example #30
def test_avg_pool(data_shape, kernel_shape, stride):
    data = np.random.randint(0, 5, data_shape)
    tout = pooling(data, kernel_shape[0], kernel_shape[1], stride=stride)

    out = pm.output(name="out")
    n = pm.parameter("ns")
    ic = pm.parameter("ic")
    ih = pm.parameter("ih")
    iw = pm.parameter("iw")
    kh = pm.parameter("kh")
    kw = pm.parameter("kw")
    x = pm.input(name="data", shape=(n, ic, ih, iw))

    g = pm.avg_pool2d(x, out, kh, kw, stride=stride, pad=0)
    inp_info = {}
    inp_info["data"] = data
    inp_info["kh"] = kernel_shape[0]
    inp_info["kw"] = kernel_shape[1]
    test_out = g("out", inp_info)
    np.testing.assert_allclose(test_out, tout)
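The pooling reference used for comparison is also external; an average-pooling sketch over NCHW data matching the call pooling(data, kh, kw, stride=...) might look like:

import numpy as np

def pooling(data, kh, kw, stride=1, pad=0):
    # Reference average pooling: slide a kh x kw window over each channel.
    n, c, h, w = data.shape
    if pad:
        data = np.pad(data, ((0, 0), (0, 0), (pad, pad), (pad, pad)))
        h, w = h + 2 * pad, w + 2 * pad
    oh = (h - kh) // stride + 1
    ow = (w - kw) // stride + 1
    out = np.zeros((n, c, oh, ow))
    for i in range(oh):
        for j in range(ow):
            win = data[:, :, i * stride:i * stride + kh, j * stride:j * stride + kw]
            out[:, :, i, j] = win.mean(axis=(2, 3))
    return out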