Example #1
def test_translate_conv(x_shape, w_shape, params):
    shape_dict = {"n": x_shape[0], "c": x_shape[1], "ih": x_shape[2], "iw": x_shape[3],
                  "nf": w_shape[0], "kh": w_shape[2], "kw": w_shape[3],
                  "stride": params["stride"], "pad": params["pad"]}

    _, input_info, out_info, keys = conv(x_shape, w_shape, params, coarse=True, debug_matrix=False)

    n = pm.parameter(name="n")
    c = pm.parameter(name="ic")
    ih = pm.parameter(name="ih")
    iw = pm.parameter(name="iw")
    nf = pm.parameter(name="nf")
    kh = pm.parameter(name="kh")
    kw = pm.parameter(name="kw")
    x = pm.input(name="data", shape=(n, c, ih, iw))
    w = pm.state(name="w", shape=(nf, c, kh, kw))
    b = pm.state(name="bias", shape=(nf))
    stride = pm.parameter(name="stride")
    pad = pm.parameter(name="pad")
    out = pm.output(name="out")
    graph = pm.conv_bias(x, w, b, out, stride, pad)
    tinput_info = copy.deepcopy(input_info)
    res0 = graph("out", tinput_info)

    np.testing.assert_allclose(res0, out_info["out"])
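As a sanity check for tests like this, the conv output's spatial size follows the standard convolution arithmetic; a minimal standalone sketch (not part of the original test):

def conv_out_hw(ih, iw, kh, kw, stride, pad):
    # Standard convolution output-size rule: floor((i + 2*pad - k) / stride) + 1.
    oh = (ih + 2 * pad - kh) // stride + 1
    ow = (iw + 2 * pad - kw) // stride + 1
    return oh, ow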
Example #2
def gen_from_shape(graph_type, input_shape, params=None):
    if graph_type == "linear":
        x = pm.input(name="x", shape=input_shape)
        w = pm.state(name="w", shape=input_shape)
        y = pm.input(name="y")
        mu = pm.parameter(name="mu", default=1.0)
        m = pm.parameter(name="m", default=input_shape)
        return pm.linear_regressor_train(x,
                                         w,
                                         y,
                                         mu,
                                         m,
                                         name="linear_regressor")
    elif graph_type == "logistic":
        x = pm.input(name="x", shape=input_shape)
        w = pm.state(name="w", shape=input_shape)
        y = pm.input(name="y")
        mu = pm.parameter(name="mu", default=1.0)
        m = pm.parameter(name="m", default=input_shape)
        return pm.logistic_regressor_train(x,
                                           w,
                                           y,
                                           mu,
                                           m,
                                           name="logistic_regressor")
    elif graph_type == "svm":
        x = pm.input(name="x", shape=input_shape)
        w = pm.state(name="w", shape=input_shape)
        y = pm.input(name="y")
        mu = pm.parameter(name="mu", default=1.0)
        m = pm.parameter(name="m", default=input_shape)
        return pm.svm_classifier_train(x, w, y, mu, m, name="svm_classifier")
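A hedged usage sketch (the argument value is illustrative, not taken from the original tests):

# Hypothetical call: build a coarse linear-regression training graph
# over a 54-feature input.
graph = gen_from_shape("linear", input_shape=54)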
Example #3
    def define_graph(self, inp, weight, bias, grad, inp_grad, weight_grad,
                     bias_grad, optimizer, optimizer_kwargs):
        transA = False
        transB = False

        if grad.shape[1] != weight.shape[0]:
            indices = tuple([pm.index(0, s - 1) for s in weight.shape])
            # weight_transposed = pm.temp(name=f"{weight.name}_transposed", shape=(weight.shape[1], weight.shape[0]))
            weight_transposed = pm.state(name=f"{weight.name}_transposed",
                                         shape=(weight.shape[1],
                                                weight.shape[0]))
            weight_transposed[indices[1], indices[0]] = weight[indices]
            pm.gemm_no_bias(grad,
                            weight_transposed,
                            inp_grad,
                            transA=transA,
                            transB=transB,
                            strict_shapes=True)
        else:
            pm.gemm_no_bias(grad,
                            weight,
                            inp_grad,
                            transA=transA,
                            transB=transB,
                            strict_shapes=True)

        if grad.shape[0] != inp.shape[1]:
            indices = tuple([pm.index(0, s - 1) for s in inp.shape])
            # inp_transposed = pm.temp(name=f"{inp.name}_transposed", shape=(inp.shape[1], inp.shape[0]))
            inp_transposed = pm.state(name=f"{inp.name}_transposed",
                                      shape=(inp.shape[1], inp.shape[0]))
            inp_transposed[indices[1], indices[0]] = inp[indices]
            pm.gemm_no_bias(inp_transposed,
                            grad,
                            weight_grad,
                            transA=transA,
                            transB=transB,
                            strict_shapes=True)
        else:
            pm.gemm_no_bias(inp,
                            grad,
                            weight_grad,
                            transA=transA,
                            transB=transB,
                            strict_shapes=True)

        # Weight update
        assert weight_grad.shape == weight.shape

        OPTIMIZERS[optimizer](weight, weight_grad, **optimizer_kwargs)

        pm.reduce_sum(grad, bias_grad)
        OPTIMIZERS[optimizer](bias, bias_grad, **optimizer_kwargs)
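For reference, the GEMM calls above compose the usual dense-layer backward pass. A minimal NumPy sketch of the same math, assuming inp is (N, in), weight is (in, out), and grad is (N, out):

import numpy as np

def linear_backward_ref(inp, weight, grad):
    # NumPy reference for the GEMM-based backward composed above.
    inp_grad = grad @ weight.T        # (N, out) @ (out, in) -> (N, in)
    weight_grad = inp.T @ grad        # (in, N) @ (N, out)   -> (in, out)
    bias_grad = grad.sum(axis=0)      # reduce over the batch -> (out,)
    return inp_grad, weight_grad, bias_grad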
Example #4
def test_multi_dim():
    with pm.Node(name="elem4") as graph:
        m = pm.parameter(name="m")
        n = pm.parameter(name="n")
        x = pm.input("x", shape=(m, n))
        w = pm.state("w", shape=(m, n))
        i = pm.index(0, m - 1, name="i")
        j = pm.index(0, n - 1, name="j")
        w[i, j] = (w[i, j] * x[i, j])
    m_ = 3
    n_ = 4
    x_ = np.random.randint(0, 10, m_ * n_).reshape((m_, n_))
    w_ = np.random.randint(0, 10, m_ * n_).reshape((m_, n_))
    coarse_eval = graph("w", x=x_, w=w_)
    np_result = x_ * w_
    np.testing.assert_allclose(coarse_eval, np_result)
    shape_pass = NormalizeGraph({"m": m_, "n": n_})
    graph_shapes = shape_pass(graph)
    shape_res = graph_shapes("w", x=x_, w=w_)
    np.testing.assert_allclose(shape_res, np_result)
    lower_pass = Lower({})
    lowered_graph = lower_pass(graph_shapes)
    input_info = {}
    for i in range(m_):
        for j in range(n_):
            input_info[f"w/w({i}, {j})"] = w_[i, j]
            input_info[f"x/x({i}, {j})"] = x_[i, j]

    fine_grained_eval = lowered_graph("w/w(2, 3)", input_info)
    assert fine_grained_eval == np_result[2, 3]
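The lowered input keys follow the `<name>/<name>(i, j)` convention used throughout these tests; a hypothetical helper (flat_inputs is not a pm API) that builds them for any 2-D array:

def flat_inputs(name, arr):
    # Build the lowered {"<name>/<name>(i, j)": value} entries for a 2-D tensor.
    return {f"{name}/{name}({i}, {j})": arr[i, j]
            for i in range(arr.shape[0])
            for j in range(arr.shape[1])}

# e.g.: input_info = {**flat_inputs("w", w_), **flat_inputs("x", x_)}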
Example #5
def test_single_dim_op_slice():
    with pm.Node(name="elem3") as graph:
        m = pm.parameter(name="m")
        x = pm.input("x", shape=m)
        w = pm.state("w", shape=m)
        i = pm.index(0, m - 1, name="i")
        out = (w[i] * x[i])
        w[i] = (out[i] - w[i])

    m_ = 3
    x_ = np.random.randint(0, 10, m_)
    w_ = np.random.randint(0, 10, m_)

    coarse_eval = graph("w", x=x_, w=w_)
    np_result = x_ * w_ - w_
    np.testing.assert_allclose(coarse_eval, np_result)

    shape_pass = NormalizeGraph({"m": 3})
    graph_shapes = shape_pass(graph)
    shape_res = graph_shapes("w", x=x_, w=w_)

    np.testing.assert_allclose(shape_res, np_result)
    lower_pass = Lower({})
    lowered_graph = lower_pass(graph_shapes)
    input_info = {f"w/w({i},)": w_[i] for i in range(len(w_))}
    input_info.update({f"x/x({i},)": x_[i] for i in range(len(x_))})
    fine_grained_eval = lowered_graph("w/w(2,)", input_info)
    assert fine_grained_eval == np_result[2]
Example #6
def test_load_linear_regressor(m_):
    shape_dict = {"m": m_}
    m = pm.parameter("m")
    mu = pm.parameter(name="mu", default=1.0)
    x = pm.input("x", shape=(m))
    y = pm.input("y")
    w = pm.state("w", shape=(m))

    graph = pm.linear_regressor_train(x, w, y, mu, m)
    test_graph, input_info, out_info, keys = linear(m=m_, coarse=True)
    assert len(test_graph.nodes.keys()) == len(graph.nodes.keys())
    assert op_counts(test_graph) == op_counts(graph)

    shape_val_pass = pm.NormalizeGraph(shape_dict)
    new_graph = shape_val_pass(graph)
    test_res = new_graph(keys, input_info)
    np.testing.assert_allclose(test_res, out_info["w"])

    test_graph_lowered, input_info, new_out_info, keys = linear(m=m_)
    flatten_pass = pm.Lower({})
    test_flatten_pass = pm.Lower({})
    flattened_g = flatten_pass(new_graph)
    ref_lowered = test_flatten_pass(test_graph_lowered, {})
    assert len(ref_lowered.nodes.keys()) == len(flattened_g.nodes.keys())
    assert op_counts(ref_lowered) == op_counts(flattened_g)

    all_vals = flattened_g(keys, input_info)
    np.testing.assert_allclose(new_out_info["w"], all_vals)
Example #7
def test_multidim_sigmoid(m_):

    with pm.Node(name="logistic") as graph:
        m = pm.parameter(name="m")
        n = pm.parameter(name="n")
        x = pm.input("x", shape=(m))
        w = pm.state("w", shape=(m))
        i = pm.index(0, m - 1, name="i")
        o = pm.sigmoid(w[i] * x[i], name="out")
    x_ = np.random.randint(0, 10, m_).astype(np.float64)
    w_ = np.random.randint(0, 10, m_).astype(np.float64)
    shape_dict = {"m": m_}
    input_dict = {"x": x_, "w": w_}
    np_res = sigmoid((x_ * w_))

    coarse_eval = graph("out", input_dict)
    np.testing.assert_allclose(np_res, coarse_eval)
    lowered = set_shape_and_lower(graph, shape_dict)
    keys = [f"out/out({i},)" for i in range(m_)]

    x_ = np.random.randint(0, 10, m_).astype(np.float64)
    w_ = np.random.randint(0, 10, m_).astype(np.float64)
    input_dict = {}
    for i in range(m_):
        input_dict[f"x/x({i},)"] = x_[i]
        input_dict[f"w/w({i},)"] = w_[i]
    np_res = sigmoid((x_ * w_))

    lower_res = np.asarray(lowered(keys, input_dict)).reshape(np_res.shape)
    np.testing.assert_allclose(lower_res, np_res)
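These tests compare against a plain NumPy sigmoid; its assumed definition:

import numpy as np

def sigmoid(x):
    # Assumed definition of the NumPy reference used by these tests.
    return 1.0 / (1.0 + np.exp(-x))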
Example #8
def test_single_dim_norm():
    with pm.Node(name="elem1") as graph:
        m = pm.parameter("m")
        x = pm.input("x", shape=m)
        w = pm.state("w", shape=m)
        i = pm.index(0, m - 1, name="i")
        w[i] = (w[i] * x[i])
    x_ = np.random.randint(0, 10, 3)
    w_ = np.random.randint(0, 10, 3)
    coarse_eval = graph("w", x=x_, w=w_)

    np_result = x_ * w_
    np.testing.assert_allclose(coarse_eval, np_result)
    shape_pass = NormalizeGraph({"m": 3})
    graph_shapes = shape_pass(graph)

    shape_res = graph_shapes("w", x=x_, w=w_)
    np.testing.assert_allclose(shape_res, np_result)
    lower_pass = Lower({})
    lowered_graph = lower_pass(graph_shapes)
    input_info = {f"w/w({i},)": w_[i] for i in range(len(w_))}
    input_info.update({f"x/x({i},)": x_[i] for i in range(len(x_))})
    fine_grained_eval = lowered_graph("w/w(1,)", input_info)

    assert fine_grained_eval == np_result[1]

    pb_path = f"{OUTPATH}/{graph.name}.srdfg"
    pm.pb_store(lowered_graph, OUTPATH)
    loaded_node = pm.pb_load(pb_path)
    input_info = {f"w/w({i},)": w_[i] for i in range(len(w_))}
    input_info.update({f"x/x({i},)": x_[i] for i in range(len(x_))})
    fine_grained_eval = loaded_node("w/w(1,)", input_info)
    assert fine_grained_eval == np_result[1]
Example #9
def test_multi_dim_op_slice():
    with pm.Node(name="elem2") as graph:
        m = pm.parameter(name="m")
        n = pm.parameter(name="n")
        mu = pm.parameter(name="mu", default=2.0)
        x = pm.input(name="x", shape=(m, n))
        w = pm.state(name="w", shape=(m, n))
        i = pm.index(0, m - 1, name="i")
        j = pm.index(0, n - 1, name="j")
        out = (x[i, j] * w[i, j]).set_name("w_out")
        w[i, j] = (mu * (out[i, j] - w[i, j]))
    m_ = 3
    n_ = 2
    x_ = np.random.randint(0, 10, m_ * n_).reshape((m_, n_))
    w_ = np.random.randint(0, 10, m_ * n_).reshape((m_, n_))
    coarse_eval = graph("w", x=x_, w=w_)
    np_result = (x_ * w_ - w_) * 2.0
    np.testing.assert_allclose(coarse_eval, np_result)
    shape_pass = NormalizeGraph({"m": m_, "n": n_})
    graph_shapes = shape_pass(graph)
    shape_res = graph_shapes("w", x=x_, w=w_)
    np.testing.assert_allclose(shape_res, np_result)
    lower_pass = Lower({})
    lowered_graph = lower_pass(graph_shapes)
    input_info = {}
    for i in range(m_):
        for j in range(n_):
            input_info[f"w/w({i}, {j})"] = w_[i, j]
            input_info[f"x/x({i}, {j})"] = x_[i, j]
    fine_grained_eval = lowered_graph("w/w(2, 1)", input_info)
    assert fine_grained_eval == np_result[2, 1]
Example #10
def test_conv2d_transpose_shapes(inp_shape, wgt_shape, stride, pad):
    groups = 1
    dilation = 1
    out_pad = 0
    inp = np.random.randint(-15, 15, np.prod(inp_shape)).reshape(inp_shape)
    wgt = np.random.randint(-15, 15, np.prod(wgt_shape)).reshape(wgt_shape)
    # Compute the expected result two ways and check that they agree.
    torch_ref = F.conv_transpose2d(torch.from_numpy(inp), torch.from_numpy(wgt),
                                   stride=stride, padding=pad)
    torch_res = conv2d_transpose(torch.from_numpy(inp), torch.from_numpy(wgt), stride, pad)
    np.testing.assert_allclose(torch_res.numpy(), torch_ref.numpy())
    info = {
        'data': inp,
        'w': wgt,
    }
    N, C, H, W = inp.shape

    x = pm.input(name="data", shape=inp_shape)
    w = pm.state(name="w", shape=wgt_shape)
    out = pm.output(name="out")

    graph = pm.conv_transpose(x, w, out, stride, pad)
    tres = graph("out", info)

    np.testing.assert_allclose(tres, torch_res.numpy())
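The transposed convolution inverts the usual size rule; a sketch of the output-size arithmetic the shapes above must satisfy (dilation = 1 assumed):

def conv_transpose_out_hw(ih, iw, kh, kw, stride, pad, out_pad=0):
    # Transposed-convolution size rule: o = (i - 1) * stride - 2 * pad + k + out_pad.
    oh = (ih - 1) * stride - 2 * pad + kh + out_pad
    ow = (iw - 1) * stride - 2 * pad + kw + out_pad
    return oh, ow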
Example #11
def get_constant_of_shape(input_var,
                          name=None,
                          shape=None,
                          out=None,
                          **kwargs):
    # Create a fresh state node only when no output node was supplied.
    if out is None:
        out = pm.state(name=name, shape=shape)
    return out
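A hedged usage note: with out omitted, the helper materializes a fresh state node. Illustrative call (names and values hypothetical):

# Hypothetical call: materialize a named 4x4 state when no `out` is given.
const = get_constant_of_shape(None, name="const_4x4", shape=(4, 4))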
Example #12
def test_bnorm():
    shape = (1, 16, 32, 32)
    grad = torch.rand(shape)
    x = torch.rand(shape)
    scale = torch.rand((shape[1], ))
    bias = torch.rand((shape[1], ))
    mean = torch.rand((shape[1], ))
    var = torch.rand((shape[1], ))
    torch_res = batchnorm2d_backward(grad, x, scale, bias)

    grad = grad.numpy()
    x = x.numpy()
    scale = scale.numpy()
    bias = bias.numpy()
    mean = mean.numpy()
    var = var.numpy()
    optimizer = "sgd"
    optimizer_kwargs = {"lr": 0.01}
    pm_x = pm.input(name="x", shape=shape)
    pm_grad = pm.input(name="grad", shape=shape)
    pm_scale = pm.state(name="scale", shape=scale.shape)
    pm_bias = pm.state(name="bias", shape=scale.shape)
    pm_mean = pm.state(name="mean", shape=scale.shape)
    pm_var = pm.state(name="var", shape=scale.shape)
    pm_x_grad = pm.output(name="x_grad", shape=shape)
    pm_scale_grad = pm.output(name="scale_grad", shape=scale.shape)
    pm_b_grad = pm.output(name="bias_grad", shape=bias.shape)

    inp_map = {
        'x': x,
        'grad': grad,
        'scale': scale,
        'bias': bias,
        'mean': mean,
        'var': var,
    }
    graph = pm.batchnorm_grad(pm_x, pm_scale, pm_bias, pm_mean, pm_var,
                              pm_grad, pm_x_grad, pm_scale_grad, pm_b_grad,
                              optimizer, optimizer_kwargs)
    rtol, atol = 1.3e-3, 1e-3
    gout = graph("bias_grad", inp_map)
    np.testing.assert_allclose(gout,
                               torch_res.numpy().reshape(gout.shape),
                               rtol=rtol,
                               atol=atol)
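The bias_grad value checked above reduces the incoming gradient over every non-channel axis; assuming reduce_sum collapses to the declared (C,) output, the NumPy equivalent is:

def bnorm_bias_grad_ref(grad):
    # Per-channel bias gradient of an (N, C, H, W) gradient tensor.
    return grad.sum(axis=(0, 2, 3))    # shape (C,)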
Example #13
    def define_graph(self,
                     inp,
                     weight,
                     bias,
                     grad,
                     inp_grad,
                     weight_grad,
                     bias_grad,
                     optimizer,
                     optimizer_kwargs,
                     stride=1,
                     pad=0,
                     dilation=1):
        min_sizes = []
        k = len(grad.shape) - 2

        for d in range(k):
            min_sizes.append((grad.shape[d + 2] - 1) * stride - 2 * pad +
                             (weight.shape[-1] - 1) * dilation + 1)

        grad_input_padding = tuple(inp.shape[-k + d] - min_sizes[d]
                                   for d in range(k))
        assert grad_input_padding[0] == grad_input_padding[1]
        pm.conv_transpose_bias(grad,
                               weight,
                               bias,
                               inp_grad,
                               stride=stride,
                               pad=pad,
                               out_pad=grad_input_padding[0])
        inp_indices = tuple(pm.index(0, s - 1) for s in inp.shape)
        grad_indices = tuple(pm.index(0, s - 1) for s in grad.shape)
        weight_indices = tuple(pm.index(0, s - 1) for s in weight.shape)
        inp_transposed = pm.temp(name=f"transposed_{inp.name}",
                                 shape=(inp.shape[1], inp.shape[0],
                                        inp.shape[2], inp.shape[3]))
        grad_transposed = pm.state(name=f"transposed_{grad.name}",
                                   shape=(grad.shape[1], grad.shape[0],
                                          grad.shape[2], grad.shape[3]))
        wgt_grad_transposed = pm.temp(name=f"transposed_{weight.name}",
                                      shape=(weight.shape[1], weight.shape[0],
                                             weight.shape[2], weight.shape[3]))
        pm.tensor_transpose(inp, inp_transposed, perm=(1, 0, 2, 3))
        pm.tensor_transpose(grad, grad_transposed, perm=(1, 0, 2, 3))
        pm.conv(inp_transposed,
                grad_transposed,
                wgt_grad_transposed,
                stride=dilation,
                pad=pad,
                dilation=stride)
        pm.tensor_transpose(wgt_grad_transposed,
                            weight_grad,
                            perm=(1, 0, 2, 3))
        # Weight update
        OPTIMIZERS[optimizer](weight, weight_grad, **optimizer_kwargs)
        pm.reduce_sum(grad, bias_grad)
        OPTIMIZERS[optimizer](bias, bias_grad, **optimizer_kwargs)
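The grad_input_padding computation above, pulled out as a standalone sketch; it assumes a square kernel, as the assert in define_graph enforces:

def grad_input_padding(inp_hw, grad_hw, k, stride, pad, dilation=1):
    # Smallest input each dim could have produced: (g - 1)*stride - 2*pad + (k - 1)*dilation + 1;
    # the leftover per-dim difference becomes the conv-transpose output padding.
    min_sizes = [(g - 1) * stride - 2 * pad + (k - 1) * dilation + 1
                 for g in grad_hw]
    return tuple(i - m for i, m in zip(inp_hw, min_sizes))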
Example #14
def test_flatten():
    shape = (2, 3, 4, 5)
    a = np.random.random_sample(shape).astype(np.float32)
    for i in range(len(shape)):
        with pm.Node(name="flatten_op") as graph:
            x = pm.state("x", shape=shape)
            x_us = pm.flatten(x, axis=i, name="res")

        new_shape = (1, -1) if i == 0 else (np.prod(shape[0:i]).astype(int), -1)
        b = np.reshape(a, new_shape)
        pm_b = graph("res", {"x": a})
        np.testing.assert_allclose(pm_b, b)
Example #15
def test_squeeze():
    with pm.Node(name="indexop") as graph:
        m = pm.parameter(name="m")
        n = pm.parameter(name="n")
        x = pm.state("x", shape=(m, n))
        x_us = pm.squeeze(x, axis=None, name="res")
    m_ = 5
    n_ = 1
    x_ = np.random.randint(0, 10, (m_, n_))
    input_info = {"m": m_, "n": n_, "x": x_}
    res = graph("res", input_info)

    np.testing.assert_allclose(res, np.squeeze(x_, axis=1))
Example #16
 def populate_state(self, node):
     if node.shape != pm.DEFAULT_SHAPES[0]:
         indices = list(product(*tuple([np.arange(i) for i in node.shape])))
         for i in indices:
             if node.init_value is not None:
                 init_val = node.init_value[i]
             else:
                 init_val = None
             x = pm.state(graph=node,
                          init_value=init_val,
                          name=f"{node.name}{i}",
                          root_name=node.name,
                          shape=(1, ))
             self.stored_objects[id(x)] = x
Example #17
def create_svm_wifi(features, locations, lr=0.0001, deltav=1, train_size=7703):
    with pm.Node(name="svm_wifi") as graph:
        learning_rate = pm.parameter("learning_rate", default=lr)
        delta = pm.parameter("delta", default=deltav)
        n_features = pm.parameter("n_features", default=features)
        n_locations = pm.parameter("n_locations", default=locations)
        x_train = pm.input("x_train", shape=(n_features, ))
        y_train = pm.input("y_train", shape=(n_locations, ))
        y_train_inv = pm.input("y_train_inv", shape=(n_locations, ))
        weights = pm.state("weights", shape=(n_features, n_locations))

        i = pm.index(0, n_features - 1, name="i")
        j = pm.index(0, n_locations - 1, name="j")

        scores = pm.sum([i], (weights[i, j] * x_train[i]), name="scores")
        correct_class_score = pm.sum([j], (scores[j] * y_train[j]),
                                     name="correct_class_score")

        h = ((scores[j] - correct_class_score + delta).set_name("h") > 0)

        # margin = (pm.cast(np.float32, h[j]) * y_train_inv[j]).set_name("margin")
        margin = (h[j] * y_train_inv[j]).set_name("margin")
        valid_margin_count = pm.sum([j], margin[j], name="valid_margin_count")
        partial = (y_train[j] * valid_margin_count).set_name("partial")
        updated_margin = (margin[j] - partial[j]).set_name("updated_margin")
        dW = (x_train[i] * updated_margin[j]).set_name("dW")
        weights[i, j] = (weights[i, j] -
                         learning_rate * dW[i, j]).set_name("weights_update")

    shape_dict = {"n_features": features, "n_locations": locations}
    input_info, keys, out_info = svm_wifi_datagen(features,
                                                  locations,
                                                  lr,
                                                  deltav,
                                                  lowered=True)

    cwd = Path(f"{__file__}").parent
    full_path = f"{cwd}/outputs"
    tabla_path = f"{full_path}/{graph.name}_{locations}_{features}_tabla.json"

    tabla_ir, tabla_graph = pm.generate_tabla(graph,
                                              shape_dict,
                                              tabla_path,
                                              context_dict=input_info,
                                              add_kwargs=True)
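For reference, the index expressions above encode one SGD step of the multiclass hinge loss; the same update in plain NumPy (a sketch mirroring the shapes declared in the graph):

import numpy as np

def svm_wifi_step_ref(weights, x_train, y_train, y_train_inv, lr, delta):
    # One multiclass-hinge SGD step, mirroring the graph above.
    scores = weights.T @ x_train                  # scores[j] = sum_i w[i, j] * x[i]
    correct = float(scores @ y_train)             # correct_class_score
    margin = ((scores - correct + delta) > 0) * y_train_inv
    margin = margin - y_train * margin.sum()      # updated_margin
    return weights - lr * np.outer(x_train, margin)   # weights_update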
Example #18
def test_sigmoid(m_):

    with pm.Node(name="logistic1") as graph:
        m = pm.parameter(name="m")
        n = pm.parameter(name="n")
        x = pm.input("x", shape=(m))
        w = pm.state("w", shape=(m))
        i = pm.index(0, m - 1, name="i")
        o = pm.sigmoid(pm.sum([i], w[i] * x[i]), name="out")
    x_ = np.random.randint(0, 10, m_)
    w_ = np.random.randint(0, 10, m_)
    input_dict = {"x": x_, "w": w_}
    np_res = int(sigmoid(np.sum(x_ * w_)))
    shape_dict = {"m": m_}

    coarse_eval = graph("out", x=x_, w=w_)
    np.testing.assert_allclose(np_res, coarse_eval)
    lowered = set_shape_and_lower(graph, shape_dict)
Example #19
def test_matmul(in_shape, w_shape):

    x = np.random.randint(0, 30, np.prod(in_shape)).reshape(in_shape)
    w = np.random.randint(0, 30, np.prod(w_shape)).reshape(w_shape)
    if in_shape[-1] == w_shape[-1]:
        o_np = x @ w.T
    else:
        assert in_shape[-1] == w_shape[0]
        o_np = x @ w
    with pm.Node(name="mmul") as graph:
        x_pm = pm.input(name="x", shape=in_shape)
        w_pm = pm.state(name="w", shape=w_shape)
        o_pm = pm.output(name="o", shape=o_np.shape)
        pm.matmul(x_pm, w_pm, o_pm)


    in_dict = {"x": x, "w": w}
    res = graph("o", in_dict)
    np.testing.assert_allclose(o_np, res)
Example #20
def test_loss(shape):
    inp = np.random.uniform(-15, 15, np.prod(shape)).reshape(shape)
    tgt = np.random.randint(0, 15, np.prod(shape[0]))

    torch_res = F.cross_entropy(torch.from_numpy(inp), torch.from_numpy(tgt))
    info = {
        'data': inp,
        'tgt': tgt,
    }
    np_res = torch_ce_loss(inp, tgt)
    np.testing.assert_allclose(np_res, torch_res.numpy())
    x = pm.input(name="data", shape=shape)
    tgt_ = pm.state(name="tgt", shape=(shape[0], ))

    loss = pm.output(name="loss")

    graph = pm.cross_entropy_loss(x, tgt_, loss)
    tres = graph("loss", info)

    np.testing.assert_allclose(tres, np_res)
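A plausible definition of the torch_ce_loss reference used above, assuming it matches F.cross_entropy's default mean reduction:

import numpy as np

def torch_ce_loss(inp, tgt):
    # Assumed reference: numerically stable log-softmax over axis 1,
    # then mean negative log-likelihood (F.cross_entropy's default).
    shifted = inp - inp.max(axis=1, keepdims=True)
    log_probs = shifted - np.log(np.exp(shifted).sum(axis=1, keepdims=True))
    return -log_probs[np.arange(tgt.shape[0]), tgt].mean()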
Example #21
def test_second():
    test_a = np.array([1, 2, 3, 4])
    test_b = np.array([5, 6, 7, 8])
    with pm.Node(name="main") as graph:
        a = pm.parameter(default=6, name="a")
        b = pm.parameter(default=5, name="b")
        a = (a + b).set_name("a_mul_b")
        with pm.Node(name="graph2") as graph2:
            n = pm.placeholder("n")
            b = pm.placeholder("b")
            e = pm.parameter(default=6, name="e")
            l = pm.state("test", shape=(n, b))
            i = pm.index(0, graph2["n"] - 1)
            j = pm.index(0, graph2["b"] - 1)
            lij = pm.var_index(l, [i, j], "lij")

            x = (l * e).set_name("placeholdermult")

        _ = graph2("test", {l: np.arange(16).reshape((-1, 4))})
        _ = graph2("lij", {l: np.arange(16).reshape((-1, 4))})
        _ = graph2("placeholdermult", {l: np.arange(16).reshape((-1, 4))})
Example #22
def test_lower_group_op():
    with pm.Node(name="linear_reg1") as graph:
        m = pm.parameter(name="m")
        x = pm.input("x", shape=(m))
        y = pm.input("y")
        w = pm.state("w", shape=(m))
        i = pm.index(0, m - 1, name="i")
        h = pm.sum([i], w[i] * x[i], name="h")
    m_ = 3
    n_ = 3
    x_ = np.random.randint(0, 10, m_)
    w_ = np.random.randint(0, 10, (m_))
    np_result = np.sum(x_ * w_)
    np.testing.assert_allclose(graph("h", {"w": w_, "x": x_}), np_result)
    np.testing.assert_allclose(graph("h", w=w_, x=x_), np_result)
    shape_pass = NormalizeGraph({"m": m_, "n": n_})
    graph_shapes = shape_pass(graph)
    shape_res = graph_shapes("h", x=x_, w=w_)
    np.testing.assert_allclose(shape_res, np_result)

    lower_pass = Lower({})
    lowered_graph = lower_pass(graph_shapes)
    input_info = {f"w/w({i},)": w_[i] for i in range(len(w_))}
    input_info.update({f"x/x({i},)": x_[i] for i in range(len(x_))})
    fine_grained_eval = lowered_graph("h/h(4,)", input_info)
    assert fine_grained_eval == np_result

    pb_path = f"{OUTPATH}/linear_reg1.srdfg"

    pm.pb_store(lowered_graph, OUTPATH)
    loaded_node = pm.pb_load(pb_path)
    input_info = {f"w/w({i},)": w_[i] for i in range(len(w_))}
    input_info.update({f"x/x({i},)": x_[i] for i in range(len(x_))})

    loaded_res = loaded_node("h/h(4,)", input_info)

    assert loaded_node.func_hash() == lowered_graph.func_hash()
    assert loaded_res == np_result
Example #23
def test_load_nested_linear_regressor(m_):
    shape_dict = {"m": m_}
    with pm.Node(name="nested_linear") as graph:
        m = pm.parameter(name="m")
        mu = pm.parameter(name="mu", default=1.0)
        x = pm.input("x", shape=(m))
        y = pm.input("y")
        w = pm.state("w", shape=(m))
        pm.linear_regressor_train(x, w, y, mu, m, name="linear_regressor")
        j = pm.index(0, m-1, name="j")
        tw = (w[j] - 4).set_name("tw")

    test_graph, input_info, out_info, keys = linear(m=m_, coarse=True)
    shape_val_pass = pm.NormalizeGraph(shape_dict)
    new_graph = shape_val_pass(graph)
    test_res = new_graph("tw", input_info)
    np.testing.assert_allclose(test_res, (out_info["w"] - 4))

    ref_graph, input_info, new_out_info, keys = linear(m=m_)
    flatten_pass = pm.Lower({})
    keys = [f"tw/tw({i},)" for i in range(m_)]

    flattened_g = flatten_pass(new_graph)
    all_vals = flattened_g(keys, input_info)
Example #24
def lenet(lenet_type="lenet5", coarse=True, debug=False):

    with pm.Node(name="lenet") as graph:
        n = pm.parameter(name="n")
        c = pm.parameter(name="ic")
        ih = pm.parameter(name="ih")
        iw = pm.parameter(name="iw")
        nf1 = pm.parameter(name="nf1")
        kh1 = pm.parameter(name="kh1")
        kw1 = pm.parameter(name="kw1")
        data = pm.input(name="data", shape=(n, c, ih, iw))
        w1 = pm.state(name="w1", shape=(nf1, c, kh1, kw1))
        b1 = pm.state(name="b1", shape=(nf1))

        s1 = pm.parameter(name="s1")
        p1 = pm.parameter(name="p1")
        c1 = pm.output(name="c1", shape=(n, nf1, 28, 28))
        a1 = pm.output(name="a1", shape=(n, nf1, 28, 28))
        l1 = pm.output(name="l1", shape=(n, nf1, 14, 14))

        pm.conv_bias(data, w1, b1, c1, s1, p1)
        pm.elem_tanh(c1, a1, shape=a1.shape)
        pm.avg_pool2d(a1, l1, 2, 2, 2, 0)

        nf2 = pm.parameter(name="nf2")
        kh2 = pm.parameter(name="kh2")
        kw2 = pm.parameter(name="kw2")
        w2 = pm.state(name="w2", shape=(nf2, nf1, kh2, kw2))

        b2 = pm.state(name="b2", shape=(nf2))
        s2 = pm.parameter(name="s2")
        p2 = pm.parameter(name="p2")
        c2 = pm.output(name="c2", shape=(n, nf2, 10, 10))
        a2 = pm.output(name="a2", shape=(n, nf2, 10, 10))
        l2 = pm.output(name="l2", shape=(n, nf2, 5, 5))

        pm.conv_bias(l1, w2, b2, c2, s2, p2)
        pm.elem_tanh(c2, a2, shape=a2.shape)
        pm.avg_pool2d(a2, l2, 2, 2, 2, 0)

        nf3 = pm.parameter(name="nf3")
        kh3 = pm.parameter(name="kh3")
        kw3 = pm.parameter(name="kw3")
        w3 = pm.state(name="w3", shape=(nf3, nf2, kh3, kw3))
        b3 = pm.state(name="b3", shape=(nf3))
        s3 = pm.parameter(name="s3")
        p3 = pm.parameter(name="p3")
        c3 = pm.output(name="c3", shape=(n, nf3, 1, 1))
        a3 = pm.output(name="a3", shape=(n, nf3, 1, 1))

        pm.conv_bias(l2, w3, b3, c3, s3, p3)
        pm.elem_tanh(c3, a3, shape=a3.shape)

        f4 = pm.output(name="f4", shape=(n, nf3))
        pm.coarse_flatten(a3, f4, axis=1, shape=f4.shape)

        m5 = pm.parameter(name="m5")
        n5 = pm.parameter(name="n5")
        f5 = pm.output(name="f5", shape=(n, m5))
        w5 = pm.state(name="w5", shape=(m5, n5))
        # w5 = pm.state(name="w5", shape=(n5, m5))
        a6 = pm.output(name="a5", shape=(n, m5))
        b5 = pm.state(name="b5", shape=(n5, ))
        pm.gemm(f4,
                w5,
                b5,
                f5,
                shape=f5.shape,
                alpha=1.0,
                beta=0.0,
                transA=False,
                transB=True)
        pm.elem_tanh(f5, a6, shape=a6.shape)

        m7 = pm.parameter(name="m7")
        n7 = pm.parameter(name="n7")
        f7 = pm.output(name="f7", shape=(n, n7))
        w7 = pm.state(name="w7", shape=(m7, n7))
        # w7 = pm.state(name="w7", shape=(n7, m7))
        b7 = pm.state(name="b7", shape=(n7, ))

        pm.gemm(a6,
                w7,
                b7,
                f7,
                shape=f7.shape,
                alpha=1.0,
                beta=0.0,
                transA=False,
                transB=False)
        out = pm.output(name="sm")
        pm.softmax(f7, out, axis=1)

    if coarse:
        in_info, keys, out_info = np_lenetv2()
        return graph, in_info, out_info, keys
    else:

        shape_dict = {
            "n": 1,
            "ic": 1,
            "ih": 32,
            "iw": 32,
            "nf1": 6,
            "kh1": 5,
            "kw1": 5,
            "s1": 1,
            "p1": 0,
            "nf2": 16,
            "kh2": 5,
            "kw2": 5,
            "s2": 1,
            "p2": 0,
            "nf3": 120,
            "kh3": 5,
            "kw3": 5,
            "s3": 1,
            "p3": 0,
            "m5": 120,
            "n5": 84,
            "m7": 84,
            "n7": 10
        }
        shape_val_pass = pm.NormalizeGraph(shape_dict, debug=debug)
        new_graph = shape_val_pass(graph)
        in_info, keys, out_info = np_lenetv2(lowered=True)
        return new_graph, in_info, out_info, keys
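A quick check of the hard-coded LeNet-5 spatial sizes above (a 5x5 conv with stride 1 and no padding shrinks each side by 4; 2x2 average pooling halves it):

# conv5 (s=1, p=0): o = i - 4;  avg_pool2d (2x2, s=2): o = i // 2
assert 32 - 4 == 28 and 28 // 2 == 14    # c1 / l1
assert 14 - 4 == 10 and 10 // 2 == 5     # c2 / l2
assert 5 - 4 == 1                        # c3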
Example #25
def test_flatten_reco():
    with pm.Node(name="recommender") as graph:
        m = pm.parameter("m")
        n = pm.parameter("n")
        k = pm.parameter("k")
        x1 = pm.input("x1", shape=(k, ))
        x2 = pm.input("x2", shape=(k, ))

        r1 = pm.input("r1", shape=(m, ))
        y1 = pm.input("y1", shape=(m, ))

        r2 = pm.input("r2", shape=(n, ))
        y2 = pm.input("y2", shape=(n, ))

        w1 = pm.state("w1", shape=(m, k))
        w2 = pm.state("w2", shape=(n, k))
        i = pm.index(0, m - 1, name="i")
        j = pm.index(0, n - 1, name="j")
        l = pm.index(0, k - 1, name="l")
        h1_sum = pm.sum([l], (w1[i, l] *
                              x2[l]).set_name("w1*x2")).set_name("h1_sum")
        h1 = (h1_sum[i] * r1[i]).set_name("h1")
        h2_sum = pm.sum([l], (w2[j, l] *
                              x1[l]).set_name("w2*x1")).set_name("h2_sum")
        h2 = (h2_sum[j] * r2[j]).set_name("h2")

        d1 = (h1[i] - y1[i]).set_name("d1")
        d2 = (h2[j] - y2[j]).set_name("d2")
        g1 = (d1[i] * x2[l]).set_name("g1")
        g2 = (d2[j] * x1[l]).set_name("g2")
        w1[i, l] = (w1[i, l] - g1[i, l])
        w2[j, l] = (w2[j, l] - g2[j, l])
    m_ = 3
    n_ = 3
    k_ = 2
    input_info = {}
    input_info["m"] = m_
    input_info["n"] = n_
    input_info["k"] = k_
    input_info["w1"] = np.random.randint(1, 6, m_ * k_).reshape(m_, k_)
    input_info["w2"] = np.random.randint(1, 6, n_ * k_).reshape(n_, k_)
    input_info["x1"] = np.random.randint(1, 6, k_)
    input_info["x2"] = np.random.randint(1, 6, k_)

    input_info["r1"] = np.random.randint(0, 2, m_)
    input_info["y1"] = np.random.randint(0, 6, m_)
    input_info["r2"] = np.random.randint(0, 2, n_)
    input_info["y2"] = np.random.randint(0, 6, n_)
    out_info = numpy_reco(input_info)
    shape_val_pass = NormalizeGraph({"m": m_, "n": n_, "k": k_})
    flatten_pass = Lower({})

    new_graph = shape_val_pass(graph)
    test_res = new_graph(["w1", "w2"], input_info)
    np.testing.assert_allclose(test_res[0], out_info["w1"])
    np.testing.assert_allclose(test_res[1], out_info["w2"])
    flattened_g = flatten_pass(new_graph)
    input_info = {}
    input_info["m"] = m_
    input_info["n"] = n_
    input_info["k"] = k_
    input_info["w1"] = np.random.randint(1, 6, m_ * k_).reshape(m_, k_)
    input_info["w2"] = np.random.randint(1, 6, n_ * k_).reshape(n_, k_)
    input_info["x1"] = np.random.randint(1, 6, k_)
    input_info["x2"] = np.random.randint(1, 6, k_)

    input_info["r1"] = np.random.randint(0, 2, m_)
    input_info["y1"] = np.random.randint(0, 6, m_)
    input_info["r2"] = np.random.randint(0, 2, n_)
    input_info["y2"] = np.random.randint(0, 6, n_)
    new_out_info = numpy_reco(input_info)

    pairs_w1 = list(
        product(*tuple([np.arange(i) for i in input_info["w1"].shape])))
    pairs_w2 = list(
        product(*tuple([np.arange(i) for i in input_info["w2"].shape])))
    w1_init = input_info["w1"]
    for p in pairs_w1:
        input_info[f"w1/w1({p[0]}, {p[1]})"] = input_info["w1"][p]
    input_info.pop("w1")
    w2_init = input_info["w2"]

    for p in pairs_w2:
        input_info[f"w2/w2({p[0]}, {p[1]})"] = input_info["w2"][p]
    input_info.pop("w2")

    for p in range(k_):
        input_info[f"x1/x1({p},)"] = input_info["x1"][p]
        input_info[f"x2/x2({p},)"] = input_info["x2"][p]
    input_info.pop("x1")
    input_info.pop("x2")

    for p in range(m_):
        input_info[f"r1/r1({p},)"] = input_info["r1"][p]
        input_info[f"y1/y1({p},)"] = input_info["y1"][p]
    input_info.pop("r1")
    input_info.pop("y1")

    for p in range(n_):
        input_info[f"r2/r2({p},)"] = input_info["r2"][p]
        input_info[f"y2/y2({p},)"] = input_info["y2"][p]
    input_info.pop("r2")
    input_info.pop("y2")

    w1_keys = [f"w1/w1({p[0]}, {p[1]})" for p in pairs_w1]
    w2_keys = [f"w2/w2({p[0]}, {p[1]})" for p in pairs_w2]

    all_vals = flattened_g(w1_keys + w2_keys, input_info)
    out1 = np.asarray(list(all_vals[0:6])).reshape(new_out_info["w1"].shape)
    out2 = np.asarray(list(all_vals[6:])).reshape(new_out_info["w2"].shape)
    np.testing.assert_allclose(new_out_info["w1"], out1)
    np.testing.assert_allclose(new_out_info["w2"], out2)
Example #26
def generate_srdfg(onnx_graph):
    graph_name = onnx_graph.name
    initializers = get_initializers(onnx_graph.initializer)
    mgdfg = pm.Node(name=graph_name)
    # TODO: This is a hotfix for identifying gradient updates, but weights should have initializers
    state_variables = get_states_by_gradient(onnx_graph)
    node_info = {}
    # TODO: If a value has an initializer, set the initializer value as the value for the node
    for o in onnx_graph.output:

        assert o.name not in node_info

        if o.name in state_variables:
            node_info[o.name] = pm.state(name=state_variables[o.name],
                                         shape=get_value_info_shape(o, mgdfg),
                                         graph=mgdfg)
            node_info[state_variables[o.name]] = node_info[o.name]
        else:
            node_info[o.name] = pm.output(name=o.name,
                                          shape=get_value_info_shape(o, mgdfg),
                                          graph=mgdfg)

    for i in onnx_graph.input:
        if i.name in state_variables.values():
            assert i.name in node_info
            continue
        assert i.name not in node_info
        if i.name in state_variables:
            node_info[i.name] = pm.state(name=state_variables[i.name],
                                         shape=get_value_info_shape(i, mgdfg),
                                         graph=mgdfg)
            node_info[state_variables[i.name]] = node_info[i.name]
        elif i.name in initializers and not itercheck(initializers[i.name]):
            node_info[i.name] = pm.parameter(name=i.name,
                                             default=initializers[i.name],
                                             graph=mgdfg)
        elif i.name in initializers:
            node_info[i.name] = pm.state(name=i.name,
                                         shape=get_value_info_shape(i, mgdfg),
                                         graph=mgdfg)
        else:
            node_info[i.name] = pm.input(name=i.name,
                                         shape=get_value_info_shape(i, mgdfg),
                                         graph=mgdfg)

    for v in onnx_graph.value_info:
        if v.name in node_info:
            continue
        elif v.name in initializers:
            node_info[v.name] = pm.variable(initializers[v.name],
                                            name=v.name,
                                            shape=get_value_info_shape(
                                                v, mgdfg),
                                            graph=mgdfg)
        else:

            node_info[v.name] = {
                "name": v.name,
                "shape": get_value_info_shape(v, mgdfg)
            }

    for k, v in initializers.items():
        if k not in node_info:
            # TODO: Need to set the value here
            node_info[k] = pm.state(name=k,
                                    shape=get_value_info_shape(v, mgdfg),
                                    graph=mgdfg)
            state_variables[k] = node_info[k]

    for k, v in mgdfg.nodes.items():
        if isinstance(v, pm.parameter) and k not in node_info:
            node_info[k] = v

    for n in onnx_graph.node:
        assert n.op_type in NODE_NAMES
        _ = convert_node(n, mgdfg, node_info, state_variables)

    return mgdfg