Example #1
def neural_mf(user_input, item_input, y_, num_users, num_items):
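    # NeuMF: one fused embedding table per side packs both the GMF factors
    # (first embed_dim columns) and the MLP part (remaining layers[0] // 2
    # columns); each lookup is then sliced into the two branches below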
    batch_size = 256
    embed_dim = 8
    layers = [64, 32, 16, 8]
    learning_rate = 0.01

    User_Embedding = init.random_normal(
        (num_users, embed_dim + layers[0] // 2),
        stddev=0.01,
        name="user_embed",
        ctx=ndarray.cpu(0))
    Item_Embedding = init.random_normal(
        (num_items, embed_dim + layers[0] // 2),
        stddev=0.01,
        name="item_embed",
        ctx=ndarray.cpu(0))
    # Alternative: separate tables per branch instead of the fused table above.
    # MF_User_Embedding = init.random_normal((num_users, embed_dim), stddev=0.01, name="mf_user_embed", ctx=ndarray.cpu(0))
    # MF_Item_Embedding = init.random_normal((num_items, embed_dim), stddev=0.01, name="mf_item_embed", ctx=ndarray.cpu(0))
    # MLP_User_Embedding = init.random_normal((num_users, layers[0] // 2), stddev=0.01, name="mlp_user_embed", ctx=ndarray.cpu(0))
    # MLP_Item_Embedding = init.random_normal((num_items, layers[0] // 2), stddev=0.01, name="mlp_item_embed", ctx=ndarray.cpu(0))

    user_latent = ad.embedding_lookup_op(User_Embedding,
                                         user_input,
                                         ctx=ndarray.cpu(0))
    item_latent = ad.embedding_lookup_op(Item_Embedding,
                                         item_input,
                                         ctx=ndarray.cpu(0))

    mf_user_latent = ad.slice_op(user_latent, (0, 0), (-1, embed_dim))
    mlp_user_latent = ad.slice_op(user_latent, (0, embed_dim), (-1, -1))
    mf_item_latent = ad.slice_op(item_latent, (0, 0), (-1, embed_dim))
    mlp_item_latent = ad.slice_op(item_latent, (0, embed_dim), (-1, -1))

    # mf_user_latent = ad.embedding_lookup_op(MF_User_Embedding, user_input, ctx=ndarray.cpu(0))
    # mf_item_latent = ad.embedding_lookup_op(MF_Item_Embedding, item_input, ctx=ndarray.cpu(0))
    # mlp_user_latent = ad.embedding_lookup_op(MLP_User_Embedding, user_input, ctx=ndarray.cpu(0))
    # mlp_item_latent = ad.embedding_lookup_op(MLP_Item_Embedding, item_input, ctx=ndarray.cpu(0))

    W1 = init.random_normal((layers[0], layers[1]), stddev=0.1, name='W1')
    W2 = init.random_normal((layers[1], layers[2]), stddev=0.1, name='W2')
    W3 = init.random_normal((layers[2], layers[3]), stddev=0.1, name='W3')
    W4 = init.random_normal((embed_dim + layers[3], 1), stddev=0.1, name='W4')

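    # GMF branch: element-wise product of the user/item factors;
    # MLP branch: concatenated halves pushed through three ReLU layers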
    mf_vector = ad.mul_op(mf_user_latent, mf_item_latent)
    mlp_vector = ad.concat_op(mlp_user_latent, mlp_item_latent, axis=1)
    fc1 = ad.matmul_op(mlp_vector, W1)
    relu1 = ad.relu_op(fc1)
    fc2 = ad.matmul_op(relu1, W2)
    relu2 = ad.relu_op(fc2)
    fc3 = ad.matmul_op(relu2, W3)
    relu3 = ad.relu_op(fc3)
    concat_vector = ad.concat_op(mf_vector, relu3, axis=1)
    y = ad.matmul_op(concat_vector, W4)
    y = ad.sigmoid_op(y)
    loss = ad.binarycrossentropy_op(y, y_)
    loss = ad.reduce_mean_op(loss, [0])
    opt = optimizer.SGDOptimizer(learning_rate=learning_rate)
    # opt = optimizer.AdamOptimizer(learning_rate=learning_rate)
    train_op = opt.minimize(loss)
    return loss, y, train_op
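A minimal driver sketch for this model, assuming the Executor/feed_dict API shown in Example #8 and the module aliases (ad, init, ndarray, optimizer) used throughout these examples; the placeholder names, batch arrays, and user/item counts below are hypothetical:

user_input = ad.Variable(name="user_input")
item_input = ad.Variable(name="item_input")
y_ = ad.Variable(name="y_")
loss, y, train_op = neural_mf(user_input, item_input, y_,
                              num_users=6040, num_items=3706)
executor = ad.Executor([loss, y, train_op], ctx=ndarray.cpu(0))
# users_batch / items_batch / labels_batch: one minibatch as numpy arrays
# (hypothetical names, not defined in the examples above)
loss_val, y_val, _ = executor.run(feed_dict={user_input: users_batch,
                                             item_input: items_batch,
                                             y_: labels_batch})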
Example #2
def wdl_adult(X_deep, X_wide, y_):
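    # Wide & Deep on the Adult dataset: the deep part embeds 8 categorical
    # fields (8 dims each) and appends 4 continuous columns, giving
    # dim_deep = 8 * 8 + 4 = 68; the wide part concatenates the 809-dim wide
    # features with the 20-dim deep output before the final linear layer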
    lr = 5 / 128
    dim_wide = 809
    dim_deep = 68

    W = init.random_normal([dim_wide+20, 2], stddev=0.1, name="W")
    W1 = init.random_normal([dim_deep, 50], stddev=0.1, name="W1")
    b1 = init.random_normal([50], stddev=0.1, name="b1")
    W2 = init.random_normal([50, 20], stddev=0.1, name="W2")
    b2 = init.random_normal([20], stddev=0.1, name="b2")

    # deep
    Embedding = []
    X_deep_input = None

    for i in range(8):
        Embedding_name = "Embedding_deep_" + str(i)
        Embedding.append(init.random_normal([50, 8], stddev=0.1, name=Embedding_name))
        now = ad.embedding_lookup_op(Embedding[i], X_deep[i])
        now = ad.array_reshape_op(now, (-1, 8))
        if X_deep_input is None:
            X_deep_input = now
        else:
            X_deep_input = ad.concat_op(X_deep_input, now, 1)

    for i in range(4):
        now = ad.array_reshape_op(X_deep[i + 8], (-1, 1))
        X_deep_input = ad.concat_op(X_deep_input, now, 1)

    mat1 = ad.matmul_op(X_deep_input, W1)
    add1 = mat1 + ad.broadcastto_op(b1, mat1)
    relu1 = ad.relu_op(add1)
    dropout1 = relu1  # ad.dropout_op(relu1, 0.5)
    mat2 = ad.matmul_op(dropout1, W2)
    add2 = mat2 + ad.broadcastto_op(b2, mat2)
    relu2 = ad.relu_op(add2)
    dropout2 = relu2  # ad.dropout_op(relu2, 0.5)
    dmodel = dropout2

    # wide
    wmodel = ad.concat_op(X_wide, dmodel, 1)
    wmodel = ad.matmul_op(wmodel, W)

    prediction = wmodel
    loss = ad.softmaxcrossentropy_op(prediction, y_)
    loss = ad.reduce_mean_op(loss, [0])

    opt = optimizer.SGDOptimizer(learning_rate=lr)
    train_op = opt.minimize(loss)

    return loss, prediction, y_, train_op
Example #3
def dc_criteo(dense_input, sparse_input, y_):
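    # Deep Crossing-style model on Criteo: 26 sparse fields share one
    # embedding table; the 26 * 8 = 208 embedded dims plus 13 dense features
    # give the 221-dim input to a stack of 5 residual layers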

    feature_dimension = 33762577
    embedding_size = 8
    learning_rate = 0.001

    Embedding = init.random_normal([feature_dimension, embedding_size],
                                   stddev=0.01,
                                   name="snd_order_embedding")
    sparse_input = ad.embedding_lookup_op(Embedding, sparse_input)
    sparse_input = ad.array_reshape_op(sparse_input, (-1, 26 * embedding_size))

    ## dc_model
    x = ad.concat_op(sparse_input, dense_input, axis=1)

    input_dim = 26 * embedding_size + 13
    hidden_dim = input_dim
    residual_out = build_residual_layers(x,
                                         input_dim,
                                         hidden_dim,
                                         num_layers=5)

    W4 = init.random_normal([26 * embedding_size + 13, 1],
                            stddev=0.1,
                            name="W4")
    y = ad.matmul_op(residual_out, W4)
    y = ad.sigmoid_op(y)

    loss = ad.binarycrossentropy_op(y, y_)
    loss = ad.reduce_mean_op(loss, [0])
    opt = optimizer.SGDOptimizer(learning_rate=learning_rate)
    train_op = opt.minimize(loss)

    return loss, y, y_, train_op
Example #4
    def __call__(self, input, subgraph_size: list, use_sparse: list):
        """
        Build the computation graph and return the output node:
        split, in-graph message passing, inter-graph message passing, concat.
        """
        x = ad.matmul_op(input, self.weight)
        msg = x + ad.broadcastto_op(self.bias, x)
        output_nodes = []
        msgs = []
        split_at = 0
        # message passing within each subgraph
        for i in range(self.npart):
            sliced_msg = ad.slice_op(node=msg,
                                     begin=(split_at, 0),
                                     size=(subgraph_size[i],
                                           self.out_features))
            split_at += subgraph_size[i]
            msgs.append(sliced_msg)
            if use_sparse[i]:
                output = ad.csrmm_op(self.mp[i][i], sliced_msg)
            else:
                output = ad.matmul_op(self.mp[i][i], sliced_msg)
            output_nodes.append(output)
        # message passing between subgraphs
        for i in range(self.npart):
            for j in range(self.npart):
                if i == j:
                    continue
                output_nodes[j] = output_nodes[j] + ad.csrmm_op(
                    self.mp[i][j], msgs[i])
        # concat the per-subgraph outputs back together
        result = output_nodes[0]
        for i in range(1, self.npart):
            result = ad.concat_op(result, output_nodes[i])
        return result
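Here self.mp[i][j] is read as the adjacency block carrying messages from subgraph i into subgraph j, presumably stored sparse, since the off-diagonal blocks always go through ad.csrmm_op; only the diagonal blocks may be dense, gated by use_sparse[i].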
Example #5
    def version_1(cls, node, tensor_dict, **kwargs):
        a = tensor_dict[node.input_tensor_names[0]]
        b = tensor_dict[node.input_tensor_names[1]]
        axis = node.get_attr_value('axis')

        y = ad.concat_op(a, b, axis=axis)
        tensor_dict[node.output_tensor_names[0]] = y
        return y
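This reads as an ONNX-style importer handler: it maps a two-input Concat graph node onto ad.concat_op, taking the concatenation axis from the node's axis attribute and registering the result under the node's output name.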
Example #6
File: RNN.py Project: sj1104/Het
def rnn(x, y_):
    '''
    RNN model, for MNIST dataset.

    Parameters:
        x: Variable(hetu.gpu_ops.Node.Node), shape (N, dims)
        y_: Variable(hetu.gpu_ops.Node.Node), shape (N, num_classes)
    Return:
        loss: Variable(hetu.gpu_ops.Node.Node), shape (1,)
        y: Variable(hetu.gpu_ops.Node.Node), shape (N, num_classes)
    '''

    print("Building RNN model...")
    diminput = 28
    dimhidden = 128
    dimoutput = 10
    nsteps = 28

    weight1 = init.random_normal(shape=(diminput, dimhidden),
                                 stddev=0.1,
                                 name='rnn_weight1')
    bias1 = init.random_normal(shape=(dimhidden, ),
                               stddev=0.1,
                               name='rnn_bias1')
    weight2 = init.random_normal(shape=(dimhidden + dimhidden, dimhidden),
                                 stddev=0.1,
                                 name='rnn_weight2')
    bias2 = init.random_normal(shape=(dimhidden, ),
                               stddev=0.1,
                               name='rnn_bias2')
    weight3 = init.random_normal(shape=(dimhidden, dimoutput),
                                 stddev=0.1,
                                 name='rnn_weight3')
    bias3 = init.random_normal(shape=(dimoutput, ),
                               stddev=0.1,
                               name='rnn_bias3')
    last_state = ad.Variable(value=np.zeros((1, )).astype(np.float32),
                             name='initial_state',
                             trainable=False)

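    # unroll nsteps = 28 time steps; each step consumes one 28-pixel row of
    # the flattened MNIST image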
    for i in range(nsteps):
        cur_x = ad.slice_op(x, (0, i * diminput), (-1, diminput))
        h = ad.matmul_op(cur_x, weight1)
        h = h + ad.broadcastto_op(bias1, h)

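        # first step: broadcast the scalar zero state up to the hidden
        # shape of h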
        if i == 0:
            last_state = ad.broadcastto_op(last_state, h)
        s = ad.concat_op(h, last_state, axis=1)
        s = ad.matmul_op(s, weight2)
        s = s + ad.broadcastto_op(bias2, s)
        last_state = ad.relu_op(s)

    final_state = last_state
    x = ad.matmul_op(final_state, weight3)
    y = x + ad.broadcastto_op(bias3, x)
    loss = ad.softmaxcrossentropy_op(y, y_)
    loss = ad.reduce_mean_op(loss, [0])
    return loss, y
Example #7
def dcn_criteo(dense_input, sparse_input, y_):
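    # DCN: a 3-layer cross network and a 3-layer DNN run in parallel on the
    # same (26 * 128 + 13)-dim input; their outputs are concatenated for the
    # final sigmoid logit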
    feature_dimension = 33762577
    embedding_size = 128
    learning_rate = 0.003

    Embedding = init.random_normal([feature_dimension, embedding_size],
                                   stddev=0.01,
                                   name="snd_order_embedding",
                                   ctx=ndarray.cpu(0))
    sparse_input = ad.embedding_lookup_op(Embedding,
                                          sparse_input,
                                          ctx=ndarray.cpu(0))
    sparse_input = ad.array_reshape_op(sparse_input, (-1, 26 * embedding_size))
    x = ad.concat_op(sparse_input, dense_input, axis=1)
    # Cross Network
    cross_output = build_cross_layer(x, num_layers=3)

    # DNN
    flatten = x
    W1 = init.random_normal([26 * embedding_size + 13, 256],
                            stddev=0.01,
                            name="W1")
    W2 = init.random_normal([256, 256], stddev=0.01, name="W2")
    W3 = init.random_normal([256, 256], stddev=0.01, name="W3")

    W4 = init.random_normal([256 + 26 * embedding_size + 13, 1],
                            stddev=0.01,
                            name="W4")

    fc1 = ad.matmul_op(flatten, W1)
    relu1 = ad.relu_op(fc1)
    fc2 = ad.matmul_op(relu1, W2)
    relu2 = ad.relu_op(fc2)
    y3 = ad.matmul_op(relu2, W3)

    y4 = ad.concat_op(cross_output, y3, axis=1)
    y = ad.matmul_op(y4, W4)
    y = ad.sigmoid_op(y)

    loss = ad.binarycrossentropy_op(y, y_)
    loss = ad.reduce_mean_op(loss, [0])
    opt = optimizer.SGDOptimizer(learning_rate=learning_rate)
    train_op = opt.minimize(loss)

    return loss, y, y_, train_op
Example #8
def test_Concat():
    A = ad.Variable(name="A")
    B = ad.Variable(name="B")
    y = ad.concat_op(A, B, axis=1)
    executor = ad.Executor([y], ctx=ctx)
    A_val = rand.normal(scale=0.1, size=(2, 3)).astype(np.float32)
    B_val = rand.normal(scale=0.1, size=(2, 3)).astype(np.float32)
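    # two (2, 3) inputs concatenated along axis 1 -> output shape (2, 6)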

    res = executor.run(feed_dict={A: A_val, B: B_val})
    Check(executor, res, [A, B], [y], [A_val, B_val])
    print(sys._getframe().f_code.co_name, 'pass!')
Example #9
def wdl_criteo(dense, sparse, labels):
    batch_size = 128
    feature_dimension = 33762577
    embedding_size = 128
    learning_rate = 0.01
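    # a tuple input means (train, validate) splits; otherwise a single
    # training split is loaded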
    if isinstance(dense, tuple):
        dense_input = dl.dataloader_op([[dense[0], batch_size, 'train'],
                                        [dense[1], batch_size, 'validate']])
        sparse_input = dl.dataloader_op([[sparse[0], batch_size, 'train'],
                                         [sparse[1], batch_size, 'validate']])
        y_ = dl.dataloader_op([[labels[0], batch_size, 'train'],
                               [labels[1], batch_size, 'validate']])
    else:
        dense_input = dl.dataloader_op([[dense, batch_size, 'train']])
        sparse_input = dl.dataloader_op([[sparse, batch_size, 'train']])
        y_ = dl.dataloader_op([[labels, batch_size, 'train']])
    print("Data loaded.")
    Embedding = init.random_normal([feature_dimension, embedding_size],
                                   stddev=0.01,
                                   name="snd_order_embedding",
                                   ctx=ndarray.cpu(0))
    sparse_input = ad.embedding_lookup_op(Embedding,
                                          sparse_input,
                                          ctx=ndarray.cpu(0))
    sparse_input = ad.array_reshape_op(sparse_input, (-1, 26 * embedding_size))

    # DNN
    flatten = dense_input
    W1 = init.random_normal([13, 256], stddev=0.01, name="W1")
    W2 = init.random_normal([256, 256], stddev=0.01, name="W2")
    W3 = init.random_normal([256, 256], stddev=0.01, name="W3")

    W4 = init.random_normal([256 + 26 * embedding_size, 1],
                            stddev=0.01,
                            name="W4")

    fc1 = ad.matmul_op(flatten, W1)
    relu1 = ad.relu_op(fc1)
    fc2 = ad.matmul_op(relu1, W2)
    relu2 = ad.relu_op(fc2)
    y3 = ad.matmul_op(relu2, W3)

    y4 = ad.concat_op(sparse_input, y3, axis=1)
    y = ad.matmul_op(y4, W4)
    y = ad.sigmoid_op(y)

    loss = ad.binarycrossentropy_op(y, y_)
    loss = ad.reduce_mean_op(loss, [0])
    opt = optimizer.SGDOptimizer(learning_rate=learning_rate)
    train_op = opt.minimize(loss)

    return loss, y, y_, train_op
Example #10
def get_token_embeddings(vocab_size,
                         num_units,
                         initializer=init.xavier_normal,
                         zero_pad=True):
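    # when zero_pad is set, row 0 of the table is a frozen all-zero vector,
    # so token id 0 can serve as padding; ad.concat_op (default axis) stacks
    # it on top of the (vocab_size - 1) trainable rows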
    if zero_pad:
        embedding_part = initializer(name='embedding_table',
                                     shape=(vocab_size - 1, num_units))
        padding_zero = init.zeros(name='padding_zero',
                                  shape=(1, num_units),
                                  trainable=False)
        embeddings = ad.concat_op(padding_zero, embedding_part)
    else:
        embeddings = initializer(name='embedding_table',
                                 shape=(vocab_size, num_units))
    return embeddings
Example #11
    def __call__(self, x):
        """
            Build the computation graph, return the output node
        """
        feat = x
        if self.dropout > 0:
            x = ad.dropout_op(x, 1 - self.dropout)

        x = ad.CuSparse.csrmm_op(self.mp, x)
        x = ad.matmul_op(x, self.weight)
        x = x + ad.broadcastto_op(self.bias, x)
        if self.activation == "relu":
            x = ad.relu_op(x)
        elif self.activation is not None:
            raise NotImplementedError
        return ad.concat_op(x, ad.matmul_op(feat, self.weight2), axis=1)
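This layer aggregates neighbor features through the sparse adjacency (ad.CuSparse.csrmm_op) and concatenates a parallel dense transform of the raw input, GraphSAGE-style. Note the dropout call passes 1 - self.dropout, i.e. ad.dropout_op evidently takes a keep probability here.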
Example #12
def dc_criteo(dense, sparse, labels):

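    # same Deep Crossing-style model as Example #3, with the (train, validate)
    # dataloaders built in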
    batch_size = 128
    feature_dimension = 33762577
    embedding_size = 8
    learning_rate = 0.001
    if isinstance(dense, tuple):
        dense_input = dl.dataloader_op([[dense[0], batch_size, 'train'],
                                        [dense[1], batch_size, 'validate']])
        sparse_input = dl.dataloader_op([[sparse[0], batch_size, 'train'],
                                         [sparse[1], batch_size, 'validate']])
        y_ = dl.dataloader_op([[labels[0], batch_size, 'train'],
                               [labels[1], batch_size, 'validate']])
    else:
        dense_input = dl.dataloader_op([[dense, batch_size, 'train']])
        sparse_input = dl.dataloader_op([[sparse, batch_size, 'train']])
        y_ = dl.dataloader_op([[labels, batch_size, 'train']])
    print("Data loaded.")

    Embedding = init.random_normal([feature_dimension, embedding_size],
                                   stddev=0.01,
                                   name="snd_order_embedding")
    sparse_input = ad.embedding_lookup_op(Embedding, sparse_input)
    sparse_input = ad.array_reshape_op(sparse_input, (-1, 26 * embedding_size))

    ## dc_model
    x = ad.concat_op(sparse_input, dense_input, axis=1)

    input_dim = 26 * embedding_size + 13
    hidden_dim = input_dim
    residual_out = build_residual_layers(x,
                                         input_dim,
                                         hidden_dim,
                                         num_layers=5)

    W4 = init.random_normal([26 * embedding_size + 13, 1],
                            stddev=0.1,
                            name="W4")
    y = ad.matmul_op(residual_out, W4)
    y = ad.sigmoid_op(y)

    loss = ad.binarycrossentropy_op(y, y_)
    loss = ad.reduce_mean_op(loss, [0])
    opt = optimizer.SGDOptimizer(learning_rate=learning_rate)
    train_op = opt.minimize(loss)

    return loss, y, y_, train_op
Example #13
def wdl_adult(whatever):
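    # full Wide & Deep setup for Adult: same model as Example #2, plus data
    # loading; the effective learning rate is scaled down by the batch size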
    batch_size = 128
    lr = 5
    dim_wide = 809

    lr_ = lr / batch_size
    dim_deep = 68

    from .load_data import load_adult_data
    x_train_deep, x_train_wide, y_train, x_test_deep, x_test_wide, y_test = load_adult_data()

    W = init.random_normal([dim_wide+20, 2], stddev=0.1, name="W")
    W1 = init.random_normal([dim_deep, 50], stddev=0.1, name="W1")
    b1 = init.random_normal([50], stddev=0.1, name="b1")
    W2 = init.random_normal([50, 20], stddev=0.1, name="W2")
    b2 = init.random_normal([20], stddev=0.1, name="b2")

    X_wide = dl.dataloader_op([
        [x_train_wide, batch_size, 'train'],
        [x_test_wide, batch_size, 'validate'],
    ])
    y_ = dl.dataloader_op([
        [y_train, batch_size, 'train'],
        [y_test, batch_size, 'validate'],
    ])

    # deep
    Embedding = []
    X_deep = []
    X_deep_input = None

    for i in range(8):
        X_deep_name = "x_deep_" + str(i)
        Embedding_name = "Embedding_deep_" + str(i)
        X_deep.append(dl.dataloader_op([
            [x_train_deep[:, i], batch_size, 'train'],
            [x_test_deep[:, i], batch_size, 'validate'],
        ]))
        Embedding.append(init.random_normal([50, 8], stddev=0.1, name=Embedding_name))
        now = ad.embedding_lookup_op(Embedding[i], X_deep[i])
        now = ad.array_reshape_op(now, (-1, 8))
        if X_deep_input is None:
            X_deep_input = now
        else:
            X_deep_input = ad.concat_op(X_deep_input, now, 1)

    for i in range(4):
        X_deep_name = "x_deep_" + str(8+i)
        X_deep.append(dl.dataloader_op([
            [x_train_deep[:,8+i], batch_size, 'train'],
            [x_test_deep[:,8+i], batch_size, 'validate'],
        ]))
        now = ad.array_reshape_op(X_deep[i + 8], (batch_size, 1))
        X_deep_input = ad.concat_op(X_deep_input, now, 1)

    mat1 = ad.matmul_op(X_deep_input, W1)
    add1 = mat1 + ad.broadcastto_op(b1, mat1)
    relu1 = ad.relu_op(add1)
    dropout1 = relu1  # ad.dropout_op(relu1, 0.5)
    mat2 = ad.matmul_op(dropout1, W2)
    add2 = mat2 + ad.broadcastto_op(b2, mat2)
    relu2 = ad.relu_op(add2)
    dropout2 = relu2  # ad.dropout_op(relu2, 0.5)
    dmodel = dropout2

    # wide
    wmodel = ad.concat_op(X_wide, dmodel, 1)
    wmodel = ad.matmul_op(wmodel, W)

    prediction = wmodel
    loss = ad.softmaxcrossentropy_op(prediction, y_)
    loss = ad.reduce_mean_op(loss, [0])

    opt = optimizer.SGDOptimizer(learning_rate=lr_)
    train_op = opt.minimize(loss)

    return loss, prediction, y_, train_op