# NOTE: these models rely on a dataflow-style framework exposing the ops used
# below (embedding_lookup_op, matmul_op, ...). The import block is an
# assumption modeled on an Athena/Hetu-style layout; adjust to your tree.
from athena import gpu_ops as ad
from athena import initializers as init
from athena import ndarray
from athena import optimizer
from athena import dataloader as dl


def dc_criteo(dense_input, sparse_input, y_):
    feature_dimension = 33762577
    embedding_size = 8
    learning_rate = 0.001

    Embedding = init.random_normal([feature_dimension, embedding_size],
                                   stddev=0.01, name="snd_order_embedding")
    sparse_input = ad.embedding_lookup_op(Embedding, sparse_input)
    sparse_input = ad.array_reshape_op(sparse_input, (-1, 26 * embedding_size))

    # Deep Crossing model: concatenate the embedded sparse features with the
    # 13 dense features, then pass them through a stack of residual units.
    x = ad.concat_op(sparse_input, dense_input, axis=1)
    input_dim = 26 * embedding_size + 13
    hidden_dim = input_dim
    residual_out = build_residual_layers(x, input_dim, hidden_dim, num_layers=5)

    W4 = init.random_normal([26 * embedding_size + 13, 1], stddev=0.1, name="W4")
    y = ad.matmul_op(residual_out, W4)
    y = ad.sigmoid_op(y)

    loss = ad.binarycrossentropy_op(y, y_)
    loss = ad.reduce_mean_op(loss, [0])
    opt = optimizer.SGDOptimizer(learning_rate=learning_rate)
    train_op = opt.minimize(loss)
    return loss, y, y_, train_op
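
# build_residual_layers is referenced above but not shown here. The sketch
# below is a plausible reconstruction, NOT the original implementation: it
# assumes each residual unit computes relu(x + W2 @ relu(W1 @ x)), built from
# the same ops used elsewhere in this file (biases omitted for brevity).
def build_residual_layers(x, input_dim, hidden_dim, num_layers=3):
    for i in range(num_layers):
        W1 = init.random_normal([input_dim, hidden_dim], stddev=0.1,
                                name="residual_W1_%d" % i)
        W2 = init.random_normal([hidden_dim, input_dim], stddev=0.1,
                                name="residual_W2_%d" % i)
        fc1 = ad.relu_op(ad.matmul_op(x, W1))
        fc2 = ad.matmul_op(fc1, W2)
        x = ad.relu_op(x + fc2)  # residual connection, as in Deep Crossing
    return x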
def dfm_criteo(dense_input, sparse_input, y_):
    feature_dimension = 33762577
    embedding_size = 128
    learning_rate = 0.01

    # FM first-order part: one scalar weight per sparse feature plus a linear
    # projection of the 13 dense features.
    Embedding1 = init.random_normal([feature_dimension, 1], stddev=0.01,
                                    name="fst_order_embedding", ctx=ndarray.cpu(0))
    FM_W = init.random_normal([13, 1], stddev=0.01, name="dense_parameter")
    sparse_1dim_input = ad.embedding_lookup_op(Embedding1, sparse_input, ctx=ndarray.cpu(0))
    fm_dense_part = ad.matmul_op(dense_input, FM_W)
    fm_sparse_part = ad.reduce_sum_op(sparse_1dim_input, axes=1)
    # first-order output
    y1 = fm_dense_part + fm_sparse_part

    # FM second-order part: 0.5 * ((sum of embeddings)^2 - sum of squared
    # embeddings), the standard square-of-sum minus sum-of-squares identity.
    Embedding2 = init.random_normal([feature_dimension, embedding_size], stddev=0.01,
                                    name="snd_order_embedding", ctx=ndarray.cpu(0))
    sparse_2dim_input = ad.embedding_lookup_op(Embedding2, sparse_input, ctx=ndarray.cpu(0))
    sparse_2dim_sum = ad.reduce_sum_op(sparse_2dim_input, axes=1)
    sparse_2dim_sum_square = ad.mul_op(sparse_2dim_sum, sparse_2dim_sum)
    sparse_2dim_square = ad.mul_op(sparse_2dim_input, sparse_2dim_input)
    sparse_2dim_square_sum = ad.reduce_sum_op(sparse_2dim_square, axes=1)
    sparse_2dim = sparse_2dim_sum_square + -1 * sparse_2dim_square_sum
    sparse_2dim_half = sparse_2dim * 0.5
    # second-order output
    y2 = ad.reduce_sum_op(sparse_2dim_half, axes=1, keepdims=True)

    # DNN part on the flattened second-order embeddings.
    flatten = ad.array_reshape_op(sparse_2dim_input, (-1, 26 * embedding_size))
    W1 = init.random_normal([26 * embedding_size, 256], stddev=0.01, name="W1")
    W2 = init.random_normal([256, 256], stddev=0.01, name="W2")
    W3 = init.random_normal([256, 1], stddev=0.01, name="W3")
    fc1 = ad.matmul_op(flatten, W1)
    relu1 = ad.relu_op(fc1)
    fc2 = ad.matmul_op(relu1, W2)
    relu2 = ad.relu_op(fc2)
    y3 = ad.matmul_op(relu2, W3)

    # Sum the FM and DNN outputs, then squash to a click probability.
    y4 = y1 + y2
    y = y4 + y3
    y = ad.sigmoid_op(y)

    loss = ad.binarycrossentropy_op(y, y_)
    loss = ad.reduce_mean_op(loss, [0])
    opt = optimizer.SGDOptimizer(learning_rate=learning_rate)
    train_op = opt.minimize(loss)
    return loss, y, y_, train_op
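
# The second-order term above relies on the FM identity
#   sum_{i<j} <v_i, v_j> = 0.5 * (||sum_i v_i||^2 - sum_i ||v_i||^2),
# which turns an O(fields^2) pairwise sum into two linear passes. A quick
# numpy check of the identity on one sample's 26 field embeddings
# (illustrative only; not part of the model):
import numpy as np

def _check_fm_identity(num_fields=26, dim=128):
    v = np.random.randn(num_fields, dim)
    pairwise = sum(v[i] @ v[j] for i in range(num_fields)
                   for j in range(i + 1, num_fields))
    trick = 0.5 * (np.square(v.sum(axis=0)).sum() - np.square(v).sum())
    assert np.isclose(pairwise, trick)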
def neural_mf(user_input, item_input, y_, num_users, num_items):
    batch_size = 256
    embed_dim = 8
    layers = [64, 32, 16, 8]
    learning_rate = 0.01

    # One fused table per side: the first embed_dim columns feed the GMF
    # branch, the remaining layers[0] // 2 columns feed the MLP branch.
    User_Embedding = init.random_normal(
        (num_users, embed_dim + layers[0] // 2), stddev=0.01,
        name="user_embed", ctx=ndarray.cpu(0))
    Item_Embedding = init.random_normal(
        (num_items, embed_dim + layers[0] // 2), stddev=0.01,
        name="item_embed", ctx=ndarray.cpu(0))
    # MLP_User_Embedding = init.random_normal((num_users, layers[0] // 2), stddev=0.01, name="mlp_user_embed", ctx=ndarray.cpu(0))
    # MLP_Item_Embedding = init.random_normal((num_items, layers[0] // 2), stddev=0.01, name="mlp_item_embed", ctx=ndarray.cpu(0))

    user_latent = ad.embedding_lookup_op(User_Embedding, user_input, ctx=ndarray.cpu(0))
    item_latent = ad.embedding_lookup_op(Item_Embedding, item_input, ctx=ndarray.cpu(0))

    mf_user_latent = ad.slice_op(user_latent, (0, 0), (-1, embed_dim))
    mlp_user_latent = ad.slice_op(user_latent, (0, embed_dim), (-1, -1))
    mf_item_latent = ad.slice_op(item_latent, (0, 0), (-1, embed_dim))
    mlp_item_latent = ad.slice_op(item_latent, (0, embed_dim), (-1, -1))
    # mf_user_latent = ad.embedding_lookup_op(MF_User_Embedding, user_input, ctx=ndarray.cpu(0))
    # mf_item_latent = ad.embedding_lookup_op(MF_Item_Embedding, item_input, ctx=ndarray.cpu(0))
    # mlp_user_latent = ad.embedding_lookup_op(MLP_User_Embedding, user_input, ctx=ndarray.cpu(0))
    # mlp_item_latent = ad.embedding_lookup_op(MLP_Item_Embedding, item_input, ctx=ndarray.cpu(0))

    W1 = init.random_normal((layers[0], layers[1]), stddev=0.1, name='W1')
    W2 = init.random_normal((layers[1], layers[2]), stddev=0.1, name='W2')
    W3 = init.random_normal((layers[2], layers[3]), stddev=0.1, name='W3')
    W4 = init.random_normal((embed_dim + layers[3], 1), stddev=0.1, name='W4')

    # GMF branch: elementwise product of latents; MLP branch: concatenated
    # latents through three ReLU layers.
    mf_vector = ad.mul_op(mf_user_latent, mf_item_latent)
    mlp_vector = ad.concat_op(mlp_user_latent, mlp_item_latent, axis=1)
    fc1 = ad.matmul_op(mlp_vector, W1)
    relu1 = ad.relu_op(fc1)
    fc2 = ad.matmul_op(relu1, W2)
    relu2 = ad.relu_op(fc2)
    fc3 = ad.matmul_op(relu2, W3)
    relu3 = ad.relu_op(fc3)

    concat_vector = ad.concat_op(mf_vector, relu3, axis=1)
    y = ad.matmul_op(concat_vector, W4)
    y = ad.sigmoid_op(y)

    loss = ad.binarycrossentropy_op(y, y_)
    loss = ad.reduce_mean_op(loss, [0])
    opt = optimizer.SGDOptimizer(learning_rate=learning_rate)
    # opt = optimizer.AdamOptimizer(learning_rate=learning_rate)
    train_op = opt.minimize(loss)
    return loss, y, train_op
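
# Shape bookkeeping for the fused tables above, with the default values
# embed_dim = 8 and layers = [64, 32, 16, 8] (illustrative only): each lookup
# row is 8 + 32 = 40 wide; columns [0, 8) go to the GMF branch and columns
# [8, 40) to the MLP branch, so the concatenated user/item MLP input is
# 32 + 32 = 64 = layers[0], matching W1's input dimension.
def _check_neumf_shapes(embed_dim=8, layers=(64, 32, 16, 8)):
    fused_width = embed_dim + layers[0] // 2
    assert fused_width == 40
    assert 2 * (layers[0] // 2) == layers[0]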
def wdl_criteo(dense, sparse, labels):
    batch_size = 128
    feature_dimension = 33762577
    embedding_size = 128
    learning_rate = 0.01

    # Accept either a (train, validate) tuple per input or a single
    # training split.
    if isinstance(dense, tuple):
        dense_input = dl.dataloader_op([[dense[0], batch_size, 'train'],
                                        [dense[1], batch_size, 'validate']])
        sparse_input = dl.dataloader_op([[sparse[0], batch_size, 'train'],
                                         [sparse[1], batch_size, 'validate']])
        y_ = dl.dataloader_op([[labels[0], batch_size, 'train'],
                               [labels[1], batch_size, 'validate']])
    else:
        dense_input = dl.dataloader_op([[dense, batch_size, 'train']])
        sparse_input = dl.dataloader_op([[sparse, batch_size, 'train']])
        y_ = dl.dataloader_op([[labels, batch_size, 'train']])
    print("Data loaded.")

    Embedding = init.random_normal([feature_dimension, embedding_size], stddev=0.01,
                                   name="snd_order_embedding", ctx=ndarray.cpu(0))
    sparse_input = ad.embedding_lookup_op(Embedding, sparse_input, ctx=ndarray.cpu(0))
    sparse_input = ad.array_reshape_op(sparse_input, (-1, 26 * embedding_size))

    # Deep part: three-layer MLP over the 13 dense features.
    flatten = dense_input
    W1 = init.random_normal([13, 256], stddev=0.01, name="W1")
    W2 = init.random_normal([256, 256], stddev=0.01, name="W2")
    W3 = init.random_normal([256, 256], stddev=0.01, name="W3")
    W4 = init.random_normal([256 + 26 * embedding_size, 1], stddev=0.01, name="W4")
    fc1 = ad.matmul_op(flatten, W1)
    relu1 = ad.relu_op(fc1)
    fc2 = ad.matmul_op(relu1, W2)
    relu2 = ad.relu_op(fc2)
    y3 = ad.matmul_op(relu2, W3)

    # Wide part: the raw sparse embeddings, concatenated with the deep
    # output and fed through a final linear layer.
    y4 = ad.concat_op(sparse_input, y3, axis=1)
    y = ad.matmul_op(y4, W4)
    y = ad.sigmoid_op(y)

    loss = ad.binarycrossentropy_op(y, y_)
    loss = ad.reduce_mean_op(loss, [0])
    opt = optimizer.SGDOptimizer(learning_rate=learning_rate)
    train_op = opt.minimize(loss)
    return loss, y, y_, train_op
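
# Illustrative call with synthetic data (shapes follow the Criteo layout used
# above: 13 dense features, 26 sparse fields, binary labels). This assumes
# dl.dataloader_op accepts in-memory numpy arrays; if your dataloader expects
# file paths instead, pass those.
def _demo_wdl():
    dense = np.random.rand(1024, 13).astype(np.float32)
    sparse = np.random.randint(0, 33762577, size=(1024, 26))
    labels = np.random.randint(0, 2, size=(1024, 1)).astype(np.float32)
    return wdl_criteo(dense, sparse, labels)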
# Dataloader-wrapped variant of dc_criteo: same architecture as the version
# above, but it builds its own train/validate dataloaders from raw arrays.
def dc_criteo(dense, sparse, labels):
    batch_size = 128
    feature_dimension = 33762577
    embedding_size = 8
    learning_rate = 0.001

    if isinstance(dense, tuple):
        dense_input = dl.dataloader_op([[dense[0], batch_size, 'train'],
                                        [dense[1], batch_size, 'validate']])
        sparse_input = dl.dataloader_op([[sparse[0], batch_size, 'train'],
                                         [sparse[1], batch_size, 'validate']])
        y_ = dl.dataloader_op([[labels[0], batch_size, 'train'],
                               [labels[1], batch_size, 'validate']])
    else:
        dense_input = dl.dataloader_op([[dense, batch_size, 'train']])
        sparse_input = dl.dataloader_op([[sparse, batch_size, 'train']])
        y_ = dl.dataloader_op([[labels, batch_size, 'train']])
    print("Data loaded.")

    Embedding = init.random_normal([feature_dimension, embedding_size],
                                   stddev=0.01, name="snd_order_embedding")
    sparse_input = ad.embedding_lookup_op(Embedding, sparse_input)
    sparse_input = ad.array_reshape_op(sparse_input, (-1, 26 * embedding_size))

    # Deep Crossing model: residual stack over the concatenated features.
    x = ad.concat_op(sparse_input, dense_input, axis=1)
    input_dim = 26 * embedding_size + 13
    hidden_dim = input_dim
    residual_out = build_residual_layers(x, input_dim, hidden_dim, num_layers=5)

    W4 = init.random_normal([26 * embedding_size + 13, 1], stddev=0.1, name="W4")
    y = ad.matmul_op(residual_out, W4)
    y = ad.sigmoid_op(y)

    loss = ad.binarycrossentropy_op(y, y_)
    loss = ad.reduce_mean_op(loss, [0])
    opt = optimizer.SGDOptimizer(learning_rate=learning_rate)
    train_op = opt.minimize(loss)
    return loss, y, y_, train_op
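
# Illustrative call with a held-out validation split (synthetic numpy arrays;
# same assumption about dl.dataloader_op as in the wide-and-deep demo above):
def _demo_dc_with_validation():
    tr_dense = np.random.rand(1024, 13).astype(np.float32)
    va_dense = np.random.rand(256, 13).astype(np.float32)
    tr_sparse = np.random.randint(0, 33762577, size=(1024, 26))
    va_sparse = np.random.randint(0, 33762577, size=(256, 26))
    tr_y = np.random.randint(0, 2, size=(1024, 1)).astype(np.float32)
    va_y = np.random.randint(0, 2, size=(256, 1)).astype(np.float32)
    return dc_criteo((tr_dense, va_dense), (tr_sparse, va_sparse), (tr_y, va_y))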
def dcn_criteo(dense_input, sparse_input, y_):
    feature_dimension = 33762577
    embedding_size = 128
    learning_rate = 0.003

    Embedding = init.random_normal([feature_dimension, embedding_size], stddev=0.01,
                                   name="snd_order_embedding", ctx=ndarray.cpu(0))
    sparse_input = ad.embedding_lookup_op(Embedding, sparse_input, ctx=ndarray.cpu(0))
    sparse_input = ad.array_reshape_op(sparse_input, (-1, 26 * embedding_size))
    x = ad.concat_op(sparse_input, dense_input, axis=1)

    # Cross network: explicit bounded-degree feature crosses.
    cross_output = build_cross_layer(x, num_layers=3)

    # Deep network: three-layer MLP over the same concatenated input.
    flatten = x
    W1 = init.random_normal([26 * embedding_size + 13, 256], stddev=0.01, name="W1")
    W2 = init.random_normal([256, 256], stddev=0.01, name="W2")
    W3 = init.random_normal([256, 256], stddev=0.01, name="W3")
    W4 = init.random_normal([256 + 26 * embedding_size + 13, 1], stddev=0.01, name="W4")
    fc1 = ad.matmul_op(flatten, W1)
    relu1 = ad.relu_op(fc1)
    fc2 = ad.matmul_op(relu1, W2)
    relu2 = ad.relu_op(fc2)
    y3 = ad.matmul_op(relu2, W3)

    # Combine cross and deep outputs through a final linear layer.
    y4 = ad.concat_op(cross_output, y3, axis=1)
    y = ad.matmul_op(y4, W4)
    y = ad.sigmoid_op(y)

    loss = ad.binarycrossentropy_op(y, y_)
    loss = ad.reduce_mean_op(loss, [0])
    opt = optimizer.SGDOptimizer(learning_rate=learning_rate)
    train_op = opt.minimize(loss)
    return loss, y, y_, train_op
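
# build_cross_layer is referenced above but not shown here. The sketch below
# is a plausible reconstruction, NOT the original implementation. Each cross
# layer computes x_{l+1} = x_0 * (x_l @ w_l) + x_l (bias omitted for
# brevity); it assumes ad.mul_op broadcasts the (batch, 1) product against
# (batch, d). If the framework requires an explicit broadcast op, insert one
# before the multiply.
def build_cross_layer(x0, num_layers=3, input_dim=26 * 128 + 13):
    x = x0
    for i in range(num_layers):
        w = init.random_normal([input_dim, 1], stddev=0.01,
                               name="cross_w_%d" % i)
        xw = ad.matmul_op(x, w)    # (batch, 1): scalar weight per example
        x = ad.mul_op(x0, xw) + x  # rank-1 cross term plus residual
    return x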