def test_MatrixMult():
    X = ad.Variable(name="X")
    W1 = init.random_normal((10, 5), stddev=0.1, name='W1')
    y = ad.matmul_op(X, W1)
    executor = ad.Executor([y], ctx=ctx)
    X_val = rand.normal(scale=0.1, size=(batch_size, 10)).astype(np.float32)
    res = executor.run(feed_dict={X: X_val})
    Check(executor, res, [X], [y], [X_val])

    # test trans_A
    X = ad.Variable(name="X")
    W1 = init.random_normal((10, 5), stddev=0.1, name='W1')
    y = ad.matmul_op(X, W1, trans_A=True)
    executor = ad.Executor([y], ctx=ctx)
    X_val = rand.normal(scale=0.1, size=(10, batch_size)).astype(np.float32)
    res = executor.run(feed_dict={X: X_val})
    Check(executor, res, [X], [y], [X_val])

    # test trans_B
    X = ad.Variable(name="X")
    W1 = init.random_normal((5, 10), stddev=0.1, name='W1')
    y = ad.matmul_op(X, W1, trans_B=True)
    executor = ad.Executor([y], ctx=ctx)
    X_val = rand.normal(scale=0.1, size=(batch_size, 10)).astype(np.float32)
    res = executor.run(feed_dict={X: X_val})
    Check(executor, res, [X], [y], [X_val])
    print(sys._getframe().f_code.co_name, 'pass!')
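# The `Check` helper used by the tests in this file is not defined in this
# section. The sketch below is an assumption, not the real implementation: it
# mirrors the hetu2onnx/onnxruntime round-trip that `cnn` and `mnist_mlp`
# perform later in this file (the aliases hx/rt and the temporary file name
# are likewise assumed).
def Check(executor, res, input_nodes, output_nodes, input_vals):
    # Export the computation graph to ONNX, replay it with onnxruntime,
    # and compare against the values hetu computed.
    hx.hetu2onnx.export(executor, input_nodes, output_nodes, 'check_tmp.onnx')
    sess = rt.InferenceSession('check_tmp.onnx')
    feeds = {}
    for node, val in zip(input_nodes, input_vals):
        # Parameters may arrive as hetu NDArrays; onnxruntime needs numpy.
        feeds[node.name] = val.asnumpy() if hasattr(val, 'asnumpy') else val
    # Only feed the inputs onnxruntime actually exposes (trained parameters
    # are usually baked into the exported graph as initializers).
    graph_inputs = {i.name for i in sess.get_inputs()}
    feeds = {k: v for k, v in feeds.items() if k in graph_inputs}
    pre = sess.run(None, feeds)
    for hetu_out, onnx_out in zip(res, pre):
        np.testing.assert_allclose(hetu_out.asnumpy(), onnx_out, rtol=1e-2)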
def fc(x, shape):
    weight = init.random_normal(shape=shape, stddev=0.1)
    bias = init.random_normal(shape=shape[-1:], stddev=0.1)
    x = ad.array_reshape_op(x, (-1, shape[0]))
    x = ad.matmul_op(x, weight)
    y = x + ad.broadcastto_op(bias, x)
    return y
def dc_criteo(dense_input, sparse_input, y_):
    feature_dimension = 33762577
    embedding_size = 8
    learning_rate = 0.001
    Embedding = init.random_normal([feature_dimension, embedding_size],
                                   stddev=0.01, name="snd_order_embedding")
    sparse_input = ad.embedding_lookup_op(Embedding, sparse_input)
    sparse_input = ad.array_reshape_op(sparse_input, (-1, 26 * embedding_size))

    # dc_model
    x = ad.concat_op(sparse_input, dense_input, axis=1)
    input_dim = 26 * embedding_size + 13
    hidden_dim = input_dim
    residual_out = build_residual_layers(x, input_dim, hidden_dim, num_layers=5)
    W4 = init.random_normal([26 * embedding_size + 13, 1], stddev=0.1, name="W4")
    y = ad.matmul_op(residual_out, W4)
    y = ad.sigmoid_op(y)

    loss = ad.binarycrossentropy_op(y, y_)
    loss = ad.reduce_mean_op(loss, [0])
    opt = optimizer.SGDOptimizer(learning_rate=learning_rate)
    train_op = opt.minimize(loss)
    return loss, y, y_, train_op
def fc(x, shape, name):
    weight = init.random_normal(shape=shape, stddev=0.1, name=name + '_weight')
    bias = init.random_normal(shape=shape[-1:], stddev=0.1, name=name + '_bias')
    x = ad.matmul_op(x, weight)
    x = x + ad.broadcastto_op(bias, x)
    return x
def vgg_fc(x, in_feat, out_feat, name):
    weight = init.random_normal(shape=(in_feat, out_feat), stddev=0.1, name=name + '_weight')
    bias = init.random_normal(shape=(out_feat, ), stddev=0.1, name=name + '_bias')
    x = ad.matmul_op(x, weight)
    x = x + ad.broadcastto_op(bias, x)
    return x
def batch_norm_with_relu(x, hidden, name):
    scale = init.random_normal(shape=(1, hidden, 1, 1), stddev=0.1, name=name + '_scale')
    bias = init.random_normal(shape=(1, hidden, 1, 1), stddev=0.1, name=name + '_bias')
    x = ad.batch_normalization_op(x, scale, bias)
    x = ad.relu_op(x)
    return x
def wdl_criteo(dense, sparse, labels):
    batch_size = 128
    feature_dimension = 33762577
    embedding_size = 128
    learning_rate = 0.01
    if isinstance(dense, tuple):
        dense_input = dl.dataloader_op([[dense[0], batch_size, 'train'],
                                        [dense[1], batch_size, 'validate']])
        sparse_input = dl.dataloader_op([[sparse[0], batch_size, 'train'],
                                         [sparse[1], batch_size, 'validate']])
        y_ = dl.dataloader_op([[labels[0], batch_size, 'train'],
                               [labels[1], batch_size, 'validate']])
    else:
        dense_input = dl.dataloader_op([[dense, batch_size, 'train']])
        sparse_input = dl.dataloader_op([[sparse, batch_size, 'train']])
        y_ = dl.dataloader_op([[labels, batch_size, 'train']])
    print("Data loaded.")

    Embedding = init.random_normal([feature_dimension, embedding_size],
                                   stddev=0.01, name="snd_order_embedding",
                                   ctx=ndarray.cpu(0))
    sparse_input = ad.embedding_lookup_op(Embedding, sparse_input, ctx=ndarray.cpu(0))
    sparse_input = ad.array_reshape_op(sparse_input, (-1, 26 * embedding_size))

    # DNN
    flatten = dense_input
    W1 = init.random_normal([13, 256], stddev=0.01, name="W1")
    W2 = init.random_normal([256, 256], stddev=0.01, name="W2")
    W3 = init.random_normal([256, 256], stddev=0.01, name="W3")
    W4 = init.random_normal([256 + 26 * embedding_size, 1], stddev=0.01, name="W4")
    fc1 = ad.matmul_op(flatten, W1)
    relu1 = ad.relu_op(fc1)
    fc2 = ad.matmul_op(relu1, W2)
    relu2 = ad.relu_op(fc2)
    y3 = ad.matmul_op(relu2, W3)
    y4 = ad.concat_op(sparse_input, y3, axis=1)
    y = ad.matmul_op(y4, W4)
    y = ad.sigmoid_op(y)

    loss = ad.binarycrossentropy_op(y, y_)
    loss = ad.reduce_mean_op(loss, [0])
    opt = optimizer.SGDOptimizer(learning_rate=learning_rate)
    train_op = opt.minimize(loss)
    return loss, y, y_, train_op
def cross_layer(x0, x1):
    # x0: input embedding feature, shape (batch_size, 26 * embedding_size + 13)
    # x1: the previous layer's output, shape (batch_size, 26 * embedding_size + 13)
    embedding_len = 26 * 128 + 13
    weight = init.random_normal(shape=(embedding_len, 1), stddev=0.01, name='weight')
    bias = init.random_normal(shape=(embedding_len, ), stddev=0.01, name='bias')
    x1w = ad.matmul_op(x1, weight)  # (batch_size, 1)
    y = ad.mul_op(x0, ad.broadcastto_op(x1w, x0))
    y = y + x1 + ad.broadcastto_op(bias, y)
    return y
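# `build_cross_layer`, used by `dcn_criteo` below, is not defined in this
# section. A minimal sketch, assuming it stacks `cross_layer` with the
# original input x0 held fixed, following the DCN recurrence
# x_{l+1} = x0 * (x_l^T w_l) + b_l + x_l. Note that `cross_layer` reuses the
# parameter names 'weight' and 'bias'; if the framework requires unique
# names, a per-layer prefix would have to be threaded through.
def build_cross_layer(x0, num_layers=3):
    x1 = x0
    for _ in range(num_layers):
        x1 = cross_layer(x0, x1)
    return x1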
def conv_bn_relu(x, in_channel, out_channel, name):
    weight = init.random_normal(shape=(out_channel, in_channel, 3, 3), stddev=0.1, name=name + '_weight')
    bn_scale = init.random_normal(shape=(1, out_channel, 1, 1), stddev=0.1, name=name + '_bn_scale')
    bn_bias = init.random_normal(shape=(1, out_channel, 1, 1), stddev=0.1, name=name + '_bn_bias')
    conv = ad.conv2d_op(x, weight, padding=1, stride=1)
    bn = ad.batch_normalization_op(conv, bn_scale, bn_bias)
    act = ad.relu_op(bn)
    return act
def test_BatchNorm():
    X = ad.Variable(name="X")
    bn_scale = init.random_normal((64, ), stddev=0.1, name='bn_scale')
    bn_bias = init.random_normal((64, ), stddev=0.1, name='bn_bias')
    y = ad.batch_normalization_op(X, bn_scale, bn_bias)
    executor = ad.Executor([y], ctx=ctx)
    X_val = rand.normal(scale=0.1, size=(batch_size, 64, 28, 28)).astype(np.float32)
    res = executor.run(feed_dict={X: X_val})
    Check(executor, res, [X, bn_scale, bn_bias], [y],
          [X_val, bn_scale.tensor_value, bn_bias.tensor_value])
    print(sys._getframe().f_code.co_name, 'pass!')
def dc_criteo(dense, sparse, labels):
    batch_size = 128
    feature_dimension = 33762577
    embedding_size = 8
    learning_rate = 0.001
    if isinstance(dense, tuple):
        dense_input = dl.dataloader_op([[dense[0], batch_size, 'train'],
                                        [dense[1], batch_size, 'validate']])
        sparse_input = dl.dataloader_op([[sparse[0], batch_size, 'train'],
                                         [sparse[1], batch_size, 'validate']])
        y_ = dl.dataloader_op([[labels[0], batch_size, 'train'],
                               [labels[1], batch_size, 'validate']])
    else:
        dense_input = dl.dataloader_op([[dense, batch_size, 'train']])
        sparse_input = dl.dataloader_op([[sparse, batch_size, 'train']])
        y_ = dl.dataloader_op([[labels, batch_size, 'train']])
    print("Data loaded.")

    Embedding = init.random_normal([feature_dimension, embedding_size],
                                   stddev=0.01, name="snd_order_embedding")
    sparse_input = ad.embedding_lookup_op(Embedding, sparse_input)
    sparse_input = ad.array_reshape_op(sparse_input, (-1, 26 * embedding_size))

    # dc_model
    x = ad.concat_op(sparse_input, dense_input, axis=1)
    input_dim = 26 * embedding_size + 13
    hidden_dim = input_dim
    residual_out = build_residual_layers(x, input_dim, hidden_dim, num_layers=5)
    W4 = init.random_normal([26 * embedding_size + 13, 1], stddev=0.1, name="W4")
    y = ad.matmul_op(residual_out, W4)
    y = ad.sigmoid_op(y)

    loss = ad.binarycrossentropy_op(y, y_)
    loss = ad.reduce_mean_op(loss, [0])
    opt = optimizer.SGDOptimizer(learning_rate=learning_rate)
    train_op = opt.minimize(loss)
    return loss, y, y_, train_op
def conv_pool(x, in_channel, out_channel, name):
    weight = init.random_normal(shape=(out_channel, in_channel, 5, 5), stddev=0.1, name=name + '_weight')
    x = ad.conv2d_op(x, weight, padding=2, stride=1)
    x = ad.relu_op(x)
    x = ad.max_pool2d_op(x, kernel_H=2, kernel_W=2, padding=0, stride=2)
    return x
def dcn_criteo(dense_input, sparse_input, y_):
    feature_dimension = 33762577
    embedding_size = 128
    learning_rate = 0.003
    Embedding = init.random_normal([feature_dimension, embedding_size],
                                   stddev=0.01, name="snd_order_embedding",
                                   ctx=ndarray.cpu(0))
    sparse_input = ad.embedding_lookup_op(Embedding, sparse_input, ctx=ndarray.cpu(0))
    sparse_input = ad.array_reshape_op(sparse_input, (-1, 26 * embedding_size))
    x = ad.concat_op(sparse_input, dense_input, axis=1)

    # Cross Network
    cross_output = build_cross_layer(x, num_layers=3)

    # DNN
    flatten = x
    W1 = init.random_normal([26 * embedding_size + 13, 256], stddev=0.01, name="W1")
    W2 = init.random_normal([256, 256], stddev=0.01, name="W2")
    W3 = init.random_normal([256, 256], stddev=0.01, name="W3")
    W4 = init.random_normal([256 + 26 * embedding_size + 13, 1], stddev=0.01, name="W4")
    fc1 = ad.matmul_op(flatten, W1)
    relu1 = ad.relu_op(fc1)
    fc2 = ad.matmul_op(relu1, W2)
    relu2 = ad.relu_op(fc2)
    y3 = ad.matmul_op(relu2, W3)
    y4 = ad.concat_op(cross_output, y3, axis=1)
    y = ad.matmul_op(y4, W4)
    y = ad.sigmoid_op(y)

    loss = ad.binarycrossentropy_op(y, y_)
    loss = ad.reduce_mean_op(loss, [0])
    opt = optimizer.SGDOptimizer(learning_rate=learning_rate)
    train_op = opt.minimize(loss)
    return loss, y, y_, train_op
def test_AddElewise():
    X = ad.Variable(name="X")
    b3 = init.random_normal((10, ), stddev=0.1, name='b3')
    y = X + b3
    executor = ad.Executor([y], ctx=ctx, enable_lazy=False)
    X_val = rand.normal(scale=0.1, size=(batch_size, 10)).astype(np.float32)
    res = executor.run(feed_dict={X: X_val})
    Check(executor, res, [X], [y], [X_val])
    print(sys._getframe().f_code.co_name, 'pass!')
def test_Conv2d():
    X = ad.Variable(name="X")
    W1 = init.random_normal((32, 1, 5, 5), stddev=0.1, name='W1')
    y = ad.conv2d_op(X, W1, padding=2, stride=1)
    executor = ad.Executor([y], ctx=ctx)
    X_val = rand.normal(scale=0.1, size=(batch_size, 1, 28, 28)).astype(np.float32)
    res = executor.run(feed_dict={X: X_val})
    Check(executor, res, [X], [y], [X_val])
    print(sys._getframe().f_code.co_name, 'pass!')
def cnn(executor_ctx=None, num_epochs=10, print_loss_val_each_epoch=False):
    print("Build CNN model...")
    W1 = init.random_normal((32, 1, 5, 5), stddev=0.1, name='W1')
    W2 = init.random_normal((64, 32, 5, 5), stddev=0.1, name='W2')
    W3 = init.random_normal((7 * 7 * 64, 10), stddev=0.1, name='W3')
    b3 = init.random_normal((10, ), stddev=0.1, name='b3')
    X = ad.Variable(name="X")

    z1 = ad.conv2d_op(X, W1, padding=2, stride=1)
    z2 = ad.relu_op(z1)
    z3 = ad.avg_pool2d_op(z2, kernel_H=2, kernel_W=2, padding=0, stride=2)
    z4 = ad.conv2d_op(z3, W2, padding=2, stride=1)
    z5 = ad.relu_op(z4)
    z6 = ad.avg_pool2d_op(z5, kernel_H=2, kernel_W=2, padding=0, stride=2)
    z6_flat = ad.array_reshape_op(z6, (-1, 7 * 7 * 64))
    y = ad.matmul_op(z6_flat, W3) + b3

    executor = ad.Executor([y], ctx=executor_ctx)
    rand = np.random.RandomState(seed=123)
    X_val = rand.normal(scale=0.1, size=(batch_size, 1, 28, 28)).astype(np.float32)
    ath = executor.run(feed_dict={X: X_val})

    hx.hetu2onnx.export(executor, [X], [y], 'ath.onnx')

    # replay the exported model with onnxruntime and compare
    sess = rt.InferenceSession("ath.onnx")
    input = sess.get_inputs()[0].name
    pre = sess.run(None, {input: X_val.astype(np.float32)})[0]
    np.testing.assert_allclose(ath[0].asnumpy(), pre, rtol=1e-2)
def conv_bn_relu_pool(x, in_channel, out_channel, name, with_relu=True, with_pool=False):
    weight = init.random_normal(shape=(out_channel, in_channel, 3, 3), stddev=0.1, name=name + '_weight')
    bn_scale = init.random_normal(shape=(1, out_channel, 1, 1), stddev=0.1, name=name + '_bn_scale')
    bn_bias = init.random_normal(shape=(1, out_channel, 1, 1), stddev=0.1, name=name + '_bn_bias')
    x = ad.conv2d_op(x, weight, stride=1, padding=1)
    x = ad.batch_normalization_op(x, bn_scale, bn_bias)
    if with_relu:
        x = ad.relu_op(x)
    if with_pool:
        x = ad.max_pool2d_op(x, kernel_H=2, kernel_W=2, stride=2, padding=0)
    return x
def rnn(x, y_):
    '''
    RNN model, for MNIST dataset.

    Parameters:
        x: Variable(hetu.gpu_ops.Node.Node), shape (N, dims)
        y_: Variable(hetu.gpu_ops.Node.Node), shape (N, num_classes)
    Return:
        loss: Variable(hetu.gpu_ops.Node.Node), shape (1,)
        y: Variable(hetu.gpu_ops.Node.Node), shape (N, num_classes)
    '''
    print("Building RNN model...")
    diminput = 28
    dimhidden = 128
    dimoutput = 10
    nsteps = 28

    weight1 = init.random_normal(shape=(diminput, dimhidden), stddev=0.1, name='rnn_weight1')
    bias1 = init.random_normal(shape=(dimhidden, ), stddev=0.1, name='rnn_bias1')
    weight2 = init.random_normal(shape=(dimhidden + dimhidden, dimhidden), stddev=0.1, name='rnn_weight2')
    bias2 = init.random_normal(shape=(dimhidden, ), stddev=0.1, name='rnn_bias2')
    weight3 = init.random_normal(shape=(dimhidden, dimoutput), stddev=0.1, name='rnn_weight3')
    bias3 = init.random_normal(shape=(dimoutput, ), stddev=0.1, name='rnn_bias3')
    last_state = ad.Variable(value=np.zeros((1, )).astype(np.float32),
                             name='initial_state', trainable=False)

    for i in range(nsteps):
        cur_x = ad.slice_op(x, (0, i * diminput), (-1, diminput))
        h = ad.matmul_op(cur_x, weight1)
        h = h + ad.broadcastto_op(bias1, h)
        if i == 0:
            last_state = ad.broadcastto_op(last_state, h)
        s = ad.concat_op(h, last_state, axis=1)
        s = ad.matmul_op(s, weight2)
        s = s + ad.broadcastto_op(bias2, s)
        last_state = ad.relu_op(s)

    final_state = last_state
    x = ad.matmul_op(final_state, weight3)
    y = x + ad.broadcastto_op(bias3, x)
    loss = ad.softmaxcrossentropy_op(y, y_)
    loss = ad.reduce_mean_op(loss, [0])
    return loss, y
def neural_mf(user_input, item_input, y_, num_users, num_items):
    batch_size = 256
    embed_dim = 8
    layers = [64, 32, 16, 8]
    learning_rate = 0.01

    # A single embedding table per side holds both the MF part (first
    # embed_dim columns) and the MLP part (remaining layers[0] // 2 columns),
    # which the slices below pull apart.
    User_Embedding = init.random_normal(
        (num_users, embed_dim + layers[0] // 2), stddev=0.01,
        name="user_embed", ctx=ndarray.cpu(0))
    Item_Embedding = init.random_normal(
        (num_items, embed_dim + layers[0] // 2), stddev=0.01,
        name="item_embed", ctx=ndarray.cpu(0))
    # MLP_User_Embedding = init.random_normal((num_users, layers[0] // 2), stddev=0.01, name="mlp_user_embed", ctx=ndarray.cpu(0))
    # MLP_Item_Embedding = init.random_normal((num_items, layers[0] // 2), stddev=0.01, name="mlp_item_embed", ctx=ndarray.cpu(0))

    user_latent = ad.embedding_lookup_op(User_Embedding, user_input, ctx=ndarray.cpu(0))
    item_latent = ad.embedding_lookup_op(Item_Embedding, item_input, ctx=ndarray.cpu(0))

    mf_user_latent = ad.slice_op(user_latent, (0, 0), (-1, embed_dim))
    mlp_user_latent = ad.slice_op(user_latent, (0, embed_dim), (-1, -1))
    mf_item_latent = ad.slice_op(item_latent, (0, 0), (-1, embed_dim))
    mlp_item_latent = ad.slice_op(item_latent, (0, embed_dim), (-1, -1))
    # mf_user_latent = ad.embedding_lookup_op(MF_User_Embedding, user_input, ctx=ndarray.cpu(0))
    # mf_item_latent = ad.embedding_lookup_op(MF_Item_Embedding, item_input, ctx=ndarray.cpu(0))
    # mlp_user_latent = ad.embedding_lookup_op(MLP_User_Embedding, user_input, ctx=ndarray.cpu(0))
    # mlp_item_latent = ad.embedding_lookup_op(MLP_Item_Embedding, item_input, ctx=ndarray.cpu(0))

    W1 = init.random_normal((layers[0], layers[1]), stddev=0.1, name='W1')
    W2 = init.random_normal((layers[1], layers[2]), stddev=0.1, name='W2')
    W3 = init.random_normal((layers[2], layers[3]), stddev=0.1, name='W3')
    W4 = init.random_normal((embed_dim + layers[3], 1), stddev=0.1, name='W4')

    mf_vector = ad.mul_op(mf_user_latent, mf_item_latent)
    mlp_vector = ad.concat_op(mlp_user_latent, mlp_item_latent, axis=1)
    fc1 = ad.matmul_op(mlp_vector, W1)
    relu1 = ad.relu_op(fc1)
    fc2 = ad.matmul_op(relu1, W2)
    relu2 = ad.relu_op(fc2)
    fc3 = ad.matmul_op(relu2, W3)
    relu3 = ad.relu_op(fc3)
    concat_vector = ad.concat_op(mf_vector, relu3, axis=1)
    y = ad.matmul_op(concat_vector, W4)
    y = ad.sigmoid_op(y)

    loss = ad.binarycrossentropy_op(y, y_)
    loss = ad.reduce_mean_op(loss, [0])
    opt = optimizer.SGDOptimizer(learning_rate=learning_rate)
    # opt = optimizer.AdamOptimizer(learning_rate=learning_rate)
    train_op = opt.minimize(loss)
    return loss, y, train_op
def dfm_criteo(dense_input, sparse_input, y_):
    feature_dimension = 33762577
    embedding_size = 128
    learning_rate = 0.01

    # FM
    Embedding1 = init.random_normal([feature_dimension, 1], stddev=0.01,
                                    name="fst_order_embedding", ctx=ndarray.cpu(0))
    FM_W = init.random_normal([13, 1], stddev=0.01, name="dense_parameter")
    sparse_1dim_input = ad.embedding_lookup_op(Embedding1, sparse_input, ctx=ndarray.cpu(0))
    fm_dense_part = ad.matmul_op(dense_input, FM_W)
    fm_sparse_part = ad.reduce_sum_op(sparse_1dim_input, axes=1)
    # first-order output
    y1 = fm_dense_part + fm_sparse_part

    Embedding2 = init.random_normal([feature_dimension, embedding_size], stddev=0.01,
                                    name="snd_order_embedding", ctx=ndarray.cpu(0))
    sparse_2dim_input = ad.embedding_lookup_op(Embedding2, sparse_input, ctx=ndarray.cpu(0))
    sparse_2dim_sum = ad.reduce_sum_op(sparse_2dim_input, axes=1)
    sparse_2dim_sum_square = ad.mul_op(sparse_2dim_sum, sparse_2dim_sum)
    sparse_2dim_square = ad.mul_op(sparse_2dim_input, sparse_2dim_input)
    sparse_2dim_square_sum = ad.reduce_sum_op(sparse_2dim_square, axes=1)
    sparse_2dim = sparse_2dim_sum_square + -1 * sparse_2dim_square_sum
    sparse_2dim_half = sparse_2dim * 0.5
    # second-order output
    y2 = ad.reduce_sum_op(sparse_2dim_half, axes=1, keepdims=True)

    # DNN
    flatten = ad.array_reshape_op(sparse_2dim_input, (-1, 26 * embedding_size))
    W1 = init.random_normal([26 * embedding_size, 256], stddev=0.01, name="W1")
    W2 = init.random_normal([256, 256], stddev=0.01, name="W2")
    W3 = init.random_normal([256, 1], stddev=0.01, name="W3")
    fc1 = ad.matmul_op(flatten, W1)
    relu1 = ad.relu_op(fc1)
    fc2 = ad.matmul_op(relu1, W2)
    relu2 = ad.relu_op(fc2)
    y3 = ad.matmul_op(relu2, W3)

    y4 = y1 + y2
    y = y4 + y3
    y = ad.sigmoid_op(y)

    loss = ad.binarycrossentropy_op(y, y_)
    loss = ad.reduce_mean_op(loss, [0])
    opt = optimizer.SGDOptimizer(learning_rate=learning_rate)
    train_op = opt.minimize(loss)
    return loss, y, y_, train_op
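# The sum-square / square-sum graph in `dfm_criteo` implements the standard
# FM identity: summed over the embedding dimension,
#   sum_{i<j} <v_i, v_j> = 0.5 * (||sum_i v_i||^2 - sum_i ||v_i||^2).
# A quick NumPy sanity check of that identity (illustrative only, not part
# of the model):
import numpy as np

v = np.random.randn(26, 8)  # 26 field embeddings of size 8
pairwise = sum(v[i] @ v[j] for i in range(26) for j in range(i + 1, 26))
trick = 0.5 * ((v.sum(axis=0) ** 2).sum() - (v ** 2).sum())
assert np.isclose(pairwise, trick)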
def residual_layer(x0, input_dim, hidden_dim):
    weight_1 = init.random_normal(shape=(input_dim, hidden_dim), stddev=0.1, name='weight_1')
    bias_1 = init.random_normal(shape=(hidden_dim, ), stddev=0.1, name='bias_1')
    weight_2 = init.random_normal(shape=(hidden_dim, input_dim), stddev=0.1, name='weight_2')
    bias_2 = init.random_normal(shape=(input_dim, ), stddev=0.1, name='bias_2')
    x0w = ad.matmul_op(x0, weight_1)  # (batch, hidden_dim)
    x0w_b = x0w + ad.broadcastto_op(bias_1, x0w)
    relu1 = ad.relu_op(x0w_b)
    x1w = ad.matmul_op(relu1, weight_2)  # (batch, input_dim)
    x1w_b = x1w + ad.broadcastto_op(bias_2, x1w)
    residual = x1w_b + x0
    y = ad.relu_op(residual)
    return y
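# `build_residual_layers`, used by both `dc_criteo` variants above, is not
# defined in this section. A minimal sketch, assuming it simply chains
# `residual_layer` num_layers times (as with `cross_layer`, unique parameter
# names per layer may be required in practice):
def build_residual_layers(x, input_dim, hidden_dim, num_layers=3):
    for _ in range(num_layers):
        x = residual_layer(x, input_dim, hidden_dim)
    return x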
def wdl_adult(X_deep, X_wide, y_):
    lr = 5 / 128
    dim_wide = 809
    dim_deep = 68

    W = init.random_normal([dim_wide + 20, 2], stddev=0.1, name="W")
    W1 = init.random_normal([dim_deep, 50], stddev=0.1, name="W1")
    b1 = init.random_normal([50], stddev=0.1, name="b1")
    W2 = init.random_normal([50, 20], stddev=0.1, name="W2")
    b2 = init.random_normal([20], stddev=0.1, name="b2")

    # deep
    Embedding = []
    X_deep_input = None
    for i in range(8):
        Embedding_name = "Embedding_deep_" + str(i)
        Embedding.append(init.random_normal([50, 8], stddev=0.1, name=Embedding_name))
        now = ad.embedding_lookup_op(Embedding[i], X_deep[i])
        now = ad.array_reshape_op(now, (-1, 8))
        if X_deep_input is None:
            X_deep_input = now
        else:
            X_deep_input = ad.concat_op(X_deep_input, now, 1)
    for i in range(4):
        now = ad.array_reshape_op(X_deep[i + 8], (-1, 1))
        X_deep_input = ad.concat_op(X_deep_input, now, 1)

    mat1 = ad.matmul_op(X_deep_input, W1)
    add1 = mat1 + ad.broadcastto_op(b1, mat1)
    relu1 = ad.relu_op(add1)
    dropout1 = relu1  # ad.dropout_op(relu1, 0.5)
    mat2 = ad.matmul_op(dropout1, W2)
    add2 = mat2 + ad.broadcastto_op(b2, mat2)
    relu2 = ad.relu_op(add2)
    dropout2 = relu2  # ad.dropout_op(relu2, 0.5)
    dmodel = dropout2

    # wide
    wmodel = ad.concat_op(X_wide, dmodel, 1)
    wmodel = ad.matmul_op(wmodel, W)

    prediction = wmodel
    loss = ad.softmaxcrossentropy_op(prediction, y_)
    loss = ad.reduce_mean_op(loss, [0])
    opt = optimizer.SGDOptimizer(learning_rate=lr)
    train_op = opt.minimize(loss)
    return loss, prediction, y_, train_op
def mnist_mlp(executor_ctx=None, num_epochs=10, print_loss_val_each_epoch=False):
    print("Build 3-layer MLP model...")
    W1 = init.random_normal((784, 256), stddev=0.1, name='W1')
    W2 = init.random_normal((256, 256), stddev=0.1, name='W2')
    W3 = init.random_normal((256, 10), stddev=0.1, name='W3')
    b1 = init.random_normal((256, ), stddev=0.1, name='b1')
    b2 = init.random_normal((256, ), stddev=0.1, name='b2')
    b3 = init.random_normal((10, ), stddev=0.1, name='b3')
    X = ad.Variable(name="X")

    # relu(X W1 + b1)
    z1 = ad.matmul_op(X, W1) + b1
    z2 = ad.relu_op(z1)
    # relu(z2 W2 + b2)
    z3 = ad.matmul_op(z2, W2) + b2
    z4 = ad.relu_op(z3)
    # logits: z4 W3 + b3 (softmax would be applied inside the loss)
    y = ad.matmul_op(z4, W3) + b3

    executor = ad.Executor([y], ctx=executor_ctx)
    rand = np.random.RandomState(seed=123)
    X_val = rand.normal(scale=0.1, size=(batch_size, 784)).astype(np.float32)
    ath = executor.run(feed_dict={X: X_val})

    ax.hetu2onnx.export(executor, [X], [y], 'ath.onnx')

    # replay the exported model with onnxruntime and compare
    sess = rt.InferenceSession("ath.onnx")
    input = sess.get_inputs()[0].name
    pre = sess.run(None, {input: X_val.astype(np.float32)})[0]
    np.testing.assert_allclose(pre, ath[0].asnumpy(), rtol=1e-2)
def wdl_adult(whatever):
    batch_size = 128
    lr = 5
    lr_ = lr / batch_size
    dim_wide = 809
    dim_deep = 68

    from .load_data import load_adult_data
    x_train_deep, x_train_wide, y_train, x_test_deep, x_test_wide, y_test = load_adult_data()

    W = init.random_normal([dim_wide + 20, 2], stddev=0.1, name="W")
    W1 = init.random_normal([dim_deep, 50], stddev=0.1, name="W1")
    b1 = init.random_normal([50], stddev=0.1, name="b1")
    W2 = init.random_normal([50, 20], stddev=0.1, name="W2")
    b2 = init.random_normal([20], stddev=0.1, name="b2")

    X_wide = dl.dataloader_op([
        [x_train_wide, batch_size, 'train'],
        [x_test_wide, batch_size, 'validate'],
    ])
    y_ = dl.dataloader_op([
        [y_train, batch_size, 'train'],
        [y_test, batch_size, 'validate'],
    ])

    # deep
    Embedding = []
    X_deep = []
    X_deep_input = None
    for i in range(8):
        Embedding_name = "Embedding_deep_" + str(i)
        X_deep.append(dl.dataloader_op([
            [x_train_deep[:, i], batch_size, 'train'],
            [x_test_deep[:, i], batch_size, 'validate'],
        ]))
        Embedding.append(init.random_normal([50, 8], stddev=0.1, name=Embedding_name))
        now = ad.embedding_lookup_op(Embedding[i], X_deep[i])
        now = ad.array_reshape_op(now, (-1, 8))
        if X_deep_input is None:
            X_deep_input = now
        else:
            X_deep_input = ad.concat_op(X_deep_input, now, 1)
    for i in range(4):
        X_deep.append(dl.dataloader_op([
            [x_train_deep[:, 8 + i], batch_size, 'train'],
            [x_test_deep[:, 8 + i], batch_size, 'validate'],
        ]))
        now = ad.array_reshape_op(X_deep[i + 8], (batch_size, 1))
        X_deep_input = ad.concat_op(X_deep_input, now, 1)

    mat1 = ad.matmul_op(X_deep_input, W1)
    add1 = mat1 + ad.broadcastto_op(b1, mat1)
    relu1 = ad.relu_op(add1)
    dropout1 = relu1  # ad.dropout_op(relu1, 0.5)
    mat2 = ad.matmul_op(dropout1, W2)
    add2 = mat2 + ad.broadcastto_op(b2, mat2)
    relu2 = ad.relu_op(add2)
    dropout2 = relu2  # ad.dropout_op(relu2, 0.5)
    dmodel = dropout2

    # wide
    wmodel = ad.concat_op(X_wide, dmodel, 1)
    wmodel = ad.matmul_op(wmodel, W)

    prediction = wmodel
    loss = ad.softmaxcrossentropy_op(prediction, y_)
    loss = ad.reduce_mean_op(loss, [0])
    opt = optimizer.SGDOptimizer(learning_rate=lr_)
    train_op = opt.minimize(loss)
    return loss, prediction, y_, train_op
def conv2d(x, in_channel, out_channel, stride=1, padding=1, name=''):
    weight = init.random_normal(shape=(out_channel, in_channel, 3, 3), stddev=0.1, name=name + '_weight')
    x = ad.conv2d_op(x, weight, stride=stride, padding=padding)
    return x
def lstm(x, y_):
    '''
    LSTM model, for MNIST dataset.

    Parameters:
        x: Variable(hetu.gpu_ops.Node.Node), shape (N, dims)
        y_: Variable(hetu.gpu_ops.Node.Node), shape (N, num_classes)
    Return:
        loss: Variable(hetu.gpu_ops.Node.Node), shape (1,)
        y: Variable(hetu.gpu_ops.Node.Node), shape (N, num_classes)
    '''
    print("Building LSTM model...")
    diminput = 28
    dimhidden = 128
    dimoutput = 10
    nsteps = 28

    forget_gate_w = init.random_normal(shape=(diminput, dimhidden), stddev=0.1, name="lstm_forget_gate_w")
    forget_gate_u = init.random_normal(shape=(dimhidden, dimhidden), stddev=0.1, name="lstm_forget_gate_u")
    forget_gate_b = init.random_normal(shape=(dimhidden, ), stddev=0.1, name="lstm_forget_gate_b")
    input_gate_w = init.random_normal(shape=(diminput, dimhidden), stddev=0.1, name="lstm_input_gate_w")
    input_gate_u = init.random_normal(shape=(dimhidden, dimhidden), stddev=0.1, name="lstm_input_gate_u")
    input_gate_b = init.random_normal(shape=(dimhidden, ), stddev=0.1, name="lstm_input_gate_b")
    output_gate_w = init.random_normal(shape=(diminput, dimhidden), stddev=0.1, name="lstm_output_gate_w")
    output_gate_u = init.random_normal(shape=(dimhidden, dimhidden), stddev=0.1, name="lstm_output_gate_u")
    output_gate_b = init.random_normal(shape=(dimhidden, ), stddev=0.1, name="lstm_output_gate_b")
    tanh_w = init.random_normal(shape=(diminput, dimhidden), stddev=0.1, name="lstm_tanh_w")
    tanh_u = init.random_normal(shape=(dimhidden, dimhidden), stddev=0.1, name="lstm_tanh_u")
    tanh_b = init.random_normal(shape=(dimhidden, ), stddev=0.1, name="lstm_tanh_b")
    out_weights = init.random_normal(shape=(dimhidden, dimoutput), stddev=0.1, name="lstm_out_weight")
    out_bias = init.random_normal(shape=(dimoutput, ), stddev=0.1, name="lstm_out_bias")
    initial_state = ad.Variable(value=np.zeros((1, )).astype(np.float32),
                                name='initial_state', trainable=False)

    for i in range(nsteps):
        cur_x = ad.slice_op(x, (0, i * diminput), (-1, diminput))
        # forget gate
        if i == 0:
            temp = ad.matmul_op(cur_x, forget_gate_w)
            last_c_state = ad.broadcastto_op(initial_state, temp)
            last_h_state = ad.broadcastto_op(initial_state, temp)
            cur_forget = ad.matmul_op(last_h_state, forget_gate_u) + temp
        else:
            cur_forget = ad.matmul_op(last_h_state, forget_gate_u) + ad.matmul_op(cur_x, forget_gate_w)
        cur_forget = cur_forget + ad.broadcastto_op(forget_gate_b, cur_forget)
        cur_forget = ad.sigmoid_op(cur_forget)
        # input gate
        cur_input = ad.matmul_op(last_h_state, input_gate_u) + ad.matmul_op(cur_x, input_gate_w)
        cur_input = cur_input + ad.broadcastto_op(input_gate_b, cur_input)
        cur_input = ad.sigmoid_op(cur_input)
        # output gate
        cur_output = ad.matmul_op(last_h_state, output_gate_u) + ad.matmul_op(cur_x, output_gate_w)
        cur_output = cur_output + ad.broadcastto_op(output_gate_b, cur_output)
        cur_output = ad.sigmoid_op(cur_output)
        # candidate cell state (tanh)
        cur_tanh = ad.matmul_op(last_h_state, tanh_u) + ad.matmul_op(cur_x, tanh_w)
        cur_tanh = cur_tanh + ad.broadcastto_op(tanh_b, cur_tanh)
        cur_tanh = ad.tanh_op(cur_tanh)

        last_c_state = ad.mul_op(last_c_state, cur_forget) + ad.mul_op(cur_input, cur_tanh)
        last_h_state = ad.tanh_op(last_c_state) * cur_output

    x = ad.matmul_op(last_h_state, out_weights)
    y = x + ad.broadcastto_op(out_bias, x)
    loss = ad.softmaxcrossentropy_op(y, y_)
    loss = ad.reduce_mean_op(loss, [0])
    return loss, y
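# Neither `rnn` nor `lstm` builds a train_op; a minimal sketch of the wiring,
# following the optimizer pattern the recommendation models in this file use
# (the learning rate and the executor's fetch list are assumptions):
x = ad.Variable(name="x")
y_ = ad.Variable(name="y_")
loss, y = lstm(x, y_)
opt = optimizer.SGDOptimizer(learning_rate=0.01)
train_op = opt.minimize(loss)
executor = ad.Executor([loss, y, train_op], ctx=ctx)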
def conv_relu_avg(x, shape):
    weight = init.random_normal(shape=shape, stddev=0.1)
    x = ad.conv2d_op(x, weight, padding=2, stride=1)
    x = ad.relu_op(x)
    x = ad.avg_pool2d_op(x, kernel_H=2, kernel_W=2, padding=0, stride=2)
    return x