import numpy as np
import tensorflow as tf
import tensorgraph as tg
# import paths below assume the merge modes and layers are exposed at
# tensorgraph.layers and valid() at tensorgraph.utils; adjust them to match
# your tensorgraph version
from tensorgraph.layers import (SelectedMaskSoftmax, SequenceMask, MaskSoftmax,
                                Sum, Conv2D, RELU, BatchNormalization, Flatten,
                                Linear, Reshape, Embedding, ReduceSum, Softmax)
from tensorgraph.utils import valid


def test_SelectedMaskSoftmax():
    X_ph = tf.placeholder('float32', [None, 20])
    mask_ph = tf.placeholder('float32', [20])

    X_sn = tg.StartNode(input_vars=[X_ph])
    mask_sn = tg.StartNode(input_vars=[mask_ph])
    merge_hn = tg.HiddenNode(prev=[X_sn, mask_sn],
                             input_merge_mode=SelectedMaskSoftmax())
    y_en = tg.EndNode(prev=[merge_hn])

    graph = tg.Graph(start=[X_sn, mask_sn], end=[y_en])
    y_sb, = graph.train_fprop()

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        mask_arr = np.zeros(20)
        mask_arr[[2, 3, 4]] = 1
        feed_dict = {X_ph: np.random.rand(3, 20),
                     mask_ph: mask_arr}
        out = sess.run(y_sb, feed_dict=feed_dict)
        # every row is a softmax over the masked-in positions, so every row
        # should sum to 1; compare with a tolerance instead of exact float
        # equality on a single row
        assert np.allclose(out.sum(1), 1)
        print(out)
        print('test passed!')
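
# A minimal NumPy sketch of the behaviour the test above expects from
# SelectedMaskSoftmax, assuming it renormalizes exp(x) over the positions
# where mask == 1 and zeroes out the rest (an illustrative reference, not
# the library's implementation):
def selected_mask_softmax_ref(X, mask):
    exps = np.exp(X) * mask                        # keep masked-in positions
    return exps / exps.sum(axis=1, keepdims=True)  # renormalize each row

# e.g. selected_mask_softmax_ref(np.random.rand(3, 20), mask_arr) yields rows
# that are nonzero only at indices 2, 3 and 4 and sum to 1.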
def test_SequenceMask():
    X_ph = tf.placeholder('float32', [None, 5, 6, 7])
    seq_ph = tf.placeholder('int32', [None])

    X_sn = tg.StartNode(input_vars=[X_ph])
    seq_sn = tg.StartNode(input_vars=[seq_ph])
    merge_hn = tg.HiddenNode(prev=[X_sn, seq_sn],
                             input_merge_mode=SequenceMask(maxlen=5))
    out_en = tg.EndNode(prev=[merge_hn])

    graph = tg.Graph(start=[X_sn, seq_sn], end=[out_en])
    y_train_sb = graph.train_fprop()
    y_test_sb = graph.test_fprop()

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        feed_dict = {X_ph: np.random.rand(3, 5, 6, 7),
                     seq_ph: [2, 3, 4]}
        y_train = sess.run(y_train_sb, feed_dict=feed_dict)[0]
        y_test = sess.run(y_test_sb, feed_dict=feed_dict)[0]
        assert y_train.sum() == y_test.sum()
        # timesteps at or beyond each sample's sequence length must be zeroed
        assert y_train[0, :2].sum() > 0 and y_train[0, 2:].sum() == 0
        assert y_train[1, :3].sum() > 0 and y_train[1, 3:].sum() == 0
        assert y_train[2, :4].sum() > 0 and y_train[2, 4:].sum() == 0
        print('test passed!')
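
# A minimal NumPy sketch of the masking the test above checks, assuming
# SequenceMask zeroes every timestep at or beyond a sample's sequence length
# (the names here are illustrative, not from the library):
def sequence_mask_ref(X, seq_lens, maxlen):
    # X: (batch, maxlen, ...); seq_lens: (batch,)
    mask = np.arange(maxlen)[None, :] < np.asarray(seq_lens)[:, None]
    # broadcast the (batch, maxlen) mask over the trailing feature dims
    return X * mask.reshape(mask.shape + (1,) * (X.ndim - 2))

# e.g. with seq_lens=[2, 3, 4], sample 0 keeps timesteps 0-1 and sample 2
# keeps timesteps 0-3, matching the assertions above.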
def test_MaskSoftmax():
    X_ph = tf.placeholder('float32', [None, 20])
    seq_ph = tf.placeholder('int32', [None])

    X_sn = tg.StartNode(input_vars=[X_ph])
    seq_sn = tg.StartNode(input_vars=[seq_ph])
    merge_hn = tg.HiddenNode(prev=[X_sn, seq_sn],
                             input_merge_mode=MaskSoftmax())
    y_en = tg.EndNode(prev=[merge_hn])

    graph = tg.Graph(start=[X_sn, seq_sn], end=[y_en])
    y_sb, = graph.train_fprop()

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        feed_dict = {X_ph: np.random.rand(3, 20),
                     seq_ph: [5, 8, 0]}
        out = sess.run(y_sb, feed_dict=feed_dict)
        # the softmax mass must lie entirely within each sample's sequence
        # length: the first seq_len entries sum to 1, the rest to 0, and a
        # zero-length sample produces an all-zero row
        assert (out[0][5:].sum() - 0)**2 < 1e-6
        assert (out[0][:5].sum() - 1)**2 < 1e-6
        assert (out[1][8:].sum() - 0)**2 < 1e-6
        assert (out[1][:8].sum() - 1)**2 < 1e-6
        assert (out[2].sum() - 0)**2 < 1e-6
        print('test passed!')
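
# A minimal NumPy sketch of what the assertions above imply MaskSoftmax
# computes: a softmax restricted to the first seq_len entries of each row,
# with everything after (and rows with seq_len == 0) forced to zero. This is
# an illustrative reference, not the library's implementation:
def mask_softmax_ref(X, seq_lens):
    mask = np.arange(X.shape[1])[None, :] < np.asarray(seq_lens)[:, None]
    exps = np.exp(X) * mask
    sums = exps.sum(axis=1, keepdims=True)
    # guard against 0/0 for fully masked rows (seq_len == 0)
    return np.where(sums > 0, exps / np.maximum(sums, 1e-12), 0.0)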
def __init__(self, h, w, c, nclass):
    layers = []
    layers.append(CBR(h, w, c))
    layers.append(Flatten())
    layers.append(Linear(1 * h * w, nclass))

    self.startnode = tg.StartNode(input_vars=[None])
    hn = tg.HiddenNode(prev=[self.startnode], layers=layers)
    self.endnode = tg.EndNode(prev=[hn])
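
# CBR above is assumed to be a Conv2D -> BatchNormalization -> RELU composite
# defined elsewhere in this module that maps (h, w, c) feature maps to a
# single channel with SAME padding, which is why the Linear layer takes
# 1*h*w inputs. A hedged sketch of an equivalent layer list, should such a
# composite not be available (CBR_layers and the 3x3 kernel are illustrative
# assumptions, not the actual definition):
def CBR_layers(h, w, c):
    return [Conv2D(input_channels=c, num_filters=1, kernel_size=(3, 3),
                   stride=(1, 1), padding='SAME'),
            BatchNormalization(input_shape=[h, w, 1]),
            RELU()]

# usage: layers.extend(CBR_layers(h, w, c)) instead of
# layers.append(CBR(h, w, c))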
def model(word_len, sent_len, nclass):
    unicode_size = 1000
    ch_embed_dim = 20

    # trace the feature-map size through the three VALID-padded convolutions
    # to get the input dim of the final Linear layer (60 filters at the end)
    h, w = valid(ch_embed_dim, word_len, stride=(1, 1),
                 kernel_size=(ch_embed_dim, 5))
    h, w = valid(h, w, stride=(1, 1), kernel_size=(1, 5))
    h, w = valid(h, w, stride=(1, 2), kernel_size=(1, 5))
    conv_out_dim = int(h * w * 60)

    X_ph = tf.placeholder('int32', [None, sent_len, word_len])
    input_sn = tg.StartNode(input_vars=[X_ph])
    # character-level CNN applied per word; the per-word class scores are
    # summed over the sentence and passed through a final softmax
    charcnn_hn = tg.HiddenNode(prev=[input_sn],
                               layers=[Reshape(shape=(-1, word_len)),
                                       Embedding(cat_dim=unicode_size,
                                                 encode_dim=ch_embed_dim,
                                                 zero_pad=True),
                                       Reshape(shape=(-1, ch_embed_dim, word_len, 1)),
                                       Conv2D(input_channels=1, num_filters=20,
                                              padding='VALID',
                                              kernel_size=(ch_embed_dim, 5),
                                              stride=(1, 1)),
                                       RELU(),
                                       Conv2D(input_channels=20, num_filters=40,
                                              padding='VALID',
                                              kernel_size=(1, 5),
                                              stride=(1, 1)),
                                       RELU(),
                                       Conv2D(input_channels=40, num_filters=60,
                                              padding='VALID',
                                              kernel_size=(1, 5),
                                              stride=(1, 2)),
                                       RELU(),
                                       Flatten(),
                                       Linear(conv_out_dim, nclass),
                                       Reshape((-1, sent_len, nclass)),
                                       ReduceSum(1),
                                       Softmax()])
    output_en = tg.EndNode(prev=[charcnn_hn])
    graph = tg.Graph(start=[input_sn], end=[output_en])
    y_train_sb = graph.train_fprop()[0]
    y_test_sb = graph.test_fprop()[0]
    return X_ph, y_train_sb, y_test_sb
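
# A short usage sketch for model() above: build the graph for 3-class
# sentence classification and run one forward pass on random character ids
# (the word_len/sent_len/nclass values here are illustrative):
def _demo_model():
    X_ph, y_train_sb, y_test_sb = model(word_len=20, sent_len=10, nclass=3)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        X = np.random.randint(0, 1000, size=(2, 10, 20)).astype('int32')
        probs = sess.run(y_test_sb, feed_dict={X_ph: X})
        print(probs.shape)  # (2, 3): one class distribution per sentence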
def __init__(self, h, w, c):
    layers1 = []
    layers1.append(Conv2D(input_channels=c, num_filters=1, kernel_size=(2, 2),
                          stride=(1, 1), padding='SAME'))
    layers1.append(BatchNormalization(input_shape=[h, w, 1]))
    layers1.append(RELU())

    layers2 = []
    layers2.append(Conv2D(input_channels=c, num_filters=1, kernel_size=(2, 2),
                          stride=(1, 1), padding='SAME'))
    layers2.append(BatchNormalization(input_shape=[h, w, 1]))
    layers2.append(RELU())

    self.startnode = tg.StartNode(input_vars=[None])
    hn1 = tg.HiddenNode(prev=[self.startnode], layers=layers1)
    hn2 = tg.HiddenNode(prev=[self.startnode], layers=layers2)
    hn3 = tg.HiddenNode(prev=[hn1, hn2], input_merge_mode=Sum())
    self.endnode = tg.EndNode(prev=[hn3])
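
# A usage sketch for the block above, which runs two parallel
# Conv2D -> BatchNormalization -> RELU branches off the same input and merges
# them elementwise with Sum(). The class name TwoBranchSum is hypothetical,
# and feeding the placeholder by assigning startnode.input_vars is an
# assumption based on the input_vars=[None] pattern used above:
def _demo_two_branch_sum():
    h, w, c = 32, 32, 3
    X_ph = tf.placeholder('float32', [None, h, w, c])
    model = TwoBranchSum(h, w, c)
    model.startnode.input_vars = [X_ph]
    graph = tg.Graph(start=[model.startnode], end=[model.endnode])
    y_sb, = graph.train_fprop()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        out = sess.run(y_sb, feed_dict={X_ph: np.random.rand(2, h, w, c)})
        print(out.shape)  # (2, 32, 32, 1): SAME padding keeps spatial dims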