def test_MaskSoftmax():
    """MaskSoftmax merge: softmax restricted to the first seq_len positions."""
    values_ph = tf.placeholder('float32', [None, 20])
    lengths_ph = tf.placeholder('int32', [None])
    values_start = tg.StartNode(input_vars=[values_ph])
    lengths_start = tg.StartNode(input_vars=[lengths_ph])
    masked = tg.HiddenNode(prev=[values_start, lengths_start],
                           input_merge_mode=MaskSoftmax())
    end = tg.EndNode(prev=[masked])
    graph = tg.Graph(start=[values_start, lengths_start], end=[end])
    out_sb, = graph.train_fprop()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        probs = sess.run(out_sb, feed_dict={values_ph: np.random.rand(3, 20),
                                            lengths_ph: [5, 8, 0]})
        # Each row: all mass on the first seq_len entries, summing to one.
        assert (probs[0][5:].sum() - 0)**2 < 1e-6
        assert (probs[0][:5].sum() - 1)**2 < 1e-6
        assert (probs[1][8:].sum() - 0)**2 < 1e-6
        assert (probs[1][:8].sum() - 1)**2 < 1e-6
        # A zero-length row gets no probability mass at all.
        assert (probs[2].sum() - 0)**2 < 1e-6
        print('test passed!')
def test_SequenceMask():
    """SequenceMask merge: frames at or beyond each example's length are zeroed."""
    data_ph = tf.placeholder('float32', [None, 5, 6, 7])
    len_ph = tf.placeholder('int32', [None])
    data_start = tg.StartNode(input_vars=[data_ph])
    len_start = tg.StartNode(input_vars=[len_ph])
    masked = tg.HiddenNode(prev=[data_start, len_start],
                           input_merge_mode=SequenceMask(maxlen=5))
    end = tg.EndNode(prev=[masked])
    graph = tg.Graph(start=[data_start, len_start], end=[end])
    train_sb = graph.train_fprop()
    test_sb = graph.test_fprop()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        feeds = {data_ph: np.random.rand(3, 5, 6, 7), len_ph: [2, 3, 4]}
        y_train = sess.run(train_sb, feed_dict=feeds)[0]
        y_test = sess.run(test_sb, feed_dict=feeds)[0]
        # Masking behaves identically in train and test mode.
        assert y_train.sum() == y_test.sum()
        # Kept prefix is non-zero, masked suffix is exactly zero.
        assert y_train[0, :2].sum() > 0 and y_train[0, 2:].sum() == 0
        assert y_train[1, :3].sum() > 0 and y_train[1, 3:].sum() == 0
        assert y_train[2, :4].sum() > 0 and y_train[2, 4:].sum() == 0
        print('test passed!')
def test_SelectedMaskSoftmax():
    """SelectedMaskSoftmax merge: softmax over only the mask-selected positions.

    Builds a two-input graph (values + fixed binary mask) and checks that
    every output row is a valid probability distribution.
    """
    X_ph = tf.placeholder('float32', [None, 20])
    mask_ph = tf.placeholder('float32', [20])
    X_sn = tg.StartNode(input_vars=[X_ph])
    mask_sn = tg.StartNode(input_vars=[mask_ph])
    merge_hn = tg.HiddenNode(prev=[X_sn, mask_sn], input_merge_mode=SelectedMaskSoftmax())
    y_en = tg.EndNode(prev=[merge_hn])
    graph = tg.Graph(start=[X_sn, mask_sn], end=[y_en])
    y_sb, = graph.train_fprop()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        mask_arr = np.zeros(20)
        mask_arr[[2, 3, 4]] = 1  # only these positions may receive probability
        feed_dict = {X_ph: np.random.rand(3, 20), mask_ph: mask_arr}
        out = sess.run(y_sb, feed_dict=feed_dict)
        # Every row must sum to 1. Fixed: the original
        # `(out.sum(1) == 1).any()` used exact float equality and only
        # required a single row to hit 1.0 — too weak and numerically fragile.
        assert np.allclose(out.sum(1), 1)
        print(out)
        print('test passed!')
def discriminator(self, X):
    """Push X through five stacked discriminator stages, tapping every
    stage's output in both train and test mode."""
    input_node = tg.StartNode(input_vars=[X])
    stage1 = tg.HiddenNode(prev=[input_node], layers=self.d1_layers)
    stage2 = tg.HiddenNode(prev=[stage1], layers=self.d2_layers)
    stage3 = tg.HiddenNode(prev=[stage2], layers=self.d3_layers)
    stage4 = tg.HiddenNode(prev=[stage3], layers=self.d4_layers)
    stage5 = tg.HiddenNode(prev=[stage4], layers=self.d5_layers)
    # One EndNode per stage so intermediate activations are exposed too.
    taps = [tg.EndNode(prev=[stage])
            for stage in (stage1, stage2, stage3, stage4, stage5)]
    graph = tg.Graph(start=[input_node], end=taps)
    y1, y2, y3, y4, y5 = graph.train_fprop()
    y1_test, y2_test, y3_test, y4_test, y5_test = graph.test_fprop()
    return y1, y2, y3, y4, y5, y1_test, y2_test, y3_test, y4_test, y5_test
def __init__(self, h, w, c):
    """Two parallel Conv-BN-ReLU branches merged by an elementwise Sum."""
    def branch():
        # Each branch: 2x2 SAME conv down to one channel, then BN + ReLU.
        return [Conv2D(input_channels=c, num_filters=1, kernel_size=(2, 2),
                       stride=(1, 1), padding='SAME'),
                BatchNormalization(input_shape=[h, w, 1]),
                RELU()]
    self.startnode = tg.StartNode(input_vars=[None])
    left = tg.HiddenNode(prev=[self.startnode], layers=branch())
    right = tg.HiddenNode(prev=[self.startnode], layers=branch())
    merged = tg.HiddenNode(prev=[left, right], input_merge_mode=Sum())
    self.endnode = tg.EndNode(prev=[merged])
def classfication(self, X):
    """Identity pass-through graph; returns (train, test) symbolic outputs.

    NOTE(review): the name looks like a typo of `classification`; kept as-is
    to avoid breaking callers.
    """
    start = tg.StartNode(input_vars=[X])
    passthrough = tg.HiddenNode(prev=[start], layers=[])
    end = tg.EndNode(prev=[passthrough])
    graph = tg.Graph(start=[start], end=[end])
    y_train, = graph.train_fprop()
    y_test, = graph.test_fprop()
    return y_train, y_test
def __init__(self, nclass, h, w, c):
    """AllCNN backbone followed by a Linear head and Softmax."""
    pipeline = [
        AllCNN(nclass, h, w, c),
        Linear(nclass, nclass),
        Softmax(),
    ]
    self.startnode = tg.StartNode(input_vars=[None])
    body = tg.HiddenNode(prev=[self.startnode], layers=pipeline)
    self.endnode = tg.EndNode(prev=[body])
def generator(self, z_fake):
    """Map a noise batch through the generator layers.

    Returns the symbolic generated sample in train and test mode.
    """
    noise_start = tg.StartNode(input_vars=[z_fake])
    body = tg.HiddenNode(prev=[noise_start], layers=self.g_layers)
    out = tg.EndNode(prev=[body])
    graph = tg.Graph(start=[noise_start], end=[out])
    X_fake_train_sb, = graph.train_fprop()
    X_fake_test_sb, = graph.test_fprop()
    return X_fake_train_sb, X_fake_test_sb
def __init__(self, h, w, c, nclass):
    """CBR feature block flattened into a single Linear classifier head."""
    pipeline = [
        CBR(h, w, c),
        Flatten(),
        # Flat dim 1*h*w — assumes CBR preserves the spatial size and emits
        # one channel (matches its SAME-padded single-filter convs); confirm.
        Linear(1 * h * w, nclass),
    ]
    self.startnode = tg.StartNode(input_vars=[None])
    body = tg.HiddenNode(prev=[self.startnode], layers=pipeline)
    self.endnode = tg.EndNode(prev=[body])
def __init__(self):
    """Two Select-ed input branches, concatenated, then fanned out to two heads."""
    self.startnode = tg.StartNode(input_vars=[None])
    # Each branch picks one of the start node's inputs via Select.
    branch_a = tg.HiddenNode(prev=[self.startnode],
                             input_merge_mode=Select(0),
                             layers=[Linear(5)])
    branch_b = tg.HiddenNode(prev=[self.startnode],
                             input_merge_mode=Select(1),
                             layers=[Linear(8)])
    merged = tg.HiddenNode(prev=[branch_a, branch_b],
                           input_merge_mode=Concat(axis=1))
    # Two independent output heads over the concatenated features.
    head_a = tg.HiddenNode(prev=[merged], layers=[Linear(20)])
    head_b = tg.HiddenNode(prev=[merged], layers=[Linear(30)])
    self.endnode = tg.EndNode(prev=[head_a, head_b])
def model(word_len, sent_len, nclass):
    """Character-level CNN sentence classifier.

    Embeds characters, convolves each word, maps to per-word class scores,
    then sums them over the sentence before the final Softmax.
    Returns (input placeholder, train output, test output).
    """
    unicode_size = 1000
    ch_embed_dim = 20
    # Track the feature-map size through the three VALID convolutions.
    fh, fw = valid(ch_embed_dim, word_len, stride=(1, 1), kernel_size=(ch_embed_dim, 5))
    fh, fw = valid(fh, fw, stride=(1, 1), kernel_size=(1, 5))
    fh, fw = valid(fh, fw, stride=(1, 2), kernel_size=(1, 5))
    conv_out_dim = int(fh * fw * 60)
    X_ph = tf.placeholder('int32', [None, sent_len, word_len])
    input_sn = tg.StartNode(input_vars=[X_ph])
    charcnn_layers = [
        Reshape(shape=(-1, word_len)),
        Embedding(cat_dim=unicode_size, encode_dim=ch_embed_dim, zero_pad=True),
        Reshape(shape=(-1, ch_embed_dim, word_len, 1)),
        Conv2D(input_channels=1, num_filters=20, padding='VALID',
               kernel_size=(ch_embed_dim, 5), stride=(1, 1)),
        RELU(),
        Conv2D(input_channels=20, num_filters=40, padding='VALID',
               kernel_size=(1, 5), stride=(1, 1)),
        RELU(),
        Conv2D(input_channels=40, num_filters=60, padding='VALID',
               kernel_size=(1, 5), stride=(1, 2)),
        RELU(),
        Flatten(),
        Linear(conv_out_dim, nclass),
        Reshape((-1, sent_len, nclass)),
        ReduceSum(1),  # pool the per-word scores across the sentence
        Softmax(),
    ]
    charcnn_hn = tg.HiddenNode(prev=[input_sn], layers=charcnn_layers)
    output_en = tg.EndNode(prev=[charcnn_hn])
    graph = tg.Graph(start=[input_sn], end=[output_en])
    y_train_sb = graph.train_fprop()[0]
    y_test_sb = graph.test_fprop()[0]
    return X_ph, y_train_sb, y_test_sb
def resnet_crf_rnn(x_ph):
    """ResNet + sigmoid unary potentials refined by a CRF-RNN.

    The CRF-RNN node receives both the raw input and the unaries; the
    NoChange merge passes them through untouched.
    Returns (train outputs, test outputs).
    """
    start = tg.StartNode(input_vars=[x_ph])
    unary = tg.HiddenNode(
        prev=[start],
        layers=[ResNet(num_blocks=5),
                Conv2D(input_channels=3, num_filters=1, kernel_size=(5, 5),
                       stride=(1, 1), padding='SAME'),
                Sigmoid()])
    refined = tg.HiddenNode(prev=[start, unary],
                            input_merge_mode=NoChange(),
                            layers=[CRF_RNN(num_iter=2)])
    end = tg.EndNode(prev=[refined])
    graph = tg.Graph(start=[start], end=[end])
    train_out = graph.train_fprop()
    test_out = graph.test_fprop()
    return train_out, test_out
def __init__(self, nclass, h, w, c):
    """TemplateModel backbone with a two-layer MLP classifier head."""
    template = TemplateModel(nclass, h, w, c)
    pipeline = [
        template,
        Flatten(),
        Linear(template.output_dim, 200),
        RELU(),
        Linear(200, nclass),
        Softmax(),
    ]
    self.startnode = tg.StartNode(input_vars=[None])
    body = tg.HiddenNode(prev=[self.startnode], layers=pipeline)
    self.endnode = tg.EndNode(prev=[body])
def __init__(self, nclass, h, w, c):
    """Identity block -> dense block -> 1x1 class conv -> global avg pool."""
    layers = [IdentityBlock(input_channels=c, input_shape=[h, w], nlayers=10)]

    layers += [Conv2D(input_channels=c, num_filters=16, kernel_size=(3, 3),
                      stride=(1, 1), padding='SAME'),
               RELU()]
    h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(3, 3))
    layers.append(BatchNormalization(input_shape=[h, w, 16]))

    denseblk = DenseBlock(input_channels=16, input_shape=[h, w],
                          growth_rate=4, nlayers=4)
    layers.append(denseblk)
    # Strided conv halves the spatial resolution after the dense block.
    layers += [Conv2D(input_channels=denseblk.output_channels, num_filters=32,
                      kernel_size=(3, 3), stride=(2, 2), padding='SAME'),
               RELU()]
    h, w = same(in_height=h, in_width=w, stride=(2, 2), kernel_size=(3, 3))
    layers.append(Dropout(0.5))

    # 1x1 conv maps the features straight to class channels.
    layers += [Conv2D(input_channels=32, num_filters=nclass, kernel_size=(1, 1),
                      stride=(1, 1), padding='SAME'),
               RELU()]
    h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(1, 1))
    layers.append(BatchNormalization(input_shape=[h, w, nclass]))

    # Global average pool collapses the remaining spatial grid.
    layers += [AvgPooling(poolsize=(h, w), stride=(1, 1), padding='VALID'),
               Flatten(),
               Softmax()]

    self.startnode = tg.StartNode(input_vars=[None])
    model_hn = tg.HiddenNode(prev=[self.startnode], layers=layers)
    self.endnode = tg.EndNode(prev=[model_hn])
def __init__(self, nclass, h, w, c):
    """UNet backbone, global max-pool, then a Linear + Softmax head."""
    unet = UNet(input_channels=c, input_shape=(h, w))
    pipeline = [
        unet,
        # Pool over the full UNet output grid -> one value per channel.
        MaxPooling(poolsize=tuple(unet.output_shape), stride=(1, 1),
                   padding='VALID'),
        Flatten(),
        Linear(unet.output_channels, nclass),
        Softmax(),
    ]
    self.startnode = tg.StartNode(input_vars=[None])
    body = tg.HiddenNode(prev=[self.startnode], layers=pipeline)
    self.endnode = tg.EndNode(prev=[body])
def __init__(self, nclass, h, w, c):
    """DenseNet feature extractor; exposes the flattened output dimension.

    NOTE(review): `nclass` is accepted but unused here — presumably the
    classifier head is attached by the caller via `output_dim`.
    """
    densenet = DenseNet(input_channels=c, input_shape=(h, w),
                        ndense=3, growth_rate=4, nlayer1blk=4)
    # Flattened feature size = spatial cells x channels.
    self.output_dim = np.prod(densenet.output_shape) * densenet.output_channels
    self.startnode = tg.StartNode(input_vars=[None])
    body = tg.HiddenNode(prev=[self.startnode], layers=[densenet])
    self.endnode = tg.EndNode(prev=[body])
def discriminator(self):
    """Build the character-GAN discriminator over real and generated text.

    A single convolutional trunk (shared between the real path and the
    generator path) feeds a class head (Softmax over nclass) and a judge
    head (single real/fake logit).

    Returns:
        (real_ph, real_train, real_valid, fake_train, fake_valid,
         dis_var_list)

    Raises:
        Exception: if called before self.generator().
    """
    if not self.generator_called:
        raise Exception(
            'self.generator() has to be called first before self.discriminator()'
        )
    scope = 'Discriminator'
    with self.tf_graph.as_default():
        with tf.name_scope(scope):
            # Feature-map size through the three VALID convs below.
            # (An earlier image-style valid() chain computing an unused
            # flat_dim was dead code and has been removed.)
            h1, w1 = valid(self.char_embed_dim, self.word_len,
                           kernel_size=(self.char_embed_dim, 3), stride=(1, 1))
            print('h1:{}, w1:{}'.format(h1, w1))
            h2, w2 = valid(h1, w1, kernel_size=(1, 3), stride=(1, 1))
            print('h2:{}, w2:{}'.format(h2, w2))
            h3, w3 = valid(h2, w2, kernel_size=(1, 3), stride=(1, 1))
            print('h3:{}, w3:{}'.format(h3, w3))
            hf, wf = h3, w3
            n_filters = 100

            real_sn = tg.StartNode(input_vars=[self.real_ph])
            # One-hot the characters and transpose into conv layout.
            real_hn = tg.HiddenNode(prev=[real_sn],
                                    layers=[OneHot(self.char_embed_dim),
                                            Transpose(perm=[0, 3, 2, 1])])
            disc_hn = tg.HiddenNode(
                prev=[real_hn, self.gen_hn],
                layers=[
                    Conv2D(input_channels=self.sent_len, num_filters=100,
                           kernel_size=(self.char_embed_dim, 3),
                           stride=(1, 1), padding='VALID'),
                    TFBatchNormalization(name=scope + '/d1'),
                    LeakyRELU(),
                    Conv2D(input_channels=100, num_filters=100,
                           kernel_size=(1, 3), stride=(1, 1), padding='VALID'),
                    TFBatchNormalization(name=scope + '/d2'),
                    LeakyRELU(),
                    Conv2D(input_channels=100, num_filters=100,
                           kernel_size=(1, 3), stride=(1, 1), padding='VALID'),
                    TFBatchNormalization(name=scope + '/d3'),
                    LeakyRELU(),
                    Flatten(),
                    Linear(int(hf * wf * n_filters), self.bottleneck_dim),
                    TFBatchNormalization(name=scope + '/d4'),
                    LeakyRELU(),
                ])
            class_hn = tg.HiddenNode(
                prev=[disc_hn],
                layers=[Linear(self.bottleneck_dim, self.nclass),
                        Softmax()])
            # Judge head emits a raw logit (no Sigmoid) — presumably the loss
            # applies the sigmoid; confirm against the training code.
            judge_hn = tg.HiddenNode(prev=[disc_hn],
                                     layers=[Linear(self.bottleneck_dim, 1)])

            real_class_en = tg.EndNode(prev=[class_hn])
            real_judge_en = tg.EndNode(prev=[judge_hn])
            fake_class_en = tg.EndNode(prev=[class_hn])
            fake_judge_en = tg.EndNode(prev=[judge_hn])

            graph = tg.Graph(start=[real_sn], end=[real_class_en, real_judge_en])
            real_train = graph.train_fprop()
            real_valid = graph.test_fprop()

            graph = tg.Graph(start=[self.noise_sn],
                             end=[fake_class_en, fake_judge_en])
            fake_train = graph.train_fprop()
            fake_valid = graph.test_fprop()

            dis_var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                             scope=scope)
    return self.real_ph, real_train, real_valid, fake_train, fake_valid, dis_var_list
def discriminator(self):
    """Image-GAN discriminator: a shared conv trunk feeds a Softmax class
    head and a Sigmoid real/fake judge, evaluated on both the real-image
    path and the generator path.

    Returns (real_ph, real_train, real_valid, fake_train, fake_valid,
    dis_var_list); raises if self.generator() has not been called yet.
    """
    if not self.generator_called:
        raise Exception(
            'self.generator() has to be called first before self.discriminator()'
        )
    scope = 'Discriminator'
    with self.tf_graph.as_default():
        with tf.name_scope(scope):
            # Spatial size after the three VALID convolutions below.
            h1, w1 = valid(self.h, self.w, kernel_size=(5, 5), stride=(1, 1))
            h2, w2 = valid(h1, w1, kernel_size=(5, 5), stride=(2, 2))
            h3, w3 = valid(h2, w2, kernel_size=(5, 5), stride=(2, 2))
            flat_dim = int(h3 * w3 * 32)

            dis_real_sn = tg.StartNode(input_vars=[self.real_ph])
            trunk = [
                Conv2D(input_channels=self.c, num_filters=32,
                       kernel_size=(5, 5), stride=(1, 1), padding='VALID'),
                TFBatchNormalization(name=scope + '/d1'),
                LeakyRELU(),
                Conv2D(input_channels=32, num_filters=32,
                       kernel_size=(5, 5), stride=(2, 2), padding='VALID'),
                TFBatchNormalization(name=scope + '/d2'),
                LeakyRELU(),
                Conv2D(input_channels=32, num_filters=32,
                       kernel_size=(5, 5), stride=(2, 2), padding='VALID'),
                TFBatchNormalization(name=scope + '/d3'),
                LeakyRELU(),
                Flatten(),
                Linear(flat_dim, self.bottleneck_dim),
                TFBatchNormalization(name=scope + '/d4'),
                LeakyRELU(),
            ]
            disc_hn = tg.HiddenNode(prev=[dis_real_sn, self.gen_hn],
                                    layers=trunk)
            class_hn = tg.HiddenNode(
                prev=[disc_hn],
                layers=[Linear(self.bottleneck_dim, self.nclass), Softmax()])
            judge_hn = tg.HiddenNode(
                prev=[disc_hn],
                layers=[Linear(self.bottleneck_dim, 1), Sigmoid()])

            real_class_en = tg.EndNode(prev=[class_hn])
            real_judge_en = tg.EndNode(prev=[judge_hn])
            fake_class_en = tg.EndNode(prev=[class_hn])
            fake_judge_en = tg.EndNode(prev=[judge_hn])

            graph = tg.Graph(start=[dis_real_sn],
                             end=[real_class_en, real_judge_en])
            real_train = graph.train_fprop()
            real_valid = graph.test_fprop()

            graph = tg.Graph(start=[self.noise_sn, self.gen_real_sn],
                             end=[fake_class_en, fake_judge_en])
            fake_train = graph.train_fprop()
            fake_valid = graph.test_fprop()

            dis_var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                             scope=scope)
    return self.real_ph, real_train, real_valid, fake_train, fake_valid, dis_var_list
def Vanilla_Classifier(X_train, y_train, X_valid, y_valid, restore):
    """Train a small fully-connected classifier; prints best validation accuracy.

    Args:
        X_train, y_train, X_valid, y_valid: numpy arrays; images (N, h, w, c),
            labels one-hot (N, nclass).
        restore: 1 to restore a previous checkpoint from ./var/2 first.
    """
    batchsize = 100
    learning_rate = 0.001
    _, h, w, c = X_train.shape
    _, nclass = y_train.shape
    g = tf.Graph()
    with g.as_default():
        data_train = tg.SequentialIterator(X_train, y_train, batchsize=batchsize)
        data_valid = tg.SequentialIterator(X_valid, y_valid, batchsize=batchsize)
        X_ph = tf.placeholder('float32', [None, h, w, c])
        # One placeholder per output component (only nclass here).
        y_phs = []
        for comp in [nclass]:
            y_phs.append(tf.placeholder('float32', [None, comp]))
        dim = int(h * w * c)
        scope = 'encoder'
        start = tg.StartNode(input_vars=[X_ph])
        h1_Node = tg.HiddenNode(
            prev=[start],
            layers=[Sigmoid(),
                    TFBatchNormalization(name=scope + '/vanilla1'),
                    RELU(),
                    Flatten(),
                    Sigmoid(),
                    TFBatchNormalization(name=scope + '/vanilla2')])
        h2_Node = tg.HiddenNode(
            prev=[h1_Node],
            layers=[Linear(prev_dim=dim, this_dim=nclass), Softmax()])
        end_nodes = [tg.EndNode(prev=[h2_Node])]
        graph = Graph(start=[start], end=end_nodes)
        train_outs_sb = graph.train_fprop()
        test_outs = graph.test_fprop()

        # Mean-squared error against the one-hot targets.
        ttl_mse = []
        for y_ph, out in zip(y_phs, train_outs_sb):
            ttl_mse.append(tf.reduce_mean((y_ph - out)**2))
        mse = sum(ttl_mse)
        optimizer = tf.train.AdamOptimizer(learning_rate).minimize(mse)

        saver = tf.train.Saver()
        vardir = './var/2'
        if not os.path.exists(vardir):
            os.makedirs(vardir)
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.9)
        tf.set_random_seed(1)
        init = tf.global_variables_initializer()
        with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
            sess.run(init)
            if restore == 1:
                re_saver = tf.train.Saver()
                re_saver.restore(sess, vardir + "/model.ckpt")
                print("Model restored.")
            max_epoch = 100
            temp_acc = []
            for epoch in range(max_epoch):
                train_error = 0
                train_accuracy = 0
                ttl_examples = 0
                for X_batch, ys in data_train:
                    feed_dict = {X_ph: X_batch}
                    for y_ph, y_batch in zip(y_phs, [ys]):
                        feed_dict[y_ph] = y_batch
                    sess.run(optimizer, feed_dict=feed_dict)
                    train_outs = sess.run(train_outs_sb, feed_dict=feed_dict)
                    train_error += total_mse(train_outs, [ys])[0]
                    train_accuracy += total_accuracy(train_outs, [ys])[0]
                    ttl_examples += len(X_batch)
                valid_error = 0
                valid_accuracy = 0
                ttl_examples = 0
                for X_batch, ys in data_valid:
                    feed_dict = {X_ph: X_batch}
                    for y_ph, y_batch in zip(y_phs, [ys]):
                        feed_dict[y_ph] = y_batch
                    valid_outs = sess.run(test_outs, feed_dict=feed_dict)
                    valid_error += total_mse(valid_outs, [ys])[0]
                    valid_accuracy += total_accuracy(valid_outs, [ys])[0]
                    ttl_examples += len(X_batch)
                # Checkpoint every epoch.
                save_path = saver.save(sess, vardir + "/model.ckpt")
                temp_acc.append(valid_accuracy / float(ttl_examples))
            # Fixed: was a Python 2 print statement, inconsistent with the
            # print() calls used everywhere else in this file.
            print('max accuracy is:\t', max(temp_acc))
def Encoder_Classifier(X_train, y_train, X_valid, y_valid, restore):
    """Train a convolutional encoder + Softmax head; checkpoints to ./var/1.

    Args:
        X_train, y_train, X_valid, y_valid: numpy arrays; images (N, h, w, c),
            labels one-hot (N, nclass).
        restore: 1 to restore a previous checkpoint before training.
    """
    batchsize = 100
    learning_rate = 0.001
    _, h, w, c = X_train.shape
    _, nclass = y_train.shape
    g = tf.Graph()
    with g.as_default():
        data_train = tg.SequentialIterator(X_train, y_train, batchsize=batchsize)
        data_valid = tg.SequentialIterator(X_valid, y_valid, batchsize=batchsize)
        X_ph = tf.placeholder('float32', [None, h, w, c])
        y_phs = []
        for comp in [nclass]:
            y_phs.append(tf.placeholder('float32', [None, comp]))
        start = tg.StartNode(input_vars=[X_ph])
        # Feature-map sizes after the three VALID convolutions.
        # NOTE(review): this valid() call uses filters=/strides= while other
        # call sites use kernel_size=/stride= — confirm the helper signature.
        h1, w1 = valid(h, w, filters=(5, 5), strides=(1, 1))
        h2, w2 = valid(h1, w1, filters=(5, 5), strides=(2, 2))
        h3, w3 = valid(h2, w2, filters=(5, 5), strides=(2, 2))
        flat_dim = int(h3 * w3 * 32)
        scope = 'encoder'
        bottleneck_dim = 300
        enc_hn = tg.HiddenNode(
            prev=[start],
            layers=[Conv2D(input_channels=c, num_filters=32,
                           kernel_size=(5, 5), stride=(1, 1), padding='VALID'),
                    TFBatchNormalization(name=scope + '/genc1'),
                    RELU(),
                    Conv2D(input_channels=32, num_filters=32,
                           kernel_size=(5, 5), stride=(2, 2), padding='VALID'),
                    TFBatchNormalization(name=scope + '/genc2'),
                    RELU(),
                    Conv2D(input_channels=32, num_filters=32,
                           kernel_size=(5, 5), stride=(2, 2), padding='VALID'),
                    TFBatchNormalization(name=scope + '/genc3'),
                    RELU(),
                    Flatten(),
                    Linear(flat_dim, 300),
                    TFBatchNormalization(name=scope + '/genc4'),
                    RELU(),
                    Linear(300, bottleneck_dim),
                    Tanh()])
        h2_Node = tg.HiddenNode(
            prev=[enc_hn],
            layers=[Linear(prev_dim=bottleneck_dim, this_dim=nclass),
                    Softmax()])
        end_nodes = [tg.EndNode(prev=[h2_Node])]
        graph = Graph(start=[start], end=end_nodes)
        train_outs_sb = graph.train_fprop()
        test_outs = graph.test_fprop()

        # Mean-squared error against the one-hot targets.
        ttl_mse = []
        for y_ph, out in zip(y_phs, train_outs_sb):
            ttl_mse.append(tf.reduce_mean((y_ph - out)**2))
        mse = sum(ttl_mse)
        optimizer = tf.train.AdamOptimizer(learning_rate).minimize(mse)

        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.9)
        saver = tf.train.Saver()
        vardir = './var/1'
        if not os.path.exists(vardir):
            os.makedirs(vardir)
        tf.set_random_seed(1)
        init = tf.global_variables_initializer()
        with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
            sess.run(init)
            if restore == 1:
                re_saver = tf.train.Saver()
                re_saver.restore(sess, vardir + "/model.ckpt")
                print("Model restored.")
            max_epoch = 2
            temp_acc = []
            for epoch in range(max_epoch):
                train_error = 0
                train_accuracy = 0
                ttl_examples = 0
                for X_batch, ys in data_train:
                    feed_dict = {X_ph: X_batch}
                    for y_ph, y_batch in zip(y_phs, [ys]):
                        feed_dict[y_ph] = y_batch
                    sess.run(optimizer, feed_dict=feed_dict)
                    train_outs = sess.run(train_outs_sb, feed_dict=feed_dict)
                    train_error += total_mse(train_outs, [ys])[0]
                    train_accuracy += total_accuracy(train_outs, [ys])[0]
                    ttl_examples += len(X_batch)
                valid_error = 0
                valid_accuracy = 0
                ttl_examples = 0
                for X_batch, ys in data_valid:
                    feed_dict = {X_ph: X_batch}
                    for y_ph, y_batch in zip(y_phs, [ys]):
                        feed_dict[y_ph] = y_batch
                    valid_outs = sess.run(test_outs, feed_dict=feed_dict)
                    valid_error += total_mse(valid_outs, [ys])[0]
                    valid_accuracy += total_accuracy(valid_outs, [ys])[0]
                    ttl_examples += len(X_batch)
                temp_acc.append(valid_accuracy / float(ttl_examples))
                # Checkpoint every epoch.
                save_path = saver.save(sess, vardir + "/model.ckpt")
                print("Model saved in file: %s" % save_path)
            # Fixed: was a Python 2 print statement.
            print('max accuracy is:\t', max(temp_acc))
def CNN_Classifier(X_train, y_train, X_valid, y_valid):
    """Train a single-conv-layer CNN classifier; checkpoints to ./var/1 and
    prints the best validation accuracy.

    Args:
        X_train, y_train, X_valid, y_valid: numpy arrays; images (N, h, w, c),
            labels one-hot (N, nclass).
    """
    batchsize = 64
    learning_rate = 0.001
    _, h, w, c = X_train.shape
    _, nclass = y_train.shape
    data_train = tg.SequentialIterator(X_train, y_train, batchsize=batchsize)
    data_valid = tg.SequentialIterator(X_valid, y_valid, batchsize=batchsize)
    X_ph = tf.placeholder('float32', [None, h, w, c])
    y_phs = []
    for comp in [nclass]:
        y_phs.append(tf.placeholder('float32', [None, comp]))
    start = tg.StartNode(input_vars=[X_ph])
    # Spatial size after the SAME conv (stride 1) and SAME max-pool (stride 2).
    h, w = same(in_height=h, in_width=w, strides=(1, 1), filters=(2, 2))
    h, w = same(in_height=h, in_width=w, strides=(2, 2), filters=(2, 2))
    # The conv emits 10 channels regardless of the input channel count, so
    # the flattened dim is h*w*10. (Was int(h*w*c*10), which is only correct
    # for c == 1 and breaks the Reshape/Linear pair for multi-channel input.)
    dim = int(h * w * 10)
    h1_Node = tg.HiddenNode(
        prev=[start],
        layers=[Conv2D(input_channels=c, num_filters=10, padding='SAME',
                       kernel_size=(2, 2), stride=(1, 1)),
                MaxPooling(poolsize=(2, 2), stride=(2, 2), padding='SAME'),
                Reshape(shape=(-1, dim))])
    h2_Node = tg.HiddenNode(
        prev=[h1_Node],
        layers=[Linear(prev_dim=dim, this_dim=nclass), Softmax()])
    end_nodes = [tg.EndNode(prev=[h2_Node])]
    graph = Graph(start=[start], end=end_nodes)
    train_outs_sb = graph.train_fprop()
    test_outs = graph.test_fprop()

    # Mean-squared error against the one-hot targets.
    ttl_mse = []
    for y_ph, out in zip(y_phs, train_outs_sb):
        ttl_mse.append(tf.reduce_mean((y_ph - out)**2))
    mse = sum(ttl_mse)
    optimizer = tf.train.AdamOptimizer(learning_rate).minimize(mse)

    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.9)
    saver = tf.train.Saver()
    vardir = './var/1'
    if not os.path.exists(vardir):
        os.makedirs(vardir)
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
        init = tf.global_variables_initializer()
        sess.run(init)
        max_epoch = 100
        temp_acc = []
        for epoch in range(max_epoch):
            train_error = 0
            train_accuracy = 0
            ttl_examples = 0
            for X_batch, ys in data_train:
                feed_dict = {X_ph: X_batch}
                for y_ph, y_batch in zip(y_phs, [ys]):
                    feed_dict[y_ph] = y_batch
                sess.run(optimizer, feed_dict=feed_dict)
                train_outs = sess.run(train_outs_sb, feed_dict=feed_dict)
                train_error += total_mse(train_outs, [ys])[0]
                train_accuracy += total_accuracy(train_outs, [ys])[0]
                ttl_examples += len(X_batch)
            valid_error = 0
            valid_accuracy = 0
            ttl_examples = 0
            for X_batch, ys in data_valid:
                feed_dict = {X_ph: X_batch}
                for y_ph, y_batch in zip(y_phs, [ys]):
                    feed_dict[y_ph] = y_batch
                valid_outs = sess.run(test_outs, feed_dict=feed_dict)
                valid_error += total_mse(valid_outs, [ys])[0]
                valid_accuracy += total_accuracy(valid_outs, [ys])[0]
                ttl_examples += len(X_batch)
            temp_acc.append(valid_accuracy / float(ttl_examples))
            # Checkpoint every epoch.
            save_path = saver.save(sess, vardir + "/model.ckpt")
            print("Model saved in file: %s" % save_path)
        # Fixed: was a Python 2 print statement.
        print('max accuracy is:\t', max(temp_acc))
def __init__(self, nclass, h, w, c):
    """All-convolutional classifier: strided convs instead of pooling, 1x1
    convs down to class channels, then global average pool + Softmax."""
    net = []
    # --- stage 1: c -> 96, keep resolution, then downsample by 2 ---
    net += [Conv2D(input_channels=c, num_filters=96, kernel_size=(3, 3),
                   stride=(1, 1), padding='SAME'), RELU()]
    h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(3, 3))
    net.append(BatchNormalization(input_shape=[h, w, 96]))
    net += [Conv2D(input_channels=96, num_filters=96, kernel_size=(3, 3),
                   stride=(1, 1), padding='SAME'), RELU()]
    h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(3, 3))
    net.append(Dropout(0.5))
    net += [Conv2D(input_channels=96, num_filters=96, kernel_size=(3, 3),
                   stride=(2, 2), padding='SAME'), RELU()]
    h, w = same(in_height=h, in_width=w, stride=(2, 2), kernel_size=(3, 3))
    net.append(BatchNormalization(input_shape=[h, w, 96]))
    # --- stage 2: 96 -> 192, keep resolution, then downsample by 2 ---
    net += [Conv2D(input_channels=96, num_filters=192, kernel_size=(3, 3),
                   stride=(1, 1), padding='SAME'), RELU()]
    h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(3, 3))
    net.append(Dropout(0.5))
    net += [Conv2D(input_channels=192, num_filters=192, kernel_size=(3, 3),
                   stride=(1, 1), padding='SAME'), RELU()]
    h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(3, 3))
    net.append(BatchNormalization(input_shape=[h, w, 192]))
    net += [Conv2D(input_channels=192, num_filters=192, kernel_size=(3, 3),
                   stride=(2, 2), padding='SAME'), RELU()]
    h, w = same(in_height=h, in_width=w, stride=(2, 2), kernel_size=(3, 3))
    net.append(Dropout(0.5))
    # --- stage 3: 3x3 then 1x1 convs down to nclass channels ---
    net += [Conv2D(input_channels=192, num_filters=192, kernel_size=(3, 3),
                   stride=(1, 1), padding='SAME'), RELU()]
    h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(3, 3))
    net.append(BatchNormalization(input_shape=[h, w, 192]))
    net += [Conv2D(input_channels=192, num_filters=192, kernel_size=(1, 1),
                   stride=(1, 1), padding='SAME'), RELU()]
    h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(1, 1))
    net.append(Dropout(0.5))
    net += [Conv2D(input_channels=192, num_filters=nclass, kernel_size=(1, 1),
                   stride=(1, 1), padding='SAME'), RELU()]
    h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(1, 1))
    net.append(BatchNormalization(input_shape=[h, w, nclass]))
    # --- head: global average pool over the remaining grid ---
    net += [AvgPooling(poolsize=(h, w), stride=(1, 1), padding='VALID'),
            Flatten(), Softmax()]
    self.startnode = tg.StartNode(input_vars=[None])
    model_hn = tg.HiddenNode(prev=[self.startnode], layers=net)
    self.endnode = tg.EndNode(prev=[model_hn])
def discriminator_allconv(self):
    """All-convolutional GAN discriminator: a shared all-conv trunk feeds a
    class-logit head and a real/fake-logit head, built over both the real
    path and the generator path.

    Returns (real_ph, real_train, real_valid, fake_train, fake_valid,
    dis_var_list); raises if self.generator() has not been called yet.
    """
    if not self.generator_called:
        raise Exception(
            'self.generator() has to be called first before self.discriminator()'
        )
    scope = 'Discriminator'
    with self.tf_graph.as_default():
        with tf.name_scope(scope):
            dis_real_sn = tg.StartNode(input_vars=[self.real_ph])
            # Trace the spatial size through the SAME-padded conv stack
            # (only the two stride-2 convs actually shrink it).
            h, w = same(in_height=self.h, in_width=self.w, stride=(1, 1), kernel_size=(3, 3))
            h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(3, 3))
            h, w = same(in_height=h, in_width=w, stride=(2, 2), kernel_size=(3, 3))
            h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(3, 3))
            h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(3, 3))
            h, w = same(in_height=h, in_width=w, stride=(2, 2), kernel_size=(3, 3))
            h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(1, 1))
            h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(3, 3))
            h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(1, 1))
            print('h, w', h, w)
            print('===============')
            trunk = [
                Dropout(0.2),
                Conv2D(input_channels=self.c, num_filters=96,
                       kernel_size=(3, 3), stride=(1, 1), padding='SAME'),
                LeakyRELU(),
                TFBatchNormalization(name='b1'),
                Conv2D(input_channels=96, num_filters=96,
                       kernel_size=(3, 3), stride=(1, 1), padding='SAME'),
                LeakyRELU(),
                Dropout(0.5),
                Conv2D(input_channels=96, num_filters=96,
                       kernel_size=(3, 3), stride=(2, 2), padding='SAME'),
                LeakyRELU(),
                TFBatchNormalization(name='b3'),
                Conv2D(input_channels=96, num_filters=192,
                       kernel_size=(3, 3), stride=(1, 1), padding='SAME'),
                LeakyRELU(),
                Dropout(0.5),
                Conv2D(input_channels=192, num_filters=192,
                       kernel_size=(3, 3), stride=(1, 1), padding='SAME'),
                LeakyRELU(),
                TFBatchNormalization(name='b5'),
                Conv2D(input_channels=192, num_filters=192,
                       kernel_size=(3, 3), stride=(2, 2), padding='SAME'),
                LeakyRELU(),
                Dropout(0.5),
                Conv2D(input_channels=192, num_filters=192,
                       kernel_size=(3, 3), stride=(1, 1), padding='SAME'),
                LeakyRELU(),
                TFBatchNormalization(name='b7'),
                Conv2D(input_channels=192, num_filters=192,
                       kernel_size=(1, 1), stride=(1, 1), padding='SAME'),
                LeakyRELU(),
                Dropout(0.5),
                Conv2D(input_channels=192, num_filters=self.nclass,
                       kernel_size=(1, 1), stride=(1, 1), padding='SAME'),
                LeakyRELU(),
                TFBatchNormalization(name='b9'),
                # Global average pool down to one value per class channel.
                AvgPooling(poolsize=(h, w), stride=(1, 1), padding='VALID'),
                Flatten(),
            ]
            disc_hn = tg.HiddenNode(prev=[dis_real_sn, self.gen_hn],
                                    layers=trunk)
            print('h,w', h, w)
            print('==============')
            # Both heads emit raw logits (no Softmax/Sigmoid) — presumably
            # the loss applies them; confirm against the training code.
            class_hn = tg.HiddenNode(prev=[disc_hn],
                                     layers=[Linear(self.nclass, self.nclass)])
            judge_hn = tg.HiddenNode(prev=[disc_hn],
                                     layers=[Linear(self.nclass, 1)])
            real_class_en = tg.EndNode(prev=[class_hn])
            real_judge_en = tg.EndNode(prev=[judge_hn])
            fake_class_en = tg.EndNode(prev=[class_hn])
            fake_judge_en = tg.EndNode(prev=[judge_hn])
            graph = tg.Graph(start=[dis_real_sn],
                             end=[real_class_en, real_judge_en])
            real_train = graph.train_fprop()
            real_valid = graph.test_fprop()
            graph = tg.Graph(start=[self.noise_sn, self.gen_real_sn],
                             end=[fake_class_en, fake_judge_en])
            fake_train = graph.train_fprop()
            fake_valid = graph.test_fprop()
            dis_var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                             scope=scope)
    return self.real_ph, real_train, real_valid, fake_train, fake_valid, dis_var_list
def generator(self):
    """Build the autoencoding generator.

    Encodes the real image (``self.real_ph``) down to a bottleneck code in
    [-1, 1], sums it with the noise input (``self.noise_ph``), and decodes
    back to an image with transposed convolutions.

    Side effects: sets ``self.generator_called``, ``self.gen_real_sn``,
    ``self.noise_sn`` and ``self.gen_hn`` for use by the discriminator
    builders.

    Returns:
        tuple: (y_ph, noise_ph, G_train_sb, G_test_sb, gen_var_list).
    """
    self.generator_called = True
    with self.tf_graph.as_default():
        scope = 'Generator'
        with tf.name_scope(scope):
            # Encoder output sizes for the three VALID convs; flat_dim is
            # the flattened size after the last 32-filter conv.
            h1, w1 = valid(self.h, self.w, kernel_size=(5, 5), stride=(1, 1))
            h2, w2 = valid(h1, w1, kernel_size=(5, 5), stride=(2, 2))
            h3, w3 = valid(h2, w2, kernel_size=(5, 5), stride=(2, 2))
            flat_dim = int(h3 * w3 * 32)
            print('h1:{}, w1:{}'.format(h1, w1))
            print('h2:{}, w2:{}'.format(h2, w2))
            print('h3:{}, w3:{}'.format(h3, w3))
            print('flat dim:{}'.format(flat_dim))

            self.gen_real_sn = tg.StartNode(input_vars=[self.real_ph])

            # Encoder: image -> bottleneck code squashed by Tanh.
            enc_hn = tg.HiddenNode(
                prev=[self.gen_real_sn],
                layers=[
                    Conv2D(input_channels=self.c, num_filters=32, kernel_size=(5, 5), stride=(1, 1), padding='VALID'),
                    TFBatchNormalization(name=scope + '/genc1'),
                    RELU(),
                    Conv2D(input_channels=32, num_filters=32, kernel_size=(5, 5), stride=(2, 2), padding='VALID'),
                    TFBatchNormalization(name=scope + '/genc2'),
                    RELU(),
                    Conv2D(input_channels=32, num_filters=32, kernel_size=(5, 5), stride=(2, 2), padding='VALID'),
                    TFBatchNormalization(name=scope + '/genc3'),
                    RELU(),
                    Flatten(),
                    Linear(flat_dim, 300),
                    TFBatchNormalization(name=scope + '/genc4'),
                    RELU(),
                    Linear(300, self.bottleneck_dim),
                    Tanh(),
                ])

            self.noise_sn = tg.StartNode(input_vars=[self.noise_ph])

            # Decoder: (noise + code) -> image, mirroring the encoder shapes
            # back up via transposed convolutions.
            self.gen_hn = tg.HiddenNode(
                prev=[self.noise_sn, enc_hn],
                input_merge_mode=Sum(),
                layers=[
                    Linear(self.bottleneck_dim, flat_dim),
                    RELU(),
                    Reshape((-1, 1, 1, flat_dim)),
                    Conv2D_Transpose(input_channels=flat_dim, num_filters=200, output_shape=(h3, w3),
                                     kernel_size=(h3, w3), stride=(1, 1), padding='VALID'),
                    TFBatchNormalization(name=scope + '/g1'),
                    RELU(),
                    Conv2D_Transpose(input_channels=200, num_filters=100, output_shape=(h2, w2),
                                     kernel_size=(5, 5), stride=(2, 2), padding='VALID'),
                    TFBatchNormalization(name=scope + '/g2'),
                    RELU(),
                    Conv2D_Transpose(input_channels=100, num_filters=50, output_shape=(h1, w1),
                                     kernel_size=(5, 5), stride=(2, 2), padding='VALID'),
                    TFBatchNormalization(name=scope + '/g3'),
                    RELU(),
                    Conv2D_Transpose(input_channels=50, num_filters=self.c, output_shape=(self.h, self.w),
                                     kernel_size=(5, 5), stride=(1, 1), padding='VALID'),
                    SetShape((-1, self.h, self.w, self.c)),
                    Sigmoid(),
                ])

            # (Removed dead code: four valid() shape recomputations whose
            # h, w results were never used.)
            y_en = tg.EndNode(prev=[self.gen_hn])
            graph = tg.Graph(start=[self.noise_sn, self.gen_real_sn], end=[y_en])
            G_train_sb = graph.train_fprop()[0]
            G_test_sb = graph.test_fprop()[0]
            gen_var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=scope)
            # NOTE(review): self.y_ph is not created in this method --
            # presumably defined in __init__; confirm it exists before
            # relying on this return value.
            return self.y_ph, self.noise_ph, G_train_sb, G_test_sb, gen_var_list
def generator(self):
    """Build the class-conditional generator.

    Concatenates a noise vector with a one-hot class label ``y`` and
    decodes the result to a single-channel image via transposed
    convolutions.

    Side effects: sets ``self.generator_called``, ``self.noise_sn``,
    ``self.y_sn`` and ``self.gen_hn`` for use by the discriminator builder.

    Returns:
        tuple: (y_ph, noise_ph, G_train_sb, G_test_sb, gen_var_list).
    """
    self.generator_called = True
    with self.tf_graph.as_default():
        scope = 'Generator'
        with tf.name_scope(scope):
            noise_ph = tf.placeholder('float32', [None, self.bottleneck_dim], name='noise')
            self.noise_sn = tg.StartNode(input_vars=[noise_ph])

            # Target sizes for the decoder, mirrored from a 3-conv VALID
            # encoder over an (h, w) image.
            h1, w1 = valid(self.h, self.w, kernel_size=(5, 5), stride=(1, 1))
            h2, w2 = valid(h1, w1, kernel_size=(5, 5), stride=(2, 2))
            h3, w3 = valid(h2, w2, kernel_size=(5, 5), stride=(2, 2))
            flat_dim = int(h3 * w3 * 32)
            print('h1:{}, w1:{}'.format(h1, w1))
            print('h2:{}, w2:{}'.format(h2, w2))
            print('h3:{}, w3:{}'.format(h3, w3))
            print('flat dim:{}'.format(flat_dim))

            y_ph = tf.placeholder('float32', [None, self.nclass], name='y')
            self.y_sn = tg.StartNode(input_vars=[y_ph])

            # [noise | label] concatenated along the feature axis, giving a
            # width of bottleneck_dim + nclass.
            noise_hn = tg.HiddenNode(prev=[self.noise_sn, self.y_sn],
                                     input_merge_mode=Concat(1))

            self.gen_hn = tg.HiddenNode(
                prev=[noise_hn],
                layers=[
                    # BUG FIX: input width is bottleneck_dim + nclass; the
                    # previous hard-coded "+ 10" only worked for nclass == 10.
                    Linear(self.bottleneck_dim + self.nclass, flat_dim),
                    RELU(),
                    Reshape((-1, 1, 1, flat_dim)),
                    # NOTE(review): output_shape/kernel (2, 2) then kernel
                    # (9, 9) up to (h2, w2) look tuned for one image size --
                    # confirm 2 + 9 - 1 == h2 for the intended dataset.
                    Conv2D_Transpose(input_channels=flat_dim, num_filters=200, output_shape=(2, 2),
                                     kernel_size=(2, 2), stride=(1, 1), padding='VALID'),
                    TFBatchNormalization(name=scope + '/dc1'),
                    RELU(),
                    Conv2D_Transpose(input_channels=200, num_filters=100, output_shape=(h2, w2),
                                     kernel_size=(9, 9), stride=(1, 1), padding='VALID'),
                    TFBatchNormalization(name=scope + '/dc2'),
                    RELU(),
                    Conv2D_Transpose(input_channels=100, num_filters=50, output_shape=(h1, w1),
                                     kernel_size=(5, 5), stride=(2, 2), padding='VALID'),
                    TFBatchNormalization(name=scope + '/dc3'),
                    RELU(),
                    Conv2D_Transpose(input_channels=50, num_filters=1, output_shape=(self.h, self.w),
                                     kernel_size=(5, 5), stride=(1, 1), padding='VALID'),
                    SetShape((-1, self.h, self.w, 1)),
                    Sigmoid(),
                ])

            y_en = tg.EndNode(prev=[self.gen_hn])
            graph = tg.Graph(start=[self.noise_sn, self.y_sn], end=[y_en])
            G_train_sb = graph.train_fprop()[0]
            G_test_sb = graph.test_fprop()[0]
            gen_var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=scope)
            return y_ph, noise_ph, G_train_sb, G_test_sb, gen_var_list
def classifier(X_ph, X_gen_ph, h, w): with tf.variable_scope('Classifier'): X_sn = tg.StartNode(input_vars=[X_ph]) X_gen_sn = tg.StartNode(input_vars=[X_gen_ph]) h1, w1 = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(3, 3)) h2, w2 = same(in_height=h1, in_width=w1, stride=(2, 2), kernel_size=(2, 2)) h3, w3 = same(in_height=h2, in_width=w2, stride=(1, 1), kernel_size=(3, 3)) h4, w4 = same(in_height=h3, in_width=w3, stride=(2, 2), kernel_size=(2, 2)) print('---', h, w) X_hn = tg.HiddenNode(prev=[X_sn], layers=[ Conv2D(input_channels=1, num_filters=32, kernel_size=(3, 3), stride=(1, 1), padding='SAME'), BatchNormalization(input_shape=[h1, w1, 32]), RELU(), MaxPooling(poolsize=(2, 2), stride=(2, 2), padding='SAME'), LRN(), Conv2D(input_channels=32, num_filters=64, kernel_size=(3, 3), stride=(1, 1), padding='SAME'), BatchNormalization(input_shape=[h3, w3, 64]), RELU(), MaxPooling(poolsize=(2, 2), stride=(2, 2), padding='SAME'), Flatten(), ]) X_gen_hn = tg.HiddenNode( prev=[X_gen_sn], layers=[ Conv2D(input_channels=1, num_filters=32, kernel_size=(3, 3), stride=(1, 1), padding='SAME'), BatchNormalization(input_shape=[h1, w1, 32]), RELU(), MaxPooling(poolsize=(2, 2), stride=(2, 2), padding='SAME'), LRN(), Conv2D(input_channels=32, num_filters=64, kernel_size=(3, 3), stride=(1, 1), padding='SAME'), BatchNormalization(input_shape=[h3, w3, 64]), RELU(), MaxPooling(poolsize=(2, 2), stride=(2, 2), padding='SAME'), Flatten(), ]) print('===', h4 * w4 * 64 * 2) merge_hn = tg.HiddenNode(prev=[X_hn, X_gen_hn], input_merge_mode=Concat(), layers=[ Linear(h4 * w4 * 64 * 2, 100), RELU(), BatchNormalization(input_shape=[100]), Linear(100, 1), Sigmoid() ]) en = tg.EndNode(prev=[merge_hn]) graph = tg.Graph(start=[X_sn, X_gen_sn], end=[en]) y_train, = graph.train_fprop() y_test, = graph.test_fprop() return y_train, y_test
def discriminator(self):
    """Build the convolutional discriminator for the conditional GAN.

    One shared conv trunk maps either a real image (via a new ``real``
    placeholder) or the generator output (``self.gen_hn``) down to the
    bottleneck, then branches into a softmax class head and a scalar
    judge head (left as raw logits -- no activation layer).

    Returns:
        tuple: (real_ph, real_train, real_valid, fake_train, fake_valid,
        dis_var_list) where each ``*_train`` / ``*_valid`` is the
        [class_out, judge_out] pair produced by graph fprop.

    Raises:
        Exception: if called before :meth:`generator` (which creates
        ``self.gen_hn``, ``self.noise_sn`` and ``self.y_sn``).
    """
    if not self.generator_called:
        raise Exception(
            'self.generator() has to be called first before self.discriminator()')
    scope = 'Discriminator'
    with self.tf_graph.as_default():
        with tf.name_scope(scope):
            # Spatial sizes after each VALID conv; flat_dim is the
            # flattened width entering the bottleneck Linear.
            h1, w1 = valid(self.h, self.w, kernel_size=(5, 5), stride=(1, 1))
            h2, w2 = valid(h1, w1, kernel_size=(5, 5), stride=(2, 2))
            h3, w3 = valid(h2, w2, kernel_size=(5, 5), stride=(2, 2))
            flat_dim = int(h3 * w3 * 32)

            real_ph = tf.placeholder('float32', [None, self.h, self.w, 1], name='real')
            real_sn = tg.StartNode(input_vars=[real_ph])

            # Shared trunk applied to both the real and the fake path.
            trunk = [
                Conv2D(input_channels=1, num_filters=32, kernel_size=(5, 5), stride=(1, 1), padding='VALID'),
                TFBatchNormalization(name=scope + '/c1'),
                LeakyRELU(),
                Conv2D(input_channels=32, num_filters=32, kernel_size=(5, 5), stride=(2, 2), padding='VALID'),
                TFBatchNormalization(name=scope + '/c2'),
                LeakyRELU(),
                Conv2D(input_channels=32, num_filters=32, kernel_size=(5, 5), stride=(2, 2), padding='VALID'),
                TFBatchNormalization(name=scope + '/c3'),
                LeakyRELU(),
                Flatten(),
                Linear(flat_dim, self.bottleneck_dim),
                TFBatchNormalization(name=scope + '/l1'),
                LeakyRELU(),
            ]
            disc_hn = tg.HiddenNode(prev=[real_sn, self.gen_hn], layers=trunk)

            class_hn = tg.HiddenNode(
                prev=[disc_hn],
                layers=[Linear(self.bottleneck_dim, self.nclass), Softmax()])
            judge_hn = tg.HiddenNode(
                prev=[disc_hn],
                layers=[Linear(self.bottleneck_dim, 1)])

            real_class_en = tg.EndNode(prev=[class_hn])
            real_judge_en = tg.EndNode(prev=[judge_hn])
            fake_class_en = tg.EndNode(prev=[class_hn])
            fake_judge_en = tg.EndNode(prev=[judge_hn])

            # Real path starts from the placeholder; the fake path re-enters
            # through the generator's start nodes.
            real_graph = tg.Graph(start=[real_sn], end=[real_class_en, real_judge_en])
            real_train = real_graph.train_fprop()
            real_valid = real_graph.test_fprop()

            fake_graph = tg.Graph(start=[self.noise_sn, self.y_sn],
                                  end=[fake_class_en, fake_judge_en])
            fake_train = fake_graph.train_fprop()
            fake_valid = fake_graph.test_fprop()

            # NOTE(review): tf.name_scope does not prefix variable names;
            # presumably the layers name their variables under this scope --
            # confirm this collection filter actually captures them.
            dis_var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=scope)
            return real_ph, real_train, real_valid, fake_train, fake_valid, dis_var_list