def model():
    with tf.name_scope('MnistCNN'):
        seq = tg.Sequential()
        seq.add(Conv2D(num_filters=32, kernel_size=(3, 3), stride=(1, 1), padding='SAME'))
        seq.add(BatchNormalization())
        seq.add(RELU())
        seq.add(MaxPooling(poolsize=(2, 2), stride=(2, 2), padding='SAME'))
        seq.add(LRN())

        seq.add(Conv2D(num_filters=64, kernel_size=(3, 3), stride=(1, 1), padding='SAME'))
        seq.add(BatchNormalization())
        seq.add(RELU())
        seq.add(MaxPooling(poolsize=(2, 2), stride=(2, 2), padding='SAME'))
        seq.add(LRN())

        seq.add(Flatten())
        seq.add(Linear(128))
        seq.add(BatchNormalization())
        seq.add(Tanh())
        seq.add(Dropout(0.8))
        seq.add(Linear(256))
        seq.add(BatchNormalization())
        seq.add(Tanh())
        seq.add(Dropout(0.8))
        seq.add(Linear(10))
        seq.add(Softmax())
    return seq
def model(nclass, h, w, c):
    with tf.name_scope('Cifar10AllCNN'):
        seq = tg.Sequential()
        seq.add(Conv2D(input_channels=c, num_filters=96, kernel_size=(3, 3), stride=(1, 1), padding='SAME'))
        seq.add(RELU())
        seq.add(TFBatchNormalization(name='b1'))
        h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(3, 3))

        seq.add(Conv2D(input_channels=96, num_filters=96, kernel_size=(3, 3), stride=(1, 1), padding='SAME'))
        seq.add(RELU())
        h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(3, 3))
        seq.add(Dropout(0.5))

        seq.add(Conv2D(input_channels=96, num_filters=96, kernel_size=(3, 3), stride=(2, 2), padding='SAME'))
        seq.add(RELU())
        seq.add(TFBatchNormalization(name='b3'))
        h, w = same(in_height=h, in_width=w, stride=(2, 2), kernel_size=(3, 3))

        seq.add(Conv2D(input_channels=96, num_filters=192, kernel_size=(3, 3), stride=(1, 1), padding='SAME'))
        seq.add(RELU())
        h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(3, 3))
        seq.add(Dropout(0.5))

        seq.add(Conv2D(input_channels=192, num_filters=192, kernel_size=(3, 3), stride=(1, 1), padding='SAME'))
        seq.add(RELU())
        seq.add(TFBatchNormalization(name='b5'))
        h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(3, 3))

        seq.add(Conv2D(input_channels=192, num_filters=192, kernel_size=(3, 3), stride=(2, 2), padding='SAME'))
        seq.add(RELU())
        h, w = same(in_height=h, in_width=w, stride=(2, 2), kernel_size=(3, 3))
        seq.add(Dropout(0.5))

        seq.add(Conv2D(input_channels=192, num_filters=192, kernel_size=(3, 3), stride=(1, 1), padding='SAME'))
        seq.add(RELU())
        seq.add(TFBatchNormalization(name='b7'))
        h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(3, 3))

        seq.add(Conv2D(input_channels=192, num_filters=192, kernel_size=(1, 1), stride=(1, 1), padding='SAME'))
        seq.add(RELU())
        h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(1, 1))
        seq.add(Dropout(0.5))

        seq.add(Conv2D(input_channels=192, num_filters=nclass, kernel_size=(1, 1), stride=(1, 1), padding='SAME'))
        seq.add(RELU())
        seq.add(TFBatchNormalization(name='b9'))
        h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(1, 1))

        seq.add(AvgPooling(poolsize=(h, w), stride=(1, 1), padding='VALID'))
        seq.add(Flatten())
        seq.add(Softmax())
    return seq
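# The `same` helper threaded through the model above tracks spatial dimensions
# across 'SAME'-padded convolutions. A minimal sketch of what it is assumed to
# compute (TensorFlow's 'SAME' rule out = ceil(in / stride), which is
# independent of kernel size); `same_sketch` is an illustrative stand-in, not
# the library function itself:
import math

def same_sketch(in_height, in_width, stride, kernel_size):
    # kernel_size kept only for signature parity; it does not affect
    # 'SAME' output dimensions in TensorFlow
    out_h = int(math.ceil(float(in_height) / stride[0]))
    out_w = int(math.ceil(float(in_width) / stride[1]))
    return out_h, out_w

# e.g. for 32x32 CIFAR-10 inputs: same_sketch(32, 32, (2, 2), (3, 3)) == (16, 16)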
def model():
    with tf.name_scope('MnistCNN'):
        seq = tg.Sequential()
        seq.add(Conv2D(input_channels=1, num_filters=32, kernel_size=(3, 3), stride=(1, 1), padding='SAME'))
        h, w = same(in_height=28, in_width=28, stride=(1, 1), kernel_size=(3, 3))
        seq.add(BatchNormalization(input_shape=[h, w, 32]))
        seq.add(RELU())
        seq.add(MaxPooling(poolsize=(2, 2), stride=(2, 2), padding='SAME'))
        h, w = same(in_height=h, in_width=w, stride=(2, 2), kernel_size=(2, 2))
        seq.add(LRN())

        seq.add(Conv2D(input_channels=32, num_filters=64, kernel_size=(3, 3), stride=(1, 1), padding='SAME'))
        h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(3, 3))
        seq.add(BatchNormalization(input_shape=[h, w, 64]))
        seq.add(RELU())
        seq.add(MaxPooling(poolsize=(2, 2), stride=(2, 2), padding='SAME'))
        h, w = same(in_height=h, in_width=w, stride=(2, 2), kernel_size=(2, 2))
        seq.add(LRN())

        seq.add(Flatten())
        seq.add(Linear(int(h * w * 64), 128))
        seq.add(BatchNormalization(input_shape=[128]))
        seq.add(Tanh())
        seq.add(Dropout(0.8))
        seq.add(Linear(128, 256))
        seq.add(BatchNormalization(input_shape=[256]))
        seq.add(Tanh())
        seq.add(Dropout(0.8))
        seq.add(Linear(256, 10))
        seq.add(Softmax())
    return seq
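# A minimal usage sketch for the MNIST model above, assuming the tensorgraph
# Sequential API with separate training and inference forward passes
# (train_fprop applies dropout; test_fprop does not). Shapes are for 28x28
# grayscale MNIST images; the random input is a placeholder for real data:
X_ph = tf.placeholder('float32', [None, 28, 28, 1])
seq = model()
y_train_sb = seq.train_fprop(X_ph)
y_test_sb = seq.test_fprop(X_ph)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    probs = sess.run(y_test_sb, feed_dict={X_ph: np.random.rand(5, 28, 28, 1)})
    print(probs.shape)  # expected: (5, 10)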
def test_Dropout():
    X_ph = tf.placeholder('float32', [None, 32])
    seq = tg.Sequential()
    seq.add(Linear(20))
    seq.add(Dropout(0.2, noise_shape=[-1, 20]))
    out = seq.train_fprop(X_ph)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        out = sess.run(out, feed_dict={X_ph: np.random.rand(1, 32)})
        print(out)
        print(out.shape)
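# Note on noise_shape: tf.nn.dropout draws its random keep/drop mask with
# shape noise_shape and broadcasts it up to the input shape (each dimension
# must be 1 or match the input). Here noise_shape=[-1, 20], with -1 presumably
# standing in for the dynamic batch dimension, gives every unit of every
# example its own mask; [1, 20] would instead share one mask across the batch.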
def __init__(self, nclass, h, w, c):
    layers = []
    identityblk = IdentityBlock(input_channels=c, input_shape=[h, w], nlayers=10)
    layers.append(identityblk)

    layers.append(Conv2D(input_channels=c, num_filters=16, kernel_size=(3, 3), stride=(1, 1), padding='SAME'))
    layers.append(RELU())
    h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(3, 3))
    layers.append(BatchNormalization(input_shape=[h, w, 16]))

    denseblk = DenseBlock(input_channels=16, input_shape=[h, w], growth_rate=4, nlayers=4)
    layers.append(denseblk)

    layers.append(Conv2D(input_channels=denseblk.output_channels, num_filters=32, kernel_size=(3, 3), stride=(2, 2), padding='SAME'))
    layers.append(RELU())
    h, w = same(in_height=h, in_width=w, stride=(2, 2), kernel_size=(3, 3))
    layers.append(Dropout(0.5))

    layers.append(Conv2D(input_channels=32, num_filters=nclass, kernel_size=(1, 1), stride=(1, 1), padding='SAME'))
    layers.append(RELU())
    h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(1, 1))
    layers.append(BatchNormalization(input_shape=[h, w, nclass]))

    layers.append(AvgPooling(poolsize=(h, w), stride=(1, 1), padding='VALID'))
    layers.append(Flatten())
    layers.append(Softmax())

    self.startnode = tg.StartNode(input_vars=[None])
    model_hn = tg.HiddenNode(prev=[self.startnode], layers=layers)
    self.endnode = tg.EndNode(prev=[model_hn])
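# Note: the conv after the dense block reads its input_channels from
# denseblk.output_channels instead of hard-coding it. For a DenseNet-style
# block that concatenates growth_rate new feature maps per layer, this is
# presumably input_channels + growth_rate * nlayers = 16 + 4 * 4 = 32,
# which matches the 32 filters assumed by the layer that follows.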
def __init__(self, nclass, h, w, c):
    layers = []
    layers.append(Conv2D(input_channels=c, num_filters=96, kernel_size=(3, 3), stride=(1, 1), padding='SAME'))
    layers.append(RELU())
    h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(3, 3))
    layers.append(BatchNormalization(input_shape=[h, w, 96]))

    layers.append(Conv2D(input_channels=96, num_filters=96, kernel_size=(3, 3), stride=(1, 1), padding='SAME'))
    layers.append(RELU())
    h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(3, 3))
    layers.append(Dropout(0.5))

    layers.append(Conv2D(input_channels=96, num_filters=96, kernel_size=(3, 3), stride=(2, 2), padding='SAME'))
    layers.append(RELU())
    h, w = same(in_height=h, in_width=w, stride=(2, 2), kernel_size=(3, 3))
    layers.append(BatchNormalization(input_shape=[h, w, 96]))

    layers.append(Conv2D(input_channels=96, num_filters=192, kernel_size=(3, 3), stride=(1, 1), padding='SAME'))
    layers.append(RELU())
    h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(3, 3))
    layers.append(Dropout(0.5))

    layers.append(Conv2D(input_channels=192, num_filters=192, kernel_size=(3, 3), stride=(1, 1), padding='SAME'))
    layers.append(RELU())
    h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(3, 3))
    layers.append(BatchNormalization(input_shape=[h, w, 192]))

    layers.append(Conv2D(input_channels=192, num_filters=192, kernel_size=(3, 3), stride=(2, 2), padding='SAME'))
    layers.append(RELU())
    h, w = same(in_height=h, in_width=w, stride=(2, 2), kernel_size=(3, 3))
    layers.append(Dropout(0.5))

    layers.append(Conv2D(input_channels=192, num_filters=192, kernel_size=(3, 3), stride=(1, 1), padding='SAME'))
    layers.append(RELU())
    h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(3, 3))
    layers.append(BatchNormalization(input_shape=[h, w, 192]))

    layers.append(Conv2D(input_channels=192, num_filters=192, kernel_size=(1, 1), stride=(1, 1), padding='SAME'))
    layers.append(RELU())
    h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(1, 1))
    layers.append(Dropout(0.5))

    layers.append(Conv2D(input_channels=192, num_filters=nclass, kernel_size=(1, 1), stride=(1, 1), padding='SAME'))
    layers.append(RELU())
    h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(1, 1))
    layers.append(BatchNormalization(input_shape=[h, w, nclass]))

    layers.append(AvgPooling(poolsize=(h, w), stride=(1, 1), padding='VALID'))
    layers.append(Flatten())
    layers.append(Softmax())

    self.startnode = tg.StartNode(input_vars=[None])
    model_hn = tg.HiddenNode(prev=[self.startnode], layers=layers)
    self.endnode = tg.EndNode(prev=[model_hn])
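# A minimal sketch of driving the node-graph model above. AllCNN is a
# hypothetical name for the enclosing class; the sketch assumes
# StartNode.input_vars can be assigned after construction, before the lazy
# train_fprop/test_fprop calls actually build the graph:
X_ph = tf.placeholder('float32', [None, 32, 32, 3])
model = AllCNN(nclass=10, h=32, w=32, c=3)
model.startnode.input_vars = [X_ph]
graph = tg.Graph(start=[model.startnode], end=[model.endnode])
y_train_sb, = graph.train_fprop()  # one symbolic output per EndNode
y_test_sb, = graph.test_fprop()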
def discriminator_allconv(self):
    if not self.generator_called:
        raise Exception('self.generator() has to be called first before self.discriminator()')
    scope = 'Discriminator'
    with self.tf_graph.as_default():
        with tf.name_scope(scope):
            # h1, w1 = valid(self.h, self.w, kernel_size=(5,5), stride=(1,1))
            # h2, w2 = valid(h1, w1, kernel_size=(5,5), stride=(2,2))
            # h3, w3 = valid(h2, w2, kernel_size=(5,5), stride=(2,2))
            # flat_dim = int(h3*w3*32)
            dis_real_sn = tg.StartNode(input_vars=[self.real_ph])
            # fake_ph = tf.placeholder('float32', [None, self.h, self.w, 1], name='fake')
            # fake_sn = tg.StartNode(input_vars=[fake_ph])

            h, w = same(in_height=self.h, in_width=self.w, stride=(1, 1), kernel_size=(3, 3))
            h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(3, 3))
            h, w = same(in_height=h, in_width=w, stride=(2, 2), kernel_size=(3, 3))
            h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(3, 3))
            h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(3, 3))
            h, w = same(in_height=h, in_width=w, stride=(2, 2), kernel_size=(3, 3))
            h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(1, 1))
            h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(3, 3))
            h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(1, 1))
            print('h, w', h, w)
            print('===============')
            # h, w = valid(in_height=h, in_width=w, stride=(1,1), kernel_size=(h,w))

            disc_hn = tg.HiddenNode(
                prev=[dis_real_sn, self.gen_hn],
                layers=[
                    Dropout(0.2),
                    # TFBatchNormalization(name='b0'),
                    Conv2D(input_channels=self.c, num_filters=96, kernel_size=(3, 3), stride=(1, 1), padding='SAME'),
                    LeakyRELU(),
                    TFBatchNormalization(name='b1'),
                    # Dropout(0.5),
                    Conv2D(input_channels=96, num_filters=96, kernel_size=(3, 3), stride=(1, 1), padding='SAME'),
                    LeakyRELU(),
                    # TFBatchNormalization(name='b2'),
                    Dropout(0.5),
                    Conv2D(input_channels=96, num_filters=96, kernel_size=(3, 3), stride=(2, 2), padding='SAME'),
                    LeakyRELU(),
                    TFBatchNormalization(name='b3'),
                    # Dropout(0.5),
                    Conv2D(input_channels=96, num_filters=192, kernel_size=(3, 3), stride=(1, 1), padding='SAME'),
                    LeakyRELU(),
                    # TFBatchNormalization(name='b4'),
                    Dropout(0.5),
                    Conv2D(input_channels=192, num_filters=192, kernel_size=(3, 3), stride=(1, 1), padding='SAME'),
                    LeakyRELU(),
                    TFBatchNormalization(name='b5'),
                    # Dropout(0.5),
                    Conv2D(input_channels=192, num_filters=192, kernel_size=(3, 3), stride=(2, 2), padding='SAME'),
                    LeakyRELU(),
                    # TFBatchNormalization(name='b6'),
                    Dropout(0.5),
                    Conv2D(input_channels=192, num_filters=192, kernel_size=(3, 3), stride=(1, 1), padding='SAME'),
                    LeakyRELU(),
                    TFBatchNormalization(name='b7'),
                    # Dropout(0.5),
                    Conv2D(input_channels=192, num_filters=192, kernel_size=(1, 1), stride=(1, 1), padding='SAME'),
                    LeakyRELU(),
                    # TFBatchNormalization(name='b8'),
                    Dropout(0.5),
                    Conv2D(input_channels=192, num_filters=self.nclass, kernel_size=(1, 1), stride=(1, 1), padding='SAME'),
                    LeakyRELU(),
                    TFBatchNormalization(name='b9'),
                    # Dropout(0.5),
                    AvgPooling(poolsize=(h, w), stride=(1, 1), padding='VALID'),
                    Flatten(),
                ])

            class_hn = tg.HiddenNode(
                prev=[disc_hn],
                layers=[
                    Linear(self.nclass, self.nclass),
                    # Softmax()
                ])
            judge_hn = tg.HiddenNode(
                prev=[disc_hn],
                layers=[
                    Linear(self.nclass, 1),
                    # Sigmoid()
                ])

            real_class_en = tg.EndNode(prev=[class_hn])
            real_judge_en = tg.EndNode(prev=[judge_hn])
            fake_class_en = tg.EndNode(prev=[class_hn])
            fake_judge_en = tg.EndNode(prev=[judge_hn])

            graph = tg.Graph(start=[dis_real_sn], end=[real_class_en, real_judge_en])
            real_train = graph.train_fprop()
            real_valid = graph.test_fprop()

            graph = tg.Graph(start=[self.noise_sn, self.gen_real_sn], end=[fake_class_en, fake_judge_en])
            fake_train = graph.train_fprop()
            fake_valid = graph.test_fprop()

            dis_var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=scope)
    return self.real_ph, real_train, real_valid, fake_train, fake_valid, dis_var_list
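# Note: class_hn and judge_hn both branch off the single disc_hn, so the real
# and fake passes share one set of discriminator weights. The two tg.Graph
# objects differ only in their start nodes (dis_real_sn for real inputs, the
# generator's noise/real start nodes for fake inputs); each graph yields a
# (class, judge) output pair from its end nodes.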
def CNN_Classifier(X_train, y_train, X_valid, y_valid, restore):
    batchsize = 64
    learning_rate = 0.001
    _, h, w, c = X_train.shape
    _, nclass = y_train.shape
    g = tf.Graph()
    with g.as_default():
        data_train = tg.SequentialIterator(X_train, y_train, batchsize=batchsize)
        data_valid = tg.SequentialIterator(X_valid, y_valid, batchsize=batchsize)

        X_ph = tf.placeholder('float32', [None, h, w, c])
        y_phs = []
        for comp in [nclass]:
            y_phs.append(tf.placeholder('float32', [None, comp]))

        start = tg.StartNode(input_vars=[X_ph])

        # track spatial dims through the ten 2x2 stride-1 'VALID' convs
        # below, then the final 2x2 stride-2 max pool
        for _ in range(10):
            h, w = valid(in_height=h, in_width=w, strides=(1, 1), filters=(2, 2))
        h, w = valid(in_height=h, in_width=w, strides=(2, 2), filters=(2, 2))
        # h1, w1 = valid(ch_embed_dim, word_len, strides=(1,1), filters=(ch_embed_dim,4))

        num = 32
        dim = int(h * w * num)
        h1_Node = tg.HiddenNode(
            prev=[start],
            layers=[
                Conv2D(input_channels=c, num_filters=num, padding='VALID', kernel_size=(2, 2), stride=(1, 1)),
                TFBatchNormalization(name='layer1'),
                RELU(),
                Conv2D(input_channels=num, num_filters=num, padding='VALID', kernel_size=(2, 2), stride=(1, 1)),
                TFBatchNormalization(name='layer2'),
                RELU(),
                Conv2D(input_channels=num, num_filters=num, padding='VALID', kernel_size=(2, 2), stride=(1, 1)),
                TFBatchNormalization(name='layer3'),
                RELU(),
                Conv2D(input_channels=num, num_filters=num, padding='VALID', kernel_size=(2, 2), stride=(1, 1)),
                TFBatchNormalization(name='layer4'),
                RELU(),
                Conv2D(input_channels=num, num_filters=num, padding='VALID', kernel_size=(2, 2), stride=(1, 1)),
                TFBatchNormalization(name='layer5'),
                RELU(),
                Conv2D(input_channels=num, num_filters=num, padding='VALID', kernel_size=(2, 2), stride=(1, 1)),
                TFBatchNormalization(name='layer6'),
                RELU(),
                Conv2D(input_channels=num, num_filters=num, padding='VALID', kernel_size=(2, 2), stride=(1, 1)),
                TFBatchNormalization(name='layer7'),
                RELU(),
                Conv2D(input_channels=num, num_filters=num, padding='VALID', kernel_size=(2, 2), stride=(1, 1)),
                TFBatchNormalization(name='layer8'),
                RELU(),
                Dropout(dropout_below=0.5),
                Conv2D(input_channels=num, num_filters=num, padding='VALID', kernel_size=(2, 2), stride=(1, 1)),
                TFBatchNormalization(name='layer9'),
                RELU(),
                Conv2D(input_channels=num, num_filters=num, padding='VALID', kernel_size=(2, 2), stride=(1, 1)),
                TFBatchNormalization(name='layer10'),
                RELU(),
                MaxPooling(poolsize=(2, 2), stride=(2, 2), padding='VALID'),
                Reshape(shape=(-1, dim)),
            ])
        h2_Node = tg.HiddenNode(
            prev=[h1_Node],
            layers=[Linear(prev_dim=dim, this_dim=nclass), Softmax()])
        end_nodes = [tg.EndNode(prev=[h2_Node])]

        graph = Graph(start=[start], end=end_nodes)
        train_outs_sb = graph.train_fprop()
        test_outs = graph.test_fprop()

        ttl_mse = []
        for y_ph, out in zip(y_phs, train_outs_sb):
            # ttl_mse.append(tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(y_ph, out)))
            ttl_mse.append(tf.reduce_mean((y_ph - out) ** 2))
        mse = sum(ttl_mse)
        # optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(mse)
        optimizer = tf.train.AdamOptimizer(learning_rate).minimize(mse)

        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.9)
        saver = tf.train.Saver()
        vardir = './var/5'
        if not os.path.exists(vardir):
            os.makedirs(vardir)
        tf.set_random_seed(1)
        init = tf.global_variables_initializer()
        with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
            sess.run(init)
            if restore == 1:
                re_saver = tf.train.Saver()
                re_saver.restore(sess, vardir + "/model.ckpt")
                print("Model restored.")

            max_epoch = 100
            temp_acc = []
            for epoch in range(max_epoch):
                train_error = 0
                train_accuracy = 0
                ttl_examples = 0
                for X_batch, ys in data_train:
                    feed_dict = {X_ph: X_batch}
                    for y_ph, y_batch in zip(y_phs, [ys]):
                        feed_dict[y_ph] = y_batch
                    sess.run(optimizer, feed_dict=feed_dict)
                    train_outs = sess.run(train_outs_sb, feed_dict=feed_dict)
                    train_error += total_mse(train_outs, [ys])[0]
                    train_accuracy += total_accuracy(train_outs, [ys])[0]
                    ttl_examples += len(X_batch)

                valid_error = 0
                valid_accuracy = 0
                ttl_examples = 0
                for X_batch, ys in data_valid:
                    feed_dict = {X_ph: X_batch}
                    for y_ph, y_batch in zip(y_phs, [ys]):
                        feed_dict[y_ph] = y_batch
                    valid_outs = sess.run(test_outs, feed_dict=feed_dict)
                    valid_error += total_mse(valid_outs, [ys])[0]
                    valid_accuracy += total_accuracy(valid_outs, [ys])[0]
                    ttl_examples += len(X_batch)
                temp_acc.append(valid_accuracy / float(ttl_examples))

            save_path = saver.save(sess, vardir + "/model.ckpt")
            print("Model saved in file: %s" % save_path)
            print('max accuracy is:\t', max(temp_acc))
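# The `valid` helper used above tracks spatial dimensions through
# 'VALID'-padded convolutions. A minimal sketch under TensorFlow's 'VALID'
# rule out = ceil((in - kernel + 1) / stride), keeping this function's
# strides/filters keyword names; `valid_sketch` is an illustrative stand-in,
# not the library function itself:
import math

def valid_sketch(in_height, in_width, strides, filters):
    out_h = int(math.ceil(float(in_height - filters[0] + 1) / strides[0]))
    out_w = int(math.ceil(float(in_width - filters[1] + 1) / strides[1]))
    return out_h, out_w

# e.g. for hypothetical 28x28 inputs, ten 2x2 stride-1 convs shrink 28 -> 18
# (one pixel each), and the stride-2 pool gives
# valid_sketch(18, 18, (2, 2), (2, 2)) == (9, 9)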
def model(nclass, h, w, c):
    with tf.name_scope('Cifar10AllCNN'):
        seq = tg.Sequential()
        seq.add(Conv2D(num_filters=96, kernel_size=(3, 3), stride=(1, 1), padding='SAME'))
        seq.add(RELU())
        seq.add(BatchNormalization())

        seq.add(Conv2D(num_filters=96, kernel_size=(3, 3), stride=(1, 1), padding='SAME'))
        seq.add(RELU())
        seq.add(Dropout(0.5))

        seq.add(Conv2D(num_filters=96, kernel_size=(3, 3), stride=(2, 2), padding='SAME'))
        seq.add(RELU())
        seq.add(BatchNormalization())

        seq.add(Conv2D(num_filters=192, kernel_size=(3, 3), stride=(1, 1), padding='SAME'))
        seq.add(RELU())
        seq.add(Dropout(0.5))

        seq.add(Conv2D(num_filters=192, kernel_size=(3, 3), stride=(1, 1), padding='SAME'))
        seq.add(RELU())
        seq.add(BatchNormalization())

        seq.add(Conv2D(num_filters=192, kernel_size=(3, 3), stride=(2, 2), padding='SAME'))
        seq.add(RELU())
        seq.add(Dropout(0.5))

        seq.add(Conv2D(num_filters=192, kernel_size=(3, 3), stride=(1, 1), padding='SAME'))
        seq.add(RELU())
        seq.add(BatchNormalization())

        seq.add(Conv2D(num_filters=192, kernel_size=(1, 1), stride=(1, 1), padding='SAME'))
        seq.add(RELU())
        seq.add(Dropout(0.5))

        seq.add(Conv2D(num_filters=nclass, kernel_size=(1, 1), stride=(1, 1), padding='SAME'))
        seq.add(RELU())
        seq.add(BatchNormalization())

        seq.add(AvgPooling(poolsize=(8, 8), stride=(1, 1), padding='VALID'))
        seq.add(Flatten())
        seq.add(Softmax())
    return seq
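# Note: this Sequential variant hard-codes AvgPooling(poolsize=(8, 8)) rather
# than threading h and w through `same`, so its h, w, c arguments go unused.
# That matches 32x32 CIFAR-10 inputs: the two stride-2 'SAME' convolutions
# halve 32 -> 16 -> 8, so the average pool spans the whole final 8x8 feature
# map; other input sizes would need a recomputed poolsize.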