# Assumed imports for the snippets below (TF1-era tensorgraph examples
# typically use these paths; adjust to the installed version):
import tensorflow as tf
import tensorgraph as tg
from tensorgraph.layers import (Conv2D, RELU, LeakyRELU, Dropout, Flatten,
                                Softmax, Linear, AvgPooling,
                                BatchNormalization, TFBatchNormalization,
                                IdentityBlock, DenseBlock)
from tensorgraph.utils import same


def model(nclass, h, w, c):
    with tf.name_scope('Cifar10AllCNN'):
        seq = tg.Sequential()
        seq.add(Conv2D(input_channels=c, num_filters=96, kernel_size=(3, 3),
                       stride=(1, 1), padding='SAME'))
        seq.add(RELU())
        seq.add(TFBatchNormalization(name='b1'))
        h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(3, 3))

        seq.add(Conv2D(input_channels=96, num_filters=96, kernel_size=(3, 3),
                       stride=(1, 1), padding='SAME'))
        seq.add(RELU())
        h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(3, 3))
        seq.add(Dropout(0.5))

        seq.add(Conv2D(input_channels=96, num_filters=96, kernel_size=(3, 3),
                       stride=(2, 2), padding='SAME'))
        seq.add(RELU())
        seq.add(TFBatchNormalization(name='b3'))
        h, w = same(in_height=h, in_width=w, stride=(2, 2), kernel_size=(3, 3))

        seq.add(Conv2D(input_channels=96, num_filters=192, kernel_size=(3, 3),
                       stride=(1, 1), padding='SAME'))
        seq.add(RELU())
        h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(3, 3))
        seq.add(Dropout(0.5))

        seq.add(Conv2D(input_channels=192, num_filters=192, kernel_size=(3, 3),
                       stride=(1, 1), padding='SAME'))
        seq.add(RELU())
        seq.add(TFBatchNormalization(name='b5'))
        h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(3, 3))

        seq.add(Conv2D(input_channels=192, num_filters=192, kernel_size=(3, 3),
                       stride=(2, 2), padding='SAME'))
        seq.add(RELU())
        h, w = same(in_height=h, in_width=w, stride=(2, 2), kernel_size=(3, 3))
        seq.add(Dropout(0.5))

        seq.add(Conv2D(input_channels=192, num_filters=192, kernel_size=(3, 3),
                       stride=(1, 1), padding='SAME'))
        seq.add(RELU())
        seq.add(TFBatchNormalization(name='b7'))
        h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(3, 3))

        seq.add(Conv2D(input_channels=192, num_filters=192, kernel_size=(1, 1),
                       stride=(1, 1), padding='SAME'))
        seq.add(RELU())
        h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(1, 1))
        seq.add(Dropout(0.5))

        seq.add(Conv2D(input_channels=192, num_filters=nclass,
                       kernel_size=(1, 1), stride=(1, 1), padding='SAME'))
        seq.add(RELU())
        seq.add(TFBatchNormalization(name='b9'))
        h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(1, 1))

        # global average pooling over the remaining h x w feature map
        seq.add(AvgPooling(poolsize=(h, w), stride=(1, 1), padding='VALID'))
        seq.add(Flatten())
        seq.add(Softmax())
    return seq
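
# Usage sketch, not part of the original source: a tensorgraph Sequential
# exposes train_fprop/test_fprop, which return the training-mode (dropout on,
# batch norm updating) and inference-mode output symbols. The placeholder
# shape below is an assumed CIFAR10 input.
def _sequential_usage_sketch():
    X_ph = tf.placeholder('float32', [None, 32, 32, 3])
    seq = model(nclass=10, h=32, w=32, c=3)
    y_train_sb = seq.train_fprop(X_ph)  # training-mode forward graph
    y_test_sb = seq.test_fprop(X_ph)    # inference-mode forward graph
    return y_train_sb, y_test_sb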
# Presumably the __init__ of a model class in the tg.StartNode / HiddenNode /
# EndNode style (see the graph construction at the end of the method).
def __init__(self, nclass, h, w, c):
    layers = []
    identityblk = IdentityBlock(input_channels=c, input_shape=[h, w],
                                nlayers=10)
    layers.append(identityblk)

    layers.append(Conv2D(input_channels=c, num_filters=16, kernel_size=(3, 3),
                         stride=(1, 1), padding='SAME'))
    layers.append(RELU())
    h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(3, 3))
    layers.append(BatchNormalization(input_shape=[h, w, 16]))

    # with the usual dense-block concatenation, output_channels is presumably
    # 16 + 4 * 4 = 32, matching the input_channels of the next conv
    denseblk = DenseBlock(input_channels=16, input_shape=[h, w],
                          growth_rate=4, nlayers=4)
    layers.append(denseblk)

    layers.append(Conv2D(input_channels=denseblk.output_channels,
                         num_filters=32, kernel_size=(3, 3), stride=(2, 2),
                         padding='SAME'))
    layers.append(RELU())
    h, w = same(in_height=h, in_width=w, stride=(2, 2), kernel_size=(3, 3))
    layers.append(Dropout(0.5))

    layers.append(Conv2D(input_channels=32, num_filters=nclass,
                         kernel_size=(1, 1), stride=(1, 1), padding='SAME'))
    layers.append(RELU())
    h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(1, 1))
    layers.append(BatchNormalization(input_shape=[h, w, nclass]))

    layers.append(AvgPooling(poolsize=(h, w), stride=(1, 1), padding='VALID'))
    layers.append(Flatten())
    layers.append(Softmax())

    self.startnode = tg.StartNode(input_vars=[None])
    model_hn = tg.HiddenNode(prev=[self.startnode], layers=layers)
    self.endnode = tg.EndNode(prev=[model_hn])
def __init__(self, nclass, h, w, c):
    layers = []
    layers.append(Conv2D(input_channels=c, num_filters=96, kernel_size=(3, 3),
                         stride=(1, 1), padding='SAME'))
    layers.append(RELU())
    h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(3, 3))
    layers.append(BatchNormalization(input_shape=[h, w, 96]))

    layers.append(Conv2D(input_channels=96, num_filters=96, kernel_size=(3, 3),
                         stride=(1, 1), padding='SAME'))
    layers.append(RELU())
    h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(3, 3))
    layers.append(Dropout(0.5))

    layers.append(Conv2D(input_channels=96, num_filters=96, kernel_size=(3, 3),
                         stride=(2, 2), padding='SAME'))
    layers.append(RELU())
    h, w = same(in_height=h, in_width=w, stride=(2, 2), kernel_size=(3, 3))
    layers.append(BatchNormalization(input_shape=[h, w, 96]))

    layers.append(Conv2D(input_channels=96, num_filters=192,
                         kernel_size=(3, 3), stride=(1, 1), padding='SAME'))
    layers.append(RELU())
    h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(3, 3))
    layers.append(Dropout(0.5))

    layers.append(Conv2D(input_channels=192, num_filters=192,
                         kernel_size=(3, 3), stride=(1, 1), padding='SAME'))
    layers.append(RELU())
    h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(3, 3))
    layers.append(BatchNormalization(input_shape=[h, w, 192]))

    layers.append(Conv2D(input_channels=192, num_filters=192,
                         kernel_size=(3, 3), stride=(2, 2), padding='SAME'))
    layers.append(RELU())
    h, w = same(in_height=h, in_width=w, stride=(2, 2), kernel_size=(3, 3))
    layers.append(Dropout(0.5))

    layers.append(Conv2D(input_channels=192, num_filters=192,
                         kernel_size=(3, 3), stride=(1, 1), padding='SAME'))
    layers.append(RELU())
    h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(3, 3))
    layers.append(BatchNormalization(input_shape=[h, w, 192]))

    layers.append(Conv2D(input_channels=192, num_filters=192,
                         kernel_size=(1, 1), stride=(1, 1), padding='SAME'))
    layers.append(RELU())
    h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(1, 1))
    layers.append(Dropout(0.5))

    layers.append(Conv2D(input_channels=192, num_filters=nclass,
                         kernel_size=(1, 1), stride=(1, 1), padding='SAME'))
    layers.append(RELU())
    h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(1, 1))
    layers.append(BatchNormalization(input_shape=[h, w, nclass]))

    layers.append(AvgPooling(poolsize=(h, w), stride=(1, 1), padding='VALID'))
    layers.append(Flatten())
    layers.append(Softmax())

    self.startnode = tg.StartNode(input_vars=[None])
    model_hn = tg.HiddenNode(prev=[self.startnode], layers=layers)
    self.endnode = tg.EndNode(prev=[model_hn])
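
# Sketch, assuming tensorgraph's node-graph API as used by the discriminator
# below: point the StartNode at a real placeholder, then build a tg.Graph
# between the model's start and end nodes to get the train/test output
# symbols. (If the class derives from a base model that wraps this, its own
# train_fprop/test_fprop would do the same thing.)
def _node_graph_fprop_sketch(model, X_ph):
    model.startnode.input_vars = [X_ph]
    graph = tg.Graph(start=[model.startnode], end=[model.endnode])
    y_train_sb, = graph.train_fprop()  # training-mode output
    y_test_sb, = graph.test_fprop()    # inference-mode output
    return y_train_sb, y_test_sb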
def discriminator_allconv(self):
    if not self.generator_called:
        raise Exception('self.generator() has to be called first '
                        'before self.discriminator()')
    scope = 'Discriminator'
    with self.tf_graph.as_default():
        with tf.name_scope(scope):
            dis_real_sn = tg.StartNode(input_vars=[self.real_ph])

            # track the feature-map size through the conv stack so the final
            # average pool can cover the whole remaining map
            h, w = same(in_height=self.h, in_width=self.w,
                        stride=(1, 1), kernel_size=(3, 3))
            h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(3, 3))
            h, w = same(in_height=h, in_width=w, stride=(2, 2), kernel_size=(3, 3))
            h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(3, 3))
            h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(3, 3))
            h, w = same(in_height=h, in_width=w, stride=(2, 2), kernel_size=(3, 3))
            h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(1, 1))
            h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(3, 3))
            h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(1, 1))

            # shared all-conv trunk, fed by both the real images and the
            # generator output (self.gen_hn)
            disc_hn = tg.HiddenNode(
                prev=[dis_real_sn, self.gen_hn],
                layers=[
                    Dropout(0.2),
                    Conv2D(input_channels=self.c, num_filters=96,
                           kernel_size=(3, 3), stride=(1, 1), padding='SAME'),
                    LeakyRELU(),
                    TFBatchNormalization(name='b1'),
                    Conv2D(input_channels=96, num_filters=96,
                           kernel_size=(3, 3), stride=(1, 1), padding='SAME'),
                    LeakyRELU(),
                    Dropout(0.5),
                    Conv2D(input_channels=96, num_filters=96,
                           kernel_size=(3, 3), stride=(2, 2), padding='SAME'),
                    LeakyRELU(),
                    TFBatchNormalization(name='b3'),
                    Conv2D(input_channels=96, num_filters=192,
                           kernel_size=(3, 3), stride=(1, 1), padding='SAME'),
                    LeakyRELU(),
                    Dropout(0.5),
                    Conv2D(input_channels=192, num_filters=192,
                           kernel_size=(3, 3), stride=(1, 1), padding='SAME'),
                    LeakyRELU(),
                    TFBatchNormalization(name='b5'),
                    Conv2D(input_channels=192, num_filters=192,
                           kernel_size=(3, 3), stride=(2, 2), padding='SAME'),
                    LeakyRELU(),
                    Dropout(0.5),
                    Conv2D(input_channels=192, num_filters=192,
                           kernel_size=(3, 3), stride=(1, 1), padding='SAME'),
                    LeakyRELU(),
                    TFBatchNormalization(name='b7'),
                    Conv2D(input_channels=192, num_filters=192,
                           kernel_size=(1, 1), stride=(1, 1), padding='SAME'),
                    LeakyRELU(),
                    Dropout(0.5),
                    Conv2D(input_channels=192, num_filters=self.nclass,
                           kernel_size=(1, 1), stride=(1, 1), padding='SAME'),
                    LeakyRELU(),
                    TFBatchNormalization(name='b9'),
                    AvgPooling(poolsize=(h, w), stride=(1, 1), padding='VALID'),
                    Flatten(),
                ])

            # two heads on the shared trunk: an nclass-way classifier and a
            # real/fake judge; both return logits, with Softmax/Sigmoid left
            # to the loss
            class_hn = tg.HiddenNode(prev=[disc_hn],
                                     layers=[Linear(self.nclass, self.nclass)])
            judge_hn = tg.HiddenNode(prev=[disc_hn],
                                     layers=[Linear(self.nclass, 1)])

            real_class_en = tg.EndNode(prev=[class_hn])
            real_judge_en = tg.EndNode(prev=[judge_hn])
            fake_class_en = tg.EndNode(prev=[class_hn])
            fake_judge_en = tg.EndNode(prev=[judge_hn])

            graph = tg.Graph(start=[dis_real_sn],
                             end=[real_class_en, real_judge_en])
            real_train = graph.train_fprop()
            real_valid = graph.test_fprop()

            graph = tg.Graph(start=[self.noise_sn, self.gen_real_sn],
                             end=[fake_class_en, fake_judge_en])
            fake_train = graph.train_fprop()
            fake_valid = graph.test_fprop()

        dis_var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                         scope=scope)
    return (self.real_ph, real_train, real_valid,
            fake_train, fake_valid, dis_var_list)
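
# Sketch, not from the original source: one way to turn the returned symbols
# into a GAN-style discriminator loss. real_train/fake_train are
# [class_logits, judge_logits] pairs, matching the end-node order above.
def _discriminator_loss_sketch(real_train, fake_train):
    real_class_sb, real_judge_sb = real_train
    fake_class_sb, fake_judge_sb = fake_train
    # judge head: real images labelled 1, generated images labelled 0
    d_loss = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(
            labels=tf.ones_like(real_judge_sb), logits=real_judge_sb))
    d_loss += tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(
            labels=tf.zeros_like(fake_judge_sb), logits=fake_judge_sb))
    return d_loss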
def model(nclass, h, w, c):
    with tf.name_scope('Cifar10AllCNN'):
        seq = tg.Sequential()
        # this variant of the API omits input_channels; the layer presumably
        # infers it from the incoming tensor
        seq.add(Conv2D(num_filters=96, kernel_size=(3, 3), stride=(1, 1),
                       padding='SAME'))
        seq.add(RELU())
        seq.add(BatchNormalization())

        seq.add(Conv2D(num_filters=96, kernel_size=(3, 3), stride=(1, 1),
                       padding='SAME'))
        seq.add(RELU())
        seq.add(Dropout(0.5))

        seq.add(Conv2D(num_filters=96, kernel_size=(3, 3), stride=(2, 2),
                       padding='SAME'))
        seq.add(RELU())
        seq.add(BatchNormalization())

        seq.add(Conv2D(num_filters=192, kernel_size=(3, 3), stride=(1, 1),
                       padding='SAME'))
        seq.add(RELU())
        seq.add(Dropout(0.5))

        seq.add(Conv2D(num_filters=192, kernel_size=(3, 3), stride=(1, 1),
                       padding='SAME'))
        seq.add(RELU())
        seq.add(BatchNormalization())

        seq.add(Conv2D(num_filters=192, kernel_size=(3, 3), stride=(2, 2),
                       padding='SAME'))
        seq.add(RELU())
        seq.add(Dropout(0.5))

        seq.add(Conv2D(num_filters=192, kernel_size=(3, 3), stride=(1, 1),
                       padding='SAME'))
        seq.add(RELU())
        seq.add(BatchNormalization())

        seq.add(Conv2D(num_filters=192, kernel_size=(1, 1), stride=(1, 1),
                       padding='SAME'))
        seq.add(RELU())
        seq.add(Dropout(0.5))

        seq.add(Conv2D(num_filters=nclass, kernel_size=(1, 1), stride=(1, 1),
                       padding='SAME'))
        seq.add(RELU())
        seq.add(BatchNormalization())

        # the hard-coded 8x8 pool assumes 32x32 inputs: the two stride-2
        # SAME convs take 32 -> 16 -> 8
        seq.add(AvgPooling(poolsize=(8, 8), stride=(1, 1), padding='VALID'))
        seq.add(Flatten())
        seq.add(Softmax())
    return seq