def test_VGG19(): seq = tg.Sequential() seq.add(VGG19()) seq.add(Flatten()) seq.add(Linear(this_dim=nclass)) seq.add(Softmax()) train(seq)
def model(): with tf.name_scope('MnistCNN'): seq = tg.Sequential() seq.add(Conv2D(num_filters=32, kernel_size=(3, 3), stride=(1, 1), padding='SAME')) seq.add(BatchNormalization()) seq.add(RELU()) seq.add(MaxPooling(poolsize=(2, 2), stride=(2,2), padding='SAME')) seq.add(LRN()) seq.add(Conv2D(num_filters=64, kernel_size=(3, 3), stride=(1, 1), padding='SAME')) seq.add(BatchNormalization()) seq.add(RELU()) seq.add(MaxPooling(poolsize=(2, 2), stride=(2,2), padding='SAME')) seq.add(LRN()) seq.add(Flatten()) seq.add(Linear(128)) seq.add(BatchNormalization()) seq.add(Tanh()) seq.add(Dropout(0.8)) seq.add(Linear(256)) seq.add(BatchNormalization()) seq.add(Tanh()) seq.add(Dropout(0.8)) seq.add(Linear(10)) seq.add(Softmax()) return seq
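# Illustrative sketch (an assumption, not part of the original): how the shape-inferring
# MnistCNN Sequential above could be wired into a training graph, mirroring the train_with_*
# functions later in this file. `entropy` and `accuracy` are the same cost helpers those
# snippets rely on; the 28x28x1 input size is assumed for MNIST.
def train_mnist_cnn_sketch():
    seq = model()                                         # the Sequential defined above
    X_ph = tf.placeholder('float32', [None, 28, 28, 1])
    y_ph = tf.placeholder('float32', [None, 10])
    y_train_sb = seq.train_fprop(X_ph)                    # training-mode forward pass
    y_test_sb = seq.test_fprop(X_ph)                      # inference-mode forward pass
    train_cost_sb = entropy(y_ph, y_train_sb)
    test_accu_sb = accuracy(y_ph, y_test_sb)
    optimizer = tf.train.AdamOptimizer(0.001).minimize(train_cost_sb)
    return X_ph, y_ph, train_cost_sb, test_accu_sb, optimizer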
def test_ResNetBase(): seq = tg.Sequential() seq.add(ResNetBase(config=[1,1,1,1])) seq.add(MaxPooling(poolsize=(1,1), stride=(1,1), padding='VALID')) seq.add(Flatten()) seq.add(Linear(this_dim=nclass)) seq.add(Softmax()) train(seq)
def test_DenseNet(): seq = tg.Sequential() seq.add(DenseNet(ndense=1, growth_rate=1, nlayer1blk=1)) seq.add(MaxPooling(poolsize=(3,3), stride=(1,1), padding='VALID')) seq.add(Flatten()) seq.add(Linear(this_dim=nclass)) seq.add(Softmax()) train(seq)
def test_UNet(): seq = tg.Sequential() seq.add(UNet(input_shape=(h, w))) seq.add(MaxPooling(poolsize=(3,3), stride=(1,1), padding='VALID')) seq.add(Flatten()) seq.add(Linear(this_dim=nclass)) seq.add(Softmax()) train(seq)
def __init__(self, h, w, c, nclass): layers = [] layers.append(CBR(h,w,c)) layers.append(Flatten()) layers.append(Linear(1*h*w, nclass)) self.startnode = tg.StartNode(input_vars=[None]) hn = tg.HiddenNode(prev=[self.startnode], layers=layers) self.endnode = tg.EndNode(prev=[hn])
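# Illustrative sketch (assumption): running a StartNode -> HiddenNode -> EndNode container like
# the one above. The class stores input_vars=[None]; the variant below feeds a placeholder
# directly and wraps the nodes in tg.Graph, the pattern used by the other graph-based snippets
# in this file. The sizes are hypothetical.
def forward_pass_sketch(h=32, w=32, c=3, nclass=10):
    X_ph = tf.placeholder('float32', [None, h, w, c])
    start = tg.StartNode(input_vars=[X_ph])
    hn = tg.HiddenNode(prev=[start], layers=[CBR(h, w, c), Flatten(), Linear(1 * h * w, nclass)])
    end = tg.EndNode(prev=[hn])
    graph = tg.Graph(start=[start], end=[end])
    y_train_sb, = graph.train_fprop()   # symbolic output in training mode
    y_test_sb, = graph.test_fprop()     # symbolic output in inference mode
    return y_train_sb, y_test_sb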
def train_with_Resnet(): from tensorgraph.trainobject import train as mytrain config = tf.ConfigProto() config.gpu_options.allow_growth = True with tf.Session(config=config) as sess: X_train, y_train, X_test, y_test = Cifar10(contrast_normalize=False, whiten=False) _, h, w, c = X_train.shape _, nclass = y_train.shape print('X max', np.max(X_train)) print('X min', np.min(X_train)) seq = tg.Sequential() id1 = IdentityBlock(input_channels=c, input_shape=(h, w), nlayers=4, filters=[32, 64]) seq.add(id1) trans1 = TransitionLayer(input_channels=id1.output_channels, input_shape=id1.output_shape) seq.add(trans1) id2 = IdentityBlock(input_channels=trans1.output_channels, input_shape=trans1.output_shape, nlayers=4, filters=[64, 128]) seq.add(id2) trans2 = TransitionLayer(input_channels=id2.output_channels, input_shape=id2.output_shape) seq.add(trans2) seq.add(Flatten()) ldim = trans2.output_channels * np.prod(trans2.output_shape) seq.add(Linear(ldim, nclass)) seq.add(Softmax()) X_ph = tf.placeholder('float32', [None, h, w, c]) y_ph = tf.placeholder('float32', [None, nclass]) y_train_sb = seq.train_fprop(X_ph) y_test_sb = seq.test_fprop(X_ph) train_cost_sb = entropy(y_ph, y_train_sb) optimizer = tf.train.AdamOptimizer(0.001) test_accu_sb = accuracy(y_ph, y_test_sb) mytrain(session=sess, feed_dict={ X_ph: X_train, y_ph: y_train }, train_cost_sb=train_cost_sb, valid_cost_sb=-test_accu_sb, optimizer=optimizer, epoch_look_back=5, max_epoch=100, percent_decrease=0, train_valid_ratio=[5, 1], batchsize=64, randomize_split=False)
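# Illustrative follow-up (assumption, not in the original): after mytrain() above returns, the
# held-out Cifar10 test split could be scored in the same session with the symbolic accuracy
# node, batched through tg.SequentialIterator (the iterator used elsewhere in this file).
def evaluate_sketch(sess, X_ph, y_ph, test_accu_sb, X_test, y_test, batchsize=64):
    accus = []
    for X_batch, y_batch in tg.SequentialIterator(X_test, y_test, batchsize=batchsize):
        accus.append(sess.run(test_accu_sb, feed_dict={X_ph: X_batch, y_ph: y_batch}))
    return float(np.mean(accus))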
def test_VGG19(): seq = tg.Sequential() vgg = VGG19(input_channels=c, input_shape=(h, w)) print('output channels:', vgg.output_channels) print('output shape:', vgg.output_shape) out_dim = np.prod(vgg.output_shape) * vgg.output_channels seq.add(vgg) seq.add(Flatten()) seq.add(Linear(int(out_dim), nclass)) seq.add(Softmax()) train(seq)
def train_with_Densenet(): from tensorgraph.trainobject import train as mytrain config = tf.ConfigProto() config.gpu_options.allow_growth = True with tf.Session(config=config) as sess: X_train, y_train, X_test, y_test = Cifar10(contrast_normalize=False, whiten=False) _, h, w, c = X_train.shape _, nclass = y_train.shape print('X max', np.max(X_train)) print('X min', np.min(X_train)) seq = tg.Sequential() dense = DenseNet(input_channels=c, input_shape=(h, w), ndense=3, growth_rate=4, nlayer1blk=4) seq.add(dense) seq.add(Flatten()) ldim = dense.output_channels seq.add(Linear(ldim, nclass)) seq.add(Softmax()) X_ph = tf.placeholder('float32', [None, h, w, c]) y_ph = tf.placeholder('float32', [None, nclass]) y_train_sb = seq.train_fprop(X_ph) y_test_sb = seq.test_fprop(X_ph) train_cost_sb = entropy(y_ph, y_train_sb) optimizer = tf.train.AdamOptimizer(0.001) test_accu_sb = accuracy(y_ph, y_test_sb) print(tf.global_variables()) print('..total number of global variables: {}'.format( len(tf.global_variables()))) count = 0 for var in tf.global_variables(): count += int(np.prod(var.get_shape())) print('..total number of global parameters: {}'.format(count)) mytrain(session=sess, feed_dict={ X_ph: X_train, y_ph: y_train }, train_cost_sb=train_cost_sb, valid_cost_sb=-test_accu_sb, optimizer=optimizer, epoch_look_back=5, max_epoch=100, percent_decrease=0, train_valid_ratio=[5, 1], batchsize=64, randomize_split=False)
def test_DenseNet(): seq = tg.Sequential() model = DenseNet(input_channels=c, input_shape=(h, w), ndense=1, growth_rate=1, nlayer1blk=1) print('output channels:', model.output_channels) print('output shape:', model.output_shape) seq.add(model) seq.add(MaxPooling(poolsize=tuple(model.output_shape), stride=(1,1), padding='VALID')) seq.add(Flatten()) seq.add(Linear(model.output_channels, nclass)) seq.add(Softmax()) train(seq)
def model(word_len, sent_len, nclass): unicode_size = 1000 ch_embed_dim = 20 h, w = valid(ch_embed_dim, word_len, stride=(1, 1), kernel_size=(ch_embed_dim, 5)) h, w = valid(h, w, stride=(1, 1), kernel_size=(1, 5)) h, w = valid(h, w, stride=(1, 2), kernel_size=(1, 5)) conv_out_dim = int(h * w * 60) X_ph = tf.placeholder('int32', [None, sent_len, word_len]) input_sn = tg.StartNode(input_vars=[X_ph]) charcnn_hn = tg.HiddenNode(prev=[input_sn], layers=[ Reshape(shape=(-1, word_len)), Embedding(cat_dim=unicode_size, encode_dim=ch_embed_dim, zero_pad=True), Reshape(shape=(-1, ch_embed_dim, word_len, 1)), Conv2D(input_channels=1, num_filters=20, padding='VALID', kernel_size=(ch_embed_dim, 5), stride=(1, 1)), RELU(), Conv2D(input_channels=20, num_filters=40, padding='VALID', kernel_size=(1, 5), stride=(1, 1)), RELU(), Conv2D(input_channels=40, num_filters=60, padding='VALID', kernel_size=(1, 5), stride=(1, 2)), RELU(), Flatten(), Linear(conv_out_dim, nclass), Reshape((-1, sent_len, nclass)), ReduceSum(1), Softmax() ]) output_en = tg.EndNode(prev=[charcnn_hn]) graph = tg.Graph(start=[input_sn], end=[output_en]) y_train_sb = graph.train_fprop()[0] y_test_sb = graph.test_fprop()[0] return X_ph, y_train_sb, y_test_sb
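# Illustrative sketch (assumption): the valid() helper above presumably follows the standard
# output-size rule for 'VALID' convolutions, out = floor((in - kernel) / stride) + 1 per axis.
# A plain-Python equivalent with a worked check for a hypothetical word_len of 20:
def valid_out(in_size, kernel, stride=1):
    return (in_size - kernel) // stride + 1

h, w = valid_out(20, 20), valid_out(20, 5)         # (1, 16) after the (ch_embed_dim, 5) conv
h, w = valid_out(h, 1), valid_out(w, 5)            # (1, 12) after the first (1, 5) conv
h, w = valid_out(h, 1), valid_out(w, 5, stride=2)  # (1, 4)  after the stride-(1, 2) conv
assert h * w * 60 == 240                           # conv_out_dim, the Linear fan-in for word_len=20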
def test_UNet(): seq = tg.Sequential() model = UNet(input_channels=c, input_shape=(h, w)) print('output channels:', model.output_channels) print('output shape:', model.output_shape) out_dim = np.prod(model.output_shape) * model.output_channels seq.add(model) seq.add(MaxPooling(poolsize=tuple(model.output_shape), stride=(1,1), padding='VALID')) seq.add(Flatten()) seq.add(Linear(model.output_channels, nclass)) seq.add(Softmax()) train(seq)
def model(nclass, h, w, c): with tf.name_scope('Cifar10AllCNN'): seq = tg.Sequential() seq.add(Conv2D(input_channels=c, num_filters=96, kernel_size=(3, 3), stride=(1, 1), padding='SAME')) seq.add(RELU()) seq.add(TFBatchNormalization(name='b1')) h, w = same(in_height=h, in_width=w, stride=(1,1), kernel_size=(3,3)) seq.add(Conv2D(input_channels=96, num_filters=96, kernel_size=(3, 3), stride=(1, 1), padding='SAME')) seq.add(RELU()) h, w = same(in_height=h, in_width=w, stride=(1,1), kernel_size=(3,3)) seq.add(Dropout(0.5)) seq.add(Conv2D(input_channels=96, num_filters=96, kernel_size=(3, 3), stride=(2, 2), padding='SAME')) seq.add(RELU()) seq.add(TFBatchNormalization(name='b3')) h, w = same(in_height=h, in_width=w, stride=(2,2), kernel_size=(3,3)) seq.add(Conv2D(input_channels=96, num_filters=192, kernel_size=(3, 3), stride=(1, 1), padding='SAME')) seq.add(RELU()) h, w = same(in_height=h, in_width=w, stride=(1,1), kernel_size=(3,3)) seq.add(Dropout(0.5)) seq.add(Conv2D(input_channels=192, num_filters=192, kernel_size=(3, 3), stride=(1, 1), padding='SAME')) seq.add(RELU()) seq.add(TFBatchNormalization(name='b5')) h, w = same(in_height=h, in_width=w, stride=(1,1), kernel_size=(3,3)) seq.add(Conv2D(input_channels=192, num_filters=192, kernel_size=(3, 3), stride=(2, 2), padding='SAME')) seq.add(RELU()) h, w = same(in_height=h, in_width=w, stride=(2,2), kernel_size=(3,3)) seq.add(Dropout(0.5)) seq.add(Conv2D(input_channels=192, num_filters=192, kernel_size=(3, 3), stride=(1, 1), padding='SAME')) seq.add(RELU()) seq.add(TFBatchNormalization(name='b7')) h, w = same(in_height=h, in_width=w, stride=(1,1), kernel_size=(3,3)) seq.add(Conv2D(input_channels=192, num_filters=192, kernel_size=(1, 1), stride=(1, 1), padding='SAME')) seq.add(RELU()) h, w = same(in_height=h, in_width=w, stride=(1,1), kernel_size=(1,1)) seq.add(Dropout(0.5)) seq.add(Conv2D(input_channels=192, num_filters=nclass, kernel_size=(1, 1), stride=(1, 1), padding='SAME')) seq.add(RELU()) seq.add(TFBatchNormalization(name='b9')) h, w = same(in_height=h, in_width=w, stride=(1,1), kernel_size=(1,1)) seq.add(AvgPooling(poolsize=(h, w), stride=(1,1), padding='VALID')) seq.add(Flatten()) seq.add(Softmax()) return seq
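# Illustrative sketch (assumption): same() above presumably follows TensorFlow's 'SAME' padding
# rule, out = ceil(in / stride) per axis, so only the two stride-(2, 2) convolutions shrink the
# map. For a 32x32 CIFAR-10 input that gives 32 -> 16 -> 8, and the final AvgPooling over the
# full (h, w) map followed by Flatten() leaves exactly one value per class.
import math

def same_out(in_size, stride):
    return int(math.ceil(float(in_size) / stride))

h = w = 32
h, w = same_out(h, 2), same_out(w, 2)   # 16x16 after the first stride-2 conv
h, w = same_out(h, 2), same_out(w, 2)   # 8x8 after the second stride-2 conv
assert (h, w) == (8, 8)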
def __init__(self, nclass, h, w, c): layers = [] template = TemplateModel(nclass, h, w, c) layers.append(template) layers.append(Flatten()) layers.append(Linear(template.output_dim, 200)) layers.append(RELU()) layers.append(Linear(200, nclass)) layers.append(Softmax()) self.startnode = tg.StartNode(input_vars=[None]) model_hn = tg.HiddenNode(prev=[self.startnode], layers=layers) self.endnode = tg.EndNode(prev=[model_hn])
def __init__(self, nclass, h, w, c): layers = [] identityblk = IdentityBlock(input_channels=c, input_shape=[h, w], nlayers=10) layers.append(identityblk) layers.append( Conv2D(input_channels=c, num_filters=16, kernel_size=(3, 3), stride=(1, 1), padding='SAME')) layers.append(RELU()) h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(3, 3)) layers.append(BatchNormalization(input_shape=[h, w, 16])) denseblk = DenseBlock(input_channels=16, input_shape=[h, w], growth_rate=4, nlayers=4) layers.append(denseblk) layers.append( Conv2D(input_channels=denseblk.output_channels, num_filters=32, kernel_size=(3, 3), stride=(2, 2), padding='SAME')) layers.append(RELU()) h, w = same(in_height=h, in_width=w, stride=(2, 2), kernel_size=(3, 3)) layers.append(Dropout(0.5)) layers.append( Conv2D(input_channels=32, num_filters=nclass, kernel_size=(1, 1), stride=(1, 1), padding='SAME')) layers.append(RELU()) h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(1, 1)) layers.append(BatchNormalization(input_shape=[h, w, nclass])) layers.append( AvgPooling(poolsize=(h, w), stride=(1, 1), padding='VALID')) layers.append(Flatten()) layers.append(Softmax()) self.startnode = tg.StartNode(input_vars=[None]) model_hn = tg.HiddenNode(prev=[self.startnode], layers=layers) self.endnode = tg.EndNode(prev=[model_hn])
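# Illustrative check (assumption): a dense block concatenates growth_rate new feature maps per
# layer, so the DenseBlock above should report output_channels = 16 + 4 * 4 = 32, which is why
# the Conv2D that consumes denseblk.output_channels can be followed further down by one
# declared with input_channels=32.
in_channels, growth_rate, nlayers = 16, 4, 4
assert in_channels + growth_rate * nlayers == 32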
def __init__(self, nclass, h, w, c): layers = [] model = UNet(input_channels=c, input_shape=(h, w)) layers.append(model) layers.append( MaxPooling(poolsize=tuple(model.output_shape), stride=(1, 1), padding='VALID')) layers.append(Flatten()) layers.append(Linear(model.output_channels, nclass)) layers.append(Softmax()) self.startnode = tg.StartNode(input_vars=[None]) model_hn = tg.HiddenNode(prev=[self.startnode], layers=layers) self.endnode = tg.EndNode(prev=[model_hn])
def test_ResNetBase(): seq = tg.Sequential() model = ResNetBase(input_channels=c, input_shape=(h, w), config=[1,1,1,1]) print('output channels:', model.output_channels) print('output shape:', model.output_shape) seq.add(model) seq.add(MaxPooling(poolsize=tuple(model.output_shape), stride=(1,1), padding='VALID')) outshape = valid_nd(model.output_shape, kernel_size=model.output_shape, stride=(1,1)) print(outshape) out_dim = model.output_channels seq.add(Flatten()) seq.add(Linear(int(out_dim), nclass)) seq.add(Softmax()) train(seq)
def model(): with tf.name_scope('MnistCNN'): seq = tg.Sequential() seq.add( Conv2D(input_channels=1, num_filters=32, kernel_size=(3, 3), stride=(1, 1), padding='SAME')) h, w = same(in_height=28, in_width=28, stride=(1, 1), kernel_size=(3, 3)) seq.add(BatchNormalization(input_shape=[h, w, 32])) seq.add(RELU()) seq.add(MaxPooling(poolsize=(2, 2), stride=(2, 2), padding='SAME')) h, w = same(in_height=h, in_width=w, stride=(2, 2), kernel_size=(2, 2)) seq.add(LRN()) seq.add( Conv2D(input_channels=32, num_filters=64, kernel_size=(3, 3), stride=(1, 1), padding='SAME')) h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(3, 3)) seq.add(BatchNormalization(input_shape=[h, w, 64])) seq.add(RELU()) seq.add(MaxPooling(poolsize=(2, 2), stride=(2, 2), padding='SAME')) h, w = same(in_height=h, in_width=w, stride=(2, 2), kernel_size=(2, 2)) seq.add(LRN()) seq.add(Flatten()) seq.add(Linear(int(h * w * 64), 128)) seq.add(BatchNormalization(input_shape=[128])) seq.add(Tanh()) seq.add(Dropout(0.8)) seq.add(Linear(128, 256)) seq.add(BatchNormalization(input_shape=[256])) seq.add(Tanh()) seq.add(Dropout(0.8)) seq.add(Linear(256, 10)) seq.add(Softmax()) return seq
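# Illustrative check (assumption): with 'SAME' padding (out = ceil(in / stride)) a 28x28 MNIST
# input keeps its size through the stride-1 convolutions and halves at each 2x2, stride-2
# MaxPooling, 28 -> 14 -> 7, so Linear(int(h * w * 64), 128) above receives 7 * 7 * 64 = 3136 inputs.
h = 28
h = -(-h // 2)   # ceil(28 / 2) = 14 after the first MaxPooling
h = -(-h // 2)   # ceil(14 / 2) = 7 after the second MaxPooling
assert h * h * 64 == 3136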
def train_with_VGG(): from tensorgraph.trainobject import train as mytrain config = tf.ConfigProto() config.gpu_options.allow_growth = True with tf.Session(config=config) as sess: X_train, y_train, X_test, y_test = Cifar10(contrast_normalize=False, whiten=False) _, h, w, c = X_train.shape _, nclass = y_train.shape print('X max', np.max(X_train)) print('X min', np.min(X_train)) from tensorgraph.layers import VGG19 seq = tg.Sequential() layer = VGG19(input_channels=c, input_shape=(h, w)) seq.add(layer) seq.add(Flatten()) seq.add(Linear(512, nclass)) seq.add(Softmax()) X_ph = tf.placeholder('float32', [None, h, w, c]) y_ph = tf.placeholder('float32', [None, nclass]) y_train_sb = seq.train_fprop(X_ph) y_test_sb = seq.test_fprop(X_ph) train_cost_sb = entropy(y_ph, y_train_sb) optimizer = tf.train.AdamOptimizer(0.001) test_accu_sb = accuracy(y_ph, y_test_sb) mytrain(session=sess, feed_dict={ X_ph: X_train, y_ph: y_train }, train_cost_sb=train_cost_sb, valid_cost_sb=-test_accu_sb, optimizer=optimizer, epoch_look_back=5, max_epoch=100, percent_decrease=0, train_valid_ratio=[5, 1], batchsize=64, randomize_split=False)
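# Illustrative check (assumption): Linear(512, nclass) above relies on this VGG19 implementation
# reducing the 32x32 CIFAR-10 input through the usual five stride-2 pooling stages,
# 32 -> 16 -> 8 -> 4 -> 2 -> 1, so Flatten() sees 1 * 1 * 512 = 512 features.
h = 32
for _ in range(5):   # five pooling stages assumed
    h //= 2
assert h * h * 512 == 512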
def generator(self): self.generator_called = True with self.tf_graph.as_default(): scope = 'Generator' with tf.name_scope(scope): h1, w1 = valid(self.h, self.w, kernel_size=(5, 5), stride=(1, 1)) h2, w2 = valid(h1, w1, kernel_size=(5, 5), stride=(2, 2)) h3, w3 = valid(h2, w2, kernel_size=(5, 5), stride=(2, 2)) flat_dim = int(h3 * w3 * 32) print('h1:{}, w1:{}'.format(h1, w1)) print('h2:{}, w2:{}'.format(h2, w2)) print('h3:{}, w3:{}'.format(h3, w3)) print('flat dim:{}'.format(flat_dim)) self.gen_real_sn = tg.StartNode(input_vars=[self.real_ph]) enc_hn = tg.HiddenNode( prev=[self.gen_real_sn], layers=[ Conv2D(input_channels=self.c, num_filters=32, kernel_size=(5, 5), stride=(1, 1), padding='VALID'), TFBatchNormalization(name=scope + '/genc1'), RELU(), Conv2D(input_channels=32, num_filters=32, kernel_size=(5, 5), stride=(2, 2), padding='VALID'), TFBatchNormalization(name=scope + '/genc2'), RELU(), Conv2D(input_channels=32, num_filters=32, kernel_size=(5, 5), stride=(2, 2), padding='VALID'), TFBatchNormalization(name=scope + '/genc3'), RELU(), # Conv2D(input_channels=32, num_filters=32, kernel_size=(5,5), stride=(2,2), padding='VALID'), # RELU(), Flatten(), Linear(flat_dim, 300), TFBatchNormalization(name=scope + '/genc4'), RELU(), Linear(300, self.bottleneck_dim), Tanh(), ]) self.noise_sn = tg.StartNode(input_vars=[self.noise_ph]) self.gen_hn = tg.HiddenNode( prev=[self.noise_sn, enc_hn], input_merge_mode=Sum(), layers=[ Linear(self.bottleneck_dim, flat_dim), RELU(), ######[ Method 0 ]###### # Reshape((-1, h3, w3, 32)), # Conv2D_Transpose(input_channels=32, num_filters=100, output_shape=(h2,w2), # kernel_size=(5,5), stride=(2,2), padding='VALID'), ######[ End Method 0 ]###### ######[ Method 1 ]###### Reshape((-1, 1, 1, flat_dim)), # Reshape((-1, h)) Conv2D_Transpose(input_channels=flat_dim, num_filters=200, output_shape=(h3, w3), kernel_size=(h3, w3), stride=(1, 1), padding='VALID'), # BatchNormalization(layer_type='conv', dim=200, short_memory=0.01), TFBatchNormalization(name=scope + '/g1'), RELU(), Conv2D_Transpose(input_channels=200, num_filters=100, output_shape=(h2, w2), kernel_size=(5, 5), stride=(2, 2), padding='VALID'), # BatchNormalization(layer_type='conv', dim=100, short_memory=0.01), ######[ End Method 1 ]###### TFBatchNormalization(name=scope + '/g2'), RELU(), Conv2D_Transpose(input_channels=100, num_filters=50, output_shape=(h1, w1), kernel_size=(5, 5), stride=(2, 2), padding='VALID'), # BatchNormalization(layer_type='conv', dim=50, short_memory=0.01), TFBatchNormalization(name=scope + '/g3'), RELU(), Conv2D_Transpose(input_channels=50, num_filters=self.c, output_shape=(self.h, self.w), kernel_size=(5, 5), stride=(1, 1), padding='VALID'), SetShape((-1, self.h, self.w, self.c)), Sigmoid() ]) h, w = valid(self.h, self.w, kernel_size=(5, 5), stride=(1, 1)) h, w = valid(h, w, kernel_size=(5, 5), stride=(2, 2)) h, w = valid(h, w, kernel_size=(5, 5), stride=(2, 2)) h, w = valid(h, w, kernel_size=(h3, w3), stride=(1, 1)) y_en = tg.EndNode(prev=[self.gen_hn]) graph = tg.Graph(start=[self.noise_sn, self.gen_real_sn], end=[y_en]) G_train_sb = graph.train_fprop()[0] G_test_sb = graph.test_fprop()[0] gen_var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=scope) return self.y_ph, self.noise_ph, G_train_sb, G_test_sb, gen_var_list
def discriminator(self): if not self.generator_called: raise Exception( 'self.generator() has to be called first before self.discriminator()' ) scope = 'Discriminator' with self.tf_graph.as_default(): with tf.name_scope(scope): h1, w1 = valid(self.h, self.w, kernel_size=(5, 5), stride=(1, 1)) h2, w2 = valid(h1, w1, kernel_size=(5, 5), stride=(2, 2)) h3, w3 = valid(h2, w2, kernel_size=(5, 5), stride=(2, 2)) flat_dim = int(h3 * w3 * 32) h1, w1 = valid(self.char_embed_dim, self.word_len, kernel_size=(self.char_embed_dim, 3), stride=(1, 1)) print('h1:{}, w1:{}'.format(h1, w1)) h2, w2 = valid(h1, w1, kernel_size=(1, 3), stride=(1, 1)) print('h2:{}, w2:{}'.format(h2, w2)) h3, w3 = valid(h2, w2, kernel_size=(1, 3), stride=(1, 1)) print('h3:{}, w3:{}'.format(h3, w3)) # h4, w4 = valid(h3, w3, kernel_size=(1,6), stride=(1,1)) # print('h4:{}, w4:{}'.format(h4, w4)) # hf, wf = h4, w4 hf, wf = h3, w3 n_filters = 100 real_sn = tg.StartNode(input_vars=[self.real_ph]) real_hn = tg.HiddenNode(prev=[real_sn], layers=[ OneHot(self.char_embed_dim), Transpose(perm=[0, 3, 2, 1]) ]) disc_hn = tg.HiddenNode( prev=[real_hn, self.gen_hn], layers=[ Conv2D(input_channels=self.sent_len, num_filters=100, kernel_size=(self.char_embed_dim, 3), stride=(1, 1), padding='VALID'), TFBatchNormalization(name=scope + '/d1'), LeakyRELU(), Conv2D(input_channels=100, num_filters=100, kernel_size=(1, 3), stride=(1, 1), padding='VALID'), TFBatchNormalization(name=scope + '/d2'), LeakyRELU(), Conv2D(input_channels=100, num_filters=100, kernel_size=(1, 3), stride=(1, 1), padding='VALID'), TFBatchNormalization(name=scope + '/d3'), LeakyRELU(), # Conv2D(input_channels=32, num_filters=128, kernel_size=(1,6), stride=(1,1), padding='VALID'), # RELU(), Flatten(), Linear(int(hf * wf * n_filters), self.bottleneck_dim), TFBatchNormalization(name=scope + '/d4'), LeakyRELU(), ]) class_hn = tg.HiddenNode(prev=[disc_hn], layers=[ Linear(self.bottleneck_dim, self.nclass), Softmax() ]) judge_hn = tg.HiddenNode( prev=[disc_hn], layers=[ Linear(self.bottleneck_dim, 1), # Sigmoid() ]) real_class_en = tg.EndNode(prev=[class_hn]) real_judge_en = tg.EndNode(prev=[judge_hn]) fake_class_en = tg.EndNode(prev=[class_hn]) fake_judge_en = tg.EndNode(prev=[judge_hn]) graph = tg.Graph(start=[real_sn], end=[real_class_en, real_judge_en]) real_train = graph.train_fprop() real_valid = graph.test_fprop() graph = tg.Graph(start=[self.noise_sn], end=[fake_class_en, fake_judge_en]) fake_train = graph.train_fprop() fake_valid = graph.test_fprop() dis_var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=scope) return self.real_ph, real_train, real_valid, fake_train, fake_valid, dis_var_list
def discriminator(self): if not self.generator_called: raise Exception( 'self.generator() has to be called first before self.discriminator()' ) scope = 'Discriminator' with self.tf_graph.as_default(): with tf.name_scope(scope): h1, w1 = valid(self.h, self.w, kernel_size=(5, 5), stride=(1, 1)) h2, w2 = valid(h1, w1, kernel_size=(5, 5), stride=(2, 2)) h3, w3 = valid(h2, w2, kernel_size=(5, 5), stride=(2, 2)) flat_dim = int(h3 * w3 * 32) dis_real_sn = tg.StartNode(input_vars=[self.real_ph]) # fake_ph = tf.placeholder('float32', [None, self.h, self.w, 1], name='fake') # fake_sn = tg.StartNode(input_vars=[fake_ph]) disc_hn = tg.HiddenNode( prev=[dis_real_sn, self.gen_hn], layers=[ Conv2D(input_channels=self.c, num_filters=32, kernel_size=(5, 5), stride=(1, 1), padding='VALID'), TFBatchNormalization(name=scope + '/d1'), # BatchNormalization(layer_type='conv', dim=32, short_memory=0.01), LeakyRELU(), Conv2D(input_channels=32, num_filters=32, kernel_size=(5, 5), stride=(2, 2), padding='VALID'), TFBatchNormalization(name=scope + '/d2'), # BatchNormalization(layer_type='conv', dim=32, short_memory=0.01), LeakyRELU(), Conv2D(input_channels=32, num_filters=32, kernel_size=(5, 5), stride=(2, 2), padding='VALID'), TFBatchNormalization(name=scope + '/d3'), # BatchNormalization(layer_type='conv', dim=32, short_memory=0.01), LeakyRELU(), # Conv2D(input_channels=32, num_filters=32, kernel_size=(5,5), stride=(2,2), padding='VALID'), # RELU(), Flatten(), Linear(flat_dim, self.bottleneck_dim), # BatchNormalization(layer_type='fc', dim=self.bottleneck_dim, short_memory=0.01), TFBatchNormalization(name=scope + '/d4'), LeakyRELU(), # Dropout(0.5), ]) class_hn = tg.HiddenNode(prev=[disc_hn], layers=[ Linear(self.bottleneck_dim, self.nclass), Softmax() ]) judge_hn = tg.HiddenNode( prev=[disc_hn], layers=[Linear(self.bottleneck_dim, 1), Sigmoid()]) real_class_en = tg.EndNode(prev=[class_hn]) real_judge_en = tg.EndNode(prev=[judge_hn]) fake_class_en = tg.EndNode(prev=[class_hn]) fake_judge_en = tg.EndNode(prev=[judge_hn]) graph = tg.Graph(start=[dis_real_sn], end=[real_class_en, real_judge_en]) real_train = graph.train_fprop() real_valid = graph.test_fprop() graph = tg.Graph(start=[self.noise_sn, self.gen_real_sn], end=[fake_class_en, fake_judge_en]) fake_train = graph.train_fprop() fake_valid = graph.test_fprop() dis_var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=scope) return self.real_ph, real_train, real_valid, fake_train, fake_valid, dis_var_list
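# Illustrative sketch (assumption, not from the original): one conventional way to turn the
# generator()/discriminator() outputs above into GAN objectives. The judge heads end in
# Sigmoid(), so their probabilities go into the usual log losses, and each loss is minimized
# only over its own scope's variables via var_list. Learning rate and epsilon are hypothetical.
def gan_losses_sketch(real_train, fake_train, gen_var_list, dis_var_list, lr=2e-4, eps=1e-8):
    real_class, real_judge = real_train   # [class output, judge probability] for real inputs
    fake_class, fake_judge = fake_train   # [class output, judge probability] for generated inputs
    d_loss = -tf.reduce_mean(tf.log(real_judge + eps) + tf.log(1.0 - fake_judge + eps))
    g_loss = -tf.reduce_mean(tf.log(fake_judge + eps))
    d_optim = tf.train.AdamOptimizer(lr).minimize(d_loss, var_list=dis_var_list)
    g_optim = tf.train.AdamOptimizer(lr).minimize(g_loss, var_list=gen_var_list)
    return d_loss, g_loss, d_optim, g_optim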
def Vanilla_Classifier(X_train, y_train, X_valid, y_valid, restore): batchsize = 100 learning_rate = 0.001 _, h, w, c = X_train.shape _, nclass = y_train.shape g = tf.Graph() with g.as_default(): data_train = tg.SequentialIterator(X_train, y_train, batchsize=batchsize) data_valid = tg.SequentialIterator(X_valid, y_valid, batchsize=batchsize) X_ph = tf.placeholder('float32', [None, h, w, c]) # y_ph = tf.placeholder('float32', [None, nclass]) y_phs = [] for comp in [nclass]: y_phs.append(tf.placeholder('float32', [None, comp])) dim = int(h*w*c) scope = 'encoder' start = tg.StartNode(input_vars=[X_ph]) h1_Node = tg.HiddenNode(prev=[start], layers=[Sigmoid(), TFBatchNormalization(name=scope + '/vanilla1'), RELU(), Flatten(), Sigmoid(), TFBatchNormalization(name=scope + '/vanilla2')]) h2_Node = tg.HiddenNode(prev=[h1_Node], layers=[Linear(prev_dim=dim, this_dim=nclass), Softmax()]) end_nodes = [tg.EndNode(prev=[h2_Node])] graph = Graph(start=[start], end=end_nodes) train_outs_sb = graph.train_fprop() test_outs = graph.test_fprop() ttl_mse = [] # import pdb; pdb.set_trace() for y_ph, out in zip(y_phs, train_outs_sb): #ttl_mse.append(tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(y_ph, out))) ttl_mse.append(tf.reduce_mean((y_ph-out)**2)) mse = sum(ttl_mse) #optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(mse) optimizer = tf.train.AdamOptimizer(learning_rate).minimize(mse) saver = tf.train.Saver() vardir = './var/2' if not os.path.exists(vardir): os.makedirs(vardir) gpu_options=tf.GPUOptions(per_process_gpu_memory_fraction=0.9) tf.set_random_seed(1) init = tf.global_variables_initializer() with tf.Session(config = tf.ConfigProto(gpu_options = gpu_options)) as sess: # print('=======session start') sess.run(init) if restore == 1: re_saver = tf.train.Saver() re_saver.restore(sess, vardir + "/model.ckpt") print("Model restored.") max_epoch = 100 temp_acc = [] for epoch in range(max_epoch): train_error = 0 train_accuracy = 0 ttl_examples = 0 for X_batch, ys in data_train: feed_dict = {X_ph:X_batch} for y_ph, y_batch in zip(y_phs, [ys]): feed_dict[y_ph] = y_batch sess.run(optimizer, feed_dict=feed_dict) train_outs = sess.run(train_outs_sb, feed_dict=feed_dict) train_error += total_mse(train_outs, [ys])[0] train_accuracy += total_accuracy(train_outs, [ys])[0] ttl_examples += len(X_batch) valid_error = 0 valid_accuracy = 0 ttl_examples = 0 for X_batch, ys in data_valid: feed_dict = {X_ph:X_batch} for y_ph, y_batch in zip(y_phs, [ys]): feed_dict[y_ph] = y_batch valid_outs = sess.run(test_outs, feed_dict=feed_dict) valid_error += total_mse(valid_outs, [ys])[0] valid_accuracy += total_accuracy(valid_outs, [ys])[0] ttl_examples += len(X_batch) save_path = saver.save(sess, vardir + "/model.ckpt") # print("Model saved in file: %s" % save_path) temp_acc.append(valid_accuracy/float(ttl_examples)) print('max accuracy is:\t', max(temp_acc))
def Encoder_Classifier(X_train, y_train, X_valid, y_valid, restore): batchsize = 100 learning_rate = 0.001 _, h, w, c = X_train.shape _, nclass = y_train.shape g = tf.Graph() with g.as_default(): data_train = tg.SequentialIterator(X_train, y_train, batchsize=batchsize) data_valid = tg.SequentialIterator(X_valid, y_valid, batchsize=batchsize) X_ph = tf.placeholder('float32', [None, h, w, c]) y_phs = [] for comp in [nclass]: y_phs.append(tf.placeholder('float32', [None, comp])) start = tg.StartNode(input_vars=[X_ph]) h1, w1 = valid(h, w, kernel_size=(5,5), stride=(1,1)) h2, w2 = valid(h1, w1, kernel_size=(5,5), stride=(2,2)) h3, w3 = valid(h2, w2, kernel_size=(5,5), stride=(2,2)) flat_dim = int(h3*w3*32) scope = 'encoder' bottleneck_dim = 300 enc_hn = tg.HiddenNode(prev=[start], layers=[Conv2D(input_channels=c, num_filters=32, kernel_size=(5,5), stride=(1,1), padding='VALID'), TFBatchNormalization(name=scope + '/genc1'), RELU(), Conv2D(input_channels=32, num_filters=32, kernel_size=(5,5), stride=(2,2), padding='VALID'), TFBatchNormalization(name=scope + '/genc2'), RELU(), Conv2D(input_channels=32, num_filters=32, kernel_size=(5,5), stride=(2,2), padding='VALID'), TFBatchNormalization(name=scope + '/genc3'), RELU(), Flatten(), Linear(flat_dim, 300), TFBatchNormalization(name=scope + '/genc4'), RELU(), Linear(300, bottleneck_dim), Tanh() ]) h2_Node = tg.HiddenNode(prev=[enc_hn], layers=[Linear(prev_dim=bottleneck_dim, this_dim=nclass), Softmax()]) end_nodes = [tg.EndNode(prev=[h2_Node])] graph = Graph(start=[start], end=end_nodes) train_outs_sb = graph.train_fprop() test_outs = graph.test_fprop() ttl_mse = [] # import pdb; pdb.set_trace() for y_ph, out in zip(y_phs, train_outs_sb): #ttl_mse.append(tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(y_ph, out))) ttl_mse.append(tf.reduce_mean((y_ph-out)**2)) mse = sum(ttl_mse) #optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(mse) optimizer = tf.train.AdamOptimizer(learning_rate).minimize(mse) gpu_options=tf.GPUOptions(per_process_gpu_memory_fraction=0.9) # saver_init = tf.train.Saver() saver = tf.train.Saver() vardir = './var/1' if not os.path.exists(vardir): os.makedirs(vardir) tf.set_random_seed(1) init = tf.global_variables_initializer() with tf.Session(config = tf.ConfigProto(gpu_options = gpu_options)) as sess: sess.run(init) if restore == 1: re_saver = tf.train.Saver() re_saver.restore(sess, vardir + "/model.ckpt") print("Model restored.") # save_path = saver_init.save(sess, vardir + "/init.ckpt") # print("Model saved in file: %s" % save_path) max_epoch = 2 temp_acc = [] for epoch in range(max_epoch): # print('epoch:', epoch) train_error = 0 train_accuracy = 0 ttl_examples = 0 for X_batch, ys in data_train: feed_dict = {X_ph:X_batch} for y_ph, y_batch in zip(y_phs, [ys]): feed_dict[y_ph] = y_batch # import pdb; pdb.set_trace() sess.run(optimizer, feed_dict=feed_dict) train_outs = sess.run(train_outs_sb, feed_dict=feed_dict) train_error += total_mse(train_outs, [ys])[0] train_accuracy += total_accuracy(train_outs, [ys])[0] ttl_examples += len(X_batch) valid_error = 0 valid_accuracy = 0 ttl_examples = 0 for X_batch, ys in data_valid: feed_dict = {X_ph:X_batch} for y_ph, y_batch in zip(y_phs, [ys]): feed_dict[y_ph] = y_batch valid_outs = sess.run(test_outs, feed_dict=feed_dict) valid_error += total_mse(valid_outs, [ys])[0] valid_accuracy += total_accuracy(valid_outs, [ys])[0] ttl_examples += len(X_batch) temp_acc.append(valid_accuracy/float(ttl_examples)) save_path = saver.save(sess, vardir + "/model.ckpt") print("Model saved in file: %s" % save_path) print('max accuracy is:\t', max(temp_acc))
def __init__(self, nclass, h, w, c): layers = [] layers.append( Conv2D(input_channels=c, num_filters=96, kernel_size=(3, 3), stride=(1, 1), padding='SAME')) layers.append(RELU()) h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(3, 3)) layers.append(BatchNormalization(input_shape=[h, w, 96])) layers.append( Conv2D(input_channels=96, num_filters=96, kernel_size=(3, 3), stride=(1, 1), padding='SAME')) layers.append(RELU()) h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(3, 3)) layers.append(Dropout(0.5)) layers.append( Conv2D(input_channels=96, num_filters=96, kernel_size=(3, 3), stride=(2, 2), padding='SAME')) layers.append(RELU()) h, w = same(in_height=h, in_width=w, stride=(2, 2), kernel_size=(3, 3)) layers.append(BatchNormalization(input_shape=[h, w, 96])) layers.append( Conv2D(input_channels=96, num_filters=192, kernel_size=(3, 3), stride=(1, 1), padding='SAME')) layers.append(RELU()) h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(3, 3)) layers.append(Dropout(0.5)) layers.append( Conv2D(input_channels=192, num_filters=192, kernel_size=(3, 3), stride=(1, 1), padding='SAME')) layers.append(RELU()) h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(3, 3)) layers.append(BatchNormalization(input_shape=[h, w, 192])) layers.append( Conv2D(input_channels=192, num_filters=192, kernel_size=(3, 3), stride=(2, 2), padding='SAME')) layers.append(RELU()) h, w = same(in_height=h, in_width=w, stride=(2, 2), kernel_size=(3, 3)) layers.append(Dropout(0.5)) layers.append( Conv2D(input_channels=192, num_filters=192, kernel_size=(3, 3), stride=(1, 1), padding='SAME')) layers.append(RELU()) h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(3, 3)) layers.append(BatchNormalization(input_shape=[h, w, 192])) layers.append( Conv2D(input_channels=192, num_filters=192, kernel_size=(1, 1), stride=(1, 1), padding='SAME')) layers.append(RELU()) h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(1, 1)) layers.append(Dropout(0.5)) layers.append( Conv2D(input_channels=192, num_filters=nclass, kernel_size=(1, 1), stride=(1, 1), padding='SAME')) layers.append(RELU()) h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(1, 1)) layers.append(BatchNormalization(input_shape=[h, w, nclass])) layers.append( AvgPooling(poolsize=(h, w), stride=(1, 1), padding='VALID')) layers.append(Flatten()) layers.append(Softmax()) self.startnode = tg.StartNode(input_vars=[None]) model_hn = tg.HiddenNode(prev=[self.startnode], layers=layers) self.endnode = tg.EndNode(prev=[model_hn])
def discriminator_allconv(self): if not self.generator_called: raise Exception( 'self.generator() has to be called first before self.discriminator()' ) scope = 'Discriminator' with self.tf_graph.as_default(): with tf.name_scope(scope): # h1, w1 = valid(self.h, self.w, kernel_size=(5,5), stride=(1,1)) # h2, w2 = valid(h1, w1, kernel_size=(5,5), stride=(2,2)) # h3, w3 = valid(h2, w2, kernel_size=(5,5), stride=(2,2)) # flat_dim = int(h3*w3*32) dis_real_sn = tg.StartNode(input_vars=[self.real_ph]) # fake_ph = tf.placeholder('float32', [None, self.h, self.w, 1], name='fake') # fake_sn = tg.StartNode(input_vars=[fake_ph]) h, w = same(in_height=self.h, in_width=self.w, stride=(1, 1), kernel_size=(3, 3)) h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(3, 3)) h, w = same(in_height=h, in_width=w, stride=(2, 2), kernel_size=(3, 3)) h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(3, 3)) h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(3, 3)) h, w = same(in_height=h, in_width=w, stride=(2, 2), kernel_size=(3, 3)) h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(1, 1)) h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(3, 3)) h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(1, 1)) print('h, w', h, w) print('===============') # h, w = valid(in_height=h, in_width=w, stride=(1,1), kernel_size=(h,w)) disc_hn = tg.HiddenNode( prev=[dis_real_sn, self.gen_hn], layers=[ Dropout(0.2), # TFBatchNormalization(name='b0'), Conv2D(input_channels=self.c, num_filters=96, kernel_size=(3, 3), stride=(1, 1), padding='SAME'), LeakyRELU(), TFBatchNormalization(name='b1'), # Dropout(0.5), Conv2D(input_channels=96, num_filters=96, kernel_size=(3, 3), stride=(1, 1), padding='SAME'), LeakyRELU(), # TFBatchNormalization(name='b2'), Dropout(0.5), Conv2D(input_channels=96, num_filters=96, kernel_size=(3, 3), stride=(2, 2), padding='SAME'), LeakyRELU(), TFBatchNormalization(name='b3'), # Dropout(0.5), Conv2D(input_channels=96, num_filters=192, kernel_size=(3, 3), stride=(1, 1), padding='SAME'), LeakyRELU(), # TFBatchNormalization(name='b4'), Dropout(0.5), Conv2D(input_channels=192, num_filters=192, kernel_size=(3, 3), stride=(1, 1), padding='SAME'), LeakyRELU(), TFBatchNormalization(name='b5'), # Dropout(0.5), Conv2D(input_channels=192, num_filters=192, kernel_size=(3, 3), stride=(2, 2), padding='SAME'), LeakyRELU(), # TFBatchNormalization(name='b6'), Dropout(0.5), Conv2D(input_channels=192, num_filters=192, kernel_size=(3, 3), stride=(1, 1), padding='SAME'), LeakyRELU(), TFBatchNormalization(name='b7'), # Dropout(0.5), Conv2D(input_channels=192, num_filters=192, kernel_size=(1, 1), stride=(1, 1), padding='SAME'), LeakyRELU(), # TFBatchNormalization(name='b8'), Dropout(0.5), Conv2D(input_channels=192, num_filters=self.nclass, kernel_size=(1, 1), stride=(1, 1), padding='SAME'), LeakyRELU(), TFBatchNormalization(name='b9'), # Dropout(0.5), AvgPooling(poolsize=(h, w), stride=(1, 1), padding='VALID'), Flatten(), ]) print('h,w', h, w) print('==============') class_hn = tg.HiddenNode( prev=[disc_hn], layers=[ Linear(self.nclass, self.nclass), # Softmax() ]) judge_hn = tg.HiddenNode( prev=[disc_hn], layers=[ Linear(self.nclass, 1), # Sigmoid() ]) real_class_en = tg.EndNode(prev=[class_hn]) real_judge_en = tg.EndNode(prev=[judge_hn]) fake_class_en = tg.EndNode(prev=[class_hn]) fake_judge_en = tg.EndNode(prev=[judge_hn]) graph = tg.Graph(start=[dis_real_sn], end=[real_class_en, real_judge_en]) real_train = graph.train_fprop() real_valid = graph.test_fprop() graph = tg.Graph(start=[self.noise_sn, self.gen_real_sn], end=[fake_class_en, fake_judge_en]) fake_train = graph.train_fprop() fake_valid = graph.test_fprop() dis_var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=scope) return self.real_ph, real_train, real_valid, fake_train, fake_valid, dis_var_list
def __init__(self, h, w, c, z_dim=100, gf_dim=64, df_dim=64): self.z_dim = z_dim out_shape2 = same_nd([h, w], kernel_size=(5, 5), stride=(2, 2)) out_shape4 = same_nd(out_shape2, kernel_size=(5, 5), stride=(2, 2)) out_shape8 = same_nd(out_shape4, kernel_size=(5, 5), stride=(2, 2)) out_shape16 = same_nd(out_shape8, kernel_size=(5, 5), stride=(2, 2)) h16, w16 = out_shape16 with tf.variable_scope('Generator'): self.g_layers = [ Linear(z_dim, 8 * gf_dim * h16 * w16), Reshape([-1, h16, w16, 8 * gf_dim]), # TFBatchNormalization(name='gbn1'), BatchNormalization(input_shape=[h16, w16, 8 * gf_dim]), RELU(), Conv2D_Transpose(input_channels=8 * gf_dim, num_filters=4 * gf_dim, output_shape=out_shape8, kernel_size=(5, 5), stride=(2, 2), padding='SAME'), # TFBatchNormalization(name='gbn2'), BatchNormalization(input_shape=out_shape8 + [4 * gf_dim]), RELU(), Conv2D_Transpose(input_channels=4 * gf_dim, num_filters=2 * gf_dim, output_shape=out_shape4, kernel_size=(5, 5), stride=(2, 2), padding='SAME'), # TFBatchNormalization(name='gbn3'), BatchNormalization(input_shape=out_shape4 + [2 * gf_dim]), RELU(), Conv2D_Transpose(input_channels=2 * gf_dim, num_filters=gf_dim, output_shape=out_shape2, kernel_size=(5, 5), stride=(2, 2), padding='SAME'), # TFBatchNormalization(name='gbn4'), BatchNormalization(input_shape=out_shape2 + [gf_dim]), RELU(), Conv2D_Transpose(input_channels=gf_dim, num_filters=c, output_shape=(h, w), kernel_size=(5, 5), stride=(2, 2), padding='SAME'), # Sigmoid() ] out_shape2 = same_nd([h, w], kernel_size=(5, 5), stride=(2, 2)) out_shape4 = same_nd(out_shape2, kernel_size=(5, 5), stride=(2, 2)) out_shape8 = same_nd(out_shape4, kernel_size=(5, 5), stride=(2, 2)) out_shape16 = same_nd(out_shape8, kernel_size=(5, 5), stride=(2, 2)) h16, w16 = out_shape16 with tf.variable_scope('Discriminator'): self.d1_layers = [ Conv2D(input_channels=c, num_filters=df_dim, kernel_size=(5, 5), stride=(2, 2), padding='SAME'), LeakyRELU(), Conv2D(input_channels=df_dim, num_filters=2 * df_dim, kernel_size=(5, 5), stride=(2, 2), padding='SAME'), ] # TFBatchNormalization(name='dbn1'), self.d2_layers = [ BatchNormalization(input_shape=out_shape4 + [2 * df_dim]), LeakyRELU(), Conv2D(input_channels=2 * df_dim, num_filters=4 * df_dim, kernel_size=(5, 5), stride=(2, 2), padding='SAME'), ] self.d3_layers = [ # TFBatchNormalization(name='dbn2'), BatchNormalization(input_shape=out_shape8 + [4 * df_dim]), LeakyRELU(), Conv2D(input_channels=4 * df_dim, num_filters=8 * df_dim, kernel_size=(5, 5), stride=(2, 2), padding='SAME'), ] self.d4_layers = [ # TFBatchNormalization(name='dbn3'), BatchNormalization(input_shape=out_shape16 + [8 * df_dim]), LeakyRELU(), ReduceMax(reduction_indices=[1, 2]), ] self.d5_layers = [ Flatten(), Linear(8 * df_dim, 1), # LeakyRELU(), # Linear(1000, 1) # Sigmoid() ] print('====:', 8 * df_dim)
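# Illustrative sketch (assumption): one possible way to assemble the g_layers list above into a
# generator forward pass, reusing the StartNode/HiddenNode/EndNode pattern from the other
# snippets in this file. How the class actually wires its layer lists is not shown here, so the
# placeholder and graph below are hypothetical.
def build_generator_sketch(g_layers, z_dim=100):
    z_ph = tf.placeholder('float32', [None, z_dim])
    z_sn = tg.StartNode(input_vars=[z_ph])
    g_hn = tg.HiddenNode(prev=[z_sn], layers=g_layers)   # the generator stack defined above
    g_en = tg.EndNode(prev=[g_hn])
    G_train_sb, = tg.Graph(start=[z_sn], end=[g_en]).train_fprop()
    return z_ph, G_train_sb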
def classifier(X_ph, X_gen_ph, h, w): with tf.variable_scope('Classifier'): X_sn = tg.StartNode(input_vars=[X_ph]) X_gen_sn = tg.StartNode(input_vars=[X_gen_ph]) h1, w1 = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(3, 3)) h2, w2 = same(in_height=h1, in_width=w1, stride=(2, 2), kernel_size=(2, 2)) h3, w3 = same(in_height=h2, in_width=w2, stride=(1, 1), kernel_size=(3, 3)) h4, w4 = same(in_height=h3, in_width=w3, stride=(2, 2), kernel_size=(2, 2)) print('---', h, w) X_hn = tg.HiddenNode(prev=[X_sn], layers=[ Conv2D(input_channels=1, num_filters=32, kernel_size=(3, 3), stride=(1, 1), padding='SAME'), BatchNormalization(input_shape=[h1, w1, 32]), RELU(), MaxPooling(poolsize=(2, 2), stride=(2, 2), padding='SAME'), LRN(), Conv2D(input_channels=32, num_filters=64, kernel_size=(3, 3), stride=(1, 1), padding='SAME'), BatchNormalization(input_shape=[h3, w3, 64]), RELU(), MaxPooling(poolsize=(2, 2), stride=(2, 2), padding='SAME'), Flatten(), ]) X_gen_hn = tg.HiddenNode( prev=[X_gen_sn], layers=[ Conv2D(input_channels=1, num_filters=32, kernel_size=(3, 3), stride=(1, 1), padding='SAME'), BatchNormalization(input_shape=[h1, w1, 32]), RELU(), MaxPooling(poolsize=(2, 2), stride=(2, 2), padding='SAME'), LRN(), Conv2D(input_channels=32, num_filters=64, kernel_size=(3, 3), stride=(1, 1), padding='SAME'), BatchNormalization(input_shape=[h3, w3, 64]), RELU(), MaxPooling(poolsize=(2, 2), stride=(2, 2), padding='SAME'), Flatten(), ]) print('===', h4 * w4 * 64 * 2) merge_hn = tg.HiddenNode(prev=[X_hn, X_gen_hn], input_merge_mode=Concat(), layers=[ Linear(h4 * w4 * 64 * 2, 100), RELU(), BatchNormalization(input_shape=[100]), Linear(100, 1), Sigmoid() ]) en = tg.EndNode(prev=[merge_hn]) graph = tg.Graph(start=[X_sn, X_gen_sn], end=[en]) y_train, = graph.train_fprop() y_test, = graph.test_fprop() return y_train, y_test
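# Illustrative usage (assumption): the classifier above scores an (image, generated image) pair
# with a single sigmoid unit, so a binary label placeholder plus a log loss (or the squared
# error used elsewhere in this file) completes the training graph. Sizes are hypothetical.
def classifier_usage_sketch(h=28, w=28, lr=0.001, eps=1e-8):
    X_ph = tf.placeholder('float32', [None, h, w, 1])
    X_gen_ph = tf.placeholder('float32', [None, h, w, 1])
    label_ph = tf.placeholder('float32', [None, 1])
    y_train, y_test = classifier(X_ph, X_gen_ph, h, w)
    cost = -tf.reduce_mean(label_ph * tf.log(y_train + eps)
                           + (1 - label_ph) * tf.log(1 - y_train + eps))
    optimizer = tf.train.AdamOptimizer(lr).minimize(cost)
    return X_ph, X_gen_ph, label_ph, cost, optimizer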
def discriminator(self): if not self.generator_called: raise Exception( 'self.generator() has to be called first before self.discriminator()' ) scope = 'Discriminator' with self.tf_graph.as_default(): with tf.name_scope(scope): h1, w1 = valid(self.h, self.w, kernel_size=(5, 5), stride=(1, 1)) h2, w2 = valid(h1, w1, kernel_size=(5, 5), stride=(2, 2)) h3, w3 = valid(h2, w2, kernel_size=(5, 5), stride=(2, 2)) flat_dim = int(h3 * w3 * 32) real_ph = tf.placeholder('float32', [None, self.h, self.w, 1], name='real') real_sn = tg.StartNode(input_vars=[real_ph]) # fake_ph = tf.placeholder('float32', [None, self.h, self.w, 1], name='fake') # fake_sn = tg.StartNode(input_vars=[fake_ph]) disc_hn = tg.HiddenNode( prev=[real_sn, self.gen_hn], layers=[ Conv2D(input_channels=1, num_filters=32, kernel_size=(5, 5), stride=(1, 1), padding='VALID'), TFBatchNormalization(name=scope + '/c1'), LeakyRELU(), Conv2D(input_channels=32, num_filters=32, kernel_size=(5, 5), stride=(2, 2), padding='VALID'), TFBatchNormalization(name=scope + '/c2'), LeakyRELU(), Conv2D(input_channels=32, num_filters=32, kernel_size=(5, 5), stride=(2, 2), padding='VALID'), TFBatchNormalization(name=scope + '/c3'), LeakyRELU(), # Conv2D(input_channels=32, num_filters=32, kernel_size=(5,5), stride=(2,2), padding='VALID'), # RELU(), Flatten(), Linear(flat_dim, self.bottleneck_dim), TFBatchNormalization(name=scope + '/l1'), LeakyRELU(), # Dropout(0.5), ]) class_hn = tg.HiddenNode(prev=[disc_hn], layers=[ Linear(self.bottleneck_dim, self.nclass), Softmax() ]) judge_hn = tg.HiddenNode( prev=[disc_hn], layers=[ Linear(self.bottleneck_dim, 1), # Sigmoid() ]) real_class_en = tg.EndNode(prev=[class_hn]) real_judge_en = tg.EndNode(prev=[judge_hn]) fake_class_en = tg.EndNode(prev=[class_hn]) fake_judge_en = tg.EndNode(prev=[judge_hn]) graph = tg.Graph(start=[real_sn], end=[real_class_en, real_judge_en]) real_train = graph.train_fprop() real_valid = graph.test_fprop() # dis_var_list = graph.variables # for var in dis_var_list: # print var.name graph = tg.Graph(start=[self.noise_sn, self.y_sn], end=[fake_class_en, fake_judge_en]) fake_train = graph.train_fprop() fake_valid = graph.test_fprop() # print('========') # for var in graph.variables: # print var.name dis_var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=scope) # for var in dis_var_list: # print(var.name) # # print('=========') # for var in tf.global_variables(): # print(var.name) # import pdb; pdb.set_trace() # print() # graph = tg.Graph(start=[G_sn], end=[class_en, judge_en]) # class_train_sb, judge_train_sb = graph.train_fprop() # symbolic outputs # class_test_sb, judge_test_sb = graph.test_fprop() # symbolic outputs return real_ph, real_train, real_valid, fake_train, fake_valid, dis_var_list
def model(nclass, h, w, c): with tf.name_scope('Cifar10AllCNN'): seq = tg.Sequential() seq.add( Conv2D(num_filters=96, kernel_size=(3, 3), stride=(1, 1), padding='SAME')) seq.add(RELU()) seq.add(BatchNormalization()) seq.add( Conv2D(num_filters=96, kernel_size=(3, 3), stride=(1, 1), padding='SAME')) seq.add(RELU()) seq.add(Dropout(0.5)) seq.add( Conv2D(num_filters=96, kernel_size=(3, 3), stride=(2, 2), padding='SAME')) seq.add(RELU()) seq.add(BatchNormalization()) seq.add( Conv2D(num_filters=192, kernel_size=(3, 3), stride=(1, 1), padding='SAME')) seq.add(RELU()) seq.add(Dropout(0.5)) seq.add( Conv2D(num_filters=192, kernel_size=(3, 3), stride=(1, 1), padding='SAME')) seq.add(RELU()) seq.add(BatchNormalization()) seq.add( Conv2D(num_filters=192, kernel_size=(3, 3), stride=(2, 2), padding='SAME')) seq.add(RELU()) seq.add(Dropout(0.5)) seq.add( Conv2D(num_filters=192, kernel_size=(3, 3), stride=(1, 1), padding='SAME')) seq.add(RELU()) seq.add(BatchNormalization()) seq.add( Conv2D(num_filters=192, kernel_size=(1, 1), stride=(1, 1), padding='SAME')) seq.add(RELU()) seq.add(Dropout(0.5)) seq.add( Conv2D(num_filters=nclass, kernel_size=(1, 1), stride=(1, 1), padding='SAME')) seq.add(RELU()) seq.add(BatchNormalization()) seq.add(AvgPooling(poolsize=(8, 8), stride=(1, 1), padding='VALID')) seq.add(Flatten()) seq.add(Softmax()) return seq