def resNet_v1(self):
    """Build a small ResNet-v1 style classifier graph and return its logits.

    Two residual stages (64 and 128 filters) sit on top of a strided 5x5
    stem convolution; the head is global average pooling, dropout, and a
    10-way linear layer.

    Returns:
        The un-activated `logits` tensor (10 classes).
    """
    params = {
        "conv1": [5, 5, 64],
        "rb1_1": [3, 3, 64],
        "rb1_2": [3, 3, 64],
        "rb2_1": [3, 3, 128],
        "rb2_2": [3, 3, 128],
        "fc3": 10,
    }

    with tf.name_scope("resNet_v1"):
        # Stem: strided conv followed by overlapping max-pool.
        x = nf.convolution_layer(self.inputs, params["conv1"], [1, 2, 2, 1],
                                 name="conv1")
        skip1 = tf.nn.max_pool(x, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
                               padding='SAME')

        # Residual stage 1 (64 filters, identity shortcut from skip1).
        x = nf.convolution_layer(skip1, params["rb1_1"], [1, 1, 1, 1],
                                 name="rb1_1")
        skip2 = nf.convolution_layer(x, params["rb1_2"], [1, 1, 1, 1],
                                     name="rb1_2")
        skip2 = nf.shortcut(skip2, skip1, name="rb1")

        # Residual stage 2 (128 filters, strided first conv downsamples).
        x = nf.convolution_layer(skip2, params["rb2_1"], [1, 2, 2, 1],
                                 padding="SAME", name="rb2_1")
        skip3 = nf.convolution_layer(x, params["rb2_2"], [1, 1, 1, 1],
                                     name="rb2_2")
        skip3 = nf.shortcut(skip3, skip2, name="rb2")

        # Head: global average pool -> dropout -> linear classifier.
        x = nf.global_avg_pooling(skip3, flatten=True)
        x = tf.layers.dropout(x, rate=self.dropout,
                              training=self.is_training, name='dropout2')
        logits = nf.fc_layer(x, params["fc3"], name="logits",
                             activat_fn=None)

    return logits
def baseline(self, kwargs=None):
    """Build the baseline detector network and return its 1024-d code.

    EDSR-style backbone: two strided conv + max-pool stages, a stack of
    16 residual blocks with a long skip connection around the stack, one
    more strided conv, then three fully connected layers down to
    `fc_code`.

    Args:
        kwargs: optional dict of extra options; currently unused.
            (Fix: the previous mutable default `{}` is shared across all
            calls in Python — replaced with the `None` idiom.)

    Returns:
        The `fc_code` tensor (1024 units, no activation).
    """
    if kwargs is None:
        kwargs = {}

    init = tf.random_normal_initializer(stddev=0.01)
    feature_size = 64
    model_params = {
        'conv1': [11, 11, feature_size * 2],
        'conv2': [5, 5, feature_size * 4],
        'resblock': [3, 3, feature_size * 4],
        'conv3': [3, 3, feature_size * 4],
        'fc4': 64 * 64 * 4,
        'fc5': 64 * 64 * 2,
        'fc_code': 1024,
    }

    ### Generator
    num_resblock = 16
    g_input = self.inputs

    with tf.name_scope("Detector"):
        # 256x256x1 input (per original comment — TODO confirm at caller).
        x = nf.convolution_layer(g_input, model_params["conv1"],
                                 [1, 2, 2, 1], name="conv1",
                                 activat_fn=tf.nn.relu, initializer=init)
        conv_1 = tf.nn.max_pool(x, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
                                padding='VALID')
        print("conv_1: %s" % conv_1.get_shape())

        x = nf.convolution_layer(conv_1, model_params["conv2"],
                                 [1, 2, 2, 1], name="conv2",
                                 activat_fn=tf.nn.relu, initializer=init)
        conv_2 = tf.nn.max_pool(x, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
                                padding='VALID')
        print("conv_2: %s" % conv_2.get_shape())

        x = conv_2

        with tf.variable_scope("detector_resblock", reuse=False):
            # Add the residual blocks to the model
            for i in range(num_resblock):
                x = nf.resBlock(x, feature_size * 4, scale=1, reuse=False,
                                idx=i, initializer=init)

        x = nf.convolution_layer(x, model_params["conv3"], [1, 1, 1, 1],
                                 name="conv3", activat_fn=tf.nn.relu,
                                 initializer=init)
        # Long skip connection preserves pre-resblock features.
        x += conv_2
        print("conv_3: %s" % x.get_shape())

        x = nf.convolution_layer(x, model_params["conv3"], [1, 2, 2, 1],
                                 name="conv4", activat_fn=tf.nn.relu,
                                 initializer=init, flatten=True)
        print("conv_4: %s" % x.get_shape())

        fc_4 = nf.fc_layer(x, model_params["fc4"], name="fc_4",
                           activat_fn=tf.nn.relu)
        print("fc_4: %s" % fc_4.get_shape())

        fc_5 = nf.fc_layer(fc_4, model_params["fc5"], name="fc_5",
                           activat_fn=tf.nn.relu)
        print("fc_5: %s" % fc_5.get_shape())

        fc_code = nf.fc_layer(fc_5, model_params["fc_code"],
                              name="fc_code", activat_fn=None)
        print("fc_code: %s" % fc_code.get_shape())

    return fc_code
def alex_net(self, kwargs=None):
    """AlexNet-style encoder; returns an un-activated `fc_code` embedding.

    Standard AlexNet convolution stack (conv1-conv5 with three max-pools)
    followed by fc6/fc7, dropout, and a final linear `fc_code` layer.
    Every conv/fc layer carries L2 weight regularization (1e-5).

    Args:
        kwargs: optional dict of extra options; currently unused.
            (Fix: the previous mutable default `{}` is shared across all
            calls in Python — replaced with the `None` idiom.)

    Returns:
        The `fc_code` tensor (4096 units, no activation).
    """
    if kwargs is None:
        kwargs = {}

    init = tf.random_normal_initializer(stddev=0.01)
    l2_reg = tf.contrib.layers.l2_regularizer(1e-5)
    model_params = {
        'conv1': [11, 11, 96],
        'conv2': [5, 5, 256],
        'conv3': [3, 3, 384],
        'conv4': [3, 3, 384],
        'conv5': [3, 3, 256],
        'fc6': 8192,
        'fc7': 8192,
        'fc_code': 4096,
        # 'fc7': 4096,
        # 'fc_code': 1024,
    }

    with tf.name_scope("Detector"):
        conv_1 = nf.convolution_layer(self.inputs, model_params["conv1"],
                                      [1, 4, 4, 1], name="conv1",
                                      activat_fn=tf.nn.relu,
                                      initializer=init, reg=l2_reg,
                                      padding='VALID')
        conv_1 = tf.nn.max_pool(conv_1, ksize=[1, 3, 3, 1],
                                strides=[1, 2, 2, 1], padding='VALID')

        conv_2 = nf.convolution_layer(conv_1, model_params["conv2"],
                                      [1, 1, 1, 1], name="conv2",
                                      activat_fn=tf.nn.relu,
                                      initializer=init, reg=l2_reg,
                                      padding='SAME')
        conv_2 = tf.nn.max_pool(conv_2, ksize=[1, 3, 3, 1],
                                strides=[1, 2, 2, 1], padding='VALID')

        conv_3 = nf.convolution_layer(conv_2, model_params["conv3"],
                                      [1, 1, 1, 1], name="conv3",
                                      activat_fn=tf.nn.relu,
                                      initializer=init, reg=l2_reg,
                                      padding='SAME')
        conv_4 = nf.convolution_layer(conv_3, model_params["conv4"],
                                      [1, 1, 1, 1], name="conv4",
                                      activat_fn=tf.nn.relu,
                                      initializer=init, reg=l2_reg,
                                      padding='SAME')
        conv_5 = nf.convolution_layer(conv_4, model_params["conv5"],
                                      [1, 1, 1, 1], name="conv5",
                                      activat_fn=tf.nn.relu,
                                      initializer=init, reg=l2_reg,
                                      padding='SAME')
        conv_5 = tf.nn.max_pool(conv_5, ksize=[1, 3, 3, 1],
                                strides=[1, 2, 2, 1], padding='VALID')

        # Flatten to (batch, features) for the fully connected head.
        conv_5 = tf.reshape(conv_5,
                            [-1, int(np.prod(conv_5.get_shape()[1:]))],
                            name="conv5_flatout")

        fc6 = nf.fc_layer(conv_5, model_params["fc6"], name="fc6",
                          activat_fn=tf.nn.relu, reg=l2_reg)
        fc7 = nf.fc_layer(fc6, model_params["fc7"], name="fc7",
                          activat_fn=tf.nn.relu, reg=l2_reg)
        dropout = tf.layers.dropout(fc7, rate=self.dropout,
                                    training=self.is_training,
                                    name='dropout2')
        fc_code = nf.fc_layer(dropout, model_params["fc_code"],
                              name="fc_code", activat_fn=None, reg=l2_reg)

    return fc_code
def googleLeNet_v1(self):
    """GoogLeNet-v1-flavoured classifier; returns 10-way logits.

    Stem convolutions with local response normalization, two inception
    modules, average pooling, dropout, and a linear output layer.
    """
    params = {
        "conv1": [5, 5, 64],
        "conv2": [3, 3, 128],
        "inception_1": {
            "1x1": 64,
            "3x3": {"1x1": 96, "3x3": 128},
            "5x5": {"1x1": 16, "5x5": 32},
            "s1x1": 32,
        },
        "inception_2": {
            "1x1": 128,
            "3x3": {"1x1": 128, "3x3": 192},
            "5x5": {"1x1": 32, "5x5": 96},
            "s1x1": 64,
        },
        "fc3": 10,
    }

    with tf.name_scope("googleLeNet_v1"):
        x = nf.convolution_layer(self.inputs, params["conv1"],
                                 [1, 2, 2, 1], name="conv1")
        x = tf.nn.max_pool(x, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
                           padding='SAME')
        x = tf.nn.local_response_normalization(
            x, depth_radius=5, bias=1.0, alpha=0.0001, beta=0.75,
            name='LocalResponseNormalization')

        x = nf.convolution_layer(x, params["conv2"], [1, 1, 1, 1],
                                 name="conv2", flatten=False)
        x = tf.nn.local_response_normalization(
            x, depth_radius=5, bias=1.0, alpha=0.0001, beta=0.75,
            name='LocalResponseNormalization')

        # Each inception module looks up its own sub-config in `params`
        # via the `name` argument.
        x = nf.inception_v1(x, params, name="inception_1", flatten=False)
        x = nf.inception_v1(x, params, name="inception_2", flatten=False)

        x = tf.nn.avg_pool(x, ksize=[1, 3, 3, 1], strides=[1, 1, 1, 1],
                           padding='VALID')
        x = tf.reshape(x, [-1, int(np.prod(x.get_shape()[1:]))])
        x = tf.layers.dropout(x, rate=self.dropout,
                              training=self.is_training, name='dropout2')
        logits = nf.fc_layer(x, params["fc3"], name="logits",
                             activat_fn=None)

    return logits
def attention_network(image_input, layers, channels, is_training):
    """Build a VGG-style attention network over `image_input`.

    Produces a softmax weighting over `layers` alternatives for each of
    `channels` output channels.

    Args:
        image_input: input image tensor, NHWC — assumed; TODO confirm.
        layers: number of candidate layers to weight.
        channels: number of output channels being weighted.
        is_training: training flag; currently unused (the dropout that
            consumed it is disabled), kept for interface stability.

    Returns:
        Tensor of shape (batch, 1, 1, channels, layers) whose last axis
        is a softmax weighting.
    """
    with tf.variable_scope("attention"):
        att_net = nf.convolution_layer(image_input, [3, 3, 64], [1, 2, 2, 1], name="conv1-1")
        att_net = nf.convolution_layer(att_net, [3, 3, 64], [1, 1, 1, 1], name="conv1-2")
        att_net = tf.nn.max_pool(att_net, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME')

        att_net = nf.convolution_layer(att_net, [3, 3, 128], [1, 1, 1, 1], name="conv2-1")
        att_net = nf.convolution_layer(att_net, [3, 3, 128], [1, 1, 1, 1], name="conv2-2")
        att_net = tf.nn.max_pool(att_net, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME')

        att_net = nf.convolution_layer(att_net, [3, 3, 256], [1, 1, 1, 1], name="conv3-1")
        att_net = nf.convolution_layer(att_net, [3, 3, 256], [1, 1, 1, 1], name="conv3-2")
        att_net = tf.nn.max_pool(att_net, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME')

        att_net = nf.convolution_layer(att_net, [3, 3, 512], [1, 1, 1, 1], name="conv4-1")
        att_net = tf.nn.max_pool(att_net, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME')

        # Flatten. The static batch dim may be unknown at graph-build time,
        # so fall back to -1. (Fix: `bsize == None` -> `bsize is None`;
        # identity comparison is the correct test for None.)
        bsize, a, b, c = att_net.get_shape().as_list()
        if bsize is None:
            bsize = -1
        att_net = tf.reshape(
            att_net, [bsize, int(np.prod(att_net.get_shape()[1:]))])

        att_net = nf.fc_layer(att_net, 2048, name="fc1")
        att_net = nf.fc_layer(att_net, 2048, name="fc2")
        logits = nf.fc_layer(att_net, channels * layers, name="logits",
                             activat_fn=None)

        # Reshape with the dynamic batch size; softmax normalizes over
        # the trailing `layers` axis.
        bsize = tf.shape(logits)[0]
        logits = tf.reshape(logits, (bsize, 1, 1, channels, layers))
        weighting = tf.nn.softmax(logits)
        # Alternative (hard attention): argmax over the layer axis,
        # then one_hot — removed dead commented-out implementation.
    return weighting
def EXAMPLE_CNN(self, kwargs):
    """Example CNN classifier: two paired-conv stages plus three FC layers.

    Args:
        kwargs: dict providing "input" (image tensor) and "reuse"
            (variable-scope reuse flag).

    Returns:
        Un-activated `fc_out` logits tensor (10 classes).
    """
    model_params = {
        "conv_1": [3, 3, 128],
        "conv_2": [3, 3, 256],
        "fc_1": 1024,
        "fc_2": 512,
        "fc_out": 10,
    }

    reuse = kwargs["reuse"]
    l2_reg = tf.contrib.layers.l2_regularizer(1e-5)
    print(
        "==================================================================="
    )

    with tf.variable_scope("CNN", reuse=reuse):
        net_in = kwargs["input"]
        print("[EXAMPLE_CNN] input: %s" % net_in.get_shape())

        # Stage 1: two 3x3 convs whose outputs are summed, then pooled.
        conv_1_1 = nf.convolution_layer(net_in, model_params["conv_1"],
                                        [1, 1, 1, 1], name="conv_1_1",
                                        padding='SAME', activat_fn=nf.lrelu,
                                        is_bn=True,
                                        is_training=self.is_training,
                                        reg=l2_reg)
        conv_1_2 = nf.convolution_layer(conv_1_1, model_params["conv_1"],
                                        [1, 1, 1, 1], name="conv_1_2",
                                        padding='SAME', activat_fn=nf.lrelu,
                                        is_bn=True,
                                        is_training=self.is_training,
                                        reg=l2_reg)
        conv_1 = conv_1_1 + conv_1_2
        conv_1 = tf.nn.max_pool(conv_1, [1, 2, 2, 1], [1, 2, 2, 1],
                                padding='VALID')
        conv_1 = tf.layers.dropout(conv_1, rate=self.dropout,
                                   training=self.is_training,
                                   name='conv_1_dropout')
        print("conv_1: %s" % conv_1.get_shape())

        # Stage 2: same pattern at 256 filters.
        conv_2_1 = nf.convolution_layer(conv_1, model_params["conv_2"],
                                        [1, 1, 1, 1], name="conv_2_1",
                                        padding='SAME', activat_fn=nf.lrelu,
                                        is_bn=True,
                                        is_training=self.is_training,
                                        reg=l2_reg)
        conv_2_2 = nf.convolution_layer(conv_2_1, model_params["conv_2"],
                                        [1, 1, 1, 1], name="conv_2_2",
                                        padding='SAME', activat_fn=nf.lrelu,
                                        is_bn=True,
                                        is_training=self.is_training,
                                        reg=l2_reg)
        conv_2 = conv_2_1 + conv_2_2
        conv_2 = tf.nn.max_pool(conv_2, [1, 2, 2, 1], [1, 2, 2, 1],
                                padding='VALID')
        conv_2 = tf.layers.dropout(conv_2, rate=self.dropout,
                                   training=self.is_training,
                                   name='conv_2_dropout')
        print("conv_2: %s" % conv_2.get_shape())

        # Flatten; hard-coded 7*7*256 implies a 28x28 input after two 2x2
        # pools — assumption, TODO confirm against self.inputs shape.
        conv_code = tf.reshape(conv_2,
                               [tf.shape(self.inputs)[0], 7 * 7 * 256])

        fc_1 = nf.fc_layer(conv_code, model_params["fc_1"], name="fc_1",
                           activat_fn=nf.lrelu, is_bn=True,
                           is_training=self.is_training, reg=l2_reg)
        fc_1 = tf.layers.dropout(fc_1, rate=self.dropout,
                                 training=self.is_training,
                                 name='fc_1_dropout')
        print("fc_1: %s" % fc_1.get_shape())

        fc_2 = nf.fc_layer(fc_1, model_params["fc_2"], name="fc_2",
                           activat_fn=nf.lrelu, is_bn=True,
                           is_training=self.is_training, reg=l2_reg)
        fc_2 = tf.layers.dropout(fc_2, rate=self.dropout,
                                 training=self.is_training,
                                 name='fc_2_dropout')
        print("fc_2: %s" % fc_2.get_shape())

        fc_out = nf.fc_layer(fc_2, model_params["fc_out"], name="fc_out",
                             activat_fn=None)
        print("fc_out: %s" % fc_out.get_shape())

    return fc_out
def ResNet10(self, kwargs):
    """ResNet-10 backbone plus a small relation-module head.

    Four residual stages (64/128/256/512 filters; stages 3-5 use a 1x1
    projection shortcut when downsampling), followed by two "module"
    convolutions with pooling and two fully connected layers.

    Args:
        kwargs: dict providing "input" (image tensor) and "reuse"
            (variable-scope reuse flag).

    Returns:
        Tuple (x, conv1): the final fcM_4 output (2 units, no
        activation) and the stage-1 feature map.
    """
    model_params = {
        "conv1": [7, 7, 64],
        "conv2_1": [3, 3, 64],
        "conv2_2": [3, 3, 64],
        "conv3_1": [3, 3, 128],
        "conv3_2": [3, 3, 128],
        "conv3_sc": [1, 1, 128],
        "conv4_1": [3, 3, 256],
        "conv4_2": [3, 3, 256],
        "conv4_sc": [1, 1, 256],
        "conv5_1": [3, 3, 512],
        "conv5_2": [3, 3, 512],
        "conv5_sc": [1, 1, 512],
        "convM_1": [3, 3, 512],
        "convM_2": [3, 3, 512],
        "fcM_3": 8,
        "fcM_4": 2,
    }

    reuse = kwargs["reuse"]
    l2_reg = tf.contrib.layers.l2_regularizer(1e-5)
    print(
        "==================================================================="
    )

    with tf.variable_scope("ResNet10", reuse=reuse):
        net_in = kwargs["input"]
        print("[ResNet-10] input: %s" % net_in.get_shape())

        # conv 1: strided 7x7 stem + max-pool.
        x = nf.convolution_layer(net_in, model_params["conv1"],
                                 [1, 2, 2, 1], name="conv1",
                                 padding='SAME', activat_fn=tf.nn.relu,
                                 is_bn=True, is_training=self.is_training)
        x = tf.nn.max_pool(x, [1, 3, 3, 1], [1, 2, 2, 1], padding='SAME',
                           name='max_1')
        print("conv1: %s" % x.get_shape())
        conv1 = x

        # conv 2: residual block with identity shortcut (no downsampling).
        shortcut = x
        x = nf.convolution_layer(x, model_params["conv2_1"], [1, 1, 1, 1],
                                 name="conv2_1", padding='SAME',
                                 activat_fn=tf.nn.relu, is_bn=True,
                                 is_training=self.is_training)
        x = nf.convolution_layer(x, model_params["conv2_2"], [1, 1, 1, 1],
                                 name="conv2_2", padding='SAME',
                                 activat_fn=None, is_bn=True,
                                 is_training=self.is_training)
        x = tf.add(x, shortcut)
        x = tf.nn.relu(x, name="conv2_2_out")
        print("[residual_simple_block] conv2: %s" % x.get_shape())

        # conv 3: strided residual block with 1x1 projection shortcut.
        shortcut = nf.convolution_layer(x, model_params["conv3_sc"],
                                        [1, 2, 2, 1], name="conv3_sc",
                                        padding='SAME', activat_fn=None,
                                        is_bn=True,
                                        is_training=self.is_training)
        x = nf.convolution_layer(x, model_params["conv3_1"], [1, 2, 2, 1],
                                 name="conv3_1", padding='SAME',
                                 activat_fn=tf.nn.relu, is_bn=True,
                                 is_training=self.is_training)
        x = nf.convolution_layer(x, model_params["conv3_2"], [1, 1, 1, 1],
                                 name="conv3_2", padding='SAME',
                                 activat_fn=None, is_bn=True,
                                 is_training=self.is_training)
        x = tf.add(x, shortcut)
        x = tf.nn.relu(x, name="conv3_2_out")
        print("[residual_simple_block] conv3: %s" % x.get_shape())

        # conv 4: same pattern at 256 filters.
        shortcut = nf.convolution_layer(x, model_params["conv4_sc"],
                                        [1, 2, 2, 1], name="conv4_sc",
                                        padding='SAME', activat_fn=None,
                                        is_bn=True,
                                        is_training=self.is_training)
        x = nf.convolution_layer(x, model_params["conv4_1"], [1, 2, 2, 1],
                                 name="conv4_1", padding='SAME',
                                 activat_fn=tf.nn.relu, is_bn=True,
                                 is_training=self.is_training)
        x = nf.convolution_layer(x, model_params["conv4_2"], [1, 1, 1, 1],
                                 name="conv4_2", padding='SAME',
                                 activat_fn=None, is_bn=True,
                                 is_training=self.is_training)
        x = tf.add(x, shortcut)
        x = tf.nn.relu(x, name="conv4_2_out")
        print("[residual_simple_block] conv4: %s" % x.get_shape())

        # conv 5: same pattern at 512 filters.
        shortcut = nf.convolution_layer(x, model_params["conv5_sc"],
                                        [1, 2, 2, 1], name="conv5_sc",
                                        padding='SAME', activat_fn=None,
                                        is_bn=True,
                                        is_training=self.is_training)
        x = nf.convolution_layer(x, model_params["conv5_1"], [1, 2, 2, 1],
                                 name="conv5_1", padding='SAME',
                                 activat_fn=tf.nn.relu, is_bn=True,
                                 is_training=self.is_training)
        x = nf.convolution_layer(x, model_params["conv5_2"], [1, 1, 1, 1],
                                 name="conv5_2", padding='SAME',
                                 activat_fn=None, is_bn=True,
                                 is_training=self.is_training)
        x = tf.add(x, shortcut)
        x = tf.nn.relu(x, name="conv5_2_out")
        print("[residual_simple_block] conv5: %s" % x.get_shape())

        # Relation module: two conv+pool stages.
        x = nf.convolution_layer(x, model_params["convM_1"], [1, 1, 1, 1],
                                 name="convM_1", padding='SAME',
                                 activat_fn=tf.nn.relu, is_bn=True,
                                 is_training=self.is_training, reg=l2_reg)
        x = tf.nn.max_pool(x, [1, 2, 2, 1], [1, 2, 2, 1], padding='VALID',
                           name='max_1')
        print("[relation_module] convM_1: %s" % x.get_shape())

        x = nf.convolution_layer(x, model_params["convM_2"], [1, 1, 1, 1],
                                 name="convM_2", padding='SAME',
                                 activat_fn=tf.nn.relu, is_bn=True,
                                 is_training=self.is_training, reg=l2_reg)
        x = tf.nn.max_pool(x, [1, 2, 2, 1], [1, 2, 2, 1], padding='VALID',
                           name='max_2')
        print("[relation_module] convM_2: %s" % x.get_shape())

        # Flatten; hard-coded 3*1*512 assumes a specific input size —
        # TODO confirm against the caller's input shape.
        x = tf.reshape(x, [tf.shape(net_in)[0], 3 * 1 * 512])

        x = nf.fc_layer(x, model_params["fcM_3"], name="fcM_3",
                        activat_fn=tf.nn.relu, is_bn=False,
                        is_training=self.is_training, reg=l2_reg)
        print("[relation_module] fcM_3: %s" % x.get_shape())

        x = nf.fc_layer(x, model_params["fcM_4"], name="fcM_4",
                        activat_fn=None, is_bn=False,
                        is_training=self.is_training, reg=l2_reg)
        print("[relation_module] fcM_4: %s" % x.get_shape())

    return x, conv1
def CNN_1st_v1(self, kwargs):
    """First-stage CNN (v1): AlexNet-ish conv stack + three FC layers.

    Active layers are conv_1_1, conv_2_1, conv_3_1 and conv_5_1 (the
    paired second convs and the whole conv_4 stage were disabled in the
    original and have been removed here; conv_4's params remain in the
    dict but are unused).

    Args:
        kwargs: dict providing "input" (image tensor) and "reuse"
            (variable-scope reuse flag).

    Returns:
        Tuple (fc_out, conv_1): un-activated 2-class logits and the
        stage-1 feature map.
    """
    model_params = {
        "conv_1": [11, 11, 128],
        "conv_2": [5, 5, 256],
        "conv_3": [3, 3, 512],
        "conv_4": [3, 3, 1024],
        "conv_5": [3, 3, 1024],
        "fc_1": 1024,
        "fc_2": 512,
        "fc_out": 2,
    }

    reuse = kwargs["reuse"]
    l2_reg = tf.contrib.layers.l2_regularizer(1e-5)
    print(
        "==================================================================="
    )

    with tf.variable_scope("CNN", reuse=reuse):
        net_in = kwargs["input"]
        print("[CNN_1st_v1] input: %s" % net_in.get_shape())

        # Stage 1: strided 11x11 conv, pool, dropout.
        conv_1_1 = nf.convolution_layer(net_in, model_params["conv_1"],
                                        [1, 4, 4, 1], name="conv_1_1",
                                        padding='SAME', activat_fn=nf.lrelu,
                                        is_bn=True,
                                        is_training=self.is_training,
                                        reg=l2_reg)
        conv_1 = tf.nn.max_pool(conv_1_1, [1, 3, 3, 1], [1, 2, 2, 1],
                                padding='VALID')
        conv_1 = tf.layers.dropout(conv_1, rate=self.dropout,
                                   training=self.is_training,
                                   name='conv_1_dropout')
        print("conv_1: %s" % conv_1.get_shape())

        # Stage 2: strided 5x5 conv, pool, dropout.
        conv_2_1 = nf.convolution_layer(conv_1, model_params["conv_2"],
                                        [1, 2, 2, 1], name="conv_2_1",
                                        padding='SAME', activat_fn=nf.lrelu,
                                        is_bn=True,
                                        is_training=self.is_training,
                                        reg=l2_reg)
        conv_2 = tf.nn.max_pool(conv_2_1, [1, 3, 3, 1], [1, 2, 2, 1],
                                padding='VALID')
        conv_2 = tf.layers.dropout(conv_2, rate=self.dropout,
                                   training=self.is_training,
                                   name='conv_2_dropout')
        print("conv_2: %s" % conv_2.get_shape())

        # Stage 3: 3x3 conv, dropout only (no pooling).
        conv_3_1 = nf.convolution_layer(conv_2, model_params["conv_3"],
                                        [1, 1, 1, 1], name="conv_3_1",
                                        padding='SAME', activat_fn=nf.lrelu,
                                        is_bn=True,
                                        is_training=self.is_training,
                                        reg=l2_reg)
        conv_3 = tf.layers.dropout(conv_3_1, rate=self.dropout,
                                   training=self.is_training,
                                   name='conv_3_dropout')
        print("conv_3: %s" % conv_3.get_shape())

        # Stage 5 (stage 4 disabled): 3x3 conv, pool, dropout.
        conv_5_1 = nf.convolution_layer(conv_3, model_params["conv_5"],
                                        [1, 1, 1, 1], name="conv_5_1",
                                        padding='SAME', activat_fn=nf.lrelu,
                                        is_bn=True,
                                        is_training=self.is_training,
                                        reg=l2_reg)
        conv_5 = tf.nn.max_pool(conv_5_1, [1, 3, 3, 1], [1, 2, 2, 1],
                                padding='VALID')
        conv_5 = tf.layers.dropout(conv_5, rate=self.dropout,
                                   training=self.is_training,
                                   name='conv_5_dropout')
        print("conv_5: %s" % conv_5.get_shape())

        # Flatten; hard-coded 4*2*1024 assumes a specific input size —
        # TODO confirm against self.inputs shape.
        conv_code = tf.reshape(conv_5,
                               [tf.shape(self.inputs)[0], 4 * 2 * 1024])

        fc_1 = nf.fc_layer(conv_code, model_params["fc_1"], name="fc_1",
                           activat_fn=nf.lrelu, is_bn=True,
                           is_training=self.is_training, reg=l2_reg)
        fc_1 = tf.layers.dropout(fc_1, rate=self.dropout,
                                 training=self.is_training,
                                 name='fc_1_dropout')
        print("fc_1: %s" % fc_1.get_shape())

        fc_2 = nf.fc_layer(fc_1, model_params["fc_2"], name="fc_2",
                           activat_fn=nf.lrelu, is_bn=True,
                           is_training=self.is_training, reg=l2_reg)
        fc_2 = tf.layers.dropout(fc_2, rate=self.dropout,
                                 training=self.is_training,
                                 name='fc_2_dropout')
        print("fc_2: %s" % fc_2.get_shape())

        fc_out = nf.fc_layer(fc_2, model_params["fc_out"], name="fc_out",
                             activat_fn=None)
        print("fc_out: %s" % fc_out.get_shape())

    return fc_out, conv_1