def _stage_conv(self, data_ph, last_stage, stage_idx, model_params):
    """Build one refinement stage.

    Concatenates the raw input image with the previous stage's
    single-channel prediction along the channel axis, then applies three
    conv + leaky-relu layers that reduce back to one channel.

    Args:
        data_ph: data placeholder object providing get_input().
        last_stage: previous stage's single-channel prediction map.
        stage_idx: integer used to name the variable scope ("stage%d").
        model_params: dict with "leaky_param" and "weight_decay".

    Returns:
        The stage's single-channel output tensor.
    """
    image = data_ph.get_input()
    alpha = model_params["leaky_param"]
    decay = model_params["weight_decay"]
    with tf.variable_scope("stage%d" % stage_idx):
        # Old-style tf.concat signature: axis argument comes first
        # (channel axis 3, NHWC).
        net = tf.concat(3, [image, last_stage])
        # (filter_shape, layer_name) for the three stacked convolutions;
        # input has 3 image channels + 1 prediction channel = 4.
        layer_specs = [
            ([3, 3, 4, 64], "conv1"),
            ([3, 3, 64, 64], "conv2"),
            ([1, 1, 64, 1], "conv3"),
        ]
        for filter_shape, layer_name in layer_specs:
            net = mf.add_leaky_relu(
                mf.convolution_2d_layer(net, filter_shape, [1, 1],
                                        "SAME", decay, layer_name),
                alpha)
    return net
def _create_network(self, input_ph, keep_prob):
    """Assemble a VGG-style dilated-convolution network from preloaded
    weights, then upsample and project to a single-channel map.

    Args:
        input_ph: input image batch (NHWC).
        keep_prob: dropout keep probability applied after late blocks.

    Returns:
        A one-channel tensor resized back to the input's spatial size.

    NOTE(review): `dilations` and `ks` are free names here (neither
    parameters nor self attributes) — presumably module-level config
    giving per-block dilation lists and the pooling kernel size; confirm
    where they are defined. `xrange` implies this targets Python 2.
    """
    current = input_ph
    v_idx = 0  # Index into self.variables: weight at 2*i, bias at 2*i+1.
    # Last block is the classification layer.
    for b_idx in xrange(len(dilations) - 1):
        for l_idx, dilation in enumerate(dilations[b_idx]):
            w = self.variables[v_idx * 2]
            b = self.variables[v_idx * 2 + 1]
            if dilation == 1:
                # Plain convolution when no dilation is requested.
                conv = tf.nn.conv2d(current, w, strides=[1, 1, 1, 1],
                                    padding='SAME')
            else:
                # Dilated conv keeps resolution while growing the
                # receptive field.
                conv = tf.nn.atrous_conv2d(current, w, dilation,
                                           padding='SAME')
            current = tf.nn.relu(tf.nn.bias_add(conv, b))
            v_idx += 1
        # Optional pooling and dropout after each block.
        if b_idx < 3:
            # Early blocks: downsample spatially by 2.
            current = tf.nn.max_pool(current, ksize=[1, ks, ks, 1],
                                     strides=[1, 2, 2, 1], padding='SAME')
        elif b_idx == 3:
            # Stride-1 pooling: smoothing without downsampling.
            current = tf.nn.max_pool(current, ksize=[1, ks, ks, 1],
                                     strides=[1, 1, 1, 1], padding='SAME')
        elif b_idx == 4:
            # Max then average pooling, both stride 1.
            current = tf.nn.max_pool(current, ksize=[1, ks, ks, 1],
                                     strides=[1, 1, 1, 1], padding='SAME')
            current = tf.nn.avg_pool(current, ksize=[1, ks, ks, 1],
                                     strides=[1, 1, 1, 1], padding='SAME')
        elif b_idx <= 6:
            current = tf.nn.dropout(current, keep_prob=keep_prob)
    # TODO: change to a regression layer, shape now (batch, w/8, h/8, 1024)
    # Up-sample back to the input's spatial resolution.
    current = tf.image.resize_bilinear(current, tf.shape(input_ph)[1:3])
    current = mf.convolution_2d_layer(current, [1, 1, 1024, 1], [1, 1],
                                      "SAME",
                                      wd=self.model_params["weight_decay"],
                                      layer_name="Conv2D_24")
    return current
def _single_hydra_cnn(self, input_ph, model_params, stage):
    """One head of the hydra network.

    Three conv stages (the first two followed by /2 maxpools) and two
    1x1 convolutions, flattened into a per-image feature vector.

    Args:
        input_ph: NHWC image batch with 3 channels.
        model_params: dict with "leaky_param", "weight_decay" and
            "batch_size".
        stage: integer used to name the variable scope ("stage_%d").

    Returns:
        Tensor of shape (batch_size, -1): flattened conv5 activations.
    """
    alpha = model_params["leaky_param"]
    decay = model_params["weight_decay"]
    n_batch = model_params["batch_size"]

    def conv(tensor, filter_shape, name):
        # Stride-1 SAME conv followed by a leaky relu.
        return mf.add_leaky_relu(
            mf.convolution_2d_layer(tensor, filter_shape, [1, 1],
                                    "SAME", decay, name),
            alpha)

    with tf.variable_scope("stage_%d" % stage):
        net = conv(input_ph, [7, 7, 3, 32], "conv1")
        net = mf.maxpool_2d_layer(net, [2, 2], [2, 2], "maxpool1")
        net = conv(net, [7, 7, 32, 32], "conv2")
        net = mf.maxpool_2d_layer(net, [2, 2], [2, 2], "maxpool2")
        net = conv(net, [3, 3, 32, 32], "conv3")
        net = conv(net, [1, 1, 32, 1000], "conv4")
        net = conv(net, [1, 1, 1000, 400], "conv5")
        # Flatten each example to a single row.
        return tf.reshape(net, [n_batch, -1])
def _model_infer_cnn_single(self, input_ph, model_params):
    """VGG-style encoder whose later blocks use dilated (atrous) convs,
    fused into a hypercolumn and upsampled by two deconv stages down to
    a one-channel map.

    Args:
        input_ph: NHWC image batch with 3 channels.
        model_params: dict with "leaky_param" (leaky-relu slope) and
            "weight_decay".

    Returns:
        (predict_list, fc): predict_list holds the single-channel map
        conv7; fc is conv7 reshaped to (batch, h*w*c).
    """
    leaky_param = model_params["leaky_param"]
    wd = model_params["weight_decay"]
    hyper_list = list()  # feature maps fused into the hypercolumn
    print(input_ph)
    # Block 1: 64 channels, then /2 maxpool.
    conv11 = mf.add_leaky_relu(
        mf.convolution_2d_layer(input_ph, [3, 3, 3, 64], [1, 1],
                                "SAME", wd, "conv1_1"), leaky_param)
    conv12 = mf.add_leaky_relu(
        mf.convolution_2d_layer(conv11, [3, 3, 64, 64], [1, 1],
                                "SAME", wd, "conv1_2"), leaky_param)
    conv12_maxpool = mf.maxpool_2d_layer(conv12, [3, 3],
                                         [2, 2], "maxpool1")
    print(conv12_maxpool)
    # Block 2: 128 channels, /2 maxpool; kept for the hypercolumn.
    conv21 = mf.add_leaky_relu(
        mf.convolution_2d_layer(conv12_maxpool, [3, 3, 64, 128], [1, 1],
                                "SAME", wd, "conv2_1"), leaky_param)
    conv22 = mf.add_leaky_relu(
        mf.convolution_2d_layer(conv21, [3, 3, 128, 128], [1, 1],
                                "SAME", wd, "conv2_2"), leaky_param)
    conv22_maxpool = mf.maxpool_2d_layer(conv22, [3, 3],
                                         [2, 2], "maxpool2")
    print(conv22_maxpool)
    hyper_list.append(conv22_maxpool)
    # Block 3: 256 channels; dilation-2 atrous conv instead of pooling.
    conv31 = mf.add_leaky_relu(
        mf.convolution_2d_layer(conv22_maxpool, [3, 3, 128, 256], [1, 1],
                                "SAME", wd, "conv3_1"), leaky_param)
    conv32 = mf.add_leaky_relu(
        mf.convolution_2d_layer(conv31, [3, 3, 256, 256], [1, 1],
                                "SAME", wd, "conv3_2"), leaky_param)
    atrous3 = mf.add_leaky_relu(
        mf.atrous_convolution_layer(conv32, [3, 3, 256, 256], 2,
                                    "SAME", wd, "atrous3"), leaky_param)
    print(atrous3)
    hyper_list.append(atrous3)
    # Block 4: 512 channels, again ending with a dilation-2 atrous conv.
    conv41 = mf.add_leaky_relu(
        mf.convolution_2d_layer(atrous3, [3, 3, 256, 512], [1, 1],
                                "SAME", wd, "conv4_1"), leaky_param)
    conv42 = mf.add_leaky_relu(
        mf.convolution_2d_layer(conv41, [3, 3, 512, 512], [1, 1],
                                "SAME", wd, "conv4_2"), leaky_param)
    atrous4 = mf.add_leaky_relu(
        mf.atrous_convolution_layer(conv42, [3, 3, 512, 512], 2,
                                    "SAME", wd, "atrous4"), leaky_param)
    print(atrous4)
    hyper_list.append(atrous4)
    # Block 5: two stacked dilation-2 atrous convs.
    atrous51 = mf.add_leaky_relu(
        mf.atrous_convolution_layer(atrous4, [3, 3, 512, 512], 2,
                                    "SAME", wd, "atrous5_1"), leaky_param)
    atrous52 = mf.add_leaky_relu(
        mf.atrous_convolution_layer(atrous51, [3, 3, 512, 512], 2,
                                    "SAME", wd, "atrous5_2"), leaky_param)
    print(atrous52)
    hyper_list.append(atrous52)
    # Fuse the saved maps into one hypercolumn and compress to 512.
    hypercolumn = self._pack_tensor_list(hyper_list)
    print(hypercolumn)
    [b, w, h, c] = hypercolumn.get_shape().as_list()
    conv6 = mf.add_leaky_relu(
        mf.convolution_2d_layer(hypercolumn, [1, 1, c, 512], [1, 1],
                                "SAME", wd, "conv6"), leaky_param)
    # Two learned-upsampling stages with encoder skip connections.
    deconv1 = self._deconv2_wrapper(conv6, conv21, 256, wd, "deconv1")
    print(deconv1)
    deconv2 = self._deconv2_wrapper(deconv1, conv11, 64, wd, "deconv2")
    print(deconv2)
    conv7 = mf.add_leaky_relu(
        mf.convolution_2d_layer(deconv2, [1, 1, 64, 1], [1, 1],
                                "SAME", wd, "conv7"), leaky_param)
    print(conv7)
    predict_list = list()
    predict_list.append(conv7)
    # Flatten conv7: dynamic batch size, static spatial/channel dims.
    b = tf.shape(hypercolumn)[0]
    [bb, hh, ww, cc] = conv7.get_shape().as_list()
    dims = hh * ww * cc
    fc = tf.reshape(conv7, [b, dims], "vectorize")
    return predict_list, fc
def model_infer(self, data_ph, model_params):
    """Build the main density network: VGG-style encoder, hypercolumn
    fusion, two learned upsampling (deconv) stages with encoder skip
    connections, and an optional domain-adaptation classifier head.

    Args:
        data_ph: data placeholder object providing get_input(),
            get_label() and get_mask().
        model_params: dict with "leaky_param", "weight_decay" and
            "use_da" keys.

    Side effects:
        self.predict_list -- [conv7], the single-channel prediction map.
        self.da_cls       -- set only when model_params['use_da'] is true.
        Image summaries registered under variable scope "image_sum".
    """
    input_ph = data_ph.get_input()
    leaky_param = model_params["leaky_param"]
    wd = model_params["weight_decay"]
    hyper_list = list()  # feature maps fused into the hypercolumn
    print(input_ph)

    # Block 1: 64 channels, then /2 maxpool.
    conv11 = mf.add_leaky_relu(
        mf.convolution_2d_layer(input_ph, [3, 3, 3, 64], [1, 1],
                                "SAME", wd, "conv1_1"), leaky_param)
    conv12 = mf.add_leaky_relu(
        mf.convolution_2d_layer(conv11, [3, 3, 64, 64], [1, 1],
                                "SAME", wd, "conv1_2"), leaky_param)
    conv12_maxpool = mf.maxpool_2d_layer(conv12, [3, 3],
                                         [2, 2], "maxpool1")
    print(conv12_maxpool)

    # Block 2: 128 channels, /2 maxpool; kept for the hypercolumn.
    conv21 = mf.add_leaky_relu(
        mf.convolution_2d_layer(conv12_maxpool, [3, 3, 64, 128], [1, 1],
                                "SAME", wd, "conv2_1"), leaky_param)
    conv22 = mf.add_leaky_relu(
        mf.convolution_2d_layer(conv21, [3, 3, 128, 128], [1, 1],
                                "SAME", wd, "conv2_2"), leaky_param)
    conv22_maxpool = mf.maxpool_2d_layer(conv22, [3, 3],
                                         [2, 2], "maxpool2")
    print(conv22_maxpool)
    hyper_list.append(conv22_maxpool)

    # Block 3: 256 channels, no pooling; kept for the hypercolumn.
    conv31 = mf.add_leaky_relu(
        mf.convolution_2d_layer(conv22_maxpool, [3, 3, 128, 256], [1, 1],
                                "SAME", wd, "conv3_1"), leaky_param)
    conv32 = mf.add_leaky_relu(
        mf.convolution_2d_layer(conv31, [3, 3, 256, 256], [1, 1],
                                "SAME", wd, "conv3_2"), leaky_param)
    hyper_list.append(conv32)

    # Block 4: 512 channels, no pooling; kept for the hypercolumn.
    conv41 = mf.add_leaky_relu(
        mf.convolution_2d_layer(conv32, [3, 3, 256, 512], [1, 1],
                                "SAME", wd, "conv4_1"), leaky_param)
    conv42 = mf.add_leaky_relu(
        mf.convolution_2d_layer(conv41, [3, 3, 512, 512], [1, 1],
                                "SAME", wd, "conv4_2"), leaky_param)
    hyper_list.append(conv42)

    # Fuse the saved feature maps into one hypercolumn tensor and
    # compress its channels to 512.
    hypercolumn = self._pack_tensor_list(hyper_list)
    print(hypercolumn)
    c = hypercolumn.get_shape().as_list()[3]
    conv6 = mf.add_leaky_relu(
        mf.convolution_2d_layer(hypercolumn, [1, 1, c, 512], [1, 1],
                                "SAME", wd, "conv6"), leaky_param)

    # Learned upsampling back toward input resolution, skip-connected to
    # the matching encoder layers.
    deconv1 = self._deconv2_wrapper(conv6, conv21, 256, wd, "deconv1")
    print(deconv1)
    deconv2 = self._deconv2_wrapper(deconv1, conv11, 64, wd, "deconv2")
    print(deconv2)

    # Optional domain classifier head (2-way) for domain adaptation.
    if model_params['use_da']:
        da_conv1 = mf.add_leaky_relu(
            mf.convolution_2d_layer(deconv2, [1, 1, 64, 64], [1, 1],
                                    "SAME", wd, "da_conv1"), leaky_param)
        da_cls = mf.add_leaky_relu(
            mf.convolution_2d_layer(da_conv1, [1, 1, 64, 2], [1, 1],
                                    "SAME", wd, "da_cls"), leaky_param)
        self.da_cls = da_cls

    # Final 1x1 conv to the single-channel prediction map.
    conv7 = mf.add_leaky_relu(
        mf.convolution_2d_layer(deconv2, [1, 1, 64, 1], [1, 1],
                                "SAME", wd, "conv7"), leaky_param)
    print(conv7)

    with tf.variable_scope("image_sum"):
        self._add_image_sum(data_ph.get_input(), data_ph.get_label(),
                            conv7, data_ph.get_mask())

    self.predict_list = list()
    self.predict_list.append(conv7)
def model_infer(self, data_ph, model_params):
    """All-atrous variant: every block ends in a dilation-2 atrous conv
    (no pooling anywhere), each atrous output joins the hypercolumn, and
    a single 1x1 conv reduces the fused features to one channel.

    Args:
        data_ph: data placeholder object providing get_input(),
            get_label() and get_mask().
        model_params: dict with "leaky_param" and "weight_decay".

    Side effects:
        self.predict_list -- [conv6], the single-channel map.
        Input/label/mask/prediction tensors are added to the
        "image_to_write" collection.

    NOTE(review): calls self.pack_tensor_list (no leading underscore)
    while sibling variants call self._pack_tensor_list — confirm both
    helpers exist.
    """
    input_ph = data_ph.get_input()
    leaky_param = model_params["leaky_param"]
    wd = model_params["weight_decay"]
    hyper_list = list()  # one atrous output per block, fused below
    print(input_ph)
    # Block 1: 64 channels.
    conv11 = mf.add_leaky_relu(mf.convolution_2d_layer(
        input_ph, [3, 3, 3, 64], [1, 1],
        "SAME", wd, "conv1_1"), leaky_param)
    atrous1 = mf.add_leaky_relu(mf.atrous_convolution_layer(
        conv11, [3, 3, 64, 64], 2,
        "SAME", wd, "atrous1"), leaky_param)
    print(atrous1)
    hyper_list.append(atrous1)
    # Block 2: 128 channels.
    conv21 = mf.add_leaky_relu(mf.convolution_2d_layer(
        atrous1, [3, 3, 64, 128], [1, 1],
        "SAME", wd, "conv2_1"), leaky_param)
    atrous2 = mf.add_leaky_relu(mf.atrous_convolution_layer(
        conv21, [3, 3, 128, 128], 2,
        "SAME", wd, "atrous2"), leaky_param)
    print(atrous2)
    hyper_list.append(atrous2)
    # Block 3: 256 channels.
    conv31 = mf.add_leaky_relu(mf.convolution_2d_layer(
        atrous2, [3, 3, 128, 256], [1, 1],
        "SAME", wd, "conv3_1"), leaky_param)
    conv32 = mf.add_leaky_relu(mf.convolution_2d_layer(
        conv31, [3, 3, 256, 256], [1, 1],
        "SAME", wd, "conv3_2"), leaky_param)
    atrous3 = mf.add_leaky_relu(mf.atrous_convolution_layer(
        conv32, [3, 3, 256, 256], 2,
        "SAME", wd, "atrous3"), leaky_param)
    print(atrous3)
    hyper_list.append(atrous3)
    # Block 4: 512 channels.
    conv41 = mf.add_leaky_relu(mf.convolution_2d_layer(
        atrous3, [3, 3, 256, 512], [1, 1],
        "SAME", wd, "conv4_1"), leaky_param)
    conv42 = mf.add_leaky_relu(mf.convolution_2d_layer(
        conv41, [3, 3, 512, 512], [1, 1],
        "SAME", wd, "conv4_2"), leaky_param)
    atrous4 = mf.add_leaky_relu(mf.atrous_convolution_layer(
        conv42, [3, 3, 512, 512], 2,
        "SAME", wd, "atrous4"), leaky_param)
    print(atrous4)
    hyper_list.append(atrous4)
    # Block 5: 512 channels.
    conv51 = mf.add_leaky_relu(mf.convolution_2d_layer(
        atrous4, [3, 3, 512, 512], [1, 1],
        "SAME", wd, "conv5_1"), leaky_param)
    conv52 = mf.add_leaky_relu(mf.convolution_2d_layer(
        conv51, [3, 3, 512, 512], [1, 1],
        "SAME", wd, "conv5_2"), leaky_param)
    atrous5 = mf.add_leaky_relu(mf.atrous_convolution_layer(
        conv52, [3, 3, 512, 512], 2,
        "SAME", wd, "atrous5"), leaky_param)
    print(atrous5)
    hyper_list.append(atrous5)
    # Fuse all block outputs and map straight to one channel.
    hypercolumn = self.pack_tensor_list(hyper_list)
    c_dimension = hypercolumn.get_shape().as_list()[3]
    conv6 = mf.add_leaky_relu(mf.convolution_2d_layer(
        hypercolumn, [1, 1, c_dimension, 1], [1, 1],
        "SAME", wd, "conv6"), leaky_param)
    # Register tensors for image summary writing.
    tf.add_to_collection("image_to_write", data_ph.get_input())
    tf.add_to_collection("image_to_write", data_ph.get_label())
    tf.add_to_collection("image_to_write", data_ph.get_mask())
    tf.add_to_collection("image_to_write", conv6)
    self.predict_list = list()
    self.predict_list.append(conv6)
def model_infer(self, data_ph, model_params):
    """Build the encoder/decoder density network with the newer
    model_func API (explicit data_format / batch-norm / is_train args).

    Batch normalization is hard-disabled (bn = False), which makes the
    graph identical between train and test.

    Args:
        data_ph: data placeholder object providing get_input(),
            get_label() and get_mask().
        model_params: dict with "leaky_param" and "weight_decay".

    Side effects:
        self.predict_list -- [conv7], the single-channel prediction map.
        Image summaries registered under variable scope "image_sum".
    """
    input_ph = data_ph.get_input()
    lp = model_params["leaky_param"]
    wd = model_params["weight_decay"]
    # Disable the batch normalization.
    bn = False
    # Once bn is disabled, there is no difference between train and test.
    is_train = False
    hyper_list = list()  # feature maps fused into the hypercolumn
    print(input_ph)

    # Block 1: 64 channels, then /2 maxpool.
    conv11 = mf.convolution_2d_layer(input_ph, 64, [3, 3], [1, 1], "SAME",
                                     "NHWC", bn, is_train, lp, wd, "conv1_1")
    conv12 = mf.convolution_2d_layer(conv11, 64, [3, 3], [1, 1], "SAME",
                                     "NHWC", bn, is_train, lp, wd, "conv1_2")
    conv12_maxpool = mf.maxpool_2d_layer(conv12, [3, 3], [2, 2],
                                         "NHWC", "maxpool1")
    print(conv12_maxpool)

    # Block 2: 128 channels, /2 maxpool; kept for the hypercolumn.
    conv21 = mf.convolution_2d_layer(conv12_maxpool, 128, [3, 3], [1, 1],
                                     "SAME", "NHWC", bn, is_train, lp, wd,
                                     "conv2_1")
    conv22 = mf.convolution_2d_layer(conv21, 128, [3, 3], [1, 1], "SAME",
                                     "NHWC", bn, is_train, lp, wd, "conv2_2")
    conv22_maxpool = mf.maxpool_2d_layer(conv22, [3, 3], [2, 2],
                                         "NHWC", "maxpool2")
    print(conv22_maxpool)
    hyper_list.append(conv22_maxpool)

    # Block 3: 256 channels; dilation-2 atrous conv instead of pooling.
    conv31 = mf.convolution_2d_layer(conv22_maxpool, 256, [3, 3], [1, 1],
                                     "SAME", "NHWC", bn, is_train, lp, wd,
                                     "conv3_1")
    conv32 = mf.convolution_2d_layer(conv31, 256, [3, 3], [1, 1], "SAME",
                                     "NHWC", bn, is_train, lp, wd, "conv3_2")
    atrous3 = mf.atrous_convolution_layer(conv32, 256, [3, 3], 2, "SAME",
                                          "NHWC", bn, is_train, lp, wd,
                                          "atrous3")
    print(atrous3)
    hyper_list.append(atrous3)

    # Block 4: 512 channels, ending in a dilation-2 atrous conv.
    conv41 = mf.convolution_2d_layer(atrous3, 512, [3, 3], [1, 1], "SAME",
                                     "NHWC", bn, is_train, lp, wd, "conv4_1")
    conv42 = mf.convolution_2d_layer(conv41, 512, [3, 3], [1, 1], "SAME",
                                     "NHWC", bn, is_train, lp, wd, "conv4_2")
    atrous4 = mf.atrous_convolution_layer(conv42, 512, [3, 3], 2, "SAME",
                                          "NHWC", bn, is_train, lp, wd,
                                          "atrous4")
    print(atrous4)
    hyper_list.append(atrous4)

    # Block 5: two stacked dilation-2 atrous convs.
    atrous51 = mf.atrous_convolution_layer(atrous4, 512, [3, 3], 2, "SAME",
                                           "NHWC", bn, is_train, lp, wd,
                                           "atrous5_1")
    atrous52 = mf.atrous_convolution_layer(atrous51, 512, [3, 3], 2, "SAME",
                                           "NHWC", bn, is_train, lp, wd,
                                           "atrous5_2")
    print(atrous52)
    hyper_list.append(atrous52)

    # Fuse saved maps into a hypercolumn and compress to 512 channels.
    hypercolumn = self._pack_tensor_list(hyper_list)
    print(hypercolumn)
    conv6 = mf.convolution_2d_layer(hypercolumn, 512, [1, 1], [1, 1], "SAME",
                                    "NHWC", bn, is_train, lp, wd, "conv6")

    # Learned upsampling with encoder skip connections.
    deconv1 = self._deconv2_wrapper(conv6, conv21, 256, wd, lp, "deconv1")
    print(deconv1)
    deconv2 = self._deconv2_wrapper(deconv1, conv11, 64, wd, lp, "deconv2")
    print(deconv2)

    # Final 1x1 conv to one channel; batch norm explicitly off here.
    conv7 = mf.convolution_2d_layer(deconv2, 1, [1, 1], [1, 1], "SAME",
                                    "NHWC", False, is_train, lp, wd, "conv7")
    print(conv7)

    with tf.variable_scope("image_sum"):
        self._add_image_sum(data_ph.get_input(), data_ph.get_label(),
                            conv7, data_ph.get_mask())

    self.predict_list = list()
    self.predict_list.append(conv7)
def model_infer(self, data_ph, model_params):
    """VGG16-style encoder whose five pooled block outputs are fused
    into a hypercolumn driving three resize-deconv refinement passes,
    each producing a 3-channel 224x224 output.

    Args:
        data_ph: data placeholder object providing get_input().
        model_params: dict with "leaky_param" and "weight_decay".

    Side effects:
        self.deconv_list -- [deconv1, deconv2, deconv3].
    """
    input_ph = data_ph.get_input()
    leaky_param = model_params["leaky_param"]
    wd = model_params["weight_decay"]
    hyper_list = list()   # pooled output of every block
    deconv_list = list()  # successive refinement outputs
    print(input_ph)

    # Block 1: 2 convs, 64 channels, /2 maxpool.
    conv11 = mf.add_leaky_relu(
        mf.convolution_2d_layer(input_ph, [3, 3, 3, 64], [1, 1],
                                "SAME", wd, "conv1_1"), leaky_param)
    conv12 = mf.add_leaky_relu(
        mf.convolution_2d_layer(conv11, [3, 3, 64, 64], [1, 1],
                                "SAME", wd, "conv1_2"), leaky_param)
    conv12_maxpool = mf.maxpool_2d_layer(conv12, [2, 2], [2, 2], "maxpool1")
    print(conv12_maxpool)
    hyper_list.append(conv12_maxpool)

    # Block 2: 2 convs, 128 channels, /2 maxpool.
    conv21 = mf.add_leaky_relu(
        mf.convolution_2d_layer(conv12_maxpool, [3, 3, 64, 128], [1, 1],
                                "SAME", wd, "conv2_1"), leaky_param)
    conv22 = mf.add_leaky_relu(
        mf.convolution_2d_layer(conv21, [3, 3, 128, 128], [1, 1],
                                "SAME", wd, "conv2_2"), leaky_param)
    conv22_maxpool = mf.maxpool_2d_layer(conv22, [2, 2], [2, 2], "maxpool2")
    print(conv22_maxpool)
    hyper_list.append(conv22_maxpool)

    # Block 3: 3 convs, 256 channels, /2 maxpool.
    conv31 = mf.add_leaky_relu(
        mf.convolution_2d_layer(conv22_maxpool, [3, 3, 128, 256], [1, 1],
                                "SAME", wd, "conv3_1"), leaky_param)
    conv32 = mf.add_leaky_relu(
        mf.convolution_2d_layer(conv31, [3, 3, 256, 256], [1, 1],
                                "SAME", wd, "conv3_2"), leaky_param)
    conv33 = mf.add_leaky_relu(
        mf.convolution_2d_layer(conv32, [3, 3, 256, 256], [1, 1],
                                "SAME", wd, "conv3_3"), leaky_param)
    conv33_maxpool = mf.maxpool_2d_layer(conv33, [2, 2], [2, 2], "maxpool3")
    print(conv33_maxpool)
    hyper_list.append(conv33_maxpool)

    # Block 4: 3 convs, 512 channels, /2 maxpool.
    conv41 = mf.add_leaky_relu(
        mf.convolution_2d_layer(conv33_maxpool, [3, 3, 256, 512], [1, 1],
                                "SAME", wd, "conv4_1"), leaky_param)
    conv42 = mf.add_leaky_relu(
        mf.convolution_2d_layer(conv41, [3, 3, 512, 512], [1, 1],
                                "SAME", wd, "conv4_2"), leaky_param)
    conv43 = mf.add_leaky_relu(
        mf.convolution_2d_layer(conv42, [3, 3, 512, 512], [1, 1],
                                "SAME", wd, "conv4_3"), leaky_param)
    conv43_maxpool = mf.maxpool_2d_layer(conv43, [2, 2], [2, 2], "maxpool4")
    print(conv43_maxpool)
    hyper_list.append(conv43_maxpool)

    # Block 5: 3 convs, 512 channels, /2 maxpool.
    # (The original no-op "conv53_maxpool = conv53_maxpool" was removed.)
    conv51 = mf.add_leaky_relu(
        mf.convolution_2d_layer(conv43_maxpool, [3, 3, 512, 512], [1, 1],
                                "SAME", wd, "conv5_1"), leaky_param)
    conv52 = mf.add_leaky_relu(
        mf.convolution_2d_layer(conv51, [3, 3, 512, 512], [1, 1],
                                "SAME", wd, "conv5_2"), leaky_param)
    conv53 = mf.add_leaky_relu(
        mf.convolution_2d_layer(conv52, [3, 3, 512, 512], [1, 1],
                                "SAME", wd, "conv5_3"), leaky_param)
    conv53_maxpool = mf.maxpool_2d_layer(conv53, [2, 2], [2, 2], "maxpool5")
    print(conv53_maxpool)
    hyper_list.append(conv53_maxpool)

    concat1 = self.pack_tensor_list(hyper_list)  # hypercolumn feature

    # Three refinement passes at 224x224, 3 output channels each.
    deconv1 = self.resize_deconv(concat1, [224, 224], 3, wd, 'deconv1')
    deconv_list.append(deconv1)
    concat2 = self.pack_tensor_list([concat1, deconv1])
    deconv2 = self.resize_deconv(concat2, [224, 224], 3, wd, 'deconv2')
    deconv_list.append(deconv2)
    # NOTE(review): concat3 fuses concat1 (not concat2) with deconv2 —
    # looks deliberate (raw hypercolumn + latest refinement), but
    # confirm concat2 was not intended here.
    concat3 = self.pack_tensor_list([concat1, deconv2])
    deconv3 = self.resize_deconv(concat3, [224, 224], 3, wd, 'deconv3')
    deconv_list.append(deconv3)
    self.deconv_list = deconv_list
def model_infer(self, data_ph, model_params):
    """Multi-stage variant: the base encoder/decoder produces conv7,
    then two refinement stages (_stage_conv) each take the raw input
    plus the previous prediction and re-estimate the map.

    Args:
        data_ph: data placeholder object providing get_input(),
            get_label() and get_mask().
        model_params: dict with "leaky_param" and "weight_decay";
            forwarded to _stage_conv.

    Side effects:
        self.predict_list -- [conv7, stage2, stage3].
        Image summaries registered under variable scope "image_sum".

    NOTE(review): _add_image_sum is called here with 3 tensors
    (input, label, mask) while sibling variants pass 4 (including the
    prediction) — confirm the intended signature.
    """
    input_ph = data_ph.get_input()
    leaky_param = model_params["leaky_param"]
    wd = model_params["weight_decay"]
    hyper_list = list()  # feature maps fused into the hypercolumn
    print(input_ph)
    # Block 1: 64 channels, then /2 maxpool.
    conv11 = mf.add_leaky_relu(
        mf.convolution_2d_layer(input_ph, [3, 3, 3, 64], [1, 1],
                                "SAME", wd, "conv1_1"), leaky_param)
    conv12 = mf.add_leaky_relu(
        mf.convolution_2d_layer(conv11, [3, 3, 64, 64], [1, 1],
                                "SAME", wd, "conv1_2"), leaky_param)
    conv12_maxpool = mf.maxpool_2d_layer(conv12, [3, 3],
                                         [2, 2], "maxpool1")
    print(conv12_maxpool)
    #hyper_list.append(conv12_maxpool)
    # Block 2: 128 channels, /2 maxpool; kept for the hypercolumn.
    conv21 = mf.add_leaky_relu(
        mf.convolution_2d_layer(conv12_maxpool, [3, 3, 64, 128], [1, 1],
                                "SAME", wd, "conv2_1"), leaky_param)
    conv22 = mf.add_leaky_relu(
        mf.convolution_2d_layer(conv21, [3, 3, 128, 128], [1, 1],
                                "SAME", wd, "conv2_2"), leaky_param)
    conv22_maxpool = mf.maxpool_2d_layer(conv22, [3, 3],
                                         [2, 2], "maxpool2")
    print(conv22_maxpool)
    hyper_list.append(conv22_maxpool)
    # Block 3: 256 channels; dilation-2 atrous conv instead of pooling.
    conv31 = mf.add_leaky_relu(
        mf.convolution_2d_layer(conv22_maxpool, [3, 3, 128, 256], [1, 1],
                                "SAME", wd, "conv3_1"), leaky_param)
    conv32 = mf.add_leaky_relu(
        mf.convolution_2d_layer(conv31, [3, 3, 256, 256], [1, 1],
                                "SAME", wd, "conv3_2"), leaky_param)
    atrous3 = mf.add_leaky_relu(
        mf.atrous_convolution_layer(conv32, [3, 3, 256, 256], 2,
                                    "SAME", wd, "atrous3"), leaky_param)
    #conv33 = mf.add_leaky_relu(mf.convolution_2d_layer(
    #       conv32, [3, 3, 256, 256], [1, 1],
    #       "SAME", wd, "conv3_3"), leaky_param)
    #conv33_maxpool = mf.maxpool_2d_layer(conv33, [3, 3],
    #       [2, 2], "maxpool3")
    print(atrous3)
    hyper_list.append(atrous3)
    # Block 4: 512 channels, ending in a dilation-2 atrous conv.
    conv41 = mf.add_leaky_relu(
        mf.convolution_2d_layer(atrous3, [3, 3, 256, 512], [1, 1],
                                "SAME", wd, "conv4_1"), leaky_param)
    conv42 = mf.add_leaky_relu(
        mf.convolution_2d_layer(conv41, [3, 3, 512, 512], [1, 1],
                                "SAME", wd, "conv4_2"), leaky_param)
    atrous4 = mf.add_leaky_relu(
        mf.atrous_convolution_layer(conv42, [3, 3, 512, 512], 2,
                                    "SAME", wd, "atrous4"), leaky_param)
    print(atrous4)
    hyper_list.append(atrous4)
    # Block 5: two stacked dilation-2 atrous convs.
    atrous51 = mf.add_leaky_relu(
        mf.atrous_convolution_layer(atrous4, [3, 3, 512, 512], 2,
                                    "SAME", wd, "atrous5_1"), leaky_param)
    atrous52 = mf.add_leaky_relu(
        mf.atrous_convolution_layer(atrous51, [3, 3, 512, 512], 2,
                                    "SAME", wd, "atrous5_2"), leaky_param)
    print(atrous52)
    #hyper_list.append(atrous52)
    hyper_list.append(atrous52)
    # Fuse the saved maps into one hypercolumn and compress to 512.
    hypercolumn = self._pack_tensor_list(hyper_list)
    print(hypercolumn)
    [b, w, h, c] = hypercolumn.get_shape().as_list()
    conv6 = mf.add_leaky_relu(
        mf.convolution_2d_layer(hypercolumn, [1, 1, c, 512], [1, 1],
                                "SAME", wd, "conv6"), leaky_param)
    # Learned upsampling with encoder skip connections.
    deconv1 = self._deconv2_wrapper(conv6, conv21, 256, wd, "deconv1")
    print(deconv1)
    deconv2 = self._deconv2_wrapper(deconv1, conv11, 64, wd, "deconv2")
    print(deconv2)
    conv7 = mf.add_leaky_relu(
        mf.convolution_2d_layer(deconv2, [1, 1, 64, 1], [1, 1],
                                "SAME", wd, "conv7"), leaky_param)
    print(conv7)
    # Refinement stages: each sees the input image plus the previous map.
    stage2 = self._stage_conv(data_ph, conv7, 2, model_params)
    print(stage2)
    stage3 = self._stage_conv(data_ph, stage2, 3, model_params)
    print(stage3)
    self.predict_list = list()
    self.predict_list.append(conv7)
    self.predict_list.append(stage2)
    self.predict_list.append(stage3)
    with tf.variable_scope("image_sum"):
        self._add_image_sum(data_ph.get_input(), data_ph.get_label(),
                            data_ph.get_mask())
from TensorflowToolbox.model_flow import model_func
import tensorflow as tf

if __name__ == "__main__":
    # Smoke test: push a constant grey image through two strided convs
    # and print the resulting tensor shapes.
    img = tf.constant(3, dtype=tf.float32, shape=[1, 224, 224, 1])
    first = model_func.convolution_2d_layer(
        img, [3, 3, 1, 1], [2, 2], "VALID", 0.01, "conv1")
    print(first)
    second = model_func.convolution_2d_layer(
        first, [3, 3, 1, 1], [2, 2], "VALID", 0.01, "conv2")
    print(second)