def inference(input_tensor, bg_tensor, regularizer=None):
    # do_conv(name, input_tensor, out_channel, ksize, stride=[1, 1, 1, 1],
    #         is_pretrain=True, dropout=False, regularizer=None)
    # Convolution stack for the ROI branch. Note that this variant iterates the
    # config dict directly, so the layer order follows the dict's key order.
    for key in Config.CONV_LAYERS_CONFIG:
        layer_config = Config.CONV_LAYERS_CONFIG[key]
        conv_res = do_conv(key, input_tensor, layer_config['deep'],
                           [layer_config['size'], layer_config['size']],
                           dropout=layer_config['dropout'])
        input_tensor = conv_res
        if layer_config['pooling']['exists']:
            pooling = pool(layer_config['pooling']['name'], input_tensor)
            input_tensor = pooling
    # Convolution stack for the background branch.
    for key in Config.CONV_LAYERS_CONFIG_BG:
        layer_config = Config.CONV_LAYERS_CONFIG_BG[key]
        conv_res = do_conv(key, bg_tensor, layer_config['deep'],
                           [layer_config['size'], layer_config['size']],
                           dropout=layer_config['dropout'])
        bg_tensor = conv_res
        if layer_config['pooling']['exists']:
            pooling = pool(layer_config['pooling']['name'], bg_tensor,
                           is_max_pool=layer_config['pooling']['max_pooling'])
            bg_tensor = pooling
    # Flatten both branches and concatenate along the feature axis.
    input_tensor = convert_two_dim(input_tensor)
    bg_tensor = convert_two_dim(bg_tensor)
    input_tensor = tf.concat([input_tensor, bg_tensor], axis=1)
    print(input_tensor)
    # FC_layer(layer_name, x, out_nodes, regularizer=None)
    fc1 = FC_layer('fc1', input_tensor, Config.FC_SIZE, regularizer)
    fc1 = batch_norm(fc1)
    fc2 = FC_layer('fc2', fc1, Config.OUTPUT_NODE, regularizer)
    return fc2
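# The loops above read a handful of keys from Config.CONV_LAYERS_CONFIG and
# Config.CONV_LAYERS_CONFIG_BG. A minimal sketch of the layout they imply;
# every concrete value below is an illustrative placeholder, not the project's
# real configuration.
class Config(object):
    CONV_LAYERS_CONFIG = {
        'conv1': {
            'deep': 32,      # out_channel handed to do_conv
            'size': 3,       # square kernel -> ksize [3, 3]
            'dropout': False,
            'pooling': {'exists': True, 'name': 'pooling1', 'max_pooling': True},
        },
        'conv2': {
            'deep': 64,
            'size': 3,
            'dropout': True,
            'pooling': {'exists': False},
        },
    }
    CONV_LAYERS_CONFIG_BG = CONV_LAYERS_CONFIG  # background branch, same layout
    FC_SIZE = 128
    OUTPUT_NODE = 2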
def inference(input_tensor, regularizer=None, return_feature=False):
    # do_conv(name, input_tensor, out_channel, ksize, stride=[1, 1, 1, 1],
    #         is_pretrain=True, dropout=False, regularizer=None)
    for key in Config.CONV_LAYERS_CONFIG:
        layer_config = Config.CONV_LAYERS_CONFIG[key]
        if layer_config['batch_norm']:
            input_tensor = batch_norm(input_tensor)
        conv_res = do_conv(key, input_tensor, layer_config['deep'],
                           [layer_config['size'], layer_config['size']],
                           dropout=layer_config['dropout'],
                           is_pretrain=layer_config['trainable'],
                           batch_normalization=layer_config['batch_norm'])
        input_tensor = conv_res
        if layer_config['pooling']['exists']:
            pooling = pool(layer_config['pooling']['name'], input_tensor)
            input_tensor = pooling
    # FC_layer(layer_name, x, out_nodes, regularizer=None)
    fc1 = FC_layer('fc1', input_tensor, Config.FC_SIZE, regularizer)
    fc1 = batch_norm(fc1)
    fc2 = FC_layer('fc2', fc1, Config.OUTPUT_NODE, regularizer)
    if return_feature:
        return fc2, fc1
    return fc2
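# A usage sketch for the variant above, assuming TensorFlow 1.x; the placeholder
# names, image size, and regularizer scale are illustrative assumptions, not
# values taken from the project's training script.
import tensorflow as tf

x = tf.placeholder(tf.float32, [Config.BATCH_SIZE, 64, 64, 1], name='x-input')
y_ = tf.placeholder(tf.int64, [Config.BATCH_SIZE], name='y-input')
regularizer = tf.contrib.layers.l2_regularizer(1e-4)
logits, feature = inference(x, regularizer=regularizer, return_feature=True)
loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y_, logits=logits))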
def inference(input_tensors, regularizer):
    CONV_LAYER_OUT = None
    for index, input_tensor in enumerate(input_tensors):
        # For each image scale, run the RNN part below.
        input_shape = input_tensor.get_shape().as_list()
        state = tf.zeros([Config.BATCH_SIZE, Config.STATE_FEATURE_DIM])
        with tf.variable_scope('rnn-part-' + str(index), reuse=False):
            # Parameters are shared across the different phases.
            for i in range(input_shape[3]):
                reuse = (i != 0)  # reuse conv weights after the first phase
                cur_input = input_tensor[:, :, :, i]
                cur_input = tf.reshape(
                    cur_input,
                    [input_shape[0], input_shape[1], input_shape[2], 1])
                for key in Config.CONV_LAYERS_CONFIG:
                    layer_config = Config.CONV_LAYERS_CONFIG[key]
                    conv_res = do_conv(
                        key, cur_input, layer_config['deep'],
                        [layer_config['size'], layer_config['size']],
                        dropout=layer_config['dropout'], reuse=reuse)
                    cur_input = conv_res
                    if layer_config['pooling']['exists']:
                        pooling = pool(
                            layer_config['pooling']['name'], cur_input,
                            is_max_pool=layer_config['pooling']['is_max'])
                        cur_input = pooling
                # Convolution finished; flatten into a 2-D tensor.
                cur_input = convert_two_dim(cur_input)
                print(cur_input)
                cur_input = tf.concat([cur_input, state], axis=1)
                print(cur_input)
                # Update the state.
                state = FC_layer('extract_state', cur_input,
                                 Config.STATE_FEATURE_DIM, regularizer, reuse)
        if CONV_LAYER_OUT is None:
            CONV_LAYER_OUT = state
        else:
            CONV_LAYER_OUT = tf.concat([CONV_LAYER_OUT, state], axis=1)
    input_tensor = CONV_LAYER_OUT
    for key in Config.FC_LAYERS_CONFIG:
        layer_config = Config.FC_LAYERS_CONFIG[key]
        cur_regularizer = regularizer if layer_config['regularizer'] else None
        input_tensor = FC_layer(key, input_tensor,
                                layer_config['size'], cur_regularizer)
        if layer_config['batch_norm']:
            input_tensor = batch_norm(input_tensor)
    return input_tensor
def inference(input_tensor, regularizer):
    input_shape = input_tensor.get_shape().as_list()
    CONV_OUT = None
    with tf.variable_scope('rnn-part'):
        for i in range(input_shape[3]):
            reuse = (i != 0)  # reuse conv weights after the first phase
            cur_input = input_tensor[:, :, :, i]
            cur_input = tf.reshape(
                cur_input,
                [input_shape[0], input_shape[1], input_shape[2], 1])
            layer_keys = list(Config.CONV_LAYERS_CONFIG)
            layer_keys.sort()
            for key in layer_keys:
                layer_config = Config.CONV_LAYERS_CONFIG[key]
                conv_res = do_conv(
                    key, cur_input, layer_config['deep'],
                    [layer_config['size'], layer_config['size']],
                    dropout=layer_config['dropout'], reuse=reuse)
                cur_input = conv_res
                if layer_config['pooling']['exists']:
                    pooling = pool(layer_config['pooling']['name'], cur_input)
                    cur_input = pooling
            # Convolution finished; flatten into a 2-D tensor.
            cur_input = convert_two_dim(cur_input)
            if CONV_OUT is None:
                CONV_OUT = cur_input
            else:
                CONV_OUT = tf.concat([CONV_OUT, cur_input], axis=1)
    input_tensor = CONV_OUT
    layer_keys = list(Config.FC_LAYERS_CONFIG)
    layer_keys.sort()
    for key in layer_keys:
        layer_config = Config.FC_LAYERS_CONFIG[key]
        cur_regularizer = regularizer if layer_config['regularizer'] else None
        input_tensor = FC_layer(key, input_tensor,
                                layer_config['size'], cur_regularizer)
        if layer_config['batch_norm']:
            input_tensor = batch_norm(input_tensor)
    return input_tensor
def inference(input_tensors, phase_names, regularizer=None, return_feature=False):
    # input_tensors holds all three phases; its shape is
    # [batch_size, image_w, image_h, 3].
    print(input_tensors.get_shape().as_list())
    # do_conv(name, input_tensor, out_channel, ksize, stride=[1, 1, 1, 1],
    #         is_pretrain=True, dropout=False, regularizer=None)
    CONV_OUTPUT = None
    for phase_index, phase in enumerate(phase_names):
        phase_config = Config.CONV_LAYERS_CONFIG[phase]
        conv_names = list(phase_config.keys())
        conv_names.sort()
        input_tensor = input_tensors[:, :, :, phase_index]
        # The slice drops the channel axis; restore it so do_conv sees a
        # 4-D tensor.
        input_tensor = tf.expand_dims(input_tensor, axis=3)
        for conv_name in conv_names:
            conv_layer_config = phase_config[conv_name]
            conv_res = do_conv(
                conv_name, input_tensor, conv_layer_config['deep'],
                [conv_layer_config['size'], conv_layer_config['size']],
                dropout=conv_layer_config['dropout'],
                is_pretrain=conv_layer_config['trainable'],
                batch_normalization=conv_layer_config['batch_norm'])
            input_tensor = conv_res
            if conv_layer_config['pooling']['exists']:
                pooling = pool(conv_layer_config['pooling']['name'],
                               input_tensor)
                input_tensor = pooling
        input_tensor = convert_two_dim(input_tensor)
        if CONV_OUTPUT is not None:
            CONV_OUTPUT = tf.concat([CONV_OUTPUT, input_tensor], axis=1)
        else:
            CONV_OUTPUT = input_tensor
    # FC_layer(layer_name, x, out_nodes, regularizer=None)
    fc1 = FC_layer('fc1', CONV_OUTPUT, Config.FC_SIZE, regularizer)
    fc1 = batch_norm(fc1)
    fc2 = FC_layer('fc2', fc1, Config.OUTPUT_NODE, regularizer)
    if return_feature:
        return fc2, fc1
    return fc2
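# Unlike the flat layout sketched after the first variant, this function indexes
# Config.CONV_LAYERS_CONFIG first by phase name and then by conv-layer name.
# A hedged sketch of the nested layout it implies; the phase names and values
# here are illustrative assumptions only.
PHASE_NAMES = ['NC', 'ART', 'PV']  # hypothetical multi-phase names
CONV_LAYERS_CONFIG = {
    phase: {
        'conv1-' + phase: {
            'deep': 32, 'size': 3, 'dropout': False,
            'trainable': True, 'batch_norm': True,
            'pooling': {'exists': True, 'name': 'pooling1-' + phase},
        },
    }
    for phase in PHASE_NAMES
}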
def inference(input_tensors, regularizer=None):
    conv_layers_output = None
    for index, input_tensor in enumerate(input_tensors):
        # Share conv weights across all inputs after the first.
        reuse = None if index == 0 else True
        for key in Config.CONV_LAYERS_CONFIG:
            layer_config = Config.CONV_LAYERS_CONFIG[key]
            conv_res = do_conv(key, input_tensor, layer_config['deep'],
                               [layer_config['size'], layer_config['size']],
                               dropout=layer_config['dropout'], reuse=reuse)
            input_tensor = conv_res
            if layer_config['pooling']['exists']:
                pooling = pool(layer_config['pooling']['name'], input_tensor)
                input_tensor = pooling
        if conv_layers_output is None:
            conv_layers_output = convert_two_dim(input_tensor)
        else:
            conv_layers_output = tf.concat(
                [conv_layers_output, convert_two_dim(input_tensor)], 1)
    input_tensor = conv_layers_output
    for key in Config.FC_LAYERS_CONFIG:
        layer_config = Config.FC_LAYERS_CONFIG[key]
        cur_regularizer = regularizer if layer_config['regularizer'] else None
        input_tensor = FC_layer(key, input_tensor,
                                layer_config['size'], cur_regularizer)
        if layer_config['batch_norm']:
            input_tensor = batch_norm(input_tensor)
    return input_tensor
def inference(input_tensors, bg_tensors, regularizer):
    CONV_OUTPUT = None
    for index, input_tensor in enumerate(input_tensors):
        # ROI and background tensors at the same scale.
        bg_tensor = bg_tensors[index]
        shape = input_tensor.get_shape().as_list()
        shape_bg = bg_tensor.get_shape().as_list()
        state = tf.zeros(shape=[shape[0], Config.STATE_FEATURE_DIM],
                         dtype=tf.float32)
        with tf.variable_scope('rnn-part-' + str(index)):
            for i in range(shape[3]):
                # Take the data of a single phase.
                cur_input_tensor = input_tensor[:, :, :, i]
                cur_input_tensor = tf.reshape(
                    cur_input_tensor,
                    shape=[shape[0], shape[1], shape[2], 1])
                cur_bg_tensor = bg_tensor[:, :, :, i]
                cur_bg_tensor = tf.reshape(
                    cur_bg_tensor,
                    shape=[shape_bg[0], shape_bg[1], shape_bg[2], 1])
                reuse = (i != 0)  # reuse weights after the first phase
                # Compute the ROI features for this phase.
                for key in Config.CONV_LAYERS_CONFIG:
                    layer_config = Config.CONV_LAYERS_CONFIG[key]
                    # do_conv(name, input_tensor, out_channel, ksize,
                    #         stride=[1, 1, 1, 1], is_pretrain=True,
                    #         dropout=False, regularizer=None, reuse=False)
                    conv_res = do_conv(
                        key, cur_input_tensor, layer_config['deep'],
                        [layer_config['size'], layer_config['size']],
                        dropout=layer_config['dropout'], reuse=reuse)
                    cur_input_tensor = conv_res
                    if layer_config['pooling']['exists']:
                        pooling = pool(layer_config['pooling']['name'],
                                       cur_input_tensor)
                        cur_input_tensor = pooling
                # Compute the background features for this phase.
                for key in Config.CONV_LAYERS_CONFIG_BG:
                    layer_config = Config.CONV_LAYERS_CONFIG_BG[key]
                    conv_res = do_conv(
                        key, cur_bg_tensor, layer_config['deep'],
                        [layer_config['size'], layer_config['size']],
                        dropout=layer_config['dropout'], reuse=reuse)
                    cur_bg_tensor = conv_res
                    if layer_config['pooling']['exists']:
                        pooling = pool(layer_config['pooling']['name'],
                                       cur_bg_tensor)
                        cur_bg_tensor = pooling
                cur_input_tensor = convert_two_dim(cur_input_tensor)
                cur_bg_tensor = convert_two_dim(cur_bg_tensor)
                fc_input = tf.concat(
                    [cur_input_tensor, cur_bg_tensor, state], axis=1)
                # Update the state.
                state = FC_layer('extract_state', fc_input,
                                 Config.STATE_FEATURE_DIM, regularizer, reuse)
        if CONV_OUTPUT is None:
            CONV_OUTPUT = state
        else:
            CONV_OUTPUT = tf.concat([CONV_OUTPUT, state], axis=1)
    input_tensor = CONV_OUTPUT
    for key in Config.FC_LAYERS_CONFIG:
        layer_config = Config.FC_LAYERS_CONFIG[key]
        cur_regularizer = regularizer if layer_config['regularizer'] else None
        input_tensor = FC_layer(key, input_tensor,
                                layer_config['size'], cur_regularizer)
        if layer_config['batch_norm']:
            input_tensor = batch_norm(input_tensor)
    return input_tensor
def inference(input_tensors, regularizer):
    CONV_LAYER_OUT = None
    for index, input_tensor in enumerate(input_tensors):
        # For each image scale, run the RNN part below.
        input_shape = input_tensor.get_shape().as_list()
        with tf.variable_scope('rnn-part-' + str(index), reuse=False):
            # Parameters are shared across the different phases.
            ONE_SIZE_FEATURE = None
            for i in range(input_shape[3]):
                reuse = (i != 0)  # reuse conv weights after the first phase
                cur_input = input_tensor[:, :, :, i]
                cur_input = tf.reshape(
                    cur_input,
                    [input_shape[0], input_shape[1], input_shape[2], 1])
                for key in Config.CONV_LAYERS_CONFIG:
                    layer_config = Config.CONV_LAYERS_CONFIG[key]
                    cur_input = batch_norm(cur_input)
                    conv_res = do_conv(
                        key, cur_input, layer_config['deep'],
                        [layer_config['size'], layer_config['size']],
                        dropout=layer_config['dropout'], reuse=reuse)
                    cur_input = conv_res
                    if layer_config['pooling']['exists']:
                        pooling = pool(
                            layer_config['pooling']['name'], cur_input,
                            is_max_pool=layer_config['pooling']['is_max'])
                        cur_input = pooling
                # Convolution finished; flatten into a 2-D tensor.
                cur_input = convert_two_dim(cur_input)
                print(cur_input)
                if ONE_SIZE_FEATURE is None:
                    ONE_SIZE_FEATURE = cur_input
                else:
                    ONE_SIZE_FEATURE = tf.concat(
                        [ONE_SIZE_FEATURE, cur_input], axis=1)
            FC_OUT = FC_layer('CONVERT-' + str(index), ONE_SIZE_FEATURE,
                              Config.SIZE_FEATURE_DIM, regularizer)
            FC_OUT = batch_norm(FC_OUT)
            print('FC OUT is', FC_OUT)
        if CONV_LAYER_OUT is None:
            CONV_LAYER_OUT = FC_OUT
        else:
            CONV_LAYER_OUT = tf.concat([CONV_LAYER_OUT, FC_OUT], axis=1)
        print('CONV_LAYER_OUT is', CONV_LAYER_OUT)
    input_tensor = CONV_LAYER_OUT
    for key in Config.FC_LAYERS_CONFIG:
        layer_config = Config.FC_LAYERS_CONFIG[key]
        cur_regularizer = regularizer if layer_config['regularizer'] else None
        input_tensor = FC_layer(key, input_tensor,
                                layer_config['size'], cur_regularizer)
        if layer_config['batch_norm']:
            input_tensor = batch_norm(input_tensor)
    return input_tensor
def convlayers(self):
    self.parameters = []
    # Zero-mean input (disabled in favor of batch normalization):
    # with tf.name_scope('preprocess') as scope:
    #     mean = tf.constant([123.68, 116.779, 103.939], dtype=tf.float32,
    #                        shape=[1, 1, 1, 3], name='img_mean')
    #     images = self.imgs - mean
    # Normalize the input data.
    images = batch_norm(self.imgs)
    # images = self.imgs
    # conv1_1
    # do_conv(name, input_tensor, out_channel, ksize, stride=[1, 1, 1, 1],
    #         is_pretrain=True)
    self.conv1_1 = do_conv('conv1_1', images, 64, [3, 3],
                           is_pretrain=self.trainable['conv1_1'])
    self.conv1_2 = do_conv('conv1_2', self.conv1_1, 64, [3, 3],
                           is_pretrain=self.trainable['conv1_2'],
                           batch_normalization=False)
    self.pooling1 = pool('pooling1', self.conv1_2, is_max_pool=True)
    self.conv2_1 = do_conv('conv2_1', self.pooling1, 128, [3, 3],
                           is_pretrain=self.trainable['conv2_1'])
    self.conv2_2 = do_conv('conv2_2', self.conv2_1, 128, [3, 3],
                           is_pretrain=self.trainable['conv2_2'],
                           batch_normalization=False)
    self.pooling2 = pool('pooling2', self.conv2_2, is_max_pool=True)
    self.conv3_1 = do_conv('conv3_1', self.pooling2, 256, [3, 3],
                           is_pretrain=self.trainable['conv3_1'])
    self.conv3_2 = do_conv('conv3_2', self.conv3_1, 256, [3, 3],
                           is_pretrain=self.trainable['conv3_2'])
    self.conv3_3 = do_conv('conv3_3', self.conv3_2, 256, [3, 3],
                           is_pretrain=self.trainable['conv3_3'],
                           batch_normalization=False)
    self.pooling3 = pool('pooling3', self.conv3_3, is_max_pool=True)
    self.conv4_1 = do_conv('conv4_1', self.pooling3, 512, [3, 3],
                           is_pretrain=self.trainable['conv4_1'])
    self.conv4_2 = do_conv('conv4_2', self.conv4_1, 512, [3, 3],
                           is_pretrain=self.trainable['conv4_2'])
    self.conv4_3 = do_conv('conv4_3', self.conv4_2, 512, [3, 3],
                           is_pretrain=self.trainable['conv4_3'],
                           batch_normalization=False)
    self.pooling4 = pool('pooling4', self.conv4_3, is_max_pool=True)
    self.conv5_1 = do_conv('conv5_1', self.pooling4, 512, [3, 3],
                           is_pretrain=self.trainable['conv5_1'])
    self.conv5_2 = do_conv('conv5_2', self.conv5_1, 512, [3, 3],
                           is_pretrain=self.trainable['conv5_2'])
    self.conv5_3 = do_conv('conv5_3', self.conv5_2, 512, [3, 3],
                           is_pretrain=self.trainable['conv5_3'],
                           batch_normalization=False)
    self.pooling5 = pool('pooling5', self.conv5_3, is_max_pool=True)
    self.convs_output = self.pooling5
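# Every variant above leans on the same small set of helpers (do_conv, pool,
# FC_layer, batch_norm, convert_two_dim) defined elsewhere in the repository.
# A minimal TensorFlow 1.x sketch matching the signatures quoted in the comments
# above; this is a readability aid built on assumptions, not the project's
# actual implementation.
import tensorflow as tf

def do_conv(name, input_tensor, out_channel, ksize, stride=[1, 1, 1, 1],
            is_pretrain=True, dropout=False, regularizer=None,
            batch_normalization=False, reuse=False):
    in_channel = input_tensor.get_shape().as_list()[-1]
    with tf.variable_scope(name, reuse=reuse):
        w = tf.get_variable(
            'weights', ksize + [in_channel, out_channel], trainable=is_pretrain,
            initializer=tf.truncated_normal_initializer(stddev=0.1))
        b = tf.get_variable('biases', [out_channel],
                            initializer=tf.constant_initializer(0.0))
        if regularizer is not None:
            tf.add_to_collection('losses', regularizer(w))
        out = tf.nn.conv2d(input_tensor, w, strides=stride, padding='SAME')
        out = tf.nn.relu(tf.nn.bias_add(out, b))
        if batch_normalization:
            out = batch_norm(out)
        if dropout:
            out = tf.nn.dropout(out, keep_prob=0.5)
    return out

def pool(name, input_tensor, is_max_pool=True):
    op = tf.nn.max_pool if is_max_pool else tf.nn.avg_pool
    return op(input_tensor, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
              padding='SAME', name=name)

def convert_two_dim(input_tensor):
    # Flatten [batch, h, w, c] into [batch, h * w * c].
    shape = input_tensor.get_shape().as_list()
    return tf.reshape(input_tensor, [-1, shape[1] * shape[2] * shape[3]])

def FC_layer(layer_name, x, out_nodes, regularizer=None, reuse=False):
    # Flattening here would explain why some variants feed a 4-D conv output
    # straight into FC_layer without calling convert_two_dim first.
    shape = x.get_shape().as_list()
    size = 1
    for dim in shape[1:]:
        size *= dim
    with tf.variable_scope(layer_name, reuse=reuse):
        flat_x = tf.reshape(x, [-1, size])
        w = tf.get_variable(
            'weights', [size, out_nodes],
            initializer=tf.truncated_normal_initializer(stddev=0.1))
        b = tf.get_variable('biases', [out_nodes],
                            initializer=tf.constant_initializer(0.0))
        if regularizer is not None:
            tf.add_to_collection('losses', regularizer(w))
        return tf.nn.relu(tf.matmul(flat_x, w) + b)

def batch_norm(x):
    # Plain batch normalization without learned scale/offset parameters.
    epsilon = 1e-3
    axes = list(range(len(x.get_shape().as_list()) - 1))
    mean, variance = tf.nn.moments(x, axes)
    return tf.nn.batch_normalization(x, mean, variance, None, None, epsilon)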