def get_image_embedding(self, images, reuse=None):
  config = self.model_config['embed_config']
  arg_scope = convolutional_alexnet_arg_scope(
    config, trainable=config['train_embedding'], is_training=False)

  @functools.wraps(convolutional_alexnet)
  def embedding_fn(images, reuse=False):
    with slim.arg_scope(arg_scope):
      return convolutional_alexnet(images, reuse=reuse)

  embed_s_c5, embed_s_c4, embed_s_c3, _ = embedding_fn(images, reuse)
  embed_a_c5, embed_a_c4, embed_a_c3 = embed_alexnet(images)
  # Alternative embeddings / returns kept for reference:
  # embed_v_43, embed_v_42, embed_v_41 = embed_vgg16(images)
  # return embed_v_43, embed_v_42, embed_v_41
  # return embed_a_c5, embed_a_c4, embed_a_c3
  # return embed_s_c5, embed_s_c4, embed_s_c3
  return embed_s_c5, embed_s_c4, embed_s_c3, embed_a_c5, embed_a_c4, embed_a_c3
def build_image_embeddings(self, reuse=False): """Builds the image model subgraph and generates image embeddings Inputs: self.exemplars: A tensor of shape [batch, hz, wz, 3] self.instances: A tensor of shape [batch, hx, wx, 3] Outputs: self.exemplar_embeds: A Tensor of shape [batch, hz_embed, wz_embed, embed_dim] self.instance_embeds: A Tensor of shape [batch, hx_embed, wx_embed, embed_dim] """ config = self.model_config['embed_config'] # ============================================================================= # arg_scope = convolutional_alexnet_arg_scope(config, # trainable=config['train_embedding'], # is_training=self.is_training()) # ============================================================================= arg_scope = convolutional_alexnet_arg_scope(config, trainable=False, is_training=False) @functools.wraps(convolutional_alexnet) def embedding_fn(images, reuse=False): with slim.arg_scope(arg_scope): return convolutional_alexnet(images, reuse=reuse) self.exemplar_embeds_c5, self.exemplar_embeds_c4, self.exemplar_embeds_c3, _ = embedding_fn( self.exemplars, reuse=reuse) self.instance_embeds_c5, self.instance_embeds_c4, self.instance_embeds_c3, _ = embedding_fn( self.instances, reuse=True)
def get_image_embedding(self, images, reuse=None):
  config = self.model_config['embed_config']
  arg_scope = convolutional_alexnet_arg_scope(
    config, trainable=config['train_embedding'], is_training=False)

  @functools.wraps(convolutional_alexnet)
  def embedding_fn(images, reuse=False):
    with slim.arg_scope(arg_scope):
      return convolutional_alexnet(images, reuse=reuse)

  embed, _ = embedding_fn(images, reuse)
  return embed
def get_image_embedding(self, images, reuse=None):
  config = self.model_config['embed_config']
  arg_scope = convolutional_alexnet_arg_scope(config,
                                              trainable=config['train_embedding'],
                                              is_training=False)

  @functools.wraps(convolutional_alexnet)
  def embedding_fn(images, reuse=False):
    with slim.arg_scope(arg_scope):
      return convolutional_alexnet(images, reuse=reuse)

  embed, _ = embedding_fn(images, reuse)
  return embed
def build_image_embeddings_alexnet(self, reuse=False):
  model_config = self.model_config['embed_config']
  alexnet_config = self.model_config['alexnet']
  arg_scope = convolutional_alexnet_arg_scope(
    model_config, trainable=model_config['train_embedding'],
    is_training=self.is_training())

  @functools.wraps(convolutional_alexnet)
  def embedding_fn(images, reuse=False):
    with slim.arg_scope(arg_scope):
      return convolutional_alexnet(
        images, reuse=reuse, split=alexnet_config['split'],
        depthwise_list=alexnet_config['depthwise_list'])

  self.exemplar_embeds, _ = embedding_fn(self.exemplars, reuse=reuse)
  self.instance_embeds, _ = embedding_fn(self.instances, reuse=True)
def get_scope_and_backbone(config, is_training):
  embedding_name = config['embedding_name']
  if embedding_name == 'convolutional_alexnet':
    arg_scope = convolutional_alexnet_arg_scope(config,
                                                trainable=config['train_embedding'],
                                                is_training=is_training)
    backbone_fn = convolutional_alexnet
  elif embedding_name == 'convolutional_alexnet_gn':
    arg_scope = convolutional_alexnet_gn_arg_scope(config,
                                                   trainable=config['train_embedding'])
    backbone_fn = convolutional_alexnet
  elif embedding_name == 'alexnet_tweak':
    arg_scope = alexnet_tweak_arg_scope(config,
                                        trainable=config['train_embedding'],
                                        is_training=is_training)
    backbone_fn = alexnet_tweak
  elif embedding_name == 'featureExtract_alexnet':
    arg_scope = featureExtract_alexnet_arg_scope(config,
                                                 trainable=config['train_embedding'],
                                                 is_training=is_training)
    backbone_fn = featureExtract_alexnet
  elif embedding_name == 'featureExtract_alexnet_fixedconv3':
    arg_scope = featureExtract_alexnet_arg_scope(config,
                                                 trainable=config['train_embedding'],
                                                 is_training=is_training)
    backbone_fn = featureExtract_alexnet_fixedconv3
  else:
    # `assert("...")` is always truthy and never fires; raise instead so
    # unsupported embedding names fail loudly.
    raise ValueError('Only AlexNet variants are supported now, got: %s' % embedding_name)
  return arg_scope, backbone_fn
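# A minimal usage sketch for get_scope_and_backbone, not taken from the original
# code. It assumes TF1 graph mode with tf.contrib.slim and a config dict carrying
# the keys read above; the placeholder shape, the build_embedding helper, and the
# example 'embedding_name' value are hypothetical.
import tensorflow as tf
import tensorflow.contrib.slim as slim

def build_embedding(images, config, is_training, reuse=False):
  # Pick the arg_scope / backbone pair for the configured variant, then run the
  # backbone under that scope so its layers inherit the shared normalization,
  # initializer, and trainability settings.
  arg_scope, backbone_fn = get_scope_and_backbone(config, is_training)
  with slim.arg_scope(arg_scope):
    # The backbones used above typically return (embeddings, end_points).
    return backbone_fn(images, reuse=reuse)

# Hypothetical call:
# embed_config = {'embedding_name': 'convolutional_alexnet', 'train_embedding': False}
# exemplars = tf.placeholder(tf.float32, [None, 127, 127, 3])
# exemplar_embeds, end_points = build_embedding(exemplars, embed_config, is_training=False)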
def get_image_embedding(self, images, reuse=None):
  config = self.model_config['embed_config']
  gcn_config = self.gcn_config
  arg_scope = convolutional_alexnet_arg_scope(
    config, trainable=config['train_embedding'], is_training=False)

  def embedding_fn(images, reuse=False):
    with slim.arg_scope(arg_scope):
      if config['use_full']:
        return convolutional_alexnet_full(images, reuse=tf.AUTO_REUSE)
      else:
        return convolutional_alexnet_me(images, reuse=tf.AUTO_REUSE)

  def embedding_fn_ins(inputs, channels, reuse=tf.AUTO_REUSE):
    return instance_layer(inputs, channels, reuse=reuse)

  embed_ini = embedding_fn(images, reuse)
  channels = gcn_config['g2_output']
  embed = embedding_fn_ins(embed_ini, channels, reuse=tf.AUTO_REUSE)
  return [embed_ini, embed]
def build_image_embeddings(self, reuse=False): """Builds the image model subgraph and generates image embeddings Inputs: self.exemplars: A tensor of shape [batch, hz, wz, 3] self.instances: A tensor of shape [batch, hx, wx, 3] Outputs: self.exemplar_embeds: A Tensor of shape [batch, hz_embed, wz_embed, embed_dim] self.instance_embeds: A Tensor of shape [batch, hx_embed, wx_embed, embed_dim] """ config = self.model_config['embed_config'] arg_scope = convolutional_alexnet_arg_scope(config, trainable=config['train_embedding'], is_training=self.is_training()) @functools.wraps(convolutional_alexnet) def embedding_fn(images, reuse=False): with slim.arg_scope(arg_scope): return convolutional_alexnet(images, reuse=reuse) self.exemplar_embeds, _ = embedding_fn(self.exemplars, reuse=reuse) self.instance_embeds, _ = embedding_fn(self.instances, reuse=True)
with tf.Session() as sess:
  template_image = tf.placeholder(tf.float32, shape=[size_z, size_z, 3],
                                  name='template_image')
  input_image = tf.placeholder(tf.float32, shape=[args.scale, size_x, size_x, 3],
                               name='input_image')
  template_image = tf.expand_dims(template_image, 0)
  embed_config = model_config['embed_config']

  # Build the CNN that extracts features from either the template image or the input image.
  feature_extractor = model_config['embed_config']['feature_extractor']
  if feature_extractor == "alexnet":
    alexnet_config = model_config['alexnet']
    arg_scope = convolutional_alexnet_arg_scope(
      embed_config, trainable=embed_config['train_embedding'], is_training=False)
    with slim.arg_scope(arg_scope):
      embed_x, end_points = convolutional_alexnet(
        input_image, reuse=False, split=alexnet_config['split'])
      embed_z, end_points_z = convolutional_alexnet(
        template_image, reuse=True, split=alexnet_config['split'])
  elif feature_extractor == "mobilenet_v1":
    mobilenet_config = model_config['mobilenet_v1']
    with slim.arg_scope(
        mobilenet_v1.mobilenet_v1_arg_scope(is_training=False)):
      with tf.variable_scope('MobilenetV1', reuse=False) as scope:
        embed_x, end_points = mobilenet_v1.mobilenet_v1_base(
          input_image,
          final_endpoint=mobilenet_config['final_endpoint'],