def embedding_fn(images, reuse=False):
    """Embed `images` with the convolutional AlexNet backbone.

    Runs under the enclosing `arg_scope` and forwards the `split` /
    `depthwise_list` options from the surrounding `alexnet_config`
    closure. Presumably returns the `(features, end_points)` pair that
    `convolutional_alexnet` produces — confirm against its definition.

    Args:
        images: input image tensor to embed.
        reuse: whether to reuse the backbone's variables.
    """
    split_cfg = alexnet_config['split']
    depthwise_cfg = alexnet_config['depthwise_list']
    with slim.arg_scope(arg_scope):
        return convolutional_alexnet(
            images,
            reuse=reuse,
            split=split_cfg,
            depthwise_list=depthwise_cfg,
        )
def embedding_fn(images, reuse=False):
    """Embed `images` with the convolutional AlexNet backbone.

    Plain variant: no split/depthwise configuration is passed, only the
    enclosing `arg_scope` is applied. Return value is whatever
    `convolutional_alexnet` yields (presumably features plus end points —
    confirm against its definition).

    Args:
        images: input image tensor to embed.
        reuse: whether to reuse the backbone's variables.
    """
    with slim.arg_scope(arg_scope):
        embedded = convolutional_alexnet(images, reuse=reuse)
    return embedded
# NOTE(review): builds the inference graph inputs (a batch of `args.scale`
# search images of size `size_x`, plus the expanded template image) and then
# the feature-extraction subgraph for either the "alexnet" or "mobilenet_v1"
# backbone, selected by model_config['embed_config']['feature_extractor'].
# NOTE(review): this span has been collapsed onto one physical line by a
# formatting/extraction step — as written, the interior `# build cnn ...`
# comment swallows the remainder of the line, and the trailing
# `with tf.variable_scope('MobilenetV1', reuse=True) as scope:` is cut off
# mid-block (its body lies outside this chunk). Restore the original line
# breaks before running; do not edit logic here without the full file.
# NOTE(review): `feature_extactor` and `mobilenent_config` are misspellings
# used consistently within this span — renaming is safe only once the whole
# file is visible.
input_image = tf.placeholder(tf.float32, shape=[args.scale, size_x, size_x, 3], name='input_image') template_image = tf.expand_dims(template_image, 0) embed_config = model_config['embed_config'] # build cnn for feature extraction from either template image or input image feature_extactor = model_config['embed_config']['feature_extractor'] if feature_extactor == "alexnet": alexnet_config = model_config['alexnet'] arg_scope = convolutional_alexnet_arg_scope( embed_config, trainable=embed_config['train_embedding'], is_training=False) with slim.arg_scope(arg_scope): embed_x, end_points = convolutional_alexnet( input_image, reuse=False, split=alexnet_config['split']) embed_z, end_points_z = convolutional_alexnet( template_image, reuse=True, split=alexnet_config['split']) elif feature_extactor == "mobilenet_v1": mobilenent_config = model_config['mobilenet_v1'] with slim.arg_scope( mobilenet_v1.mobilenet_v1_arg_scope(is_training=False)): with tf.variable_scope('MobilenetV1', reuse=False) as scope: embed_x, end_points = mobilenet_v1.mobilenet_v1_base( input_image, final_endpoint=mobilenent_config['final_endpoint'], conv_defs=mobilenet.CONV_DEFS, depth_multiplier=mobilenent_config['depth_multiplier'], scope=scope) with tf.variable_scope('MobilenetV1', reuse=True) as scope: