def __init__(self, cache_folder, num_classes=1000, batch_size=1):
    # params
    self.batch_size = batch_size
    self.scale_size = vgg.vgg_16.default_image_size
    self.img_mean = np.array([104., 117., 124.])  # per-channel mean (BGR order)
    # Runtime params
    with tf.device('/cpu:0'):
        self.input_images = tf.placeholder(
            tf.float32, [self.batch_size, None, None, 3])
    # checkpoints_dir = os.path.join(cache_folder, 'checkpoints_vgg')
    checkpoints_dir = os.path.join(cache_folder, 'checkpoints')
    vgg_var_scope = 'vgg_16'
    with tf.variable_scope(vgg_var_scope, reuse=False):
        with slim.arg_scope(vgg.vgg_arg_scope(bn=False, is_training=False)):
            _, end_points = vgg.vgg_16_short(self.input_images,
                                             num_classes=num_classes,
                                             is_training=False)
    # self.scores = end_points['vgg_16/fc8/reduced']
    self.scores = end_points['vgg_16/fc6/reduced']
    # Build the restorer and session, then load the fine-tuned weights
    restorer = get_init_restorer()
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    init_op = tf.global_variables_initializer()
    self.sess = tf.Session(config=config)
    print(str(datetime.now()) + ': Start Init')
    # restorer.restore(self.sess, os.path.join(checkpoints_dir, 'fine_tuned-8000'))
    restorer.restore(self.sess,
                     os.path.join(checkpoints_dir, 'fine_tuned-20000'))
    print(str(datetime.now()) + ': Finish Init')
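
# Usage sketch for the scorer above (assumptions: `VggScorer` stands in for
# the unnamed class that owns this __init__, and 'example.jpg' is any test
# image; neither name appears in the original source). cv2.imread already
# returns BGR, which matches the BGR-ordered mean above.
import cv2
import numpy as np

scorer = VggScorer(cache_folder='./cache', num_classes=1000)
img = cv2.imread('example.jpg').astype(np.float32)            # BGR, HxWx3
img = cv2.resize(img, (scorer.scale_size, scorer.scale_size))
img -= scorer.img_mean                                        # subtract BGR mean
scores = scorer.sess.run(
    scorer.scores, feed_dict={scorer.input_images: img[np.newaxis]})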
def __init__(self, which_layer='pool4', which_snapshot=200000, from_scratch=False):
    # params
    self.batch_size = 40
    self.scale_size = vgg.vgg_16.default_image_size
    # Runtime params
    checkpoints_dir = '/data2/xuyangf/OcclusionProject/NaiveVersion/checkpoint'
    tf.logging.set_verbosity(tf.logging.INFO)
    # Create the model, use the default arg scope to configure the batch norm parameters.
    with tf.device('/cpu:0'):
        self.input_images = tf.placeholder(
            tf.float32,
            [self.batch_size, self.scale_size, self.scale_size, 3])
    with tf.variable_scope('vgg_16', reuse=False):
        with slim.arg_scope(vgg.vgg_arg_scope()):
            _, vgg_end_points = vgg.vgg_16(self.input_images,
                                           num_classes=100,
                                           is_training=False,
                                           dropout_keep_prob=1)
    # self.pool4 = vgg_end_points['vgg_16/pool4']
    # with tf.variable_scope('VC', reuse=False):
    #     self.tight_loss, self.tight_end_points = online_clustering(self.pool4, 512)
    self.features = vgg_end_points['vgg_16/' + which_layer]  # TODO
    # Create restorer and saver
    restorer = get_init_restorer()
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    init_op = tf.global_variables_initializer()
    # Run the session:
    self.sess = tf.Session(config=config)
    print(str(datetime.now()) + ': Start Init')
    if which_snapshot == 0:  # Start from a pre-trained vgg ckpt
        if from_scratch:
            self.sess.run(init_op)
        else:
            restorer.restore(self.sess,
                             os.path.join(checkpoints_dir, 'fine_tuned'))
    else:  # Start from the last time
        # sess.run(init_op)
        restorer.restore(
            self.sess,
            os.path.join(checkpoints_dir,
                         'fine_tuned-' + str(which_snapshot)))
    print(str(datetime.now()) + ': Finish Init')
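
# Feature-extraction sketch for the extractor above (assumed names:
# `Extractor` for the class, `images` for a preprocessed float32 array of
# shape [N, 224, 224, 3]). The placeholder is fixed at batch_size=40, so
# the last chunk is zero-padded and the padding is sliced off afterwards.
import numpy as np

ext = Extractor(which_layer='pool4', which_snapshot=200000)
feats = []
for s in range(0, len(images), ext.batch_size):
    chunk = images[s:s + ext.batch_size]
    pad = ext.batch_size - len(chunk)
    if pad:
        chunk = np.concatenate(
            [chunk, np.zeros((pad,) + chunk.shape[1:], chunk.dtype)])
    out = ext.sess.run(ext.features, feed_dict={ext.input_images: chunk})
    feats.append(out[:ext.batch_size - pad])
features = np.concatenate(feats)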
def __init__(self, mylayer):
    self.batch_size = 1
    self.scale_size = vgg.vgg_16.default_image_size
    # Spatial size of the feature map after `mylayer` poolings; integer
    # division keeps the placeholder shape an int under Python 3.
    self.feature_size = 224 // pow(2, mylayer)
    self.featDim_set = [64, 128, 256, 512, 512]
    self.featuredim = self.featDim_set[int(mylayer) - 1]
    tf.logging.set_verbosity(tf.logging.INFO)
    with tf.device('/cpu:0'):
        self.input_images = tf.placeholder(
            tf.float32,
            [self.batch_size, self.scale_size, self.scale_size, 3])
        self.input_features = tf.placeholder(tf.float32, [
            self.batch_size, self.feature_size, self.feature_size,
            self.featuredim
        ])
    with tf.variable_scope('vgg_16', reuse=False):
        with slim.arg_scope(vgg.vgg_arg_scope()):
            self.final_result, _ = vgg.vgg_16(self.input_images,
                                              num_classes=100,
                                              is_training=False,
                                              dropout_keep_prob=1)
            self.final_result_part1 = vgg.vgg_16_part1(self.input_images,
                                                       num_classes=100,
                                                       is_training=False,
                                                       dropout_keep_prob=1)
            self.final_result_part2 = vgg.vgg_16_part2(self.input_features,
                                                       num_classes=100,
                                                       is_training=False,
                                                       dropout_keep_prob=1)
    restorer = get_init_restorer()
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    # init_op = tf.global_variables_initializer()
    self.sess = tf.Session(config=config)
    checkpoints_dir = os.path.join(
        '/data2/xuyangf/OcclusionProject/NaiveVersion/checkpoint')
    restorer.restore(self.sess, os.path.join(checkpoints_dir, 'fine_tuned'))
    print('load finish')
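
# Two-stage sketch for the split network above (assumptions: `SplitVgg`
# names the class, `img_batch` is a preprocessed [1, 224, 224, 3] array,
# and the project's custom vgg_16_part1 returns the intermediate feature
# tensor that vgg_16_part2 consumes; this is only a plausible usage).
net = SplitVgg(mylayer=4)
feats = net.sess.run(net.final_result_part1,
                     feed_dict={net.input_images: img_batch})
feats[:, 3:6, 3:6, :] = 0.0  # occlude a spatial region of the features
logits = net.sess.run(net.final_result_part2,
                      feed_dict={net.input_features: feats})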
def __init__(self, cache_folder, which_net, which_layer, which_snapshot,
             batch_size=1, from_scratch=False):
    # params
    self.batch_size = batch_size
    self.scale_size = vgg.vgg_16.default_image_size
    self.img_mean = np.array([123.68, 116.779, 103.939])  # RGB
    # Runtime params
    self.net_type = which_net
    with tf.device('/cpu:0'):
        self.input_images = tf.placeholder(
            tf.float32, [self.batch_size, None, None, 3])
    if which_net == 'vgg16':
        # checkpoints_dir = os.path.join(cache_folder, 'checkpoints_vgg')
        checkpoints_dir = os.path.join(cache_folder, 'checkpoints')
        vgg_var_scope = 'vgg_16'
        if which_snapshot == 0:  # pre-trained (no-BN) snapshot
            with tf.variable_scope(vgg_var_scope, reuse=False):
                with slim.arg_scope(
                        vgg.vgg_arg_scope(bn=False, is_training=False)):
                    _, _ = vgg.vgg_16_pool4(self.input_images,
                                            is_training=False)
            self.features = tf.get_default_graph().get_tensor_by_name(
                vgg_var_scope + '/' + which_layer + '/MaxPool:0')
            restorer = self.get_init_restorer(bn=False, vc=False)
            config = tf.ConfigProto()
            config.gpu_options.allow_growth = True
            init_op = tf.global_variables_initializer()
            self.sess = tf.Session(config=config)
            print(str(datetime.now()) + ': Start Init')
            if from_scratch:
                self.sess.run(init_op)
            else:
                restorer.restore(
                    self.sess,
                    os.path.join(checkpoints_dir, 'vgg_16.ckpt'))
        else:  # fine-tuned (BN) snapshot
            with tf.variable_scope(vgg_var_scope, reuse=False):
                with slim.arg_scope(
                        vgg.vgg_arg_scope(bn=True, is_training=False)):
                    _, _ = vgg.vgg_16(self.input_images, is_training=False)
            self.features = tf.get_default_graph().get_tensor_by_name(
                vgg_var_scope + '/' + which_layer + '/MaxPool:0')
            restorer = self.get_init_restorer(bn=True, vc=False)
            config = tf.ConfigProto()
            config.gpu_options.allow_growth = True
            init_op = tf.global_variables_initializer()
            self.sess = tf.Session(config=config)
            print(str(datetime.now()) + ': Start Init')
            restorer.restore(
                self.sess,
                os.path.join(checkpoints_dir,
                             'fine_tuned-' + str(which_snapshot)))
    # elif which_net == 'alexnet':
    #     checkpoints_dir = os.path.join(cache_folder, 'checkpoints_alex')
    #     vgg_var_scope = 'vgg_16'
    #     with tf.variable_scope(vgg_var_scope, reuse=False):
    #         with slim.arg_scope(vgg.vgg_arg_scope(bn=True, is_training=False)):
    #             _, _ = vgg.alexnet(self.input_images, is_training=False)
    #     self.features = tf.get_default_graph().get_tensor_by_name(
    #         vgg_var_scope + '/' + which_layer + '/Relu:0')
    #     restorer = self.get_init_restorer(bn=True, vc=False)
    #     config = tf.ConfigProto()
    #     config.gpu_options.allow_growth = True
    #     init_op = tf.global_variables_initializer()
    #     self.sess = tf.Session(config=config)
    #     print(str(datetime.now()) + ': Start Init')
    #     restorer.restore(self.sess, os.path.join(
    #         checkpoints_dir, 'fine_tuned-' + str(which_snapshot)))
    else:
        # __init__ cannot return a value, so signal unknown nets by raising
        raise ValueError('error: unknown net: ' + which_net)
    print(str(datetime.now()) + ': Finish Init')
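
# Usage sketch for the extractor above (assumed class name
# `PoolFeatureExtractor`; `img_rgb` is an RGB float array of any size, since
# the placeholder leaves H and W unspecified).
import numpy as np

fe = PoolFeatureExtractor(cache_folder='./cache', which_net='vgg16',
                          which_layer='pool4', which_snapshot=0)
img = img_rgb.astype(np.float32) - fe.img_mean   # RGB mean subtraction
out = fe.sess.run(fe.features,
                  feed_dict={fe.input_images: img[np.newaxis]})
print(out.shape)  # pool4 has stride 16: [1, H // 16, W // 16, 512]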
                             output_buffer_size=batch_size)  # tail of a truncated val_dataset call
batched_val_dataset = val_dataset.batch(batch_size)

iterator = tf.contrib.data.Iterator.from_structure(
    batched_train_dataset.output_types, batched_train_dataset.output_shapes)
images, labels = iterator.get_next()

train_init_op = iterator.make_initializer(batched_train_dataset)
val_init_op = iterator.make_initializer(batched_val_dataset)

# Indicates whether we are in training or in test mode
is_training = tf.placeholder(tf.bool)

vgg = tf.contrib.slim.nets.vgg
with slim.arg_scope(vgg.vgg_arg_scope(weight_decay=weight_decay)):
    logits, _ = vgg.vgg_16(images,
                           num_classes=num_classes,
                           is_training=is_training,
                           dropout_keep_prob=dropout_keep_prob)

# Specify where the model checkpoint is (pretrained weights).
# Restore only the layers up to fc6 (included); fc7 and fc8 are excluded
# below and will be trained from scratch.
# Calling function `init_fn(sess)` will load all the pretrained weights.
variables_to_restore = tf.contrib.framework.get_variables_to_restore(
    exclude=['vgg_16/fc8', 'vgg_16/fc7'])
init_fn = tf.contrib.framework.assign_from_checkpoint_fn(
    model_path, variables_to_restore)

# Initialization operation from scratch for the new "fc8" layers
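# A plausible completion of the step the comment above announces (a sketch,
# not taken from the original source): collect the layers that were excluded
# from the restore and build an op that initializes just them. fc7 is
# included here because it is also excluded above.
new_variables = tf.contrib.framework.get_variables('vgg_16/fc8')
new_variables += tf.contrib.framework.get_variables('vgg_16/fc7')
new_layers_init = tf.variables_initializer(new_variables)
# Typical startup order: `init_fn(sess)` to load the pretrained weights,
# then `sess.run(new_layers_init)` for the new layers.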
def get_init_restorer():
    # Restore every global variable from the checkpoint
    variables_to_restore = []
    # for var in slim.get_model_variables():
    for var in tf.global_variables():
        variables_to_restore.append(var)
    return tf.train.Saver(variables_to_restore)


# checkpoints_dir = os.path.join(model_cache_folder, 'checkpoints_vgg')
checkpoints_dir = os.path.join(model_cache_folder_f, 'checkpoints')
tf.logging.set_verbosity(tf.logging.INFO)

with tf.device('/cpu:0'):
    input_images = tf.placeholder(tf.float32, [1, None, None, 3])
vgg_var_scope = 'vgg_16'
with tf.variable_scope(vgg_var_scope, reuse=False):
    with slim.arg_scope(vgg.vgg_arg_scope(bn=False, is_training=False)):
        _, end_points = vgg.vgg_16(input_images,
                                   num_classes=len(all_categories),
                                   is_training=False)

restorer = get_init_restorer()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
init_op = tf.global_variables_initializer()
sess = tf.Session(config=config)
print(str(datetime.now()) + ': Start Init')
# restorer.restore(sess, os.path.join(checkpoints_dir, 'vgg_16.ckpt'))
restorer.restore(sess, os.path.join(checkpoints_dir, 'fine_tuned-8000'))
print(str(datetime.now()) + ': Finish Init')

# One gradient tensor per output class: d(score_ii) / d(input_images)
grad_ts_ls = []
for ii in range(end_points['vgg_16/fc8/reduced'].get_shape().as_list()[1]):
    grad_ts_ls.append(
        tf.gradients(end_points['vgg_16/fc8/reduced'][0, ii],
                     input_images)[0])
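
# Saliency sketch using the per-class gradient tensors built above
# (`img` is an assumed preprocessed [1, H, W, 3] float array; the class
# index is arbitrary).
import numpy as np

class_idx = 0
grad = sess.run(grad_ts_ls[class_idx], feed_dict={input_images: img})
saliency = np.abs(grad[0]).max(axis=-1)  # [H, W], max magnitude over channels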