def __init__(self, model_dir):
    """Build an InceptionV3 inference graph and restore its weights.

    Args:
        model_dir: Checkpoint path handed to ``tf.train.Saver.restore``.
    """
    self.batch = 50
    # Fixed-size batch of 299x299 RGB images (the InceptionV3 input size).
    self.inputs = tf.placeholder(tf.float32, shape=(self.batch, 299, 299, 3))
    # NOTE(review): is_training=True keeps batch-norm in training mode even
    # though this object looks inference-oriented — confirm intentional.
    _, self.end_points = inception_v3.inception_v3(
        self.inputs, is_training=True, num_classes=1501,
        batch_size=self.batch, scope="InceptionV3")
    self.model_dir = model_dir
    # A TensorFlow Session for use in interactive contexts, such as a shell.
    self.sess = tf.InteractiveSession()
    # Restore only the InceptionV3-scoped variables; everything is
    # initialized first so restore then overwrites the checkpointed subset.
    variables_to_restore = tf.get_collection(
        tf.GraphKeys.GLOBAL_VARIABLES, scope='InceptionV3')
    saver = tf.train.Saver(variables_to_restore)
    self.sess.run(tf.global_variables_initializer())
    saver.restore(self.sess, self.model_dir)
    # print() form works on both Python 2 and 3 (original used the
    # Python-2-only print statement).
    print('model load')
# Hyper-parameters for fine-tuning InceptionV3 on a 1501-class dataset.
batch_size = 8
num_classes = 1501
img_height = 299
img_width = 299
learning_rate = 0.0002
beta1 = 0.5
beta2 = 0.999

# Graph inputs: one fixed-size image batch plus its integer class labels.
Train_Images = tf.placeholder(
    tf.float32, shape=(batch_size, img_height, img_width, 3))
Train_Labels = tf.placeholder(tf.int32, shape=batch_size)

# Forward pass through the InceptionV3 backbone.
logits, end_points = inception_v3.inception_v3(Train_Images, num_classes)

# Two savers: one restricted to the backbone's trainable variables (for
# loading a pre-trained checkpoint), one covering the whole graph (for
# writing checkpoints during training).
variables_to_restore = tf.get_collection(
    tf.GraphKeys.TRAINABLE_VARIABLES, scope='InceptionV3')
saver_to_load = tf.train.Saver(variables_to_restore)
saver_to_restore = tf.train.Saver()

# Per-example softmax cross-entropy, averaged over the batch.
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
    logits=logits, labels=Train_Labels)
cross_entropy_mean = tf.reduce_mean(cross_entropy)
# Hyper-parameters for fine-tuning InceptionV3 on a 1501-class dataset.
# NOTE(review): learning_rate/beta1/beta2 are defined but unused below —
# the optimizer hard-codes plain SGD at 0.1; confirm this is intended.
batch_size = 8
num_classes = 1501
img_height = 299
img_width = 299
learning_rate = 0.0002
beta1 = 0.5
beta2 = 0.999

# Graph inputs: one fixed-size image batch plus its integer class labels.
Train_Images = tf.placeholder(
    tf.float32, shape=(batch_size, img_height, img_width, 3))
Train_Labels = tf.placeholder(tf.int32, shape=batch_size)

# Forward pass through the InceptionV3 backbone in training mode.
logits, end_points = inception_v3.inception_v3(
    Train_Images, num_classes, is_training=True)

# Two savers: one restricted to the backbone's trainable variables (for
# loading a pre-trained checkpoint), one covering the whole graph.
variables_to_restore = tf.get_collection(
    tf.GraphKeys.TRAINABLE_VARIABLES, scope='InceptionV3')
saver_to_load = tf.train.Saver(variables_to_restore)
saver_to_restore = tf.train.Saver()

# Batch-averaged softmax cross-entropy loss.
cross_entropy_mean = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=logits, labels=Train_Labels))

# Non-trainable scalar step counter, starting at 0.
step = tf.get_variable(
    "step", [], initializer=tf.constant_initializer(0.0), trainable=False)
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
# Optimizer momentum hyper-parameters.
beta1 = 0.5
beta2 = 0.999

# Training inputs with a dynamic batch dimension.
data_node = tf.placeholder(tf.float32, shape=(None, 299, 299, 3))
labels_node = tf.placeholder(tf.int32, shape=None)
# Validation inputs, same shapes as the training placeholders.
va_data_node = tf.placeholder(tf.float32, shape=(None, 299, 299, 3))
va_labels_node = tf.placeholder(tf.int32, shape=None)
# To know if it is training or not; fed into the network's is_training.
train_flag = tf.placeholder(tf.bool)

# Forward pass through the InceptionV3 backbone.
# (num_classes is expected to be defined earlier in this file.)
logits, end_points = inception_v3.inception_v3(
    data_node, num_classes, is_training=train_flag)

# Two savers: one restricted to the backbone's trainable variables (for
# loading a pre-trained checkpoint), one covering the whole graph.
variables_to_restore = tf.get_collection(
    tf.GraphKeys.TRAINABLE_VARIABLES, scope='InceptionV3')
saver_to_load = tf.train.Saver(variables_to_restore)
saver_to_restore = tf.train.Saver()

# Per-example softmax cross-entropy, averaged over the batch.
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
    logits=logits, labels=labels_node)
cross_entropy_mean = tf.reduce_mean(cross_entropy)