def convert(save_path=FLAGS.save_path, model_name='facenet.tfmodel',
            RESULTS_NODE_NAME='embeddings'):
    with tf.Graph().as_default():
        with tf.Session() as sess:
            saver = import_graph_def()
            load_weights(sess=sess, saver=saver)
            show_graph(sess.graph)
            # white_list = del_moving_average_op(sess.graph)
            constant_graph = graph_util.convert_variables_to_constants(
                sess, sess.graph_def, [RESULTS_NODE_NAME])
            # write_proto(os.path.join(save_path, model_name), constant_graph)
            print(type(constant_graph))
            tf.train.write_graph(constant_graph, save_path, model_name,
                                 as_text=False)
def convert1(save_path=FLAGS.save_path, model_name='facenet.tfmodel',
             RESULTS_NODE_NAME='embeddings'):
    with tf.Graph().as_default():
        graph_def = tf.GraphDef()
        # nn4.pbtxt is a text-format proto, so parse it with
        # google.protobuf.text_format rather than ParseFromString,
        # which expects the binary wire format
        from google.protobuf import text_format
        with open('nn4.pbtxt', 'r') as graph_file:
            text_format.Merge(graph_file.read(), graph_def)
        # name='' avoids the default 'import/' prefix so node names
        # such as RESULTS_NODE_NAME stay unchanged
        tf.import_graph_def(graph_def, name='')
        with tf.Session() as sess:
            # saver = import_graph_def()
            saver = tf.train.Saver(tf.trainable_variables())
            load_weights(sess=sess, saver=saver)
            show_graph(sess.graph)
            # white_list = del_moving_average_op(sess.graph)
            constant_graph = graph_util.convert_variables_to_constants(
                sess, sess.graph_def, [RESULTS_NODE_NAME])
            # write_proto(os.path.join(save_path, model_name), constant_graph)
            print(type(constant_graph))
            tf.train.write_graph(constant_graph, save_path, model_name,
                                 as_text=False)
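Once a graph has been frozen this way, it can be loaded back for inference without any checkpoint files. A minimal sketch, assuming the file was produced by convert() above with 'embeddings' as the output node (the path and helper name are illustrative):

import tensorflow as tf

def load_frozen_model(path='facenet.tfmodel'):
    """Load a frozen GraphDef and return the graph and its output tensor."""
    graph_def = tf.GraphDef()
    with tf.gfile.GFile(path, 'rb') as f:
        graph_def.ParseFromString(f.read())
    with tf.Graph().as_default() as graph:
        # name='' keeps the original node names intact
        tf.import_graph_def(graph_def, name='')
    embeddings = graph.get_tensor_by_name('embeddings:0')
    return graph, embeddings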
sess = tf.Session()
sess.run(tf.global_variables_initializer())
history = []
iterep = 500
for i in range(iterep * 30):
    idx = np.random.choice(np.arange(len(y_train)), 100, replace=False)
    x_batch = X_train[idx]
    y_batch = y_train[idx]
    sess.run(train_step, feed_dict={x: x_batch, y: y_batch, phase: True})
    if (i + 1) % iterep == 0:
        epoch = (i + 1) // iterep
        tr = sess.run([loss, accuracy],
                      feed_dict={x: X_train, y: y_train, phase: False})
        t = sess.run([loss, accuracy],
                     feed_dict={x: X_eval, y: y_eval, phase: False})
        history += [[epoch] + tr + t]
        print(history[-1])

sys.path.append('/home/abdullah/PycharmProjects/Pattern')  # point to the directory containing the show_graph helper
show_graph(tf.get_default_graph().as_graph_def())
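The phase placeholder fed above is the usual training/inference switch for batch normalization: per-batch statistics while training, moving averages at evaluation time. The model-building code is not shown, so the following sketch of how phase is typically wired in is an assumption:

import tensorflow as tf

phase = tf.placeholder(tf.bool, name='phase')          # True during training
x = tf.placeholder(tf.float32, [None, 784], name='x')  # illustrative input

h = tf.layers.dense(x, 100)
# uses batch statistics when phase is True, moving averages otherwise
h = tf.layers.batch_normalization(h, training=phase)
h = tf.nn.relu(h)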
def show_graph(self):
    # delegates to the module-level show_graph helper (the method name
    # shadows it on the class, but the global still resolves here)
    graph = tf.get_default_graph()
    show_graph(graph)
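The module-level show_graph helper these snippets rely on is not defined here. A common implementation, adapted from the TensorFlow DeepDream notebook, renders the graph inline in a Jupyter notebook through the TensorBoard visualizer:

import numpy as np
import tensorflow as tf
from IPython.display import display, HTML

def strip_consts(graph_def, max_const_size=32):
    """Strip large constant values from graph_def to keep the HTML small."""
    strip_def = tf.GraphDef()
    for n0 in graph_def.node:
        n = strip_def.node.add()
        n.MergeFrom(n0)
        if n.op == 'Const':
            tensor = n.attr['value'].tensor
            size = len(tensor.tensor_content)
            if size > max_const_size:
                tensor.tensor_content = tf.compat.as_bytes(
                    '<stripped %d bytes>' % size)
    return strip_def

def show_graph(graph_def, max_const_size=32):
    """Visualize a TensorFlow graph inline in a notebook."""
    if hasattr(graph_def, 'as_graph_def'):
        graph_def = graph_def.as_graph_def()
    strip_def = strip_consts(graph_def, max_const_size=max_const_size)
    code = """
        <script>
          function load() {{
            document.getElementById("{id}").pbtxt = {data};
          }}
        </script>
        <link rel="import" href="https://tensorboard.appspot.com/tf-graph-basic.build.html" onload=load()>
        <div style="height:600px">
          <tf-graph-basic id="{id}"></tf-graph-basic>
        </div>
    """.format(data=repr(str(strip_def)), id='graph' + str(np.random.rand()))
    iframe = """
        <iframe seamless style="width:1200px;height:620px;border:0" srcdoc="{}"></iframe>
    """.format(code.replace('"', '&quot;'))
    display(HTML(iframe))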
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    # run the batch-norm moving-average updates along with each train step
    self.train_step = tf.train.AdamOptimizer(
        learning_rate=self.lr).minimize(self.loss,
                                        global_step=self.global_step)

# get accuracy
prediction = tf.argmax(predictions, 1)
equality = tf.equal(prediction, tf.argmax(self.targets, 1))
self.accuracy = tf.reduce_mean(tf.cast(equality, tf.float32))

# define summaries
tf.summary.scalar('loss_ce', ce_term)
tf.summary.scalar('loss_activacion_mean', self.mean_act_loss_term)
tf.summary.scalar('loss_total', self.loss)


if __name__ == "__main__":
    from datasets.quickdraw_dataset import QuickDraw_Dataset

    with tf.Session().as_default() as sess:
        # t = QuickDraw_Dataset(1, 60, data_folder='./temp/quickdraw_expanded_images')
        # t = Xray_dataset(1, 10, data_folder='./temp/dataset_xray_imgs')
        t = Imagenet_Dataset(1, 55, data_folder='temp/imagenet_subset')
        with imagenet_classifier_cam_loss_V2(t, debug=False) as model:
            # model.train(save_model=False, special_t=1)
            show_graph(model.graph)
            # model.eval('test')
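The scalar summaries defined above still need to be merged and written to disk before TensorBoard can display them. A minimal, self-contained sketch of that workflow on a toy graph (all names here are illustrative):

import tensorflow as tf

x = tf.placeholder(tf.float32, name='x')
loss = tf.square(x, name='toy_loss')
tf.summary.scalar('loss_total', loss)
merged = tf.summary.merge_all()

with tf.Session() as sess:
    writer = tf.summary.FileWriter('logdir', sess.graph)
    for step in range(10):
        summary = sess.run(merged, feed_dict={x: float(step)})
        writer.add_summary(summary, step)
    writer.flush()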
i = np.random.randint(0, len(train_images))
print('displaying image {} with class {}'.format(i, train_labels[i]))
plt.imshow(train_images[i], cmap='gray')
plt.show()

g = tf.Graph()
flatten_shape = np.prod(train_images.shape[1:])
with g.as_default():
    X = tf.placeholder(tf.float32, [None, flatten_shape], name='X')
    y = tf.placeholder(tf.float32, [None, 10], name='y')

utils.show_graph(g)

with g.as_default():
    # define the model
    l1 = dense(X, 32, 'h1', activation=tf.nn.sigmoid)
    l2 = dense(l1, 64, 'h2', activation=tf.nn.relu)
    logits = dense(l2, 10, 'out', activation=None)  # feed l2, not l1, so the second hidden layer is actually used

    # define the loss function
    loss_op = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=y))

    # define the optimizer
    optimizer = tf.train.RMSPropOptimizer(learning_rate=0.01)

    # train operation
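The dense helper used to build the layers is not shown. A minimal version consistent with its call signature here (inputs, units, scope name, optional activation) might look like this; the initializer choice is an assumption:

import tensorflow as tf

def dense(inputs, units, scope, activation=None):
    """Fully connected layer: activation(inputs @ W + b)."""
    with tf.variable_scope(scope):
        in_dim = inputs.get_shape().as_list()[-1]
        W = tf.get_variable('W', [in_dim, units],
                            initializer=tf.glorot_uniform_initializer())
        b = tf.get_variable('b', [units],
                            initializer=tf.zeros_initializer())
        z = tf.matmul(inputs, W) + b
        return activation(z) if activation is not None else z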
predictions, acts = get_slim_arch_bn(self.input_l, phase,
                                     self.dataset.shape_target[0])

# Configure values for visualization
self.last_conv = acts['vgg_16/conv5/conv5_3']
self.softmax_weights = r"vgg_16/softmax_logits/weights:0"

self.pred = tf.nn.softmax(predictions, name='prediction')
self.loss = tf.losses.softmax_cross_entropy(self.targets, predictions)

update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    self.train_step = tf.train.AdamOptimizer(
        learning_rate=self.lr).minimize(self.loss)

# get accuracy
prediction = tf.argmax(predictions, 1)
equality = tf.equal(prediction, tf.argmax(self.targets, 1))
self.accuracy = tf.reduce_mean(tf.cast(equality, tf.float32))


if __name__ == '__main__':
    dataset = Imagenet_Dataset(2, 40)
    with vgg_16_batchnorm(dataset, debug=False) as model:
        show_graph(tf.get_default_graph())
        save_graph_txt(tf.get_default_graph())
        graph = tf.get_default_graph()
        writer = tf.summary.FileWriter(logdir='logdir', graph=graph)
        writer.flush()
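Besides the inline notebook view, the event file written by the FileWriter above can be inspected in a full TensorBoard instance by running `tensorboard --logdir logdir` and opening the Graphs tab.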