Example #1
def main():
    dir_name, file_name = os.path.split(args.restore_path)

    graph = tf.Graph()
    with graph.as_default():
        net = FlowNetS(mode=Mode.TEST)
        images_placeholder, _ = net.placeholders()
        image_a, image_b = tf.split(images_placeholder,
                                    num_or_size_splits=2,
                                    axis=3)
        inputs = {
            'input_a': image_a,
            'input_b': image_b,
        }
        predict_flow = net.model(inputs, LONG_SCHEDULE, trainable=False)
        labels = tf.identity(predict_flow["flow"], name="output")
        init_op = tf.global_variables_initializer()
        saver = tf.train.Saver(max_to_keep=50)

    session_config = tf.ConfigProto()
    sess = tf.Session(graph=graph, config=session_config)
    sess.run(init_op)

    saver.restore(sess, args.restore_path)
    FileWriter("__tb", sess.graph)
    graph_def = graph_util.convert_variables_to_constants(
        sess, sess.graph.as_graph_def(add_shapes=True), ['output'])
    # Write the frozen graph_def (not the unfrozen graph) so the constants are saved.
    tf.train.write_graph(graph_def, dir_name, 'flownet_s.pb', as_text=False)
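As a hedged follow-up (TF 1.x assumed), the frozen flownet_s.pb written above could be loaded back and its 'output' tensor looked up like this; the relative path is an assumption:

import tensorflow as tf

# Assumed location: the example writes flownet_s.pb under dir_name.
pb_path = 'flownet_s.pb'
with tf.gfile.GFile(pb_path, 'rb') as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())

with tf.Graph().as_default() as g:
    tf.import_graph_def(graph_def, name='')
    # 'output' matches the tf.identity name given in the example.
    output = g.get_tensor_by_name('output:0')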
Example #2
    def __init__(self, log_dir: str):
        super(ValueFunction, self).__init__()

        # Write logs
        log_dir = log_dir + type(self).__name__ + '/'
        if not isdir(log_dir):
            makedirs(log_dir)
        self.writer = FileWriter(log_dir)
Example #3
    def set_callbacks(self,
                      checkpoints=True,
                      tensorboard=True,
                      auto_stopping=False):
        """ Set any model callbacks here """

        self.callbacks = list()

        if checkpoints:
            if not os.path.exists('checkpoints'):
                os.mkdir('checkpoints')

            checkpoint = ModelCheckpoint(filepath=os.path.join(
                'checkpoints', self.filename()),
                                         monitor='val_accuracy',
                                         verbose=1,
                                         save_best_only=True,
                                         mode='max')
            self.callbacks.append(checkpoint)

        if tensorboard:
            log_dir = os.path.join(self.model_log_dir, self.filename()[:-3])
            self.file_writer = FileWriter(os.path.join(log_dir, 'metrics'))
            self.file_writer.set_as_default()
            tensorboard_callback = TensorBoard(
                log_dir=log_dir,
                write_graph=True,
                write_images=True,
                histogram_freq=0,
                profile_batch=0,
            )
            self.callbacks.append(tensorboard_callback)

        lr_schedule = None
        config = self.lr_schedule_config
        if config:
            if config.get('lr_schedule') == 'polynomial':
                lr_schedule = PolynomialDecay(maxEpochs=self.epochs,
                                              initAlpha=self.lr,
                                              power=config.get('lr_power'))
            elif config.get('lr_schedule') == 'linear':
                lr_schedule = PolynomialDecay(maxEpochs=self.epochs,
                                              initAlpha=self.lr,
                                              power=1)

        if lr_schedule:
            lr_callback = LearningRateScheduler(lr_schedule)
            self.callbacks.append(lr_callback)

        if auto_stopping:
            es_callback = EarlyStopping(monitor='val_accuracy',
                                        mode='max',
                                        patience=10)
            self.callbacks.append(es_callback)
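For context, a minimal sketch of how a callback list like this is typically consumed; the train method, model attribute, and data argument names here are assumptions:

    def train(self, x_train, y_train, x_val, y_val):
        # Hand the callbacks configured above to Keras fit().
        self.set_callbacks()
        self.model.fit(x_train, y_train,
                       validation_data=(x_val, y_val),
                       epochs=self.epochs,
                       callbacks=self.callbacks)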
Example #4
    def __init__(self, dir):
        os.makedirs(dir, exist_ok=True)
        self.dir = dir
        self.step = 1
        prefix = 'events'
        path = osp.join(osp.abspath(dir), prefix)
        import tensorflow as tf
        from tensorflow.python import pywrap_tensorflow
        from tensorflow.core.util import event_pb2
        from tensorflow.python.util import compat
        from tensorflow.summary import FileWriter
        self.tf = tf
        self.event_pb2 = event_pb2
        self.pywrap_tensorflow = pywrap_tensorflow
        self.writer = pywrap_tensorflow.EventsWriter(compat.as_bytes(path))
        self.graph_summary_writer = FileWriter(
            osp.join(osp.abspath(dir), 'tb_graph'))
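A hedged sketch of how a writer like this is typically driven afterwards, modeled on the attributes set above (writekvs is a hypothetical method name; TF 1.x Summary/Event protos assumed):

    def writekvs(self, kvs):
        import time  # lazy import, matching the constructor's style
        # Wrap each scalar in a Summary proto and append an Event at the current step.
        values = [self.tf.Summary.Value(tag=k, simple_value=float(v))
                  for k, v in kvs.items()]
        summary = self.tf.Summary(value=values)
        event = self.event_pb2.Event(wall_time=time.time(), summary=summary)
        event.step = self.step
        self.writer.WriteEvent(event)
        self.writer.Flush()
        self.step += 1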
Example #5
def convert_ckpt2pb():
    # with tf.Session() as sess:
    sess = tf.Session()
    saver = tf.train.import_meta_graph(DIR+sub+".meta")
    FileWriter("__tb", sess.graph)
    saver.restore(sess, tf.train.latest_checkpoint(DIR))
    val_names = [v.name for v in tf.global_variables()]
    # Save the graph for tensorboard
    g = tf.get_default_graph()
    ops = g.get_operations()
    ops_ = [op.name for op in g.get_operations()]
    graph_def = tf.get_default_graph().as_graph_def()
    possible_io_nodes = [n.name + '=>' + n.op
                         for n in graph_def.node
                         if n.op in ('Softmax', 'Placeholder')]
    output_nodes = []  # NOTE: fill with the real output op names (see possible_io_nodes) before freezing
    output_graph_def = graph_util.convert_variables_to_constants(sess, graph_def, output_nodes)
    with tf.gfile.GFile("./test.pb", "wb") as fid:
        # Serialize the frozen graph, not the unfrozen graph_def.
        serialized_graph = output_graph_def.SerializeToString()
        fid.write(serialized_graph)
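As a hedged follow-up, assuming inspection of possible_io_nodes showed the model's real output to be a Softmax op, the freeze step would then be rerun with that name:

    output_nodes = ['Softmax']  # assumption: picked from possible_io_nodes
    output_graph_def = graph_util.convert_variables_to_constants(sess, graph_def, output_nodes)
    with tf.gfile.GFile("./test.pb", "wb") as fid:
        fid.write(output_graph_def.SerializeToString())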
Example #6
    def __init__(self, output_dir):
        if cfg.TRAIN.FLAG:
            self.model_dir = os.path.join(output_dir, 'Model')
            self.image_dir = os.path.join(output_dir, 'Image')
            self.log_dir = os.path.join(output_dir, 'Log')
            mkdir_p(self.model_dir)
            mkdir_p(self.image_dir)
            mkdir_p(self.log_dir)
            self.summary_writer = FileWriter(self.log_dir)

        self.max_epoch = cfg.TRAIN.MAX_EPOCH
        self.snapshot_interval = cfg.TRAIN.SNAPSHOT_INTERVAL

        s_gpus = cfg.GPU_ID.split(',')
        self.gpus = [int(ix) for ix in s_gpus]
        self.num_gpus = len(self.gpus)
        self.batch_size = cfg.TRAIN.BATCH_SIZE * self.num_gpus
        torch.cuda.set_device(self.gpus[0])
        cudnn.benchmark = True
Example #7
    def run(self):
        # Remove http messages
        logging.getLogger('werkzeug').setLevel(logging.ERROR)

        # address the tensorboard "unable to get first event timestamp for run" bug
        events_folders = [ root
                           for root, dirs, files in os.walk(self.dir_path)
                           for name in files
                           if events_filename_pattern.match(name) ]
        #print(str(events_folders))
        for events_folder in events_folders:
            writer = FileWriter(events_folder)
            writer.close()

        # Start tensorboard server
        self.tb = program.TensorBoard(
            default.get_plugins(), program.get_default_assets_zip_provider())
        self.tb.configure(argv=[None, '--logdir', self.dir_path])
        url = self.tb.launch()
        sys.stdout.write('TensorBoard %s at %s [ %s ]\n' %
                         (version.VERSION, url,
                          'http://localhost:' + re.search(host_port_pattern_str, url).group('port')))
Example #8
# -*- coding: utf-8 -*-
import tensorflow as tf
from tensorflow.summary import FileWriter

sess = tf.Session()
tf.train.import_meta_graph("./model/lr.ckpt.meta")
FileWriter("logs/1", sess.graph)
sess.close()
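The events file written to logs/1 can then be inspected by pointing TensorBoard at that directory; a typical shell invocation (assuming TensorBoard is installed) is:

tensorboard --logdir logs/1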
Example #9
def main():

    # Hyper parameters
    epochs = 10
    batch_size = 128
    keep_probability = 0.7
    learning_rate = 0.001

    # Remove previous weights, bias, inputs, etc..
    tf.reset_default_graph()

    # Inputs
    x = tf.placeholder(tf.float32, shape=(None, 32, 32, 3), name='input_x')
    y = tf.placeholder(tf.float32, shape=(None, 10), name='output_y')
    keep_prob = tf.placeholder(tf.float32, name='keep_prob')

    # Build model
    logits = conv_net(x, keep_prob)
    model = tf.identity(
        logits, name='logits'
    )  # Name logits Tensor, so that can be loaded from disk after training

    # Loss and Optimizer
    cost = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y))
    optimizer = tf.train.AdamOptimizer(
        learning_rate=learning_rate).minimize(cost)

    # Accuracy
    correct_pred = tf.equal(tf.argmax(logits, 1), tf.argmax(y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32),
                              name='accuracy')

    # Training Phase
    # save_model_path = './image_classification'
    save_model_path = 'saved_model/image_classification'

    print('Training...')
    with tf.Session() as sess:
        # Initializing the variables
        sess.run(tf.global_variables_initializer())

        # # Training cycle
        # for epoch in range(epochs):
        #     # Loop over all batches
        #     n_batches = 5
        #     for batch_i in range(1, n_batches + 1):
        #         for batch_features, batch_labels in helper.load_preprocess_training_batch(batch_i, batch_size):
        #             train_neural_network(sess, optimizer, keep_probability, batch_features, batch_labels)
        #
        #         print('Epoch {:>2}, CIFAR-10 Batch {}:  '.format(epoch + 1, batch_i), end='')
        #         print_stats(sess, batch_features, batch_labels, cost, accuracy)

        # Save Model
        saver = tf.train.Saver()
        save_path = saver.save(sess, save_model_path)
        ############################################################################################
        # saver = tf.train.import_meta_graph('/home/mayank_sati/codebase/python/camera/tensorflow/CIFAR10-img-classification-tensorflow/saved_model/image_classification.meta')
        # saver.restore(sess, tf.train.latest_checkpoint('./'))
        # print("finised loading")
        # ################################################
        # frozen_graph = freeze_session(sess)
        # NOTE: 'output_y' above names the labels placeholder; the trained model's
        # output is the 'logits' tensor, which is the meaningful freeze target.
        frozen_graph = freeze_session(sess, output_names=['logits'])
        # frozen_graph = freeze_session(K.get_session())

        tf.train.write_graph(frozen_graph,
                             "model",
                             "tf_model_ti.pb",
                             as_text=False)
        FileWriter("__tb", sess.graph)
        # @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
        # frozen_graph_def = tf.graph_util.convert_variables_to_constants(sess, sess.graph_def, ["output_y"])
        # # Save the frozen graph
        # with open('output_graph.pb', 'wb') as f:
        #     f.write(frozen_graph_def.SerializeToString())
        ##########################################################################
        # Print all operators in the graph
        for op in sess.graph.get_operations():
            print(op)
        # Print all tensors produced by each operator in the graph
        for op in sess.graph.get_operations():
            print(op.values())
        tensor_names = [[v.name for v in op.values()]
                        for op in sess.graph.get_operations()]
        tensor_names = np.squeeze(tensor_names)
        print(tensor_names)
Example #10
File: train.py  Project: naruarjun/DDPG
def train(config):
    env = gym.make("Go2Goal-v0")
    is_u_discrete = len(env.action_space.shape) == 0
    tf_session = tf.Session()
    ddpg_agent = DDPG(tf_session, config)
    tf_session.run(tf.global_variables_initializer())

    print(config.keys())
    saver = tf.train.Saver()
    summarizer = FileWriter("__tensorboard/her2", tf_session.graph)
    s_summary = tf.Summary()
    log_str = "| [{}] Episode: {:4} | Reward: {:7.3f} | Q: {:8.3f} | T: {:3d} | MIND: {:4.3f} |"

    summary_op = tf.summary.merge_all()

    # for testing purposes!!!
    current_best_eval_score = 0
    for episode in range(config["n_episodes"]):
        episodic_r = 0.
        episodic_q = 0.
        obs = env.reset()
        episode_batch = []
        min_d2goal = env.distance_from_goal()
        for i in range(env._max_episode_steps):
            # print(obs)
            action, u, q = ddpg_agent.step(np.hstack([obs["observation"],
                                           obs["desired_goal"]]),
                                           is_u_discrete)
            episodic_q += q
            action = scale_action(action)
            new_obs, r, done, info = env.step(action)
            ogag = [obs[k] for k in ["observation", "desired_goal", "achieved_goal"]]
            episode_batch.append([*ogag, u, r, new_obs["observation"],
                                  new_obs["desired_goal"], int(done)])
            if (info["dist"] < min_d2goal).all():
                min_d2goal = info["dist"]
            obs = new_obs
            if "render" in config.keys() and config["render"]:
                env.render()
            episodic_r += r
            for epoch in range(5):
                ddpg_agent.train()
            if done:
                break
            s_summary.value.add(tag="run/l_velocity", simple_value=(action)[0])
            s_summary.value.add(tag="run/a_velocity", simple_value=(action)[1])
            s_summary.value.add(tag="run/meanQ",
                                simple_value=float(episodic_q/(i+1)))
            summarizer.add_summary(s_summary, episode*env._max_episode_steps+i)
        # n_batch = reward_normalizer.discount(episode_batch)
        for experience in episode_batch:
            ddpg_agent.remember(experience)
        print(log_str.format("T", episode+1, episodic_r,
                             float(episodic_q), i+1, np.linalg.norm(min_d2goal)))
        summarizer.add_summary(tf_session.run(summary_op), episode)
        summarizer.flush()
        # To run or not to run evaluations on current target policy...
        if (episode+1) % 20 != 0:
            continue
        m_eval_score = 0.
        m_eval_q = 0.
        print()
        for eval_run in range(5):
            eval_score = 0.
            eval_q = 0.
            obs = env.reset()
            for j in range(env._max_episode_steps):
                u, _, q = ddpg_agent.step(np.hstack([obs["observation"], obs["desired_goal"]]),
                                          is_u_discrete, explore=False)
                obs, r, done, _ = env.step(u)
                eval_score += r
                eval_q += q
                if done:
                    break
            m_eval_q += eval_q
            m_eval_score += eval_score
            print(log_str.format("E", eval_run+1, m_eval_score,
                                 float(m_eval_q), j+1, -1))
        print()
        # save the model checkpoints if they are the current best...
        if m_eval_score > current_best_eval_score:
            print("New best policy found with eval score of: ", m_eval_score)
            print("old best policy's eval score: ", current_best_eval_score)
            current_best_eval_score = m_eval_score
            saver.save(tf_session, "__checkpoints/nb_policy", episode)
Example #11
def main():
	sess = tf.InteractiveSession()

	saver = tf.train.import_meta_graph(PATH+'.meta')
	saver.restore(sess, PATH)

	#PRED = sess.run('output/BiasAdd:0',feed_dict={'inputs:0':X})#,'hidden_state:0':np.zeros((3,X.shape[0],64))})

	# RNN, GRU OK
	#HS = np.zeros((3,X.shape[0],64))
	# LSTM 
	HS = np.zeros((3,2,X.shape[0],64))
	# MLP, CNN
	# No HS, input --> inputs, pred [:,-1,:] --> [:,:]


	start = datetime.datetime.now()
	for i in range(inference_Loop):
	    PRED = sess.run('output/BiasAdd:0',feed_dict={'input:0':X,'hidden_state:0':HS})
	end = datetime.datetime.now()

	elapsed_time = (end-start).total_seconds()
	print(PRED.shape)
	#RMSE = np.sqrt(np.mean((PRED[:,-1,:] - Y[:,-1,:])**2))
	#STD = np.std((PRED[:,-1,:] - Y[:,-1,:])**2)
	#RMSE = np.sqrt(np.mean((PRED[:,:] - Y[:,-1,:])**2))
	#STD = np.std((PRED[:,:] - Y[:,-1,:])**2)


	FileWriter("__tb", sess.graph)
	#--------------------------------------------------#
	#Get output nodes  Names
	#--------------------------------------------------#
	graph = sess.graph
	#print([node.name for node in graph.as_graph_def().node])
	output_node_names=[node.name for node in graph.as_graph_def().node]

	#----------------------------------------------------------------#
	#Make a frozen model(.pb) of the TF  model in order to convert it into UFF#
	#----------------------------------------------------------------#
	# We use a built-in TF helper to export variables to constants
	output_graph_def = tf.graph_util.convert_variables_to_constants(
		sess,  # the session is used to retrieve the weights
		tf.get_default_graph().as_graph_def(),  # the graph_def is used to retrieve the nodes
		output_node_names)  # the output node names select the useful nodes

	input_checkpoint = PATH
	# Specify the full filename of the frozen graph
	absolute_model_dir = "/".join(input_checkpoint.split('/')[:-1])
	output_graph = absolute_model_dir + "/frozen_model.pb"

	# Finally we serialize and dump the output graph to the filesystem
	with tf.gfile.GFile(output_graph, "wb") as f:
	    f.write(output_graph_def.SerializeToString())
	#print("%d ops in the final graph." % len(output_graph_def.node))

	#----------------------------------#
	#Conversion TF graph def as UFF #
	#---------------------------------#
	uff_model = uff.from_tensorflow_frozen_model(ROOT+'/frozen_model.pb',['output/BiasAdd'],output_filename = ModelData.MODEL_FILE)


	#----------------------------------#
	#Build the engine and run inference#
	#---------------------------------#
	model_file=ModelData.MODEL_FILE
	builder=build_engine(model_file)
	
	with builder as engine:
		
		# Build an engine, allocate buffers and create a stream.
		inputs, outputs, bindings, stream = common.allocate_buffers(engine)
		with engine.create_execution_context() as context:
		    #case_num = load_normalized_test_case(data_paths, pagelocked_buffer=inputs[0].host)
		    #case_num = load_normalized_test_case(data_paths, pagelocked_buffer=inputs[0].host)
		    l=[0]*MAX_BATCH_SIZE*X.shape[1]*X.shape[2]
		    for k in range(X.shape[0]):
		       for i in range(X.shape[1]):
		          for j in range(X.shape[2]):
		             l[k*X.shape[1]*X.shape[2] + j*X.shape[1] + i]=X[k][i][j]
		             #l[k]=X[0][0][0]
		    np.copyto(inputs[0].host,l)

		    # The common.do_inference function will return a list of outputs - we only have one in this case.
		    [output] =do_inference(context, bindings=bindings, inputs=inputs, outputs=outputs, stream=stream)
		    pred = output#np.argmax(output)
		    print(output.shape)
		    #print("Test Case: " + str(case_num))
		    print("Engine Input shape : ",len(inputs[0].host))
		    print()
		    print("Engine Input : ",inputs)
		    print("Prediction: " + str(pred))



	print("input shape: ",X.shape[:])
	print("input : ",X)


	print("output shape: ",output.shape)
	print("Elapsed time without TensorRT: ",elapsed_time)
Example #12
    def train(self):

        self.t_vars = tf.trainable_variables()
        self.d_vars = [var for var in self.t_vars if 'D' in var.name]
        self.g_vars = [var for var in self.t_vars if 'G' in var.name]
        self.e_vars = [var for var in self.t_vars if 'encode' in var.name]
        assert len(self.t_vars) == len(self.d_vars + self.g_vars + self.e_vars)

        self.saver = tf.train.Saver()
        self.p_saver = tf.train.Saver(self.e_vars)

        opti_D = tf.train.AdamOptimizer(self.opt.lr_d * self.lr_decay, beta1=self.opt.beta1, beta2=self.opt.beta2).\
                                        minimize(loss=self.D_loss, var_list=self.d_vars)
        opti_G = tf.train.AdamOptimizer(self.opt.lr_g * self.lr_decay, beta1=self.opt.beta1, beta2=self.opt.beta2).\
                                        minimize(loss=self.G_loss, var_list=self.g_vars)

        init = tf.global_variables_initializer()
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True

        with tf.Session(config=config) as sess:

            sess.run(init)
            start_step = 0
            ckpt = tf.train.get_checkpoint_state(self.opt.checkpoints_dir)
            if ckpt and ckpt.model_checkpoint_path:
                start_step = int(
                    ckpt.model_checkpoint_path.split('model_',
                                                     2)[1].split('.', 2)[0])
                self.saver.restore(sess, ckpt.model_checkpoint_path)
            else:
                print("")
                # try:
                #     #self.p_saver.restore(sess, os.path.join(self.opt.pretrain_path,
                #     #                                           'model_{:06d}.ckpt'.format(100000)))
                # except:
                #     print(" Self-Guided Model path may not be correct")

            #summary_op = tf.summary.merge_all()
            #summary_writer = tf.summary.FileWriter(self.opt.log_dir, sess.graph)
            step = start_step
            lr_decay = 1

            print("Start read dataset")

            image_path, train_images, train_eye_pos, test_images, test_eye_pos = self.dataset.input(
            )
            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess=sess, coord=coord)

            print("Start entering the looping")
            real_test_batch, real_test_pos = sess.run(
                [test_images, test_eye_pos])

            while step <= self.opt.niter:

                if step > 20000 and step % 2000 == 0:
                    lr_decay = (self.opt.niter - step) / float(self.opt.niter -
                                                               20000)

                real_batch_image_path, x_data, x_p_data = sess.run(
                    [image_path, train_images, train_eye_pos])
                xm_data, x_left_p_data, x_right_p_data = self.get_Mask_and_pos(
                    x_p_data)

                f_d = {
                    self.x: x_data,
                    self.xm: xm_data,
                    self.x_left_p: x_left_p_data,
                    self.x_right_p: x_right_p_data,
                    self.lr_decay: lr_decay
                }

                # optimize D
                sess.run(opti_D, feed_dict=f_d)
                # optimize G
                sess.run(opti_G, feed_dict=f_d)
                #summary_str = sess.run(summary_op, feed_dict=f_d)
                #summary_writer.add_summary(summary_str, step)
                if step % 500 == 0:

                    if self.opt.is_ss:
                        output_loss = sess.run([
                            self.D_loss, self.G_loss, self.opt.lam_r *
                            self.recon_loss, self.opt.lam_p * self.percep_loss,
                            self.r_cls_loss, self.f_cls_loss
                        ],
                                               feed_dict=f_d)
                        print(
                            "step %d D_loss=%.8f, G_loss=%.4f, Recon_loss=%.4f, Percep_loss=%.4f, "
                            "Real_class_loss=%.4f, Fake_class_loss=%.4f, lr_decay=%.4f"
                            % (step, output_loss[0], output_loss[1],
                               output_loss[2], output_loss[3], output_loss[4],
                               output_loss[5], lr_decay))
                    else:
                        output_loss = sess.run([
                            self.D_loss, self.G_loss, self.opt.lam_r *
                            self.recon_loss, self.opt.lam_p * self.percep_loss
                        ],
                                               feed_dict=f_d)
                        print(
                            "step %d D_loss=%.8f, G_loss=%.4f, Recon_loss=%.4f, Percep_loss=%.4f, lr_decay=%.4f"
                            % (step, output_loss[0], output_loss[1],
                               output_loss[2], output_loss[3], lr_decay))

                if np.mod(step, 2000) == 0:

                    train_output_img = sess.run([
                        self.xl_left, self.xl_right, self.xc, self.yo, self.y,
                        self.yl_left, self.yl_right
                    ],
                                                feed_dict=f_d)

                    batch_masks, batch_left_eye_pos, batch_right_eye_pos = self.get_Mask_and_pos(
                        real_test_pos)
                    #for test
                    f_d = {
                        self.x: real_test_batch,
                        self.xm: batch_masks,
                        self.x_left_p: batch_left_eye_pos,
                        self.x_right_p: batch_right_eye_pos,
                        self.lr_decay: lr_decay
                    }

                    test_output_img = sess.run([self.xc, self.yo, self.y],
                                               feed_dict=f_d)
                    output_concat = self.Transpose(
                        np.array([
                            x_data, train_output_img[2], train_output_img[3],
                            train_output_img[4]
                        ]))
                    local_output_concat = self.Transpose(
                        np.array([
                            train_output_img[0], train_output_img[1],
                            train_output_img[5], train_output_img[6]
                        ]))
                    test_output_concat = self.Transpose(
                        np.array([
                            real_test_batch, test_output_img[0],
                            test_output_img[2], test_output_img[1]
                        ]))
                    save_images(
                        local_output_concat,
                        '{}/{:02d}_local_output.jpg'.format(
                            self.opt.sample_dir, step))
                    save_images(
                        output_concat,
                        '{}/{:02d}_output.jpg'.format(self.opt.sample_dir,
                                                      step))
                    save_images(
                        test_output_concat, '{}/{:02d}_test_output.jpg'.format(
                            self.opt.sample_dir, step))

                if np.mod(step, 20000) == 0:
                    self.saver.save(
                        sess,
                        os.path.join(self.opt.checkpoints_dir,
                                     'model_{:06d}.ckpt'.format(step)))

                step += 1

            save_path = self.saver.save(
                sess,
                os.path.join(self.opt.checkpoints_dir,
                             'model_{:06d}.ckpt'.format(step)))

            ############################
            #CREATING A TENSORBOARD BASED FILE FOR VISUALIZATION.
            ############################
            from tensorflow.summary import FileWriter

            # tf.train.import_meta_graph("checkpoints/model_100001.ckpt.meta")

            FileWriter("__tb", sess.graph)
            print("\n Graph File written\n")
            #####################################
            #Saving pb files: //Anant
            #####################################
            from tensorflow.python.tools import freeze_graph
            #####################################
            print("\n About to Freeze the graph\n")
            filename = "saved_model"
            directory = "log3_25_1"
            pbtxt_filename = filename + '.pbtxt'
            pbtxt_filepath = os.path.join(directory, pbtxt_filename)
            pb_filepath = os.path.join(directory, filename + '.pb')
            # This will only save the graph but the variables will not be saved.
            # You have to freeze your model first.
            tf.train.write_graph(graph_or_graph_def=sess.graph_def,
                                 logdir=directory,
                                 name=pbtxt_filename,
                                 as_text=True)

            # Freeze graph
            # Method 1
            # freeze_graph.freeze_graph(input_graph=pbtxt_filepath, input_saver='', input_binary=False, input_checkpoint=save_path, output_node_names='y', restore_op_name='save/restore_all', filename_tensor_name='save/Const:0', output_graph=pb_filepath, clear_devices=True, initializer_nodes='')
            print("\n Graph frozen\n")
            #summary_writer.close()

            coord.request_stop()
            coord.join(threads)

            print("Model saved in file: %s \n" % save_path)
            print("Saved_model saved in File: %s" % pb_filepath)
Example #13
def get_node_from_ckpt():
    sess = tf.Session()
    tf.train.import_meta_graph(
        "./tusimple_lanenet_vgg/tusimple_lanenet_vgg.ckpt.meta")
    FileWriter("__tb", sess.graph)
Example #14
def totb(g: TF_Graph):
    """ Export to TensorBoard """
    writer = FileWriter(get_log_dir("freezepb"))
    writer.add_graph(g)
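A brief usage note: FileWriter buffers events, so a hedged companion step after add_graph is to flush and close the writer so the graph actually reaches disk:

    writer.flush()
    writer.close()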
Example #15
def checkNode_2(checkpoint_path):
    sess = tf.Session()
    tf.train.import_meta_graph(checkpoint_path + '.meta')
    FileWriter("__tb", sess.graph)
    print("Success!")
Example #16
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.step = 1
        self.writer = FileWriter(self.log_dir)
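A hedged sketch of the scalar-logging method a class like this might pair with the constructor (log_scalar is a hypothetical name; import tensorflow as tf is assumed):

    def log_scalar(self, tag, value):
        # Record one scalar at the current step, then advance the step counter.
        summary = tf.Summary(value=[tf.Summary.Value(tag=tag, simple_value=float(value))])
        self.writer.add_summary(summary, self.step)
        self.writer.flush()
        self.step += 1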
Example #17
    def test(self,
             freeze_model,
             num_custom_images,
             flag_save_images=True,
             custom_dataset=True):

        init = tf.global_variables_initializer()
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        self.saver = tf.train.Saver()

        with tf.Session(config=config) as sess:
            sess.run(init)
            ckpt = tf.train.get_checkpoint_state(self.opt.checkpoints_dir)
            print('Load checkpoint')
            if ckpt and ckpt.model_checkpoint_path:
                self.saver.restore(sess, ckpt.model_checkpoint_path)
                print('Load Succeed!')
            else:
                print('No checkpoint exists, load failed!')
                exit()

            if custom_dataset:
                batch_num = num_custom_images
                testbatch, testmask = self.dataset.custom_test_input()
            else:
                batch_num = 3451 / self.opt.batch_size  #Have made batch size = 1
                _, _, _, testbatch, testmask = self.dataset.input()
            #_,_,_, testbatch, testmask = self.dataset.input()
            #testbatch, testmask = self.dataset.custom_test_input()
            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess=sess, coord=coord)

            #################################
            # STARTING TIMING
            ##################################
            start_time = time.time()

            for j in range(int(batch_num)):
                real_test_batch, real_eye_pos = sess.run([testbatch, testmask])
                batch_masks, batch_left_eye_pos, batch_right_eye_pos = self.get_Mask_and_pos(
                    real_eye_pos)
                f_d = {
                    self.x: real_test_batch,
                    self.xm: batch_masks,
                    self.x_left_p: batch_left_eye_pos,
                    self.x_right_p: batch_right_eye_pos
                }
                ############################
                # Saving the above 4 input arrays to files. To load them back, change:
                # 'wb' -> 'rb'
                # np.save -> var = np.load(file1)
                # - ANANT
                #############################
                # with open('/disk/projectEyes/GazeCorrection/log3_25_1/array_vars/placeholder_1', 'wb') as file1:
                #     np.save(file1, batch_right_eye_pos)
                # with open('/disk/projectEyes/GazeCorrection/log3_25_1/array_vars/placeholder', 'wb') as file1:
                #     np.save(file1, batch_left_eye_pos)
                # with open('/disk/projectEyes/GazeCorrection/log3_25_1/array_vars/placeholder_3', 'wb') as file1:
                #     np.save(file1, batch_masks)
                # with open('/disk/projectEyes/GazeCorrection/log3_25_1/array_vars/placeholder_2', 'wb') as file1:
                #     np.save(file1, real_test_batch)

                # #Loading back the variables from files.
                # with open('/disk/projectEyes/GazeCorrection/log3_25_1/array_vars/placeholder_2', 'rb') as file1:
                #     arr_plh1 = np.load(file1)

                output = sess.run([self.x, self.y], feed_dict=f_d)
                if flag_save_images:
                    #if j % 100 == 0 : #Considering the batch_num is 0
                    output_concat = self.Transpose(
                        np.array([output[0], output[1]]))
                    #save_images(output_concat, '{}/{:02d}.jpg'.format(self.opt.test_sample_dir, j))

                    ######################
                    # IF ONLY THE RESULTANT IMAGE NEEDS TO BE SAVED W/O CONCATENATION:
                    # -ANANT
                    ######################
                    # output_image = np.reshape(output[1], [256, 256, 3])
                    # save_images(output_image, '{}/out{}.jpg'.format("/disk/projectEyes/GazeCorrection/log3_25_1/test_sample_dir", j))

                    ######################
                    # IF CONCAT OF INPUT + OUTPUT NEEDS TO BE SAVED:
                    # - ANANT
                    ######################
                    save_images(
                        output_concat, '{}/{:02d}.jpg'.format(
                            "/disk/projectEyes/GazeCorrection/log3_25_1/test_sample_dir",
                            j))

            #################################
            # ENDING TIMING
            ##################################
            print(
                "\n \n INNER Time elapsed in GazeGan inference using TF of 3451 images = ",
                time.time() - start_time)

            if freeze_model:
                ############################
                #CREATING A TENSORBOARD BASED FILE FOR VISUALIZATION.
                ############################
                from tensorflow.summary import FileWriter

                # tf.train.import_meta_graph("checkpoints/model_100001.ckpt.meta")

                FileWriter("__tb_test", sess.graph)
                print("\n Graph File written\n")
                #####################################
                #Saving pb files: //Anant
                #####################################
                from tensorflow.python.tools import freeze_graph
                #####################################
                print("\n About to Freeze the graph\n")
                filename = "saved_model_test"
                directory = "log3_25_1"
                pbtxt_filename = filename + '.pbtxt'
                pbtxt_filepath = os.path.join(directory, pbtxt_filename)
                pb_filepath = os.path.join(directory, filename + '.pb')
                tf.train.write_graph(graph_or_graph_def=sess.graph_def,
                                     logdir=directory,
                                     name=pbtxt_filename,
                                     as_text=True)

                #freeze_graph.freeze_graph(input_graph=pbtxt_filepath, input_saver='', input_binary=False, input_checkpoint=tf.train.latest_checkpoint(self.opt.checkpoints_dir), output_node_names='add', restore_op_name='save/restore_all', filename_tensor_name='save/Const:0', output_graph=pb_filepath, clear_devices=True, initializer_nodes='')

                from tensorflow.python.framework import graph_io
                frozen = tf.graph_util.convert_variables_to_constants(
                    sess, sess.graph_def, ["add"])
                graph_io.write_graph(frozen,
                                     './log3_25_1/',
                                     'inference_graph_3_batch1.pb',
                                     as_text=False)
            coord.request_stop()
            coord.join(threads)