def __init__(self, args):
        with open(args.exp, 'r') as f:
            self.opts = json.load(f)
        self.output_dir = args.output_dir
        if self.output_dir is None:
            self.output_dir = os.path.join(self.opts['exp_dir'], 'preds')
        print('==> Clean output folder')
        if os.path.exists(self.output_dir):
            shutil.rmtree(self.output_dir)
        utils.create_folder(self.output_dir)
        self.opts = override_options(self.opts)
        self.val_loader = get_data_loaders(self.opts['dataset'], data_provider.DataProvider)
        self.spline = ActiveSplineTorch(self.opts['cp_num'], self.opts['p_num'], device=device)

        # self.model = GNN_model.Model(state_dim=self.opts['state_dim'],
        #                               n_adj=self.opts['n_adj'],
        #                               cnn_feature_grids=self.opts['cnn_feature_grids'],
        #                               coarse_to_fine_steps=self.opts['coarse_to_fine_steps'],
        #                               get_point_annotation=self.opts['get_point_annotation']
        #                               ).to(device)
        self.model = Interactive_gnn_model.interactiveGNN(
            state_dim=self.opts['state_dim'],
            n_adj=self.opts['n_adj'],
            cnn_feature_grids=self.opts['cnn_feature_grids'],
            coarse_to_fine_steps=self.opts['coarse_to_fine_steps'],
            get_point_annotation=self.opts['get_point_annotation'],
        ).to(device)

        print('==> Reloading Models')
        self.model.reload(args.reload, strict=False)
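
# A minimal sketch of the command-line entry point this constructor expects.
# The flag names mirror the attributes read above (args.exp, args.output_dir,
# args.reload); the project's actual driver script may differ.
import argparse

def get_args():
    parser = argparse.ArgumentParser(description='Run inference for a trained experiment')
    parser.add_argument('--exp', required=True, help='path to the experiment JSON config')
    parser.add_argument('--output_dir', default=None, help='where to write predictions')
    parser.add_argument('--reload', required=True, help='checkpoint to restore')
    return parser.parse_args()

# tester = Tester(get_args())  # 'Tester' stands in for the enclosing class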
Example no. 2
    def __init__(self, args):
        self.global_step = 0
        self.epoch = 0
        with open(args.exp, 'r') as f:
            self.opts = json.load(f)
        utils.create_folder(os.path.join(self.opts['exp_dir'], 'checkpoints'))

        # Copy experiment file (shutil.copy handles paths more robustly than shelling out to cp)
        shutil.copy(args.exp, self.opts['exp_dir'])
        # result of experiment
        self.writer = SummaryWriter(
            os.path.join(self.opts['exp_dir'], 'logs', 'train'))
        self.val_writer = SummaryWriter(
            os.path.join(self.opts['exp_dir'], 'logs', 'train_val'))

        self.train_loader, self.val_loader = get_data_loaders(
            self.opts['dataset'], cityscapes.DataProvider)
        self.model = polyrnnpp.PolyRNNpp(self.opts).to(device)
        self.grid_size = self.model.encoder.feat_size

        if 'encoder_reload' in self.opts:
            self.model.encoder.reload(self.opts['encoder_reload'])

        # OPTIMIZER
        no_wd = []
        wd = []
        print('Weight Decay applied to: ')

        for name, p in self.model.named_parameters():
            if not p.requires_grad:
                # No optimization for frozen params
                continue

            if 'bn' in name or 'conv_lstm' in name or 'bias' in name:
                no_wd.append(p)
            else:
                wd.append(p)
                print(name, end=' ')

        # Allow individual options
        self.optimizer = optim.Adam(
            [{'params': no_wd, 'weight_decay': 0.0},
             {'params': wd}],
            lr=self.opts['lr'],
            weight_decay=self.opts['weight_decay'],
            amsgrad=False)
        # TODO: Test how amsgrad works (On the convergence of Adam and Beyond)

        self.lr_decay = optim.lr_scheduler.StepLR(
            self.optimizer, step_size=self.opts['lr_decay'], gamma=0.1)

        if args.resume is not None:
            self.resume(args.resume)
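
# Standalone illustration of the optimizer pattern above: parameters whose
# names contain 'bn', 'conv_lstm', or 'bias' go into a param group with
# weight_decay=0.0 so that only the remaining weights are decayed. The toy
# model below is a sketch, not part of the original code.
from collections import OrderedDict

import torch.nn as nn
import torch.optim as optim

toy = nn.Sequential(OrderedDict([
    ('conv', nn.Conv2d(3, 8, 3)),
    ('bn', nn.BatchNorm2d(8)),
]))
no_wd, wd = [], []
for name, p in toy.named_parameters():
    if 'bn' in name or 'bias' in name:   # norm/bias parameters: no decay
        no_wd.append(p)
    else:                                # conv/linear weights: decayed
        wd.append(p)

optimizer = optim.Adam(
    [{'params': no_wd, 'weight_decay': 0.0}, {'params': wd}],
    lr=1e-4, weight_decay=1e-5)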
Example no. 3
    def __init__(self, args):
        with open(args.exp, 'r') as f:
            self.opts = json.load(f)
        self.output_dir = args.output_dir
        self.fp_beam_size = args.fp_beam_size
        self.lstm_beam_size = args.lstm_beam_size
        if self.output_dir is None:
            self.output_dir = os.path.join(self.opts['exp_dir'], 'preds')

        utils.create_folder(self.output_dir)
        self.opts = override_options(self.opts)
        self.val_loader = get_data_loaders(self.opts['dataset'],
                                           cityscapes.DataProvider)
        self.model = polyrnnpp.PolyRNNpp(self.opts).to(device)
        self.model.reload(args.reload, strict=False)

        if self.opts['use_ggnn']:
            self.grid_size = self.model.ggnn.ggnn_grid_size
        else:
            self.grid_size = self.model.encoder.feat_size
Example no. 4
    def __init__(self, args):
        self.global_step = 0
        self.epoch = 0
        with open(args.exp, 'r') as f:
            self.opts = json.load(f)
        utils.create_folder(os.path.join(self.opts['exp_dir'], 'checkpoints'))

        # Copy experiment file (shutil.copy handles paths more robustly than shelling out to cp)
        shutil.copy(args.exp, self.opts['exp_dir'])

        self.writer = SummaryWriter(
            os.path.join(self.opts['exp_dir'], 'logs', 'train'))
        self.val_writer = SummaryWriter(
            os.path.join(self.opts['exp_dir'], 'logs', 'train_val'))

        self.train_loader, self.val_loader = get_data_loaders(
            self.opts['dataset'], cityscapes.DataProvider)
        self.model = polyrnnpp.PolyRNNpp(self.opts).to(device)
        self.grid_size = self.model.encoder.feat_size

        if 'xe_initializer' in self.opts:
            self.model.reload(self.opts['xe_initializer'])
        elif 'encoder_reload' in self.opts:
            self.model.encoder.reload(self.opts['encoder_reload'])

        self.model.encoder.eval()
        print('Setting encoder to eval')

        print('No weight decay in RL training')

        train_params = [p for p in self.model.parameters() if p.requires_grad]

        self.optimizer = optim.Adam(train_params,
                                    lr=self.opts['lr'],
                                    amsgrad=False)
        # TODO: Test how amsgrad works (On the convergence of Adam and Beyond)

        self.lr_decay = optim.lr_scheduler.StepLR(
            self.optimizer, step_size=self.opts['lr_decay'], gamma=0.1)

        if args.resume is not None:
            self.resume(args.resume)
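
# Note that encoder.eval() above only fixes BatchNorm/Dropout behaviour; it
# does not stop gradients. The requires_grad filter works because the frozen
# parameters are presumably flagged elsewhere. A minimal sketch of freezing a
# submodule explicitly (illustrative, not the original code):
def freeze(module):
    for p in module.parameters():
        p.requires_grad = False  # exclude from the optimizer's param list
    module.eval()                # fix BatchNorm statistics / disable Dropout

# freeze(self.model.encoder)
# train_params = [p for p in self.model.parameters() if p.requires_grad]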
Example no. 5
    def run_inference_on_images(self, topology):
        new_input_path = utils.create_folder('Dataset/Images2/new_input/')
        label_path = utils.create_folder('Dataset/Images2/new_label/')
        if not utils.is_model_stored(topology):
            print("No model stored to be restored.")
            return
        print('Running inference on', topology)
        tf.reset_default_graph()
        topology_path = 'Models/{}/'.format(topology)
        saver = tf.train.import_meta_graph(topology_path + 'model.meta')
        g = tf.get_default_graph()
        x = g.get_tensor_by_name("input_images:0")
        y = g.get_tensor_by_name("label_images:0")
        keep_prob = g.get_tensor_by_name("keep_prob:0")
        output = g.get_tensor_by_name("superpixels:0")

        with tf.Session() as sess:
            saver.restore(sess, topology_path + 'model')
            for img in os.scandir('Dataset/Images/input/'):
                input_image = cv2.imread(img.path)
                # Promote to float before normalizing; uint8 arithmetic wraps
                image = (input_image.astype(np.float32) - 128) / 128
                image = np.array(image, ndmin=4)
                label_image = cv2.imread(img.path.replace('input', 'label'))
                label = (label_image.astype(np.float32) - 128) / 128
                label = np.array(label, ndmin=4)

                result = np.round(
                    sess.run(output, feed_dict={
                        x: image,
                        keep_prob: 1.0
                    }))[0]
                new_input = utils.generate_new_input_using_floor_detection(
                    input_image, result)
                new_label = utils.generate_new_input_using_floor_detection(
                    label_image, result)

                cv2.imwrite(new_input_path + img.name, new_input)
                cv2.imwrite(label_path + img.name, new_label)
                print(img.name, 'completed', end='\r')
            print('\nDone')
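
# The (x - 128) / 128 normalization used above maps pixels to roughly
# [-1, 1], but the float cast is load-bearing: cv2.imread returns uint8, and
# uint8 arithmetic wraps modulo 256 before the division. A tiny demonstration:
import numpy as np

img = np.array([[0, 255]], dtype=np.uint8)    # typical cv2.imread dtype
wrong = (img - 128) / 128                     # 0 - 128 wraps to 128 -> +1.0
right = (img.astype(np.float32) - 128) / 128  # 0 -> -1.0, 255 -> ~0.99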
Example no. 6
 def freeze_graph_model(self, session, g=None):
     # Resolve the default graph at call time; a default argument of
     # tf.get_default_graph() would be evaluated once, at definition time
     if g is None:
         g = tf.get_default_graph()
     graph_def_original = g.as_graph_def()
     # freezing model = converting variables to constants
     graph_def_simplified = tf.graph_util.convert_variables_to_constants(
         sess=session,
         input_graph_def=graph_def_original,
         output_node_names=['input_images', 'keep_prob', 'superpixels'])
     #saving frozen graph to disk
     output_folder = utils.create_folder('Models/' + self.name + '/frozen')
     if output_folder is not None:
         model_path = tf.train.write_graph(
             graph_or_graph_def=graph_def_simplified,
             logdir=output_folder,
             name='model.pb',
             as_text=False)
         print("Model saved in file: %s" % model_path)
     else:
         print('Output folder could not be created')
Example no. 7
    def freeze_graph_model(self, session=None, g=None, topology=None):
        if topology is None:
            if self.name is not None:
                topology = self.name
            else:
                print('no topology was chosen')
                return

        topology_path = os.path.join(MODELS_PATH, topology)
        if not utils.is_model_stored(topology_path):
            print("No model stored to be restored.")
            return
        try:
            tf.reset_default_graph()
        except Exception:
            # Best effort; resetting may fail depending on graph state
            pass
        if g is None:
            g = tf.get_default_graph()

        if session is None:
            session = tf.Session()
            saver = tf.train.import_meta_graph(
                os.path.join(topology_path, 'model.meta'))
            saver.restore(session, os.path.join(topology_path, 'model'))

        graph_def_original = g.as_graph_def()
        # freezing model = converting variables to constants
        graph_def_simplified = tf.graph_util.convert_variables_to_constants(
            sess=session,
            input_graph_def=graph_def_original,
            output_node_names=['input_images', 'keep_prob', 'cars'])
        #saving frozen graph to disk
        output_folder = utils.create_folder(
            os.path.join(topology_path, 'frozen'))
        if output_folder is not None:
            model_path = tf.train.write_graph(
                graph_or_graph_def=graph_def_simplified,
                logdir=output_folder,
                name='model.pb',
                as_text=False)
            print("Model saved in file: %s" % model_path)
        else:
            print('Output folder could not be created')
Example no. 8
	def freeze_graph_model(self, session=None, g=None, topology=None):
		if topology is None:
			if self.name is not None:
				topology = self.name
			else:
				print('no topology was chosen')
				return

		if not utils.is_model_stored(topology):
			print("No model stored to be restored.")
			return
		try:
			tf.reset_default_graph()
		except Exception:
			# Best effort; resetting may fail depending on graph state
			pass
		if g is None:
			g = tf.get_default_graph()

		if session is None:
			session = tf.Session()
			topology_path = 'Models/{}/'.format(topology)
			saver = tf.train.import_meta_graph(topology_path + 'model.meta')
			saver.restore(session, topology_path + 'model')

		graph_def_original = g.as_graph_def()
		# freezing model = converting variables to constants
		graph_def_simplified = tf.graph_util.convert_variables_to_constants(
				sess=session,
				input_graph_def=graph_def_original,
				output_node_names=['input_images', 'keep_prob', 'superpixels'])
		#saving frozen graph to disk
		output_folder = utils.create_folder('Models/' + topology + '/frozen')
		if output_folder is not None:
			model_path = tf.train.write_graph(
					graph_or_graph_def=graph_def_simplified,
					logdir=output_folder,
					name='model.pb',
					as_text=False)
			print("Model saved in file: %s" % model_path)
		else:
			print('Output folder could not be created')
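
# All three freeze_graph_model variants above serialize a frozen GraphDef via
# tf.train.write_graph(..., as_text=False). A minimal sketch of loading such a
# model.pb back with standard TF1 APIs; the tensor names are the ones the
# examples above export:
import tensorflow as tf

def load_frozen_graph(pb_path):
    with tf.gfile.GFile(pb_path, 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
    graph = tf.Graph()
    with graph.as_default():
        tf.import_graph_def(graph_def, name='')
    return graph

# g = load_frozen_graph('Models/<topology>/frozen/model.pb')
# x = g.get_tensor_by_name('input_images:0')
# keep_prob = g.get_tensor_by_name('keep_prob:0')
# out = g.get_tensor_by_name('superpixels:0')  # 'cars:0' in Example no. 7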
Example no. 9
    def __init__(self, args):
        self.global_step = 0
        self.epoch = 0

        with open(args.exp, 'r') as f:
            self.opts = json.load(f)

        # Test experiments always start from a clean directory
        if 'test' in self.opts['exp_dir'] and os.path.exists(self.opts['exp_dir']):
            shutil.rmtree(self.opts['exp_dir'])

        utils.create_folder(os.path.join(self.opts['exp_dir'], 'checkpoints'))

        # Copy experiment file (shutil.copy handles paths more robustly than shelling out to cp)
        shutil.copy(args.exp, self.opts['exp_dir'])

        self.writer = SummaryWriter(
            os.path.join(self.opts['exp_dir'], 'logs', 'train'))
        self.val_writer = SummaryWriter(
            os.path.join(self.opts['exp_dir'], 'logs', 'train_val'))

        self.spline = ActiveSplineTorch(self.opts['cp_num'],
                                        self.opts['p_num'],
                                        device=device,
                                        alpha=self.opts['spline_alpha'])
        self.train_loader, self.val_loader = get_data_loaders(
            self.opts, data_provider.DataProvider)

        # self.model = poly_gnn.Model(state_dim=self.opts['state_dim'],
        #                               n_adj=self.opts['n_adj'],
        #                               cnn_feature_grids=self.opts['cnn_feature_grids'],
        #                               coarse_to_fine_steps=self.opts['coarse_to_fine_steps'],
        #                               get_point_annotation=self.opts['get_point_annotation'],
        #                               ).to(device)
        self.model = Interactive_gnn_model.interactiveGNN(
            state_dim=self.opts['state_dim'],
            n_adj=self.opts['n_adj'],
            cnn_feature_grids=self.opts['cnn_feature_grids'],
            coarse_to_fine_steps=self.opts['coarse_to_fine_steps'],
            get_point_annotation=self.opts['get_point_annotation'],
        ).to(device)

        if 'xe_initializer' in self.opts:
            self.model.reload(self.opts['xe_initializer'])
        elif 'encoder_reload' in self.opts:
            self.model.autoGCN.encoder.reload(self.opts['encoder_reload'])

        # When training the interactive GCN, load the pretrained autoGCN
        # parameters (the path below is a truncated placeholder in the source)
        self.model.autoGCN.reload('.../checkpoints/epoch42_step11996.pth')

        # OPTIMIZER
        no_wd = []
        wd = []
        print('Weight Decay applied to: ')

        for name, p in self.model.named_parameters():
            if not p.requires_grad:
                # No optimization for frozen params
                continue

            if 'bn' in name or 'bias' in name:
                no_wd.append(p)
            else:
                wd.append(p)
                print(name, end=' ')

        # Allow individual options
        self.optimizer = optim.Adam(
            [{'params': no_wd, 'weight_decay': 0.0},
             {'params': wd}],
            lr=self.opts['lr'],
            weight_decay=self.opts['weight_decay'],
            amsgrad=False)

        self.lr_decay = optim.lr_scheduler.StepLR(
            self.optimizer, step_size=self.opts['lr_decay'], gamma=0.1)

        if args.resume is not None:
            self.resume(args.resume)

        if self.opts['debug']:
            print "********************* we are in debug mode *********************"
Example no. 10
	def train(self, iterations=100000, learning_rate=1e-04):
		# reading dataset
		if self.dataset is None:
			self.dataset = DataHandler().build_datasets()
		# loss function
		# MSE = tf.reduce_mean(tf.square(self.y - self.output))
		# Adds a higher penalty to false negatives on top of plain MSE
		MSE = tf.reduce_mean(
			tf.square(self.y - self.output +
					  tf.maximum((self.y - self.output) * 2, 0)))
		# cross_entropy = tf.reduce_mean(-tf.reduce_sum(self.y * tf.log(self.output), reduction_indices=[1]))
		loss = MSE
		train_step = tf.train.AdamOptimizer(learning_rate).minimize(loss)

		completed_iterations = tf.Variable(0, trainable=False, name='completed_iterations')
		# Creating session and initializing variables
		init = tf.global_variables_initializer()
		saver = tf.train.Saver()

		with tf.Session() as sess:
			model_stored = utils.is_model_stored(self.name)
			if model_stored:
				print('Restoring Graph')
				saver.restore(sess, 'Models/' + self.name + '/model')
			else:
				sess.run(init)

			lstt = tf.trainable_variables()
			acum = 0
			for lt in lstt:
				ta = lt.get_shape()
				lstd = ta.as_list()
				mult = functools.reduce(operator.mul, lstd, 1)
				acum = acum + mult
			print('Number of parameters', acum)  # number of trainable parameters

			comp_iters = sess.run(completed_iterations)
			# Clear the Model folder only if training has never taken place
			utils.create_folder('Models/' + self.name,
			                    clear_if_exists=not (comp_iters > 0))
			remaining_iterations = iterations - comp_iters
			print('Remaining Iterations:', remaining_iterations, '- Completed Iterations:', comp_iters)
			init_time = time.time()
			last_saved_time = time.time()
			with open('Models/' + self.name + '/README.txt', 'w') as f:
				f.write('Network Topology:\n')
				for k, v in self.layers.items():
					f.write(k + " : " + str(v) + '\n')

				msg = "\nNumber of parameters = {}\nNumber of iterations = {}\nLearning rate = {}\n".format(acum,(comp_iters + remaining_iterations),learning_rate)
				f.write(msg)

			for i in range(remaining_iterations):
				start = time.time()
				batch = self.dataset.training.next_batch(50)
				# Cast to float before normalizing to avoid uint8 wrap-around
				normBatch = np.array([(img.astype(np.float32) - 128) / 128 for img in batch[0]])
				labelBatch = [lbl for lbl in batch[1]]

				train_step.run(feed_dict={self.x: normBatch, self.y: labelBatch, self.keep_prob: 0.5})
				if i % 100 == 0 or i == remaining_iterations - 1:
					MSE = loss.eval(feed_dict={self.x: normBatch, self.y: labelBatch, self.keep_prob: 1.0})
					print("iter {}, mean square error {}, step duration -> {:.2f} secs, time since last saved -> {:.2f} secs".format(
						i, MSE, time.time() - start, time.time() - last_saved_time))
					update = comp_iters + (i + 1)
					print('updating completed iterations:', sess.run(completed_iterations.assign(update)))

					save_path = saver.save(sess, 'Models/' + self.name + '/model')
					print("Model saved in file: %s" % save_path)
					batch = self.dataset.validation.next_batch(50)
					normBatch = np.array([(img.astype(np.float32) - 128) / 128 for img in batch[0]])
					labelBatch = [lbl for lbl in batch[1]]
					results = np.round(sess.run(self.output, feed_dict={self.x: normBatch, self.y: labelBatch, self.keep_prob: 1.0}))
					print("Partial Results")
					acc, prec, rec = utils.calculateMetrics(labelBatch, results)
					print('Accuracy', acc)
					print('Precision', prec)
					print('Recall', rec)
					print("Partial Results")
					utils.PainterThread(batch[0], results).start()
					last_saved_time = time.time()

			if remaining_iterations > 0 or not os.path.exists('Models/' + self.name + '/frozen/model.pb'):
				self.freeze_graph_model(sess)
			else:
				print('Nothing to be done')
			print('total time -> {:.2f} secs'.format(time.time()-init_time))
		tf.reset_default_graph()
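
# The parameter-counting loop above, written as an equivalent one-liner; a
# sketch for clarity, not taken from the source:
import functools
import operator
import tensorflow as tf

def count_trainable_parameters():
    # Sum over variables of the product of each variable's shape dimensions
    return sum(
        functools.reduce(operator.mul, v.get_shape().as_list(), 1)
        for v in tf.trainable_variables())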
Example no. 11
    def train(self, iterations=100000, learning_rate=1e-04):
        # loss function
        MSE = tf.reduce_mean(tf.square(self.y - self.output))
        # MSE = tf.reduce_mean(tf.square(self.y - self.output + tf.maximum((self.y - self.output) * 2, 0))) #Added higher weight penalties to the false negatives
        # cross_entropy = tf.reduce_mean(-tf.reduce_sum(self.y * tf.log(self.output), reduction_indices=[1]))
        loss = MSE
        train_step = tf.train.AdamOptimizer(learning_rate).minimize(loss)

        completed_iterations = tf.Variable(0,
                                           trainable=False,
                                           name='completed_iterations')
        # Creating session and initializing variables
        init = tf.global_variables_initializer()
        saver = tf.train.Saver()

        with tf.Session() as sess:
            model_stored = utils.is_model_stored(self.name)
            if model_stored:
                print('Restoring Graph')
                saver.restore(sess, 'Models/' + self.name + '/model')
            else:
                sess.run(init)

            lstt = tf.trainable_variables()
            acum = 0
            for lt in lstt:
                ta = lt.get_shape()
                lstd = ta.as_list()
                mult = functools.reduce(operator.mul, lstd, 1)
                acum = acum + mult
            print('Number of parameters',
                  acum)  # number of trainable parameters

            comp_iters = sess.run(completed_iterations)
            utils.create_folder(
                'Models/' + self.name, clear_if_exists=not (comp_iters > 0)
            )  # clear Model folder if training has never taken place
            remaining_iterations = iterations - comp_iters
            print('Remaining Iterations:', remaining_iterations,
                  '- Completed Iterations: ', comp_iters)
            init_time = time.time()
            last_saved_time = time.time()
            with open('Models/' + self.name + '/README.txt', 'w') as f:
                f.write('Network Topology:\n')
                for k, v in self.layers.items():
                    f.write(k + " : " + str(v) + '\n')

                msg = "\nNumber of parameters = {}\nNumber of iterations = {}\nLearning rate = {}\n".format(
                    acum, (comp_iters + remaining_iterations), learning_rate)
                f.write(msg)

            for i in range(remaining_iterations):
                start = time.time()
                batch = self.dataset.training.next_batch(50)
                # Cast to float before normalizing to avoid uint8 wrap-around
                normBatch = np.array(
                    [(img.astype(np.float32) - 128) / 128 for img in batch[0]])
                labelBatch = [lbl for lbl in batch[1]]

                train_step.run(feed_dict={
                    self.x: normBatch,
                    self.y: labelBatch,
                    self.keep_prob: 0.5
                })
                if i % 100 == 0 or i == remaining_iterations - 1:
                    MSE = loss.eval(feed_dict={
                        self.x: normBatch,
                        self.y: labelBatch,
                        self.keep_prob: 1.0
                    })
                    print(
                        "iter {}, mean square error {}, step duration -> {:.2f} secs, time since last saved -> {:.2f} secs"
                        .format(i, MSE, (time.time() - start),
                                time.time() - last_saved_time))
                    update = comp_iters + (i + 1)
                    print('updating completed iterations:',
                          sess.run(completed_iterations.assign(update)))

                    save_path = saver.save(sess,
                                           'Models/' + self.name + '/model')
                    print("Model saved in file: %s" % save_path)
                    batch = self.dataset.validation.next_batch(50)
                    normBatch = np.array(
                        [(img.astype(np.float32) - 128) / 128 for img in batch[0]])
                    labelBatch = [lbl for lbl in batch[1]]
                    results = np.round(
                        sess.run(self.output,
                                 feed_dict={
                                     self.x: normBatch,
                                     self.y: labelBatch,
                                     self.keep_prob: 1.0
                                 }))
                    print("Parcial Results")
                    acc, prec, rec = utils.calculateMetrics(
                        labelBatch, results)
                    print('Accuracy', acc)
                    print('Precision', prec)
                    print('Recall', rec)
                    print("Parcial Results")
                    utils.PainterThread(batch[0], results).start()
                    last_saved_time = time.time()

            if remaining_iterations > 0:
                self.freeze_graph_model(sess)
            else:
                print('Nothing to be done')
            print('total time -> {:.2f} secs'.format(time.time() - init_time))
        tf.reset_default_graph()
Example no. 12
    def train(self, iterations=10000, learning_rate=1e-03):
        # reading dataset
        if self.dataset is None:
            self.dataset = DataHandler().build_datasets()

        # loss function
        loss = tf.losses.softmax_cross_entropy(self.y, self.output)
        train_step = tf.train.AdamOptimizer(learning_rate).minimize(loss)
        completed_iterations = tf.Variable(0,
                                           trainable=False,
                                           name='completed_iterations')

        # Performance metric
        prediction = tf.nn.softmax(self.output)
        # NOTE: axis=0 compares argmax across the first dimension; for the
        # usual [batch, classes] layout, class predictions need axis=1
        correct_prediction = tf.math.equal(tf.math.argmax(prediction, axis=0),
                                           tf.math.argmax(self.y, axis=0))
        accuracy_op = tf.math.reduce_mean(tf.dtypes.cast(
            correct_prediction, tf.float32),
                                          name='accuracy')
        last_biggest_accuracy_var = tf.Variable(0.0,
                                                trainable=False,
                                                name='last_biggest_accuracy')

        # Creating session and initializing variables
        init = tf.global_variables_initializer()
        saver = tf.train.Saver()

        topology_path = os.path.join(MODELS_PATH, self.name)
        model_path = os.path.join(topology_path, 'model')
        with tf.Session() as sess:
            model_stored = utils.is_model_stored(topology_path)
            if model_stored:
                print('Restoring Graph')
                saver.restore(sess, model_path)
            else:
                sess.run(init)

            lstt = tf.trainable_variables()
            acum = 0
            for lt in lstt:
                ta = lt.get_shape()
                lstd = ta.as_list()
                mult = functools.reduce(operator.mul, lstd, 1)
                acum = acum + mult
            print('Number of parameters',
                  acum)  # number of trainable parameters

            comp_iters = sess.run(completed_iterations)
            utils.create_folder(
                topology_path, clear_if_exists=not (comp_iters > 0)
            )  # clear Model folder if training has never taken place
            remaining_iterations = iterations - comp_iters
            print('Remaining Iterations:', remaining_iterations,
                  '- Completed Iterations: ', comp_iters)
            init_time = time.time()
            last_saved_time = time.time()
            readme_path = os.path.join(topology_path, 'README.txt')
            with open(readme_path, 'w') as f:
                f.write('Network Topology:\n')
                for k, v in self.layers.items():
                    f.write(k + " : " + str(v) + '\n')

                msg = "\nNumber of parameters = {}\nNumber of iterations = {}\nLearning rate = {}\n".format(
                    acum, (comp_iters + remaining_iterations), learning_rate)
                f.write(msg)

            stopping_criteria_cnt = 0
            for i in range(remaining_iterations):
                start = time.time()
                batch = self.dataset.training.next_batch()
                normBatch = batch[0]
                labelBatch = batch[1]
                train_step.run(feed_dict={
                    self.x: normBatch,
                    self.y: labelBatch,
                    self.keep_prob: 0.5
                })
                if i % 100 == 0 or i == remaining_iterations - 1:
                    loss_value = loss.eval(feed_dict={
                        self.x: normBatch,
                        self.y: labelBatch,
                        self.keep_prob: 1.0
                    })
                    print(
                        "iter {}, loss {}, step duration -> {:.2f} secs, time since last saved -> {:.2f} secs"
                        .format(i, loss_value, (time.time() - start),
                                time.time() - last_saved_time))
                    update = comp_iters + i + 1
                    print('updating completed iterations:',
                          sess.run(completed_iterations.assign(update)))

                    batch = self.dataset.validation.next_batch()
                    normBatch = batch[0]
                    labelBatch = batch[1]
                    accuracy_validation = sess.run(accuracy_op,
                                                   feed_dict={
                                                       self.x: normBatch,
                                                       self.y: labelBatch,
                                                       self.keep_prob: 1.0
                                                   })
                    last_biggest_accuracy = sess.run(last_biggest_accuracy_var)
                    print('Validation accuracy', accuracy_validation,
                          'last_biggest_accuracy', last_biggest_accuracy)
                    if accuracy_validation > last_biggest_accuracy:
                        stopping_criteria_cnt = 0
                        sess.run(
                            last_biggest_accuracy_var.assign(
                                accuracy_validation))
                        save_path = saver.save(sess, model_path)
                        print("Model saved in file: %s" % save_path)
                    else:
                        stopping_criteria_cnt += 1
                    last_saved_time = time.time()

                    if stopping_criteria_cnt >= 100:
                        print('Stopping early')
                        break
            # frozen_model_path = os.path.join(topology_path, 'frozen/model.pb')
            # if remaining_iterations > 0 or not os.path.exists(frozen_model_path):
            #     print('freezing graph')
            #     self.freeze_graph_model(sess)
            # else:
            #     print('Nothing to be done')
            print('total time -> {:.2f} secs'.format(time.time() - init_time))
        try:
            tf.reset_default_graph()
            shutil.copyfile(os.path.join(PATH, '../Dataset/dataset.pickle'),
                            os.path.join(topology_path, 'dataset.pickle'))
        except Exception:
            # Best effort: snapshotting the dataset alongside the model is optional
            pass
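
# The stopping_criteria_cnt logic above implements patience-based early
# stopping: the counter resets whenever validation accuracy improves and
# training stops after 100 stale checks. The same idea as a small reusable
# helper (a sketch, not part of the original code):
class EarlyStopping:
    def __init__(self, patience=100):
        self.patience = patience
        self.best = float('-inf')
        self.stale = 0

    def step(self, metric):
        """Record a validation metric; return True when training should stop."""
        if metric > self.best:
            self.best = metric
            self.stale = 0
        else:
            self.stale += 1
        return self.stale >= self.patience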