Code Example #1
File: test.py  Project: NurievSiroj/DexiNed
    def run(self, session):

        self.model.setup_testing(session)
        if self.args.use_dataset:
            # dataset mode: test_data[0] holds the samples, test_data[1] the test ids
            test_data = data_parser(self.args)
            n_data = len(test_data[1])
        else:
            # single-image mode: test_data is a list of image file paths
            test_data = get_single_image(self.args)
            n_data = len(test_data)
        print_info('Writing PNGs at {}'.format(self.args.base_dir_results))

        if self.args.batch_size_test == 1 and self.args.use_dataset:
            for i in range(n_data):
                im, em, file_name = get_testing_batch(self.args,
                                    [test_data[0][test_data[1][i]], test_data[1][i]], use_batch=False)
                self.img_info = file_name
                edgemap = session.run(self.model.predictions, feed_dict={self.model.images: [im]})

                self.save_egdemaps(edgemap, single_image=True)
                print_info('Done testing {}, {}'.format(self.img_info[0], self.img_info[1]))

        # for individual images
        elif self.args.batch_size_test == 1 and not self.args.use_dataset:
            for i in range(n_data):
                im, file_name = get_single_image(self.args, file_path=test_data[i])
                self.img_info = file_name
                edgemap = session.run(self.model.predictions, feed_dict={self.model.images: [im]})
                self.save_egdemaps(edgemap, single_image=True)
                print_info('Done testing {}, {}'.format(self.img_info[0], self.img_info[1]))
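
A minimal driver for this method might look like the sketch below; it assumes the TensorFlow 1.x session API used throughout these examples, and the class name DexiNedTester plus the args object are illustrative placeholders, since the snippet shows only the run method, not the class around it.

    import tensorflow as tf  # TF1-style session API, as used in the example

    # hypothetical: build the tester and run inference inside a session
    tester = DexiNedTester(args)  # assumed class/constructor, not shown above
    with tf.Session() as session:
        tester.run(session)
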
Code Example #2
File: test.py  Project: kandilidinesh/DexiNed
    def run(self, session):

        self.model.setup_testing(session)
        if self.args.use_dataset:
            test_data = data_parser(self.args)
            n_data = len(test_data[1])
        else:
            test_data = get_single_image(self.args)
            n_data = len(test_data)
        print_info('Writing PNGs at {}'.format(self.args.base_dir_results))

        if self.args.batch_size_test == 1 and self.args.use_dataset:
            for i in range(n_data):
                im, em, file_name = get_testing_batch(self.args,
                                    [test_data[0][test_data[1][i]], test_data[1][i]], use_batch=False)
                self.img_info = file_name
                
                # DexiNed start time
                startDexi = time.time()

                # edge-map prediction from the pretrained model
                edgemap = session.run(self.model.predictions, feed_dict={self.model.images: [im]})

                # DexiNed end time
                endDexi = time.time()
                secondsDexi = endDexi - startDexi
                print_info('Time taken for DexiNed: {} seconds'.format(secondsDexi))
               
                self.save_egdemaps(edgemap, single_image=True)
                print_info('Done testing {}, {}'.format(self.img_info[0], self.img_info[1]))

        # for individual images
        elif self.args.batch_size_test == 1 and not self.args.use_dataset:
            for i in range(n_data):
                im, file_name = get_single_image(self.args, file_path=test_data[i])
                self.img_info = file_name
                
                # DexiNed start time
                startDexi = time.time()

                edgemap = session.run(self.model.predictions, feed_dict={self.model.images: [im]})

                # DexiNed end time
                endDexi = time.time()
                secondsDexi = endDexi - startDexi
                print_info('Time taken for DexiNed: {} seconds'.format(secondsDexi))
                
                self.save_egdemaps(edgemap, single_image=True)
                print_info('Done testing {}, {}'.format(self.img_info[0], self.img_info[1]))
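
Example #2 differs from Example #1 only in the wall-clock timing wrapped around session.run. That pattern could be factored into a small context manager; the sketch below is mine, and the name timed is not part of the project:

    import time
    from contextlib import contextmanager

    @contextmanager
    def timed(label):
        # measure and report wall-clock time of the enclosed block
        start = time.time()
        try:
            yield
        finally:
            print('Time taken for {}: {} seconds'.format(label, time.time() - start))

    # usage inside the loops above:
    #   with timed('DexiNed'):
    #       edgemap = session.run(self.model.predictions,
    #                             feed_dict={self.model.images: [im]})
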
Code Example #3
    def run(self, sess):
        if not self.init:
            return
        # returns the file paths, number of files, train indices, and
        # validation indices (via cache_info)
        train_data = data_parser(self.args)

        self.model.setup_training(sess)
        if self.args.lr_scheduler is not None:
            global_step = tf.Variable(0, trainable=False, dtype=tf.int64)
        if self.args.lr_scheduler is None:
            learning_rate = tf.constant(self.args.learning_rate,
                                        dtype=tf.float16)
        else:
            # only a constant learning rate is implemented
            raise NotImplementedError(
                'Learning rate scheduler type [%s] is not implemented' %
                self.args.lr_scheduler)
        opt = tf.train.AdamOptimizer(learning_rate)
        trainG = opt.minimize(self.model.loss)  # a single training op over the total loss, as in HED
        saver = tf.train.Saver(max_to_keep=7)

        sess.run(tf.global_variables_initializer())
        # optionally restore weights from a previous training run
        if self.args.use_previous_trained:
            # use the BIPED-pretrained weights when training on another dataset
            if self.args.dataset_name.lower() != 'biped':
                model_path = os.path.join(
                    'checkpoints',
                    self.args.model_name + '_' + self.args.train_dataset,
                    'train')
            else:
                model_path = os.path.join(
                    self.args.checkpoint_dir,
                    self.args.model_name + '_' + self.args.train_dataset)
                model_path = os.path.join(model_path, 'train')
            if not os.path.exists(model_path) or len(
                    os.listdir(model_path)) == 0:
                ini = 0
                maxi = self.args.max_iterations + 1
                print_warning(
                    'There is no previously trained data for the current model, and'
                )
                print_warning(
                    '*** The training process is starting from scratch ***')
            else:
                # restoring using the last checkpoint
                assert (
                    len(os.listdir(model_path)) != 0
                ), 'There is no previously trained data for the current model...'
                last_ckpt = tf.train.latest_checkpoint(model_path)
                saver.restore(sess, last_ckpt)
                ini = self.args.max_iterations
                maxi = ini + self.args.max_iterations + 1  # continue for another max_iterations steps
                print_info(
                    '--> Previous model restored successfully: {}'.format(
                        last_ckpt))
        else:
            print_warning(
                '*** The training process is starting from scratch ***')
            ini = 0
            maxi = ini + self.args.max_iterations
        prev_loss = 1000.  # best (lowest) training loss seen so far
        prev_val = None

        # checkpoint directory, e.g. checkpoints/DXN_CDIBD/train for the CDIBD dataset
        checkpoint_dir = os.path.join(
            self.args.checkpoint_dir,
            self.args.model_name + '_' + self.args.train_dataset,
            self.args.model_state)
        if not os.path.exists(checkpoint_dir):
            os.makedirs(checkpoint_dir)

        fig = plt.figure()
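        # main training loop: run from ini to maxi, checkpointing on loss
        # improvement, at fixed save intervals, and on good validation error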
        for idx in range(ini, maxi):

            x_batch, y_batch, _ = get_training_batch(self.args, train_data)
            # trace runtime metadata for this step; the original code created
            # run_metadata but never passed it to sess.run, so the records
            # added to train_writer below were empty (options added here)
            run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
            run_metadata = tf.RunMetadata()

            _, summary, loss, pred_maps = sess.run(
                [trainG, self.model.merged_summary, self.model.loss,
                 self.model.predictions],
                feed_dict={self.model.images: x_batch,
                           self.model.edgemaps: y_batch},
                options=run_options,
                run_metadata=run_metadata)
            if idx % 5 == 0:
                self.model.train_writer.add_run_metadata(
                    run_metadata, 'step{:06}'.format(idx))
                self.model.train_writer.add_summary(summary, idx)
                print(time.ctime(), '[{}/{}]'.format(idx, maxi),
                      ' TRAINING loss: %.5f' % loss,
                      'prev_loss: %.5f' % prev_loss)

            # save whenever the training loss improves on the best seen so far
            if prev_loss > loss:
                saver.save(sess,
                           os.path.join(checkpoint_dir, self.args.model_name),
                           global_step=idx)
                prev_loss = loss
                print("Weights saved at the lowest loss so far", idx,
                      " Current Loss", prev_loss)

            if idx % self.args.save_interval == 0:
                saver.save(sess,
                           os.path.join(checkpoint_dir, self.args.model_name),
                           global_step=idx)
                prev_loss = loss
                print("Weights saved in the interval", idx, " Current Loss",
                      prev_loss)

            # ********* for validation **********
            if (idx + 1) % self.args.val_interval == 0:
                pause_show = 0.01
                imgs_list = []
                img = x_batch[2][:, :, 0:3]
                gt_mp = y_batch[2]
                imgs_list.append(img)
                imgs_list.append(gt_mp)
                for i in range(len(pred_maps)):
                    tmp = pred_maps[i][2, ...]
                    imgs_list.append(tmp)
                vis_imgs = visualize_result(imgs_list, self.args)
                fig.suptitle("Iterac:" + str(idx + 1) + " Loss:" +
                             '%.5f' % loss + " training")
                fig.add_subplot(1, 1, 1)
                plt.imshow(np.uint8(vis_imgs))

                print("Evaluation in progress...")
                plt.draw()
                plt.pause(pause_show)

                im, em, _ = get_validation_batch(self.args, train_data)
                summary, error, pred_val = sess.run([
                    self.model.merged_summary, self.model.error,
                    self.model.fuse_output
                ],
                                                    feed_dict={
                                                        self.model.images: im,
                                                        self.model.edgemaps: em
                                                    })
                if error <= 0.08:
                    saver.save(sess,
                               os.path.join(checkpoint_dir,
                                            self.args.model_name),
                               global_step=idx)
                    prev_loss = loss
                    print(
                        "Parameters saved at validation because error <= 0.08:",
                        error)

                self.model.val_writer.add_summary(summary, idx)
                print_info(('[{}/{}]'.format(idx, self.args.max_iterations),
                            'VALIDATION error: %0.5f' % error,
                            'pError: %.5f' % prev_loss))
                if (idx + 1) % (self.args.val_interval * 150) == 0:
                    print('updating visualisation')
                    plt.close()
                    fig = plt.figure()

        saver.save(sess,
                   os.path.join(checkpoint_dir, self.args.model_name),
                   global_step=idx)
        print("Final Weights saved", idx, " Current Loss", loss)
        self.model.train_writer.close()
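
Note that the scheduler branch at the top of Example #3 is a stub: global_step is created but never used, and any non-None lr_scheduler raises NotImplementedError. One way it could be filled in with TF1's built-in exponential decay is sketched below; the 'exponential' option name and the decay numbers are illustrative assumptions, not project settings:

    if self.args.lr_scheduler == 'exponential':
        # decayed lr = learning_rate * decay_rate ^ (global_step / decay_steps)
        learning_rate = tf.train.exponential_decay(
            self.args.learning_rate, global_step,
            decay_steps=10000, decay_rate=0.96, staircase=True)

For the step counter to advance, global_step would also have to be passed to the optimizer, e.g. opt.minimize(self.model.loss, global_step=global_step).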