Example #1
0
    def log_to_tensorboard(self, test_filename, psnr, save_meta_data=True):
        """Write TensorBoard summaries (and optionally run metadata) for one test image.

        Loads ``test_filename``, builds the (low-res input, bicubic upscale,
        ground-truth) triple fed to the model placeholders, evaluates the
        summary op, and logs training PSNR / learning rate plus the supplied
        test PSNR to the train/test writers.

        Args:
            test_filename: path to the ground-truth test image.
            psnr: pre-computed PSNR for this test image (logged as-is).
            save_meta_data: requests a full execution trace; currently
                force-disabled below (see TODO), so it has no effect.
        """
        if not self.enable_log:
            return

        # TODO: run-metadata tracing is temporarily disabled; the parameter is
        # intentionally overridden until the profiler path is fixed.
        save_meta_data = False

        org_image = util.set_image_alignment(util.load_image(test_filename, print_console=False), self.scale)

        # Collapse RGB to the Y (luminance) channel when the model is single-channel.
        if len(org_image.shape) >= 3 and org_image.shape[2] == 3 and self.channels == 1:
            org_image = util.convert_rgb_to_y(org_image)

        # Low-res input and its bicubic upscale back to the original size.
        input_image = util.resize_image_by_pil(org_image, 1.0 / self.scale, resampling_method=self.resampling_method)
        bicubic_image = util.resize_image_by_pil(input_image, self.scale, resampling_method=self.resampling_method)

        # NOTE(review): reshape assumes HxWxC arrays — confirm util.load_image
        # always returns a 3-D array even for grayscale input.
        feed_dict = {self.x: input_image.reshape([1, input_image.shape[0], input_image.shape[1], input_image.shape[2]]),
                     self.x2: bicubic_image.reshape(
                         [1, bicubic_image.shape[0], bicubic_image.shape[1], bicubic_image.shape[2]]),
                     self.y: org_image.reshape([1, org_image.shape[0], org_image.shape[1], org_image.shape[2]]),
                     self.dropout: 1.0,
                     self.is_training: 0}

        if save_meta_data:
            # Full trace: collect per-op timing/memory metadata during the run.
            run_metadata = tf.RunMetadata()
            run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
            summary_str, _ = self.sess.run([self.summary_op, self.loss], feed_dict=feed_dict, options=run_options,
                                           run_metadata=run_metadata)
            self.test_writer.add_run_metadata(run_metadata, "step%d" % self.epochs_completed)

            # Dump the raw trace next to the checkpoints for offline inspection.
            filename = self.checkpoint_dir + "/" + self.name + "_metadata.txt"
            with open(filename, "w") as out:
                out.write(str(run_metadata))

            tf.contrib.tfprof.model_analyzer.print_model_analysis(
                tf.get_default_graph(), run_meta=run_metadata,
                tfprof_options=tf.contrib.tfprof.model_analyzer.PRINT_ALL_TIMING_MEMORY)
        else:
            summary_str, _ = self.sess.run([self.summary_op, self.loss], feed_dict=feed_dict)

        self.train_writer.add_summary(summary_str, self.epochs_completed)
        # Guard against ZeroDivisionError when no training step has run yet.
        if not self.use_l1_loss and self.training_step > 0:
            util.log_scalar_value(self.train_writer, 'PSNR', self.training_psnr_sum / self.training_step,
                                  self.epochs_completed)
        util.log_scalar_value(self.train_writer, 'LR', self.lr, self.epochs_completed)
        self.train_writer.flush()

        util.log_scalar_value(self.test_writer, 'PSNR', psnr, self.epochs_completed)
        self.test_writer.flush()
Example #2
0
    def evaluate_test_batch(self,
                            save_meta_data=False,
                            trial=0,
                            log_profile=True):
        """Evaluate the model on the test batch and return its MSE.

        When summary logging is active, also writes the summary op output,
        training PSNR, and learning rate to the TensorBoard writers, and can
        dump a full run-metadata trace on the first trial.

        Args:
            save_meta_data: request a full execution trace; honored only when
                self.save_meta_data is set and trial == 0.
            trial: evaluation trial index.
            log_profile: master switch for summary/metadata logging.

        Returns:
            The evaluated MSE tensor value for the test batch.
        """
        # Trace metadata only when globally enabled and on the first trial.
        save_meta_data = save_meta_data and self.save_meta_data and trial == 0

        feed_dict = {
            self.x: self.test.input.images,
            self.x2: self.test.input.hr_images,
            self.y: self.test.true.hr_images,
            self.dropout: 1.0,
            self.is_training: 0,
        }

        logging_enabled = log_profile and (self.save_loss or self.save_weights
                                           or save_meta_data)
        if not logging_enabled:
            # Fast path: just compute the error, no summaries.
            return self.sess.run(self.mse, feed_dict=feed_dict)

        if save_meta_data:
            # Full trace: collect per-op timing/memory metadata during the run.
            run_metadata = tf.RunMetadata()
            trace_options = tf.RunOptions(
                trace_level=tf.RunOptions.FULL_TRACE)
            summary_str, mse = self.sess.run(
                [self.summary_op, self.mse],
                feed_dict=feed_dict,
                options=trace_options,
                run_metadata=run_metadata)
            self.test_writer.add_run_metadata(
                run_metadata, "step%d" % self.epochs_completed)

            # Dump the raw trace next to the checkpoints for offline inspection.
            meta_path = self.checkpoint_dir + "/" + self.name + "_metadata.txt"
            with open(meta_path, "w") as meta_file:
                meta_file.write(str(run_metadata))

            tf.contrib.tfprof.model_analyzer.print_model_analysis(
                tf.get_default_graph(),
                run_meta=run_metadata,
                tfprof_options=tf.contrib.tfprof.model_analyzer.
                PRINT_ALL_TIMING_MEMORY)
        else:
            summary_str, mse = self.sess.run([self.summary_op, self.mse],
                                             feed_dict=feed_dict)

        self.train_writer.add_summary(summary_str, self.epochs_completed)
        util.log_scalar_value(self.train_writer, 'training_PSNR',
                              self.training_psnr_sum / self.training_step,
                              self.epochs_completed)
        util.log_scalar_value(self.train_writer, 'LR', self.lr,
                              self.epochs_completed)
        self.train_writer.flush()

        util.log_scalar_value(self.test_writer, 'PSNR', util.get_psnr(mse),
                              self.epochs_completed)
        self.test_writer.flush()

        return mse