Code example #1
    def print_status(self, mse):
        """Log the current validation MSE/PSNR together with training progress."""
        psnr = util.get_psnr(mse, max_value=self.max_value)

        if self.step == 0:
            print("Initial MSE:%f PSNR:%f" % (mse, psnr))
        else:
            # Average wall-clock time per training step since training started.
            processing_time = (time.time() - self.start_time) / self.step
            print("%s Step:%d MSE:%f PSNR:%f (Training PSNR:%0.3f)" %
                  (util.get_now_date(), self.step, mse, psnr,
                   self.training_psnr_sum / self.training_step))
            print("Epoch:%d (Step:%s) LR:%f (%2.3fsec/step) MinPSNR:%0.3f" %
                  (self.epochs_completed, "{:,}".format(self.step), self.lr,
                   processing_time,
                   util.get_psnr(self.min_validation_mse, max_value=self.max_value)))
Code example #2
    def train_batch(self):
        """Run one optimization step on the current batch and accumulate training PSNR."""
        _, mse = self.sess.run(
            [self.training_optimizer, self.mse],
            feed_dict={
                self.x: self.batch_input,
                self.x2: self.batch_input_quad,
                self.y: self.batch_true_quad,
                self.lr_input: self.lr,
                self.dropout_input: self.dropout
            })
        # Accumulate the per-batch PSNR so print_status can report a running average.
        self.training_psnr_sum += util.get_psnr(mse, max_value=self.max_value)
        self.training_step += 1
        self.step += 1
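train_batch feeds five placeholders (self.x, self.x2, self.y, self.lr_input, self.dropout_input) whose definitions fall outside this excerpt. A minimal TensorFlow 1.x-style sketch of how such placeholders could be declared; the helper name, shapes, and channel count are assumptions, and only the attribute names come from the feed_dict above:

    import tensorflow as tf  # TF 1.x API, matching the sess.run/feed_dict usage above

    def build_input_placeholders(self):
        # Hypothetical helper, not part of the source; shapes are assumptions.
        self.x = tf.placeholder(tf.float32, shape=[None, None, None, 1], name="x")    # low-resolution input batch
        self.x2 = tf.placeholder(tf.float32, shape=[None, None, None, 1], name="x2")  # upscaled ("quad") input batch
        self.y = tf.placeholder(tf.float32, shape=[None, None, None, 1], name="y")    # ground-truth ("quad") batch
        self.lr_input = tf.placeholder(tf.float32, shape=[], name="learning_rate")    # scalar learning rate
        self.dropout_input = tf.placeholder(tf.float32, shape=[], name="dropout")     # scalar dropout keep rate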
Code example #3
    def update_epoch_and_lr(self, mse):
        """Track the best validation MSE and decay the learning rate on a plateau."""
        lr_updated = False

        if self.min_validation_mse < 0 or self.min_validation_mse > mse:
            # New best validation MSE: remember it and the epoch it was reached.
            self.min_validation_epoch = self.epochs_completed
            self.min_validation_mse = mse
        elif self.epochs_completed > self.min_validation_epoch + self.lr_decay_epoch:
            # No improvement for more than lr_decay_epoch epochs:
            # decay the learning rate and reset the patience window.
            self.min_validation_epoch = self.epochs_completed
            self.lr *= self.lr_decay
            lr_updated = True

        # Record the epoch's validation PSNR and the average training PSNR for CSV output.
        psnr = util.get_psnr(mse, max_value=self.max_value)
        self.csv_epochs.append(self.epochs_completed)
        self.csv_psnr.append(psnr)
        self.csv_training_psnr.append(self.training_psnr_sum /
                                      self.training_step)

        return lr_updated
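The decay rule above depends only on how many epochs have passed since the last improvement in validation MSE. The following standalone, runnable sketch reproduces the same plateau logic so the schedule can be inspected in isolation (the function name and the parameter values 0.002, 0.5, and 3 are hypothetical, chosen only for this example):

    def simulate_lr_schedule(validation_mses, lr=0.002, lr_decay=0.5, lr_decay_epoch=3):
        # Mirrors update_epoch_and_lr: keep the best MSE seen so far, and once no
        # improvement has occurred for more than lr_decay_epoch epochs, multiply
        # the learning rate by lr_decay and reset the patience window.
        min_mse = -1  # mirrors min_validation_mse starting at a negative sentinel
        min_epoch = 0
        schedule = []
        for epoch, mse in enumerate(validation_mses):
            if min_mse < 0 or mse < min_mse:
                min_mse, min_epoch = mse, epoch   # new best validation MSE
            elif epoch > min_epoch + lr_decay_epoch:
                min_epoch = epoch                 # reset the patience window
                lr *= lr_decay                    # decay the learning rate
            schedule.append(lr)
        return schedule

    # The MSE stops improving after epoch 2, so the rate is halved at epochs 6 and 10.
    print(simulate_lr_schedule([0.10, 0.08, 0.07] + [0.07] * 9))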