Example #1
    def evaluate(self, save_dir='data/', prefix=''):
        self.load()                             # load the model as constructed
        cuda = torch.cuda.is_available()
        if cuda:
            self.model.cuda()
        # Set to evaluation mode for batch_norm layers
        self.model.eval()
        
        saved_model_str = self.saved_model.replace('/','_') + prefix
        # Get the file names
        Ypred_file = os.path.join(save_dir, 'test_Ypred_{}.csv'.format(saved_model_str))
        Xtruth_file = os.path.join(save_dir, 'test_Xtruth_{}.csv'.format(saved_model_str))
        Ytruth_file = os.path.join(save_dir, 'test_Ytruth_{}.csv'.format(saved_model_str))
        Xpred_file = os.path.join(save_dir, 'test_Xpred_{}.csv'.format(saved_model_str))

        tk = time_keeper(os.path.join(save_dir, 'evaluation_time.txt'))
        # Open those files to append
        with open(Xtruth_file, 'a') as fxt, open(Ytruth_file, 'a') as fyt,\
                open(Ypred_file, 'a') as fyp, open(Xpred_file, 'a') as fxp:
            # Loop through the eval data and evaluate
            for ind, (geometry, spectra) in enumerate(self.test_loader):
                if cuda:
                    geometry = geometry.cuda()
                    spectra = spectra.cuda()
                Xpred = self.model.inference(spectra).cpu().data.numpy()
                np.savetxt(fxt, geometry.cpu().data.numpy())
                np.savetxt(fyt, spectra.cpu().data.numpy())
                np.savetxt(fxp, Xpred)
                if self.flags.data_set != 'meta_material':
                    Ypred = simulator(self.flags.data_set, Xpred)
                    np.savetxt(fyp, Ypred)
        tk.record(1)                # Record the total time of the eval period
        return Ypred_file, Ytruth_file
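
A note on how these outputs are consumed: evaluate returns the Ypred/Ytruth file paths rather than a metric, so scoring happens offline. A minimal post-processing sketch (assuming the space-delimited np.savetxt format used above; mse_from_files is a hypothetical helper, not part of the codebase):

    import numpy as np

    def mse_from_files(ypred_file, ytruth_file):
        """Mean-squared error between saved prediction/truth files."""
        # np.savetxt writes space-delimited rows, so np.loadtxt reads them back directly
        ypred = np.loadtxt(ypred_file)
        ytruth = np.loadtxt(ytruth_file)
        assert ypred.shape == ytruth.shape, "prediction/truth shapes must match"
        return np.mean(np.square(ypred - ytruth))

    # Usage, assuming `network` wraps the class above:
    # ypred_file, ytruth_file = network.evaluate(save_dir='data/')
    # print('eval MSE:', mse_from_files(ypred_file, ytruth_file))
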
Example #2
    def predict(self, Ytruth_file, save_dir='data/', prefix=''):
        self.load()                             # load the model as constructed
        cuda = torch.cuda.is_available()
        if cuda:
            self.model_b.cuda()
        self.model_b.eval()
        saved_model_str = self.saved_model.replace('/', '_') + prefix

        Ytruth = pd.read_csv(Ytruth_file, header=None, delimiter=',')     # Read the input
        if len(Ytruth.columns) == 1:  # The file is space-delimited rather than comma-delimited
            Ytruth = pd.read_csv(Ytruth_file, header=None, delimiter=' ')
        Ytruth_tensor = torch.from_numpy(Ytruth.values).to(torch.float)
        print('shape of Ytruth tensor :', Ytruth_tensor.shape)

        # Get the file names
        Ypred_file = os.path.join(save_dir, 'test_Ypred_{}.csv'.format(saved_model_str))
        Ytruth_file = os.path.join(save_dir, 'test_Ytruth_{}.csv'.format(saved_model_str))
        Xpred_file = os.path.join(save_dir, 'test_Xpred_{}.csv'.format(saved_model_str))
        # keep time
        tk = time_keeper(os.path.join(save_dir, 'evaluation_time.txt'))
    
        if cuda:
            Ytruth_tensor = Ytruth_tensor.cuda()
        print('model in eval:', self.model_b)
        Xpred = self.model_b(Ytruth_tensor).detach().cpu().numpy()

        # Open those files to append
        with open(Ytruth_file, 'a') as fyt, open(Ypred_file, 'a') as fyp, open(Xpred_file, 'a') as fxp:
            np.savetxt(fyt, Ytruth_tensor.cpu().data.numpy())
            np.savetxt(fxp, Xpred)
            if self.flags.data_set != 'Yang_sim':
                Ypred = simulator(self.flags.data_set, Xpred)
                np.savetxt(fyp, Ypred)
        tk.record(1)
        return Ypred_file, Ytruth_file
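
Note that every writer above opens its output files in append mode ('a'), so repeated runs with the same saved_model_str concatenate new rows onto old ones; this is what lets the evaluate_multiple_time wrappers below accumulate samples. When a fresh run is wanted instead, the stale files have to be removed first. A small sketch of that guard (clear_previous_outputs is a hypothetical helper, assuming the naming pattern above):

    import os

    def clear_previous_outputs(save_dir, saved_model_str):
        """Delete stale output CSVs so append-mode writes start from empty files."""
        for stem in ('Xtruth', 'Ytruth', 'Xpred', 'Ypred'):
            path = os.path.join(save_dir, 'test_{}_{}.csv'.format(stem, saved_model_str))
            if os.path.exists(path):
                os.remove(path)
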
Example #3
    def evaluate(self, save_dir='data/', prefix=''):
        self.load()                             # load the model as constructed
        cuda = torch.cuda.is_available()
        if cuda:
            self.model.cuda()
        self.model.eval()
        saved_model_str = self.saved_model.replace('/', '_') + prefix
        # Get the file names
        Ypred_file = os.path.join(save_dir, 'test_Ypred_{}.csv'.format(saved_model_str))
        Xtruth_file = os.path.join(save_dir, 'test_Xtruth_{}.csv'.format(saved_model_str))
        Ytruth_file = os.path.join(save_dir, 'test_Ytruth_{}.csv'.format(saved_model_str))
        Xpred_file = os.path.join(save_dir, 'test_Xpred_{}.csv'.format(saved_model_str))
        # keep time
        tk = time_keeper(os.path.join(save_dir, 'evaluation_time.txt'))

        # Open those files to append
        with open(Xtruth_file, 'a') as fxt, open(Ytruth_file, 'a') as fyt,\
                open(Ypred_file, 'a') as fyp, open(Xpred_file, 'a') as fxp:
            # Loop through the eval data and evaluate
            for ind, (geometry, spectra) in enumerate(self.test_loader):
                if cuda:
                    geometry = geometry.cuda()
                    spectra = spectra.cuda()
                print('model in eval:', self.model)
                pi, sigma, mu = self.model(spectra)  # Get the mixture parameters
                Xpred = mdn.sample(pi, sigma, mu).detach().cpu().numpy()
                # Write the truth and prediction rows for this batch
                np.savetxt(fxt, geometry.cpu().data.numpy())
                np.savetxt(fyt, spectra.cpu().data.numpy())
                np.savetxt(fxp, Xpred)
                if self.flags.data_set != 'meta_material':
                    Ypred = simulator(self.flags.data_set, Xpred)
                    np.savetxt(fyp, Ypred)
        tk.record(1)
        return Ypred_file, Ytruth_file
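
mdn.sample above draws one geometry per row from the mixture density output. If the mdn helper module is unavailable, an equivalent sampler is straightforward to write by hand; a sketch assuming pi is an (N, K) tensor of mixture weights and sigma, mu are (N, K, D) per-component scales and means (the layout used by pytorch-mdn style modules; verify against your mdn implementation):

    import torch

    def sample_mdn(pi, sigma, mu):
        """Draw one sample per row from a diagonal Gaussian mixture (pi, sigma, mu)."""
        component = torch.multinomial(pi, num_samples=1)           # (N, 1) component index per row
        idx = component.unsqueeze(-1).expand(-1, -1, mu.size(-1))  # (N, 1, D)
        mu_sel = torch.gather(mu, 1, idx).squeeze(1)               # (N, D)
        sigma_sel = torch.gather(sigma, 1, idx).squeeze(1)         # (N, D)
        # Reparameterize: mean plus scale times standard normal noise
        return mu_sel + sigma_sel * torch.randn_like(mu_sel)
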
Example #4
    def evaluate(self, save_dir='data/', save_all=False):
        self.load()  # load the model as constructed
        try:
            bs = self.flags.backprop_step  # for previous code that did not incorporate this
        except AttributeError:
            print("There is no attribute backprop_step; caught the error and adding it now")
            self.flags.backprop_step = 2
        cuda = torch.cuda.is_available()
        if cuda:
            self.model.cuda()
        self.model.eval()
        saved_model_str = self.saved_model.replace('/', '_')
        # Get the file names
        Ypred_file = os.path.join(save_dir,
                                  'test_Ypred_{}.csv'.format(saved_model_str))
        Xtruth_file = os.path.join(
            save_dir, 'test_Xtruth_{}.csv'.format(saved_model_str))
        Ytruth_file = os.path.join(
            save_dir, 'test_Ytruth_{}.csv'.format(saved_model_str))
        Xpred_file = os.path.join(save_dir,
                                  'test_Xpred_{}.csv'.format(saved_model_str))
        print("evalution output pattern:", Ypred_file)

        # Time keeping
        tk = time_keeper(
            time_keeping_file=os.path.join(save_dir, 'evaluation_time.txt'))

        # Open those files to append
        with open(Xtruth_file, 'a') as fxt, open(Ytruth_file, 'a') as fyt,\
                open(Ypred_file, 'a') as fyp, open(Xpred_file, 'a') as fxp:
            # Loop through the eval data and evaluate
            for ind, (geometry, spectra) in enumerate(self.test_loader):
                if self.flags.data_set == 'gaussian_mixture':
                    # Change the gaussian labels into one-hot
                    spectra = torch.nn.functional.one_hot(spectra.to(torch.int64), 4).to(torch.float)
                if cuda:
                    geometry = geometry.cuda()
                    spectra = spectra.cuda()
                # Initialize the geometry first
                Xpred, Ypred, loss = self.evaluate_one(spectra,
                                                       save_dir=save_dir,
                                                       save_all=save_all,
                                                       ind=ind)
                tk.record(ind)  # Keep the time after each evaluation for backprop
                # self.plot_histogram(loss, ind)                                # Debugging purposes
                np.savetxt(fxt, geometry.cpu().data.numpy())
                np.savetxt(fyt, spectra.cpu().data.numpy())
                np.savetxt(fyp, Ypred)
                np.savetxt(fxp, Xpred)
        return Ypred_file, Ytruth_file
Example #5
    def evaluate_multiple_time(self, time=200, save_dir='../multi_eval/VAE/'):
        """
        Make the evaluation multiple times for a deeper comparison of stochastic algorithms
        :param time: The number of evaluation repetitions
        :param save_dir: The directory to save the result
        :return:
        """
        tk = time_keeper(os.path.join(save_dir, 'evaluation_time.txt'))
        save_dir += self.flags.data_set
        if not os.path.isdir(save_dir):  # Make sure the per-dataset directory exists (cf. Example #8)
            os.makedirs(save_dir)
        for i in range(time):
            self.evaluate(save_dir=save_dir, prefix='inference' + str(i))
            tk.record(i)
Example #6
    def predict(self, Ytruth_file, save_dir='data/', prefix=''):
        self.load()  # load the model as constructed
        cuda = torch.cuda.is_available()
        if cuda:
            self.model.cuda()
        self.model.eval()
        saved_model_str = self.saved_model.replace('/', '_') + prefix

        Ytruth = pd.read_csv(Ytruth_file, header=None, delimiter=',')  # Read the input
        if len(Ytruth.columns) == 1:  # The file is space-delimited rather than comma-delimited
            Ytruth = pd.read_csv(Ytruth_file, header=None, delimiter=' ')
        Ytruth_tensor = torch.from_numpy(Ytruth.values).to(torch.float)
        print('shape of Ytruth tensor :', Ytruth_tensor.shape)

        # Get the file names
        Ypred_file = os.path.join(save_dir,
                                  'test_Ypred_{}.csv'.format(saved_model_str))
        Ytruth_file = os.path.join(
            save_dir, 'test_Ytruth_{}.csv'.format(saved_model_str))
        Xpred_file = os.path.join(save_dir,
                                  'test_Xpred_{}.csv'.format(saved_model_str))
        # keep time
        tk = time_keeper(os.path.join(save_dir, 'evaluation_time.txt'))

        dim_x = self.flags.dim_x
        dim_y = self.flags.dim_y
        dim_z = self.flags.dim_z
        dim_tot = self.flags.dim_tot
        batch_size = len(Ytruth_tensor)
        # Create random value for the padding for yz
        pad_yz = self.flags.zeros_noise_scale * torch.randn(
            batch_size, dim_tot - dim_y - dim_z, device=device)
        if cuda:
            Ytruth_tensor = Ytruth_tensor.cuda()
        # Create a noisy z vector with noise level same as y
        z = torch.randn(batch_size, dim_z, device=device)
        y_cat = torch.cat((z, pad_yz, Ytruth_tensor), dim=1)
        # Initialize the x first
        Xpred = self.model(y_cat, rev=True)
        Xpred = Xpred[:, :dim_x].cpu().data.numpy()
        # Open those files to append
        with open(Ytruth_file, 'a') as fyt, open(Ypred_file, 'a') as fyp, open(Xpred_file, 'a') as fxp:
            np.savetxt(fyt, Ytruth_tensor.cpu().data.numpy())
            np.savetxt(fxp, Xpred)
            if self.flags.data_set != 'Yang_sim':
                Ypred = simulator(self.flags.data_set, Xpred)
                np.savetxt(fyp, Ypred)
        tk.record(1)
        return Ypred_file, Ytruth_file
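
In Example #6 above (and Example #7 below) the reverse pass consumes a vector of total width dim_tot laid out as [z | pad_yz | y], with widths dim_z, dim_tot - dim_y - dim_z, and dim_y; the first dim_x columns of the reverse output are the recovered geometry. Both snippets also assume a module-level device (e.g. device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')). A dimension-checked sketch of the same inversion (invert_once is illustrative; the flags fields match the ones used above):

    import torch

    def invert_once(model, y, flags, device):
        """One reverse INN pass: assemble [z | pad | y], keep the x block."""
        batch_size = y.size(0)
        pad_width = flags.dim_tot - flags.dim_y - flags.dim_z
        assert pad_width >= 0, "dim_tot must cover both y and z"
        z = torch.randn(batch_size, flags.dim_z, device=device)
        pad_yz = flags.zeros_noise_scale * torch.randn(batch_size, pad_width, device=device)
        y_cat = torch.cat((z, pad_yz, y.to(device)), dim=1)  # total width == dim_tot
        x_pred = model(y_cat, rev=True)                      # reverse pass
        return x_pred[:, :flags.dim_x]                       # keep only the geometry columns
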
Example #7
    def evaluate(self, save_dir='data/', prefix=''):
        self.load()  # load the model as constructed
        cuda = torch.cuda.is_available()
        if cuda:
            self.model.cuda()
        # Set to evaluation mode for batch_norm layers
        self.model.eval()
        # Set the dimensions
        dim_x = self.flags.dim_x
        dim_y = self.flags.dim_y
        dim_z = self.flags.dim_z
        dim_tot = self.flags.dim_tot
        saved_model_str = self.saved_model.replace('/', '_') + prefix
        # Get the file names
        Ypred_file = os.path.join(save_dir,
                                  'test_Ypred_{}.csv'.format(saved_model_str))
        Xtruth_file = os.path.join(
            save_dir, 'test_Xtruth_{}.csv'.format(saved_model_str))
        Ytruth_file = os.path.join(
            save_dir, 'test_Ytruth_{}.csv'.format(saved_model_str))
        Xpred_file = os.path.join(save_dir,
                                  'test_Xpred_{}.csv'.format(saved_model_str))

        tk = time_keeper(os.path.join(save_dir, 'evaluation_time.txt'))
        # Open those files to append
        with open(Xtruth_file, 'a') as fxt, open(Ytruth_file, 'a') as fyt,\
                open(Ypred_file, 'a') as fyp, open(Xpred_file, 'a') as fxp:
            # Loop through the eval data and evaluate
            for ind, (x, y) in enumerate(self.test_loader):
                if cuda:
                    x = x.cuda()  # Put data onto GPU
                    y = y.cuda()  # Put data onto GPU
                batch_size = len(x)
                # Create random value for the padding for yz
                pad_yz = self.flags.zeros_noise_scale * torch.randn(
                    batch_size, dim_tot - dim_y - dim_z, device=device)
                # Create a noisy z vector with noise level same as y
                z = torch.randn(batch_size, dim_z, device=device)
                print("shape of z:", np.shape(z))
                print("shape of pad_yz:", np.shape(pad_yz))
                print("shape of y:", np.shape(y))
                y_cat = torch.cat((z, pad_yz, y), dim=1)
                # Initialize the x first
                Xpred = self.model(y_cat, rev=True)
                Xpred = Xpred[:, :dim_x].cpu().data.numpy()
                #np.savetxt(fxt, x.cpu().data.numpy())
                #np.savetxt(fyt, y.cpu().data.numpy())
                if self.flags.data_set != 'meta_material':
                    Ypred = simulator(self.flags.data_set, Xpred)
                    np.savetxt(fyp, Ypred)
                #np.savetxt(fxp, Xpred)
            tk.record(1)
        return Ypred_file, Ytruth_file
Example #8
    def evaluate_multiple_time(self, time=200, save_dir='/home/sr365/MM_bench_multi_eval/Tandem/'):
        """
        Make the evaluation multiple times for a deeper comparison of stochastic algorithms
        :param time: The number of evaluation repetitions
        :param save_dir: The directory to save the result
        :return:
        """
        save_dir = os.path.join(save_dir, self.flags.data_set)
        if not os.path.isdir(save_dir):
            os.makedirs(save_dir)
        tk = time_keeper(os.path.join(save_dir, 'evaluation_time.txt'))  # Create after the directory exists
        for i in range(time):
            self.evaluate(save_dir=save_dir, prefix='inference' + str(i))
            tk.record(i)
Example #9
    def evaluate_multiple_time(self, time=2048, save_dir='/work/sr365/forward_filter/cINN/'):
        """
        Make the evaluation multiple times for a deeper comparison of stochastic algorithms
        :param time: The number of evaluation repetitions
        :param save_dir: The directory to save the result
        :return:
        """
        save_dir += self.flags.data_set
        if not os.path.isdir(save_dir):  # Make sure the per-dataset directory exists (cf. Example #8)
            os.makedirs(save_dir)
        tk = time_keeper(os.path.join(save_dir, 'evaluation_time.txt'))
        for i in range(time):
            self.evaluate(save_dir=save_dir, prefix='inference' + str(i))
            tk.record(i)
Example #10
    def evaluate(self,
                 save_dir='data/',
                 save_all=False,
                 MSE_Simulator=False,
                 save_misc=False,
                 save_Simulator_Ypred=False):
        self.load()  # load the model as constructed
        try:
            bs = self.flags.backprop_step  # for previous code that did not incorporate this
        except AttributeError:
            print("There is no attribute backprop_step; caught the error and adding it now")
            self.flags.backprop_step = 300
        cuda = torch.cuda.is_available()
        if cuda:
            self.model.cuda()
        self.model.eval()
        saved_model_str = self.saved_model.replace('/', '_')
        # Get the file names
        Ypred_file = os.path.join(save_dir,
                                  'test_Ypred_{}.csv'.format(saved_model_str))
        Xtruth_file = os.path.join(
            save_dir, 'test_Xtruth_{}.csv'.format(saved_model_str))
        Ytruth_file = os.path.join(
            save_dir, 'test_Ytruth_{}.csv'.format(saved_model_str))
        Xpred_file = os.path.join(save_dir,
                                  'test_Xpred_{}.csv'.format(saved_model_str))
        print("evalution output pattern:", Ypred_file)

        # Time keeping
        tk = time_keeper(
            time_keeping_file=os.path.join(save_dir, 'evaluation_time.txt'))

        # Open those files to append
        with open(Xtruth_file, 'a') as fxt, open(Ytruth_file, 'a') as fyt,\
                open(Ypred_file, 'a') as fyp, open(Xpred_file, 'a') as fxp:
            # Loop through the eval data and evaluate
            for ind, (geometry, spectra) in enumerate(self.test_loader):
                if cuda:
                    geometry = geometry.cuda()
                    spectra = spectra.cuda()
                # Run the forward model on the geometry
                logit = self.model(geometry)
                tk.record(ind)  # Keep the time after each evaluation for backprop
                # suppress printing to evaluate time
                np.savetxt(fxt, geometry.cpu().data.numpy())
                np.savetxt(fyt, spectra.cpu().data.numpy())
                np.savetxt(fyp, logit.detach().cpu().numpy())
        return Ypred_file, Ytruth_file
Example #11
    def evaluate(self, save_dir='data/', prefix=''):
        self.load()  # load the model as constructed
        cuda = torch.cuda.is_available()
        if cuda:
            self.model.cuda()
        # Set to evaluation mode for batch_norm layers
        self.model.eval()
        # Set the dimensions
        dim_x = self.flags.dim_x
        dim_z = self.flags.dim_z
        saved_model_str = self.saved_model.replace('/', '_') + prefix
        # Get the file names
        Ypred_file = os.path.join(save_dir,
                                  'test_Ypred_{}.csv'.format(saved_model_str))
        Xtruth_file = os.path.join(
            save_dir, 'test_Xtruth_{}.csv'.format(saved_model_str))
        Ytruth_file = os.path.join(
            save_dir, 'test_Ytruth_{}.csv'.format(saved_model_str))
        Xpred_file = os.path.join(save_dir,
                                  'test_Xpred_{}.csv'.format(saved_model_str))

        tk = time_keeper(
            time_keeping_file=os.path.join(save_dir, 'evaluation time.txt'))
        # Open those files to append
        with open(Xtruth_file, 'a') as fxt, open(Ytruth_file, 'a') as fyt,\
                open(Ypred_file, 'a') as fyp, open(Xpred_file, 'a') as fxp:
            # Loop through the eval data and evaluate
            for ind, (x, y) in enumerate(self.test_loader):
                batch_size = len(x)
                # Create a noisy z vector with noise level same as y
                z = torch.randn(batch_size, dim_z, device=device)
                """
                # Initialize the x first
                if self.flags.data_set == 'gaussian_mixture':
                    y_prev = np.copy(y.data.numpy())
                    y = torch.nn.functional.one_hot(y.to(torch.int64), 4).to(torch.float) # Change the gaussian labels into one-hot
                """
                if cuda:
                    x = x.cuda()
                    y = y.cuda()
                Xpred = self.model(z, y, rev=True).cpu().data.numpy()
                np.savetxt(fxt, x.cpu().data.numpy())
                np.savetxt(fxp, Xpred)
                np.savetxt(fyt, y.cpu().data.numpy())
                if self.flags.data_set != 'meta_material':
                    Ypred = simulator(self.flags.data_set, Xpred)
                    np.savetxt(fyp, Ypred)
            tk.record(1)
        return Ypred_file, Ytruth_file
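
Because the cINN in this example conditions directly on y and consumes a fresh z per call, multiple candidate inversions for a single target come from simply re-drawing z; this is exactly what the evaluate_multiple_time wrappers exploit. A sketch of drawing k candidates for one spectrum (a hypothetical helper, assuming the model(z, y, rev=True) signature used above):

    import torch

    def sample_inverse_candidates(model, y_row, dim_z, k, device):
        """Draw k inverse candidates for one target y by re-sampling the latent z."""
        y_rep = y_row.unsqueeze(0).repeat(k, 1).to(device)  # (k, dim_y): repeat the target
        z = torch.randn(k, dim_z, device=device)            # fresh latent per candidate
        with torch.no_grad():
            return model(z, y_rep, rev=True)                # (k, dim_x) candidate geometries
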
Example #12
    def predict_inverse(self,
                        Ytruth_file,
                        multi_flag,
                        save_dir='data/',
                        prefix=''):
        self.load()  # load the model as constructed
        cuda = torch.cuda.is_available()
        if cuda:
            self.model.cuda()
        self.model.eval()
        saved_model_str = self.saved_model.replace('/', '_') + prefix

        Ytruth = pd.read_csv(Ytruth_file, header=None, delimiter=',')  # Read the input
        if len(Ytruth.columns) == 1:  # The file is space-delimited rather than comma-delimited
            Ytruth = pd.read_csv(Ytruth_file, header=None, delimiter=' ')
        Ytruth_tensor = torch.from_numpy(Ytruth.values).to(torch.float)
        print('shape of Ytruth tensor :', Ytruth_tensor.shape)

        # Get the file names
        Ypred_file = os.path.join(save_dir,
                                  'test_Ypred_{}.csv'.format(saved_model_str))
        Ytruth_file = os.path.join(
            save_dir, 'test_Ytruth_{}.csv'.format(saved_model_str))
        Xpred_file = os.path.join(save_dir,
                                  'test_Xpred_{}.csv'.format(saved_model_str))
        # keep time
        tk = time_keeper(os.path.join(save_dir, 'evaluation_time.txt'))

        # Decide whether to save the simulator-generated Ypred
        save_Simulator_Ypred = True
        if 'Yang' in self.flags.data_set:
            save_Simulator_Ypred = False

        if cuda:
            Ytruth_tensor = Ytruth_tensor.cuda()
        print('model in eval:', self.model)

        # Open those files to append
        with open(Ytruth_file, 'a') as fyt, open(Ypred_file, 'a') as fyp, open(Xpred_file, 'a') as fxp:
            np.savetxt(fyt, Ytruth_tensor.cpu().data.numpy())
            for ind in range(len(Ytruth_tensor)):
                spectra = Ytruth_tensor[ind, :]
                Xpred, Ypred, loss = self.evaluate_one(
                    spectra,
                    save_dir=save_dir,
                    save_all=multi_flag,
                    ind=ind,
                    MSE_Simulator=False,
                    save_misc=False,
                    save_Simulator_Ypred=save_Simulator_Ypred)

                np.savetxt(fxp, Xpred)
                if self.flags.data_set != 'Yang_sim':
                    Ypred = simulator(self.flags.data_set, Xpred)
                    np.savetxt(fyp, Ypred)
                tk.record(1)
        return Ypred_file, Ytruth_file
Example #13
    def train(self):
        """
        The major training function. This would start the training using information given in the flags
        :return: None
        """
        cuda = torch.cuda.is_available()
        if cuda:
            self.model.cuda()

        # Construct optimizer after the model moved to GPU
        self.optm = self.make_optimizer()
        self.lr_scheduler = self.make_lr_scheduler(self.optm)

        # Time keeping
        tk = time_keeper(
            time_keeping_file=os.path.join(self.ckpt_dir, 'training time.txt'))

        for epoch in range(self.flags.train_step):
            # Set to Training Mode
            train_loss = 0
            # boundary_loss = 0                 # Unnecessary during training since we provide geometries
            self.model.train()
            for j, (geometry, spectra) in enumerate(self.train_loader):
                if self.flags.data_set == 'gaussian_mixture':
                    # Change the gaussian labels into one-hot
                    spectra = torch.nn.functional.one_hot(spectra.to(torch.int64), 4).to(torch.float)
                if cuda:
                    geometry = geometry.cuda()  # Put data onto GPU
                    spectra = spectra.cuda()  # Put data onto GPU
                self.optm.zero_grad()  # Zero the gradient first
                logit = self.model(geometry)  # Get the output
                loss = self.make_loss(logit, spectra)  # Get the loss tensor
                loss.backward()  # Calculate the backward gradients
                self.optm.step()  # Move one step the optimizer
                train_loss += loss  # Aggregate the loss

            # Calculate the avg loss of training
            train_avg_loss = train_loss.cpu().data.numpy() / (j + 1)

            if epoch % self.flags.eval_step == 0:  # For eval steps, do the evaluations and tensor board
                # Record the training loss to the tensorboard
                self.log.add_scalar('Loss/train', train_avg_loss, epoch)

                # Set to Evaluation Mode
                self.model.eval()
                print("Doing Evaluation on the model now")
                test_loss = 0
                for j, (geometry, spectra) in enumerate(
                        self.test_loader):  # Loop through the eval set
                    if cuda:
                        geometry = geometry.cuda()
                        spectra = spectra.cuda()
                    logit = self.model(geometry)
                    loss = self.make_loss(logit, spectra)  # compute the loss
                    test_loss += loss  # Aggregate the loss

                # Record the testing loss to the tensorboard
                test_avg_loss = test_loss.cpu().data.numpy() / (j + 1)
                self.log.add_scalar('Loss/test', test_avg_loss, epoch)

                print("This is Epoch %d, training loss %.5f, validation loss %.5f" \
                      % (epoch, train_avg_loss, test_avg_loss ))

                # Model improving, save the model down
                if test_avg_loss < self.best_validation_loss:
                    self.best_validation_loss = test_avg_loss
                    self.save()
                    print("Saving the model down...")

                    if self.best_validation_loss < self.flags.stop_threshold:
                        print("Training finished EARLIER at epoch %d, reaching loss of %.5f" %\
                              (epoch, self.best_validation_loss))
                        break

            # Learning rate decay upon plateau
            self.lr_scheduler.step(train_avg_loss)
        self.log.close()
        tk.record(1)  # Record at the end of the training
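
The call self.lr_scheduler.step(train_avg_loss) at the bottom of the loop passes a metric, which implies make_lr_scheduler returns a plateau-type scheduler. A plausible sketch of the two factory methods (assumed, not shown in the snippets; the flag names lr, reg_scale, and lr_decay_rate are hypothetical):

    import torch

    def make_optimizer(self):
        # Adam with weight decay as the regularizer; flag names are assumptions
        return torch.optim.Adam(self.model.parameters(), lr=self.flags.lr,
                                weight_decay=self.flags.reg_scale)

    def make_lr_scheduler(self, optm):
        # ReduceLROnPlateau matches the metric-driven step(train_avg_loss) call above
        return torch.optim.lr_scheduler.ReduceLROnPlateau(
            optm, mode='min', factor=self.flags.lr_decay_rate, patience=10)
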
Example #14
    def evaluate(self,
                 save_dir='data/',
                 save_all=False,
                 MSE_Simulator=False,
                 save_misc=False,
                 save_Simulator_Ypred=False):
        """
        The function to evaluate how good the Neural Adjoint is and output results
        :param save_dir: The directory to save the results
        :param save_all: Save all the results instead of the best one (T_200 is the top 200 ones)
        :param MSE_Simulator: Use simulator loss to sort (DO NOT enable this unless your application can verify candidates quickly)
        :param save_misc: save all the details that are probably useless
        :param save_Simulator_Ypred: Save the Ypred that the Simulator gives
        (This is useful as it gives us the true Ypred instead of the Ypred that the network "thinks" it gets, which is
        usually inaccurate due to forward model error)
        :return:
        """
        try:
            bs = self.flags.generations  # for previous code that did not incorporate this
        except AttributeError:
            print("There is no attribute generations; caught the error and adding it now")
            self.flags.generations = 300
        cuda = torch.cuda.is_available()
        #if cuda:
        #    self.model.cuda()
        #self.model.eval()
        saved_model_str = self.saved_model.replace('/', '_')
        # Get the file names
        Ypred_file = os.path.join(save_dir,
                                  'test_Ypred_{}.csv'.format(saved_model_str))
        Xtruth_file = os.path.join(
            save_dir, 'test_Xtruth_{}.csv'.format(saved_model_str))
        Ytruth_file = os.path.join(
            save_dir, 'test_Ytruth_{}.csv'.format(saved_model_str))
        Xpred_file = os.path.join(save_dir,
                                  'test_Xpred_{}.csv'.format(saved_model_str))
        print("evalution output pattern:", Ypred_file)

        # Time keeping
        tk = time_keeper(
            time_keeping_file=os.path.join(save_dir, 'evaluation_time.txt'))

        # Open those files to append
        with open(Xtruth_file, 'w') as fxt, open(Ytruth_file, 'w') as fyt,\
                open(Ypred_file, 'w') as fyp, open(Xpred_file, 'w') as fxp:

            # Loop through the eval data and evaluate
            for ind, (geometry, spectra) in enumerate(self.test_loader):
                print("SAMPLE: {}".format(ind))

                if cuda:
                    geometry = geometry.cuda()
                    spectra = spectra.cuda()
                # Initialize the geometry first
                Xpred, Ypred, loss = self.evaluate_one(
                    spectra,
                    save_dir=save_dir,
                    save_all=save_all,
                    ind=ind,
                    MSE_Simulator=MSE_Simulator,
                    save_misc=save_misc,
                    save_Simulator_Ypred=save_Simulator_Ypred)
                tk.record(ind)  # Keep the time after each evaluation for backprop
                # self.plot_histogram(loss, ind)                                # Debugging purposes

                np.savetxt(fxt, geometry.cpu().data.numpy())
                np.savetxt(fyt, spectra.cpu().data.numpy())
                if self.flags.data_set != 'Yang_sim':
                    np.savetxt(fyp, Ypred)
                np.savetxt(fxp, Xpred)

                #if (ind+1)%self.flags.eval_step == 0:
                #    plotMSELossDistrib(Ypred_file,Ytruth_file,self.flags)

                if ind > (self.flags.xtra - 1):
                    print("THIS BREAK IS HIT!", self.flags.xtra)
                    break

        return Ypred_file, Ytruth_file
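
The Neural Adjoint examples (#4, #12, #14) all delegate to evaluate_one, which is not shown here. Its standard form optimizes a batch of candidate geometries by gradient descent through the frozen forward model. A heavily simplified sketch of that inner loop (an illustration of the technique, not the codebase's actual evaluate_one; boundary losses, resampling, and top-k bookkeeping are omitted):

    import torch

    def evaluate_one_sketch(model, spectra_target, dim_x, backprop_step,
                            n_candidates=1024, lr=0.5):
        """Gradient-descend candidate geometries through a frozen forward model."""
        device = spectra_target.device
        geometry = torch.rand(n_candidates, dim_x, device=device, requires_grad=True)
        optm = torch.optim.Adam([geometry], lr=lr)  # only the geometry is updated
        target = spectra_target.unsqueeze(0).expand(n_candidates, -1)
        for _ in range(backprop_step):
            optm.zero_grad()
            loss = torch.nn.functional.mse_loss(model(geometry), target)
            loss.backward()
            optm.step()
        with torch.no_grad():  # pick the candidate whose spectrum fits best
            best = torch.argmin(((model(geometry) - target) ** 2).mean(dim=1))
        return geometry[best].detach()
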
Example #15
    def train(self):
        """
        The major training function. This would start the training using information given in the flags
        :return: None
        """
        cuda = torch.cuda.is_available()
        if cuda:
            self.model.cuda()

        # Construct optimizer after the model moved to GPU
        self.optm = self.make_optimizer()
        self.lr_scheduler = self.make_lr_scheduler(self.optm)

        # Time keeping
        tk = time_keeper(
            time_keeping_file=os.path.join(self.ckpt_dir, 'training time.txt'))

        for epoch in range(self.flags.train_step):
            # Set to Training Mode
            train_loss = 0
            # boundary_loss = 0                 # Unnecessary during training since we provide geometries
            self.model.train()
            for j, (geometry, spectra) in enumerate(self.train_loader):
                if cuda:
                    geometry = geometry.cuda()  # Put data onto GPU
                    spectra = spectra.cuda()  # Put data onto GPU
                #print('spectra = ', spectra)
                #print('geometry = ', geometry)
                self.optm.zero_grad()  # Zero the gradient first
                pi, sigma, mu = self.model(spectra)  # Get the output
                #print('spectra = {}, pi, sigma, mu = {}, {}, {}'.format(spectra.cpu().numpy(),
                #                        pi.detach().cpu().numpy()[0,:],
                #                        sigma.detach().cpu().numpy()[0,:,0],
                #                        mu.detach().cpu().numpy()[0,:,0]))
                #print('geometry shape', geometry.size())
                loss = self.make_loss(pi, sigma, mu,
                                      geometry)  # Get the loss tensor
                #loss = self.make_loss(pi, sigma, mu, geometry, warmup=epoch)               # Get the loss tensor
                #Xpred = mdn.sample(pi, sigma, mu).detach().cpu().numpy()
                #Ypred = torch.tensor(simulator(self.flags.data_set, Xpred), requires_grad=False)
                #if cuda:
                #    Ypred = Ypred.cuda()
                #simulator_loss = nn.functional.mse_loss(Ypred, spectra).detach().cpu().numpy() # Get the loss tensor
                #print('nll loss at epoch {}, batch {} is {} '.format(epoch, j, loss.detach().cpu().numpy()))
                loss.backward()  # Calculate the backward gradients
                # gradient clipping
                torch.nn.utils.clip_grad_value_(self.model.parameters(), 1)
                self.optm.step()  # Move one step the optimizer
                train_loss += loss  # Aggregate the loss
                # boundary_loss += self.Boundary_loss                 # Aggregate the BDY loss

            # Calculate the avg loss of training
            train_avg_loss = train_loss.cpu().data.numpy() / (j + 1)
            # boundary_avg_loss = boundary_loss.cpu().data.numpy() / (j + 1)

            if epoch % self.flags.eval_step == 0:  # For eval steps, do the evaluations and tensor board
                # Record the training loss to the tensorboard
                self.log.add_scalar('Loss/train', train_avg_loss, epoch)
                #self.log.add_scalar('Loss/simulator_train', simulator_loss, epoch)
                # self.log.add_scalar('Loss/BDY_train', boundary_avg_loss, epoch)

                # Set to Evaluation Mode
                self.model.eval()
                print("Doing Evaluation on the model now")
                test_loss = 0
                for j, (geometry, spectra) in enumerate(
                        self.test_loader):  # Loop through the eval set
                    if cuda:
                        geometry = geometry.cuda()
                        spectra = spectra.cuda()
                    pi, sigma, mu = self.model(spectra)  # Get the output
                    if self.flags.data_set == 'meta_material':
                        loss = self.make_loss(pi, sigma, mu,
                                              geometry)  # Get the loss tensor
                        test_loss += loss.detach().cpu().numpy()
                    else:
                        Xpred = mdn.sample(pi, sigma, mu).detach().cpu().numpy()
                        Ypred_np = simulator(self.flags.data_set, Xpred)
                        mae, mse = compare_truth_pred(Ypred_np,
                                                      spectra.cpu().numpy(),
                                                      cut_off_outlier_thres=10,
                                                      quiet_mode=True)
                        test_loss += np.mean(mse)  # Aggregate the loss
                    break  # Only the first batch is needed for a quick validation estimate

                # Record the testing loss to the tensorboard
                test_avg_loss = test_loss / (j + 1)
                self.log.add_scalar('Loss/test', test_avg_loss, epoch)

                print("This is Epoch %d, training loss %.5f, validation loss %.5f"\
                      % (epoch, train_avg_loss, test_avg_loss ))
                #print("This is Epoch %d, training loss %.5f, validation loss %.5f, training simulator loss %.5f" \
                #      % (epoch, train_avg_loss, test_avg_loss, simulator_loss ))
                # Plotting the first spectra prediction for validation
                # f = self.compare_spectra(Ypred=logit[0,:].cpu().data.numpy(), Ytruth=spectra[0,:].cpu().data.numpy())
                # self.log.add_figure(tag='spectra compare',figure=f,global_step=epoch)

                # Model improving, save the model down
                if test_avg_loss < self.best_validation_loss:
                    self.best_validation_loss = test_avg_loss
                    self.save()
                    print("Saving the model down...")

                    if self.best_validation_loss < self.flags.stop_threshold:
                        print("Training finished EARLIER at epoch %d, reaching loss of %.5f" %\
                              (epoch, self.best_validation_loss))
                        return None

            # Learning rate decay upon plateau
            self.lr_scheduler.step(train_avg_loss)
        self.log.close()
        tk.record(1)  # Record the total time of the training period
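
make_loss for the MDN above is a mixture negative log-likelihood. A self-contained sketch under the same (N, K) / (N, K, D) tensor layout assumed earlier (a standard diagonal-Gaussian mixture NLL, not necessarily the codebase's exact implementation):

    import math
    import torch

    def mdn_nll(pi, sigma, mu, target):
        """Negative log-likelihood of target (N, D) under a diagonal Gaussian mixture."""
        target = target.unsqueeze(1)  # (N, 1, D) broadcasts against the K components
        # Per-component diagonal Gaussian log-density, summed over the D dimensions
        log_prob = -0.5 * (((target - mu) / sigma) ** 2
                           + 2 * torch.log(sigma)
                           + math.log(2 * math.pi)).sum(dim=2)  # (N, K)
        # Mix with the log weights, then log-sum-exp over components for stability
        return -torch.logsumexp(torch.log(pi + 1e-12) + log_prob, dim=1).mean()
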
Example #16
    def evaluate(self, save_dir='data/', prefix=''):
        self.load()  # load the model as constructed
        cuda = torch.cuda.is_available()
        if cuda:
            self.model_b.cuda()
            self.model_f.cuda()

        # Set to evaluation mode for batch_norm layers
        self.model_f.eval()
        self.model_b.eval()

        print('using data set simulator: ', self.flags.data_set)

        saved_model_str = self.saved_model.replace('/', '_') + prefix
        # Get the file names
        Ypred_file = os.path.join(save_dir,
                                  'test_Ypred_{}.csv'.format(saved_model_str))
        Xtruth_file = os.path.join(
            save_dir, 'test_Xtruth_{}.csv'.format(saved_model_str))
        Ytruth_file = os.path.join(
            save_dir, 'test_Ytruth_{}.csv'.format(saved_model_str))
        Xpred_file = os.path.join(save_dir,
                                  'test_Xpred_{}.csv'.format(saved_model_str))
        # For gaussian itself
        #Ypre_pred_file = os.path.join(save_dir, 'test_Ypre_pred_{}.csv'.format(saved_model_str))
        #YSIM_Truth_file = os.path.join(save_dir, 'test_YSim_truth_{}.csv'.format(saved_model_str))

        tk = time_keeper(os.path.join(save_dir, 'evaluation_time.txt'))
        # Open those files to append
        with open(Xtruth_file, 'a') as fxt, open(Ytruth_file, 'a') as fyt,\
                open(Ypred_file, 'a') as fyp, open(Xpred_file, 'a') as fxp: #,\
            #open(Ypre_pred_file, 'a') as fypp, open(YSIM_Truth_file, 'a') as fyst:
            # Loop through the eval data and evaluate
            for ind, (geometry, spectra) in enumerate(self.test_loader):
                """
                Older version when we have gaussian_mixture data back then
                if self.flags.data_set == 'gaussian_mixture':
                    spectra_origin = np.copy(spectra.cpu().data.numpy())
                    spectra = torch.nn.functional.one_hot(spectra.to(torch.int64), 4).to(torch.float) # Change the gaussian labels into one-hot
                np.savetxt(fxt, geometry.cpu().data.numpy())
                if self.flags.data_set == 'gaussian_mixture':
                    Xpred = self.model_b(spectra)
                    #Ypre_pred = self.model_f(Xpred).cpu().data.numpy()
                    Xpred = Xpred.cpu().data.numpy()
                    Ypred = simulator(self.flags.data_set, Xpred)
                    #Ysim_truth = simulator(self.flags.data_set, geometry.cpu().data.numpy())
                    #np.savetxt(fyst, Ysim_truth)
                    np.savetxt(fyp, Ypred)
                    np.savetxt(fxp, Xpred)
                    np.savetxt(fyt, spectra_origin)
                    #np.savetxt(fypp, Ypre_pred)
                else:
                """
                if cuda:
                    geometry = geometry.cuda()
                    spectra = spectra.cuda()
                Xpred = self.model_b(spectra)
                #Ypred = self.model_f(Xpred).cpu().data.numpy()
                np.savetxt(fyt, spectra.cpu().data.numpy())
                np.savetxt(fxt, geometry.cpu().data.numpy())
                if self.flags.data_set != 'meta_material':
                    Ypred = simulator(self.flags.data_set,
                                      Xpred.cpu().data.numpy())
                    np.savetxt(fyp, Ypred)
                if self.flags.data_set == 'ballistics':
                    Xpred[:, 3] *= 15  # ballistics-specific rescaling of the 4th geometry column
                np.savetxt(fxp, Xpred.cpu().data.numpy())
        tk.record(1)
        return Ypred_file, Ytruth_file
Example #17
    def train(self):
        """
        The major training function. This would start the training using information given in the flags
        :return: None
        """
        """
        Forward Training part
        """
        self.best_forward_validation_loss = self.best_validation_loss

        # Time keeping
        tk = time_keeper(
            time_keeping_file=os.path.join(self.ckpt_dir, 'training time.txt'))

        cuda = torch.cuda.is_available()
        if cuda:
            self.model_f.cuda()
            self.model_b.cuda()
        if self.load_forward_ckpt_dir is None:
            print("Start Forward Training now")
            # Construct optimizer after the model moved to GPU
            self.optm_f = self.make_optimizer_f()
            self.lr_scheduler = self.make_lr_scheduler(self.optm_f)

            for epoch in range(self.flags.train_step):
                # Set to Training Mode
                train_loss = 0
                # boundary_loss = 0                 # Unnecessary during training since we provide geometries
                self.model_f.train()
                for j, (geometry, spectra) in enumerate(self.train_loader):
                    if cuda:
                        geometry = geometry.cuda()  # Put data onto GPU
                        spectra = spectra.cuda()  # Put data onto GPU
                    self.optm_f.zero_grad()  # Zero the gradient first
                    S_out = self.model_f(geometry)  # Get the output
                    loss = self.make_loss(S_out,
                                          spectra)  # Get the loss tensor
                    loss.backward()  # Calculate the backward gradients
                    self.optm_f.step()  # Move one step the optimizer
                    train_loss += loss  # Aggregate the loss
                    # boundary_loss += self.Boundary_loss                   # Aggregate the BDY loss

                # Calculate the avg loss of training
                train_avg_loss = train_loss.cpu().data.numpy() / (j + 1)
                # boundary_avg_loss = boundary_loss.cpu().data.numpy() / (j + 1)

                if epoch % self.flags.eval_step == 0:  # For eval steps, do the evaluations and tensor board
                    # Record the training loss to the tensorboard
                    self.log.add_scalar('Loss/forward_train', train_avg_loss,
                                        epoch)
                    # self.log.add_scalar('Loss/BDY_train', boundary_avg_loss, epoch)

                    # Set to Evaluation Mode
                    self.model_f.eval()
                    print("Doing Evaluation on the forward model now")
                    test_loss = 0
                    for j, (geometry, spectra) in enumerate(
                            self.test_loader):  # Loop through the eval set
                        #if self.flags.data_set == 'gaussian_mixture':
                        #    spectra = torch.nn.functional.one_hot(spectra.to(torch.int64), 4).to(torch.float) # Change the gaussian labels into one-hot
                        if cuda:
                            geometry = geometry.cuda()
                            spectra = spectra.cuda()
                        logit = self.model_f(geometry)
                        loss = self.make_loss(logit,
                                              spectra)  # compute the loss
                        test_loss += loss  # Aggregate the loss

                    # Record the testing loss to the tensorboard
                    test_avg_loss = test_loss.cpu().data.numpy() / (j + 1)
                    self.log.add_scalar('Loss/forward_test', test_avg_loss,
                                        epoch)

                    print("This is Epoch %d, training loss %.5f, validation loss %.5f" \
                          % (epoch, train_avg_loss, test_avg_loss ))

                    # Model improving, save the model down
                    if test_avg_loss < self.best_forward_validation_loss:
                        self.best_forward_validation_loss = test_avg_loss
                        self.save_f()
                        print("Saving the model down...")

                        if self.best_forward_validation_loss < self.flags.stop_threshold:
                            print("Training finished EARLIER at epoch %d, reaching loss of %.5f" %\
                                  (epoch, self.best_forward_validation_loss))
                            break

                # Learning rate decay upon plateau
                self.lr_scheduler.step(train_avg_loss)
        else:
            print(
                "Loading the pre-trained forward model instead of training it")
            self.load_f()
            # Set to Evaluation Mode
            self.model_f.eval()
            print("Doing Evaluation on the forward model now")
            test_loss = 0
            for j, (geometry, spectra) in enumerate(
                    self.test_loader):  # Loop through the eval set
                if cuda:
                    geometry = geometry.cuda()
                    spectra = spectra.cuda()
                logit = self.model_f(geometry)
                loss = self.make_loss(logit, spectra)  # compute the loss
                test_loss += loss  # Aggregate the loss

            # Record the testing loss to the tensorboard
            test_avg_loss = test_loss.cpu().data.numpy() / (j + 1)
            print("Test loss of forward model loaded is", test_avg_loss)
        """
        Backward Training Part
        """
        self.model_b.train()
        self.model_f.eval()
        print("Now, start Backward Training")
        # Construct optimizer after the model moved to GPU
        self.optm_b = self.make_optimizer_b()
        self.lr_scheduler = self.make_lr_scheduler(self.optm_b)

        for epoch in range(self.flags.train_step):
            # Set to Training Mode
            train_loss = 0
            # boundary_loss = 0                 # Unnecessary during training since we provide geometries
            self.model_b.train()
            self.model_f.eval()
            for j, (geometry, spectra) in enumerate(self.train_loader):
                if cuda:
                    geometry = geometry.cuda()  # Put data onto GPU
                    spectra = spectra.cuda()  # Put data onto GPU
                self.optm_b.zero_grad()  # Zero the gradient first
                #if self.flags.data_set == 'gaussian_mixture':
                #    spectra = torch.nn.functional.one_hot(spectra.to(torch.int64), 4).to(torch.float) # Change the gaussian labels into one-hot
                G_out = self.model_b(spectra)  # Get the geometry prediction
                # print("G_out.size", G_out.size())
                S_out = self.model_f(G_out)  # Get the spectra prediction
                loss = self.make_loss(S_out, spectra,
                                      G=G_out)  # Get the loss tensor
                loss.backward()  # Calculate the backward gradients
                self.optm_b.step()  # Move one step the optimizer
                train_loss += loss  # Aggregate the loss
                # boundary_loss += self.Boundary_loss                   # Aggregate the BDY loss

            # Testing code #
            #if epoch == self.flags.train_step - 1:
            #    print('Training Ypred is', S_out.cpu().data.numpy())
            #    print('Training Ytruth is', spectra.cpu().data.numpy())

            # Calculate the avg loss of training
            train_avg_loss = train_loss.cpu().data.numpy() / (j + 1)
            # boundary_avg_loss = boundary_loss.cpu().data.numpy() / (j + 1)

            if epoch % self.flags.eval_step == 0:  # For eval steps, do the evaluations and tensor board
                # Record the training loss to the tensorboard
                self.log.add_scalar('Loss/backward_train', train_avg_loss,
                                    epoch)
                self.log.add_scalar('Loss/BDY_train',
                                    self.Boundary_loss.cpu().data.numpy(),
                                    epoch)

                # Set to Evaluation Mode
                self.model_b.eval()
                self.model_f.eval()
                print("Doing Evaluation on the backward model now")
                test_loss = 0
                for j, (geometry, spectra) in enumerate(
                        self.test_loader):  # Loop through the eval set
                    if cuda:
                        geometry = geometry.cuda()
                        spectra = spectra.cuda()
                    G_out = self.model_b(
                        spectra)  # Get the geometry prediction
                    S_out = self.model_f(G_out)  # Get the spectra prediction
                    loss = self.make_loss(S_out, spectra,
                                          G=G_out)  # compute the loss
                    test_loss += loss  # Aggregate the loss

                # Record the testing loss to the tensorboard
                test_avg_loss = test_loss.cpu().data.numpy() / (j + 1)
                self.log.add_scalar('Loss/backward_test', test_avg_loss, epoch)
                self.log.add_scalar('Loss/BDY_test',
                                    self.Boundary_loss.cpu().data.numpy(),
                                    epoch)

                # Testing code #
                #print('Testing Ypred is', S_out.cpu().data.numpy())
                #print('Testing Ytruth is', spectra.cpu().data.numpy())

                print("This is Epoch %d, training loss %.5f, validation loss %.5f" \
                      % (epoch, train_avg_loss, test_avg_loss))

                # Model improving, save the model down
                if test_avg_loss < self.best_validation_loss:
                    self.best_validation_loss = test_avg_loss
                    self.save_b()
                    print("Saving the backward model down...")

                    if self.best_validation_loss < -1:  #self.flags.stop_threshold:
                        print("Training finished EARLIER at epoch %d, reaching loss of %.5f" % \
                              (epoch, self.best_validation_loss))
                        self.log.close()
                        break

            # Learning rate decay upon plateau
            self.lr_scheduler.step(train_avg_loss)
        self.log.close()
        tk.record(1)  # Record the total time of the training period
        """
Example #18
    def train(self):
        """
        The major training function. This would start the training using information given in the flags
        :return: None
        """
        print("Starting training now")
        cuda = torch.cuda.is_available()
        if cuda:
            self.model.cuda()

        # Construct optimizer after the model moved to GPU
        self.optm = self.make_optimizer()
        self.lr_scheduler = self.make_lr_scheduler(self.optm)

        dim_x = self.flags.dim_x
        dim_y = self.flags.dim_y
        dim_z = self.flags.dim_z
        dim_tot = self.flags.dim_tot

        # Time keeping
        tk = time_keeper(
            time_keeping_file=os.path.join(self.ckpt_dir, 'training time.txt'))

        for epoch in range(self.flags.train_step):
            # Set to Training Mode
            train_loss = 0
            self.model.train()
            # If MMD on x-space is present from the start, the model can get stuck.
            # Instead, ramp it up exponentially.
            loss_factor = min(
                1., 2. * 0.002**(1. - (float(epoch) / self.flags.train_step)))

            for j, (x, y) in enumerate(self.train_loader):
                batch_size = len(x)
                if self.flags.data_set == 'gaussian_mixture':
                    y = y.unsqueeze(1)

                ######################
                # Preparing the data #
                ######################
                # Pad the x, y with zero_noise
                y_clean = y.clone()  # keep a copy of y for backward
                x_pad = self.flags.zeros_noise_scale * torch.randn(
                    batch_size, dim_tot - dim_x)
                y_pad = self.flags.zeros_noise_scale * torch.randn(
                    batch_size, dim_tot - dim_y - dim_z)
                z = torch.randn(batch_size, dim_z)
                if cuda:
                    x = x.cuda()  # Put data onto GPU
                    y = y.cuda()  # Put data onto GPU
                    x_pad = x_pad.cuda()
                    y_pad = y_pad.cuda()
                    y_clean = y_clean.cuda()
                    z = z.cuda()

                # Concatenate the x and y with pads and add a small perturbation to y
                y += self.flags.y_noise_scale * torch.randn(
                    batch_size, dim_y, device=device)

                x, y = torch.cat((x, x_pad), dim=1), torch.cat((z, y_pad, y),
                                                               dim=1)

                ################
                # Forward step #
                ################
                self.optm.zero_grad()  # Zero the gradient first
                ypred = self.model(x)  # Get the Ypred
                #y_without_pad = torch.cat((y[:, :dim_z], y[:, -dim_y:]), dim=1)

                # Do the same thing for ypred
                #y_block_grad = torch.cat((ypred[:, :dim_z], ypred[:, -dim_y:]), dim=1)

                # Do the MSE loss for reconstruction, Doesn't compare z part (only pad and y itself)
                MSE_loss_y = self.make_loss(logit=ypred[:, dim_z:],
                                            labels=y[:, dim_z:])

                # Get the MMD loss for latent
                #MMD_loss_latent = self.MMD(y_block_grad, y_without_pad)
                #Forward_loss = self.flags.lambda_mse * MSE_loss_y + self.flags.lambda_z * MMD_loss_latent

                # Use the maximum likelihood method
                log_det = self.model.log_jacobian(x=x)
                #print("The log determinant is", log_det)
                Forward_loss = 0.5 * (
                    MSE_loss_y / self.flags.lambda_mse +
                    torch.mean(torch.pow(z, 2))) - torch.mean(log_det)
                Forward_loss.backward()
                """
                For a maximum likelihood method, there is no inverse step
                #################
                # Backward step #
                #################
                # Create random value for the padding for yz
                pad_yz = self.flags.zeros_noise_scale * torch.randn(batch_size,
                                                                    dim_tot - dim_y - dim_z, device=device)
                # Add noise to the backward y value
                y = y_clean + self.flags.y_noise_scale * torch.randn(batch_size, dim_y, device=device)

                # Create a noisy z vector with noise level same as y
                noise_on_z = self.flags.y_noise_scale * torch.randn(batch_size, dim_z, device=device)

                # Add the noise to the outcome of z
                orig_z_perturbed = ypred.data[:, :dim_z] + noise_on_z

                # Set up the input of reverse network
                y_rev = torch.cat((orig_z_perturbed, pad_yz, y), dim=1)

                rand_z = torch.randn(batch_size, dim_z, device=device)
                # set up the randomized input of the reverse network
                y_rev_rand = torch.cat((rand_z, pad_yz, y), dim=1)

                # Get the output of the inverse model
                xpred_rev = self.model(y_rev, rev=True)
                xpred_rev_rand = self.model(y_rev_rand, rev=True)

                # Set the Losses
                MMD_loss_x = self.MMD(xpred_rev_rand[:, :dim_x], x[:, :dim_x])
                MSE_loss_x = self.make_loss(xpred_rev, x)

                Backward_loss = self.flags.lambda_mse * MSE_loss_x + \
                                loss_factor * self.flags.lambda_rev * MMD_loss_x

                Backward_loss.backward()
                """
                ######################
                #  Gradient Clipping #
                ######################
                for parameter in self.model.parameters():
                    parameter.grad.data.clamp_(-self.flags.grad_clamp,
                                               self.flags.grad_clamp)

                #########################
                # Descent your gradient #
                #########################
                self.optm.step()  # Move one step the optimizer

                # L2 + MMD training
                #train_loss += Backward_loss + Forward_loss                                  # Aggregate the loss
                # MLE training
                train_loss += Forward_loss

            # Calculate the avg loss of training
            train_avg_loss = train_loss.cpu().data.numpy() / (j + 1)

            if epoch % self.flags.eval_step == 0:  # For eval steps, do the evaluations and tensor board
                # Record the training loss to the tensorboard
                self.log.add_scalar('Loss/total_train', train_avg_loss, epoch)
                self.log.add_scalar('Loss/MSE_y_train', MSE_loss_y, epoch)
                #self.log.add_scalar('Loss/MSE_x_train', MSE_loss_x, epoch)
                #self.log.add_scalar('Loss/MMD_z_train', MMD_loss_latent, epoch)
                #self.log.add_scalar('Loss/MMD_x_train', MMD_loss_x, epoch)

                # Set to Evaluation Mode
                self.model.eval()
                print("Doing Evaluation on the model now")

                test_loss = 0
                for j, (x, y) in enumerate(
                        self.test_loader):  # Loop through the eval set
                    batch_size = len(x)
                    if self.flags.data_set == 'gaussian_mixture':
                        y = y.unsqueeze(1)

                    ######################
                    # Preparing the data #
                    ######################
                    # Pad the x, y with zero_noise
                    y_clean = y.clone()  # keep a copy of y for backward
                    x_pad = self.flags.zeros_noise_scale * torch.randn(
                        batch_size, dim_tot - dim_x)
                    y_pad = self.flags.zeros_noise_scale * torch.randn(
                        batch_size, dim_tot - dim_y - dim_z)
                    z = torch.randn(batch_size, dim_z)
                    if cuda:
                        x = x.cuda()  # Put data onto GPU
                        y = y.cuda()  # Put data onto GPU
                        x_pad = x_pad.cuda()
                        y_pad = y_pad.cuda()
                        y_clean = y_clean.cuda()
                        z = z.cuda()

                    # Concatenate x and y with the pads and add a small perturbation to y
                    y += self.flags.y_noise_scale * torch.randn(
                        batch_size, dim_y, device=device)

                    x, y = torch.cat((x, x_pad), dim=1), torch.cat(
                        (z, y_pad, y), dim=1)

                    ################
                    # Forward step #
                    ################
                    self.optm.zero_grad()  # Zero the gradient first
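                    # Added note: no backward pass follows in this eval loop, so the
                    # zero_grad() above is unnecessary; wrapping the loop in
                    # torch.no_grad() would also save memory (a suggestion, not source).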
                    ypred = self.model(x)  # Get the Ypred
                    #y_without_pad = torch.cat((y[:, :dim_z], y[:, -dim_y:]), dim=1)

                    # Do the same thing for ypred
                    #y_block_grad = torch.cat((ypred[:, :dim_z], ypred[:, -dim_y:]), dim=1)

                    # Do the MSE loss for reconstruction, Doesn't compare z part (only pad and y itself)
                    MSE_loss_y = self.make_loss(logit=ypred[:, dim_z:],
                                                labels=y[:, dim_z:])

                    # Get the MMD loss for latent
                    #MMD_loss_latent = self.MMD(y_block_grad, y_without_pad)
                    #Forward_loss = self.flags.lambda_mse * MSE_loss_y + self.flags.lambda_z * MMD_loss_latent
                    log_det = self.model.log_jacobian(x=x)
                    #print("The log determinant is", log_det)
                    Forward_loss = 0.5 * (
                        MSE_loss_y / self.flags.lambda_mse +
                        torch.mean(torch.pow(z, 2))) - torch.mean(log_det)
                    """
                    #################
                    # Backward step #
                    #################
                    # Create random value for the padding for yz
                    pad_yz = self.flags.zeros_noise_scale * torch.randn(batch_size,
                                                                        dim_tot - dim_y - dim_z, device=device)
                    # Add noise to the backward y value
                    y = y_clean + self.flags.y_noise_scale * torch.randn(batch_size, dim_y, device=device)

                    # Create a noisy z vector with noise level same as y
                    noise_on_z = self.flags.y_noise_scale * torch.randn(batch_size, dim_z, device=device)

                    # Add the noise to the outcome of z
                    orig_z_perturbed = ypred.data[:, :dim_z] + noise_on_z

                    # Set up the input of reverse network
                    y_rev = torch.cat((orig_z_perturbed, pad_yz, y), dim=1)

                    rand_z = torch.randn(batch_size, dim_z, device=device)
                    # set up the randomized input of reverse network
                    y_rev_rand = torch.cat((rand_z, pad_yz, y), dim=1)

                    # Get the output of the inverse model
                    xpred_rev = self.model(y_rev, rev=True)
                    xpred_rev_rand = self.model(y_rev_rand, rev=True)

                    # Set the Losses
                    MMD_loss_x = self.MMD(xpred_rev_rand[:, :dim_x], x[:, :dim_x])
                    MSE_loss_x = self.make_loss(xpred_rev, x)

                    Backward_loss = self.flags.lambda_mse * MSE_loss_x + \
                                    loss_factor * self.flags.lambda_rev * MMD_loss_x


                    test_loss += Backward_loss + Forward_loss  # Aggregate the loss
                    """
                    test_loss += Forward_loss  # Aggregate the loss

                # Record the testing loss to the tensorboard
                test_avg_loss = test_loss.cpu().data.numpy() / (j + 1)

                self.log.add_scalar('Loss/total_test', test_avg_loss, epoch)
                self.log.add_scalar('Loss/MSE_y_test', MSE_loss_y, epoch)
                #self.log.add_scalar('Loss/MSE_x_test', MSE_loss_x, epoch)
                #self.log.add_scalar('Loss/MMD_z_test', MMD_loss_latent, epoch)
                #self.log.add_scalar('Loss/MMD_x_test', MMD_loss_x, epoch)

                print("This is Epoch %d, training loss %.5f, validation loss %.5f" \
                      % (epoch, train_avg_loss, test_avg_loss ))

                # Model improving, save the model down
                if test_avg_loss < self.best_validation_loss:
                    self.best_validation_loss = test_avg_loss
                    self.save()
                    print("Saving the model down...")

                    if self.best_validation_loss < self.flags.stop_threshold:
                        print("Training finished EARLIER at epoch %d, reaching loss of %.5f" %\
                              (epoch, self.best_validation_loss))
                        break

            # Learning rate decay upon plateau
            self.lr_scheduler.step(train_avg_loss)
        tk.record(1)  # Record the total time of the training period

    def train(self):
        """
        The major training function. This would start the training using information given in the flags
        :return: None
        """
        cuda = True if torch.cuda.is_available() else False
        if cuda:
            self.model.cuda()

        # Construct optimizer after the model moved to GPU
        self.optm = self.make_optimizer()
        self.lr_scheduler = self.make_lr_scheduler(self.optm)

        # Time keeping
        tk = time_keeper(
            time_keeping_file=os.path.join(self.ckpt_dir, 'training time.txt'))

        # Set up the total number of training samples allowed to see
        total_training_samples = 0
        train_end_flag = False
        for epoch in range(self.flags.train_step):
            if train_end_flag:  # Training is ended due to max sample reached
                break
            # Set to Training Mode
            epoch_samples = 0
            metrics = defaultdict(float)
            self.model.train()
            iou_sum_train = 0
            for j, sample in enumerate(self.train_loader):
                inputs = sample['image']  # Get the input
                labels = sample['labels']  # Get the labels
                if cuda:
                    inputs = inputs.cuda()  # Put data onto GPU
                    labels = labels.cuda()  # Put data onto GPU
                self.optm.zero_grad()  # Zero the gradient first
                logit = self.model(inputs.float())  # Get the output
                loss = self.make_loss(logit,
                                      labels,
                                      metrics,
                                      bce_weight=self.flags.bce_weight,
                                      boundary_weight=self.flags.
                                      boundary_weight)  # Get the loss tensor
                loss.backward()  # Calculate the backward gradients
                self.optm.step()  # Move one step the optimizer
                epoch_samples += inputs.size(0)
                total_training_samples += inputs.size(0)

                # change from epoch base to mini-batch base
                if j % self.flags.eval_step == 0:
                    IoU = self.compute_iou(logit, labels)
                    iou_sum_train += IoU
                    IoU_aggregate = iou_sum_train / total_training_samples
                    self.print_metrics(metrics, epoch_samples, 'training')
                    print('training IoU in current batch {}:'.format(j), IoU)
                    print('training IoU up to batch {}:'.format(j),
                          IoU_aggregate)
                    self.log.add_scalar('training/bce',
                                        metrics['bce'] / epoch_samples, j)
                    self.log.add_scalar('training/dice',
                                        metrics['dice'] / epoch_samples, j)
                    self.log.add_scalar('training/loss',
                                        metrics['loss'] / epoch_samples, j)
                    self.log.add_scalar('training/IoU', IoU, j)
                    # Set eval mode
                    self.model.eval()
                    # Run a quick evaluation pass on the test set
                    test_epoch_samples = 0
                    test_metrics = defaultdict(float)
                    iou_sum = 0
                    for jj, sample in enumerate(self.test_loader):
                        inputs = sample['image']  # Get the input
                        labels = sample['labels']  # Get the labels
                        if cuda:
                            inputs = inputs.cuda()  # Put data onto GPU
                            labels = labels.cuda()  # Put data onto GPU
                        self.optm.zero_grad()  # Zero the gradient first
                        logit = self.model(inputs.float())  # Get the output
                        loss = self.make_loss(logit,
                                              labels,
                                              test_metrics,
                                              bce_weight=self.flags.bce_weight
                                              )  # Get the loss tensor
                        test_epoch_samples += inputs.size(0)
                        IoU = self.compute_iou(logit, labels)
                        iou_sum += IoU
                        if test_epoch_samples > self.flags.max_test_sample:
                            break
                    IoU = iou_sum / (jj + 1)
                    self.print_metrics(test_metrics, test_epoch_samples, 'testing')
                    print('IoU in current test batch is', IoU)
                    self.log.add_scalar(
                        'test/bce', test_metrics['bce'] / test_epoch_samples,
                        j)
                    self.log.add_scalar(
                        'test/dice', test_metrics['dice'] / test_epoch_samples,
                        j)
                    self.log.add_scalar(
                        'test/loss', test_metrics['loss'] / test_epoch_samples,
                        j)
                    self.log.add_scalar('test/IoU', IoU, j)
                    self.plot_eval_graph(inputs.cpu().numpy(),
                                         logit.detach().cpu().numpy(),
                                         labels.detach().cpu().numpy(), j)
                    #raise Exception("Testing stop point for getting shapes")
                    # Switch back to training mode after the evaluation pass
                    self.model.train()

                if loss.cpu().data.numpy() < self.best_validation_loss:
                    self.best_validation_loss = loss.cpu().data.numpy()
                # Learning rate decay upon plateau
                self.lr_scheduler.step(loss)
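                # Added caveat: stepping a plateau scheduler on every raw batch loss
                # is noisy; stepping once per epoch on a validation metric is the
                # more common pattern.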

                if total_training_samples > self.flags.max_train_sample:
                    print(
                        "Maximum training samples requirement met; trained on more than",
                        total_training_samples, "samples.")
                    train_end_flag = True
                    break

        self.log.close()
        tk.record(999)  # Record at the end of the training

        # Save the module at the end
        self.save()
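
# A minimal sketch of what `compute_iou` used above might look like for binary
# segmentation. The 0.5 threshold on sigmoid(logit) and the epsilon are
# assumptions; the actual helper is not shown in this example.
import torch

def compute_iou_sketch(logit, labels, eps=1e-6):
    """Mean intersection-over-union of thresholded predictions vs. labels."""
    pred = (torch.sigmoid(logit) > 0.5).float()
    intersection = (pred * labels).sum(dim=(-2, -1))
    union = ((pred + labels) > 0).float().sum(dim=(-2, -1))
    return ((intersection + eps) / (union + eps)).mean().item()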
Example #20
0
    def train(self):
        """
        The major training function. This would start the training using information given in the flags
        :return: None
        """
        print("Starting training now")
        cuda = True if torch.cuda.is_available() else False
        if cuda:
            self.model.cuda()

        # Construct optimizer after the model moved to GPU
        self.optm = self.make_optimizer()
        self.lr_scheduler = self.make_lr_scheduler(self.optm)

        # Time keeping
        tk = time_keeper(
            time_keeping_file=os.path.join(self.ckpt_dir, 'training time.txt'))

        for epoch in range(self.flags.train_step):
            # Set to Training Mode
            train_loss = 0
            loss_aggregate_list = np.array([0., 0.,
                                            0.])  # kl_loss, mse_loss, bdy_loss
            self.model.train()
            for j, (geometry, spectra) in enumerate(self.train_loader):
                if self.flags.data_set == 'gaussian_mixture':
                    spectra = spectra.unsqueeze(1)
                if cuda:
                    geometry = geometry.cuda()  # Put data onto GPU
                    spectra = spectra.cuda()  # Put data onto GPU
                self.optm.zero_grad()  # Zero the gradient first
                G_pred, z_mean, z_log_var = self.model(geometry,
                                                       spectra)  # Get G_pred
                loss, loss_list = self.make_loss(logit=G_pred,
                                                 labels=geometry,
                                                 z_mean=z_mean,
                                                 z_log_var=z_log_var)
                loss.backward()  # Calculate the backward gradients
                self.optm.step()  # Move one step the optimizer
                train_loss += loss  # Aggregate the loss
                loss_aggregate_list += loss_list  # Aggregate the other loss (in np form)

            # Calculate the avg loss of training
            train_avg_loss = train_loss.cpu().data.numpy() / (j + 1)
            loss_aggregate_list /= (j + 1)

            if epoch % self.flags.eval_step == 0:  # On eval steps, run evaluation and log to TensorBoard
                # Record the training loss to the tensorboard
                self.log.add_scalar('Loss/total_train', train_avg_loss, epoch)
                self.log.add_scalar('Loss/kl_train', loss_aggregate_list[0],
                                    epoch)
                self.log.add_scalar('Loss/mse_train', loss_aggregate_list[1],
                                    epoch)
                self.log.add_scalar('Loss/bdy_train', loss_aggregate_list[2],
                                    epoch)
                self.log.add_histogram('z_mean',
                                       z_mean.cpu().data.numpy(), epoch)
                self.log.add_histogram('z_log_var',
                                       z_log_var.cpu().data.numpy(), epoch)

                # Set to Evaluation Mode
                self.model.eval()
                print("Doing Evaluation on the model now")
                test_loss = 0
                loss_aggregate_list = np.array(
                    [0., 0., 0.])  # kl_loss, mse_loss, bdy_loss
                for j, (geometry, spectra) in enumerate(
                        self.test_loader):  # Loop through the eval set
                    if self.flags.data_set == 'gaussian_mixture':
                        spectra = spectra.unsqueeze(1)
                    if cuda:
                        geometry = geometry.cuda()
                        spectra = spectra.cuda()
                    G_pred, z_mean, z_log_var = self.model(
                        geometry, spectra)  # Get G_pred
                    loss, loss_list = self.make_loss(
                        logit=G_pred,
                        labels=geometry,
                        z_mean=z_mean,
                        z_log_var=z_log_var)  # Get the loss tensor
                    test_loss += loss  # Aggregate the loss
                    loss_aggregate_list += loss_list  # Aggregate the other loss (in np form)

                # Record the testing loss to the tensorboard
                test_avg_loss = test_loss.cpu().data.numpy() / (j + 1)
                loss_aggregate_list /= (j + 1)
                self.log.add_scalar('Loss/total_test', test_avg_loss, epoch)
                self.log.add_scalar('Loss/kl_test', loss_aggregate_list[0],
                                    epoch)
                self.log.add_scalar('Loss/mse_test', loss_aggregate_list[1],
                                    epoch)
                self.log.add_scalar('Loss/bdy_test', loss_aggregate_list[2],
                                    epoch)

                print("This is Epoch %d, training loss %.5f, validation loss %.5f" \
                      % (epoch, train_avg_loss, test_avg_loss ))

                # Model improving, save the model down
                if test_avg_loss < self.best_validation_loss:
                    self.best_validation_loss = test_avg_loss
                    self.save()
                    print("Saving the model down...")

                    if self.best_validation_loss < self.flags.stop_threshold:
                        print("Training finished EARLIER at epoch %d, reaching loss of %.5f" %\
                              (epoch, self.best_validation_loss))
                        break

            # Learning rate decay upon plateau
            self.lr_scheduler.step(train_avg_loss)
        tk.record(1)  # Record the total time of the training period
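
# A minimal sketch of a `make_loss` compatible with the VAE loop above, returning
# a scalar loss plus the [kl, mse, bdy] breakdown it aggregates. The boundary term
# is a hypothetical penalty for leaving [-1, 1]; the real helper is not shown.
import numpy as np
import torch
import torch.nn.functional as F

def make_vae_loss_sketch(logit, labels, z_mean, z_log_var, kl_weight=1.0):
    mse = F.mse_loss(logit, labels)
    # KL(q(z|x) || N(0, I)) for a diagonal Gaussian posterior
    kl = -0.5 * torch.mean(1 + z_log_var - z_mean.pow(2) - z_log_var.exp())
    bdy = torch.mean(F.relu(logit.abs() - 1))  # hypothetical boundary penalty
    loss = kl_weight * kl + mse + bdy
    return loss, np.array([kl.item(), mse.item(), bdy.item()])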
Example #21
0
    def evaluate(self,
                 save_dir='data/',
                 save_all=False,
                 MSE_Simulator=False,
                 save_misc=False,
                 save_Simulator_Ypred=False):
        """
        The function to evaluate how good the Neural Adjoint is and output results
        :param save_dir: The directory to save the results
        :param save_all: Save all the results instead of the best one (T_200 is the top 200 ones)
        :param MSE_Simulator: Use simulator loss to sort (do not enable this unless your application can verify candidates quickly)
        :param save_misc: save all the details that are probably useless
        :param save_Simulator_Ypred: Save the Ypred that the Simulator gives
        (This is useful as it gives us the true Ypred instead of the Ypred that the network "thinks" it gets, which is
        usually inaccurate due to forward model error)
        :return:
        """
        self.load()  # load the model as constructed
        try:
            _ = self.flags.backprop_step  # Probe for the attribute; older flags did not include it
        except AttributeError:
            print(
                "There is no attribute backprop_step; caught the error and adding a default now"
            )
            self.flags.backprop_step = 300
        cuda = True if torch.cuda.is_available() else False
        if cuda:
            self.model.cuda()
        self.model.eval()
        saved_model_str = self.saved_model.replace('/', '_')
        #saved_model_str = self.flags.eval_model
        # Get the file names
        Ypred_file = os.path.join(save_dir,
                                  'test_Ypred_{}.csv'.format(saved_model_str))
        Xtruth_file = os.path.join(
            save_dir, 'test_Xtruth_{}.csv'.format(saved_model_str))
        Ytruth_file = os.path.join(
            save_dir, 'test_Ytruth_{}.csv'.format(saved_model_str))
        Xpred_file = os.path.join(save_dir,
                                  'test_Xpred_{}.csv'.format(saved_model_str))
        print("evalution output pattern:", Ypred_file)

        # Logging
        tk = time_keeper(
            time_keeping_file=os.path.join(save_dir, 'evaluation_time.txt'))
        min_mse_hist = np.empty(len(self.test_loader))

        # Open those files to append
        with open(Xtruth_file, 'a') as fxt,open(Ytruth_file, 'a') as fyt,\
                open(Ypred_file, 'a') as fyp, open(Xpred_file, 'a') as fxp:
            # Loop through the eval data and evaluate
            for ind, (geometry, spectra) in enumerate(self.test_loader):
                print("Sample #:\t", ind, " / ", len(self.test_loader))
                if cuda:
                    geometry = geometry.cuda()
                    spectra = spectra.cuda()
                # Initialize the geometry first
                Xpred, Ypred, loss = self.evaluate_one(
                    spectra,
                    save_dir=save_dir,
                    save_all=save_all,
                    ind=ind,
                    MSE_Simulator=MSE_Simulator,
                    save_misc=save_misc,
                    save_Simulator_Ypred=save_Simulator_Ypred)
                tk.record(
                    ind)  # Keep the time after each evaluation for backprop

                if save_misc:
                    np.savetxt(
                        'visualize_final/point{}_Xtruth.csv'.format(ind),
                        geometry.cpu().data.numpy())
                    np.savetxt(
                        'visualize_final/point{}_Ytruth.csv'.format(ind),
                        spectra.cpu().data.numpy())
                # Printing is suppressed here to keep evaluation timing accurate
                geo = geometry.cpu().data.numpy()
                spec = spectra.cpu().data.numpy()
                np.savetxt(fxt, geo)
                np.savetxt(fyt, spec)
                if self.flags.data_set != 'meta_material':
                    np.savetxt(fyp, Ypred)
                np.savetxt(fxp, Xpred)

                min_mse = np.min(loss)
                min_mse_hist[ind] = min_mse  # Keep a history of the per-sample minimum MSE

                # Logging
                self.log.add_scalar('NA/avg_mse', np.mean(loss), ind)
                self.log.add_scalar('NA/min_mse', min_mse, ind)
        '''
        f = plt.figure()
        for i,c in enumerate([0.2,0.5,0.8]):
            plt.plot(range(len(spec[0])),worst_samples['truth_spect'][i], '-.',color=c)
            plt.plot(range(len(spec[0])),worst_samples['pred_spect'][i], '-',color=c,
                     label=np.round(worst_samples['mse'][i],6))
        plt.xlabel("Wavelength")
        plt.ylabel("Spectra")
        plt.legend(loc="upper left")
        plt.suptitle("3 Random Spectral Fits")
        plt.savefig(os.path.join('data','misc_fits_{}.png'.format(saved_model_str)))
        '''

        return Ypred_file, Ytruth_file
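
# A minimal sketch of the Neural Adjoint inner loop that `evaluate_one` above
# presumably runs: freeze the trained forward model and gradient-descend on the
# input geometry until its predicted spectrum matches the target. The names,
# optimizer choice, and random initialization are assumptions.
import torch

def neural_adjoint_sketch(forward_model, target_spectra, dim_x, steps=300, lr=0.01):
    geometry = torch.randn(target_spectra.shape[0], dim_x, requires_grad=True)
    optm = torch.optim.Adam([geometry], lr=lr)
    for _ in range(steps):
        optm.zero_grad()
        loss = torch.mean((forward_model(geometry) - target_spectra) ** 2)
        loss.backward()
        optm.step()
    return geometry.detach()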
Example #22
0
    def evaluate(self,
                 save_dir='data/',
                 save_all=False,
                 MSE_Simulator=False,
                 save_misc=False,
                 save_Simulator_Ypred=True):
        """
        The function to evaluate how good the models is (outputs validation loss)
        Note that Ypred and Ytruth still refer to spectra, while Xpred and Xtruth still refer to geometries.
        :return:
        """

        self.load()  # load the model as constructed

        cuda = True if torch.cuda.is_available() else False
        if cuda:
            self.model.cuda()
        self.model.eval()
        saved_model_str = self.saved_model.replace('/', '_')
        # Get the file names
        Ypred_file = os.path.join(save_dir, 'test_Ypred_{}.csv'.format(
            saved_model_str))  #Input associated? No real value
        Xtruth_file = os.path.join(save_dir, 'test_Xtruth_{}.csv'.format(
            saved_model_str))  #Output to compare against
        Ytruth_file = os.path.join(
            save_dir,
            'test_Ytruth_{}.csv'.format(saved_model_str))  #Input of Neural Net
        Xpred_file = os.path.join(
            save_dir,
            'test_Xpred_{}.csv'.format(saved_model_str))  #Output of Neural Net
        print("evalution output pattern:", Ypred_file)

        # Time keeping
        tk = time_keeper(
            time_keeping_file=os.path.join(save_dir, 'evaluation_time.txt'))

        # Open those files to append
        with open(Xtruth_file, 'w') as fxt,open(Ytruth_file, 'w') as fyt,\
                open(Ypred_file, 'w') as fyp, open(Xpred_file, 'w') as fxp:

            # Evaluate a single batch drawn from the eval data
            geometry, spectra = next(iter(self.test_loader))

            if cuda:
                geometry = geometry.cuda()
                spectra = spectra.cuda()

            # Predict the geometry directly from the spectra
            Xpred = self.model(spectra).cpu().data.numpy()
            Ytruth = spectra.cpu().data.numpy()

            if save_Simulator_Ypred and not (self.flags.data_set == 'Yang'
                                             or self.flags.data_set
                                             == 'Yang_sim'):
                Ypred = simulator(self.flags.data_set, Xpred)
            else:
                # Fall back to the ground-truth spectra (the MSE below is zero in this branch)
                Ypred = spectra.cpu().data.numpy()

            MSE_List = np.mean(np.power(Ypred - Ytruth, 2), axis=1)
            mse = np.mean(MSE_List)
            print(mse)

            np.savetxt(fxt, geometry.cpu().data.numpy())
            np.savetxt(fyt, Ytruth)
            if self.flags.data_set != 'Yang':
                np.savetxt(fyp, Ypred)
            np.savetxt(fxp, Xpred)

        return Ypred_file, Ytruth_file
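
# Usage sketch: evaluate() above returns the two CSV paths, so the overall
# spectral MSE can be recomputed offline. np.loadtxt matches the whitespace
# format that np.savetxt writes by default; the `network` object is assumed.
import numpy as np

Ypred_file, Ytruth_file = network.evaluate(save_dir='data/')
Ypred = np.loadtxt(Ypred_file)
Ytruth = np.loadtxt(Ytruth_file)
print('overall MSE:', np.mean((Ypred - Ytruth) ** 2))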
Example #23
0
    def train(self):
        """
        The major training function. This would start the training using information given in the flags
        :return: None
        """
        print("Starting training now")
        cuda = True if torch.cuda.is_available() else False
        if cuda:
            self.model.cuda()

        # Construct optimizer after the model moved to GPU
        self.optm = self.make_optimizer()
        self.lr_scheduler = self.make_lr_scheduler(self.optm)

        dim_x = self.flags.dim_x
        dim_y = self.flags.dim_y
        dim_z = self.flags.dim_z

        # Time keeping
        tk = time_keeper(
            time_keeping_file=os.path.join(self.ckpt_dir, 'training time.txt'))

        for epoch in range(self.flags.train_step):
            # Set to Training Mode
            train_loss = 0
            self.model.train()
            # If MMD on x-space is present from the start, the model can get stuck.
            # Instead, ramp it up exponentially.
            loss_factor = min(
                1., 2. * 0.002**(1. - (float(epoch) / self.flags.train_step)))
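            # Added note: at epoch 0 this gives 2 * 0.002 = 0.004, growing
            # exponentially toward min(1., 2.) = 1. by the final epoch.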

            for j, (x, y) in enumerate(self.train_loader):
                batch_size = len(x)
                if self.flags.data_set == 'gaussian_mixture':
                    y = torch.nn.functional.one_hot(y.to(torch.int64), 4).to(
                        torch.float)  # Change the gaussian labels into one-hot

                ######################
                # Preparing the data #
                ######################
                if cuda:
                    x = x.cuda()  # Put data onto GPU
                    y = y.cuda()  # Put data onto GPU

                ################
                # Forward step #
                ################
                self.optm.zero_grad()  # Zero the gradient first
                z = self.model(x, y)  # Get the zpred
                loss, jac, zz = self.make_loss(z)  # Make the z loss
                loss.backward()

                ######################
                #  Gradient Clipping #
                ######################
                for parameter in self.model.parameters():
                    parameter.grad.data.clamp_(-self.flags.grad_clamp,
                                               self.flags.grad_clamp)

                #########################
                # Descent your gradient #
                #########################
                self.optm.step()  # Move one step the optimizer

                train_loss += loss  # Aggregate the loss

            # Calculate the avg loss of training
            train_avg_loss = train_loss.cpu().data.numpy() / (j + 1)

            if epoch % self.flags.eval_step == 0:  # On eval steps, run evaluation and log to TensorBoard
                # Record the training loss to the tensorboard
                self.log.add_scalar('Loss/total_train', train_avg_loss, epoch)
                self.log.add_scalar('Loss/train_jac', jac, epoch)
                self.log.add_scalar('Loss/train_zz', zz, epoch)

                # Set to Evaluation Mode
                self.model.eval()
                print("Doing Evaluation on the model now")

                test_loss = 0
                for j, (x, y) in enumerate(
                        self.test_loader):  # Loop through the eval set
                    batch_size = len(x)
                    if self.flags.data_set == 'gaussian_mixture':
                        y = torch.nn.functional.one_hot(
                            y.to(torch.int64),
                            4).to(torch.float
                                  )  # Change the gaussian labels into one-hot

                    ######################
                    # Preparing the data #
                    ######################
                    if cuda:
                        x = x.cuda()  # Put data onto GPU
                        y = y.cuda()  # Put data onto GPU

                    ################
                    # Forward step #
                    ################
                    self.optm.zero_grad()  # Zero the gradient first
                    z = self.model(x, y)  # Get the zpred
                    loss, jac, zz = self.make_loss(z)  # Make the z loss

                    test_loss += loss  # Aggregate the loss

                # Record the testing loss to the tensorboard
                test_avg_loss = test_loss.cpu().data.numpy() / (j + 1)

                self.log.add_scalar('Loss/total_test', test_avg_loss, epoch)
                self.log.add_scalar('Loss/test_jac', jac, epoch)
                self.log.add_scalar('Loss/test_zz', zz, epoch)

                print("This is Epoch %d, training loss %.5f, validation loss %.5f" \
                      % (epoch, train_avg_loss, test_avg_loss ))

                # Model improving, save the model down
                if test_avg_loss < self.best_validation_loss:
                    self.best_validation_loss = test_avg_loss
                    self.save()
                    print("Saving the model down...")

                    if self.best_validation_loss < self.flags.stop_threshold:
                        print("Training finished EARLIER at epoch %d, reaching loss of %.5f" %\
                              (epoch, self.best_validation_loss))
                        break

            # Learning rate decay upon plateau
            self.lr_scheduler.step(train_avg_loss)
        tk.record(1)  # Record the total time of the training period
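
# A minimal sketch of a `make_loss` consistent with the maximum-likelihood loop
# above: minimize 0.5 * E[||z||^2] - E[log|det J|]. Passing the log-Jacobian in
# explicitly is an assumption; the real helper's signature takes only z.
import torch

def make_mle_loss_sketch(z, log_jac):
    zz = 0.5 * torch.mean(torch.sum(z ** 2, dim=1))  # Gaussian prior term
    jac = torch.mean(log_jac)                        # volume-change term
    return zz - jac, jac, zz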