Code Example #1
    def loss_and_acc_test(self, data_loader):
        mean_loss = 0
        mean_accuracy = 0

        for sample_id, batch in tqdm(enumerate(data_loader)):
            inputs, _ = self.process_batch_data(batch, test=True)
            # compute forward pass
            outputs, _, _, _, _, _ = self.model(
                measure_score_tensor=inputs,
                measure_metadata_tensor=None,
                train=False
            )
            # compute loss
            recons_loss = self.reconstruction_loss(
                x=inputs, x_recons=outputs
            )
            loss = recons_loss
            # compute mean loss and accuracy
            mean_loss += to_numpy(loss.mean())
            accuracy = self.mean_accuracy(
                weights=outputs,
                targets=inputs
            )
            mean_accuracy += to_numpy(accuracy)
        mean_loss /= len(data_loader)
        mean_accuracy /= len(data_loader)
        return (
            mean_loss,
            mean_accuracy
        )
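
A minimal usage sketch for the method above, assuming a trainer object that exposes loss_and_acc_test and a standard PyTorch DataLoader over the test split; `trainer` and `test_dataset` are illustrative placeholders, not names confirmed by the original code:

    from torch.utils.data import DataLoader

    # Hypothetical driver; `trainer` and `test_dataset` are placeholders.
    test_loader = DataLoader(test_dataset, batch_size=64, shuffle=False)
    mean_loss, mean_accuracy = trainer.loss_and_acc_test(test_loader)
    print(f'test loss: {mean_loss:.4f}, test accuracy: {mean_accuracy:.4f}')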
Code Example #2
    def compute_representations(self, data_loader, num_batches=None):
        latent_codes = []
        attributes = []
        if num_batches is None:
            num_batches = 200
        for batch_id, batch in tqdm(enumerate(data_loader)):
            inputs, labels = self.process_batch_data(batch)
            # compute forward pass, keeping only the latent code
            _, _, _, z_tilde, _ = self.model(inputs)
            latent_codes.append(to_numpy(z_tilde.cpu()))
            attributes.append(to_numpy(labels))
            if batch_id == num_batches:
                break
        # aggregate latent codes and attributes over all batches
        latent_codes = np.concatenate(latent_codes, 0)
        attributes = np.concatenate(attributes, 0)
        attributes, attr_list = self._extract_relevant_attributes(attributes)
        return latent_codes, attributes, attr_list
Code Example #3
    def compute_representations(self, data_loader, num_batches=None):
        latent_codes = []
        attributes = []
        if num_batches is None:
            num_batches = 200
        for batch_id, batch in tqdm(enumerate(data_loader)):
            inputs, latent_attributes = self.process_batch_data(batch)
            # compute forward pass in eval mode, keeping only the latent code
            _, _, _, _, z_tilde, _ = self.model(inputs, None, train=False)
            latent_codes.append(to_numpy(z_tilde.cpu()))
            attributes.append(to_numpy(latent_attributes))
            if batch_id == num_batches:
                break
        # aggregate latent codes and attributes over all batches
        latent_codes = np.concatenate(latent_codes, 0)
        attributes = np.concatenate(attributes, 0)
        attr_list = list(self.attr_dict.keys())
        return latent_codes, attributes, attr_list
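
Both variants return (latent_codes, attributes, attr_list), which downstream analysis code can then probe, for example by rank-correlating every latent dimension with every attribute. A hedged sketch of such a probe, assuming the return values above; `trainer` and `data_loader` are placeholders, and the choice of Spearman correlation is an illustrative assumption:

    import numpy as np
    from scipy.stats import spearmanr

    # Hypothetical probe; `trainer` and `data_loader` are placeholders.
    latent_codes, attributes, attr_list = trainer.compute_representations(data_loader)
    for a, attr_name in enumerate(attr_list):
        # absolute rank correlation of each latent dimension with this attribute
        rho = [abs(spearmanr(latent_codes[:, d], attributes[:, a])[0])
               for d in range(latent_codes.shape[1])]
        print(f'{attr_name}: best dim {int(np.argmax(rho))}, |rho|={max(rho):.3f}')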
Code Example #4
    def loss_and_acc_on_epoch(self, data_loader, epoch_num=None, train=True):
        """
        Computes the loss and accuracy for an epoch
        :param data_loader: torch dataloader object
        :param epoch_num: int, used to change training schedule
        :param train: bool, if True, performs the backward pass and optimizer step
        :return: loss values and accuracy percentages
        """
        mean_loss = 0
        mean_accuracy = 0
        for batch_num, batch in tqdm(enumerate(data_loader)):
            # update training scheduler
            if train:
                self.update_scheduler(epoch_num)

            # process batch data
            batch_data = self.process_batch_data(batch)

            # zero the gradients
            self.zero_grad()

            # compute loss for batch
            loss, accuracy = self.loss_and_acc_for_batch(
                batch_data, epoch_num, batch_num, train=train
            )

            # compute backward and step if train
            if train:
                loss.backward()
                # self.plot_grad_flow()
                self.step()

            # compute mean loss and accuracy
            mean_loss += to_numpy(loss.mean())
            if accuracy is not None:
                mean_accuracy += to_numpy(accuracy)

            if train:
                self.global_iter += 1

        mean_loss /= len(data_loader)
        mean_accuracy /= len(data_loader)
        return (
            mean_loss,
            mean_accuracy
        )
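
A sketch of the epoch loop that would typically drive this method, assuming a trainer exposing it together with train and validation loaders; `trainer`, `train_loader`, `val_loader`, and `num_epochs` are placeholders:

    import torch

    # Hypothetical epoch driver; the lowercase names below are placeholders.
    for epoch in range(num_epochs):
        train_loss, train_acc = trainer.loss_and_acc_on_epoch(
            train_loader, epoch_num=epoch, train=True
        )
        with torch.no_grad():  # validation needs no gradients
            val_loss, val_acc = trainer.loss_and_acc_on_epoch(
                val_loader, epoch_num=epoch, train=False
            )
        print(f'epoch {epoch}: train {train_loss:.4f}/{train_acc:.4f} '
              f'| val {val_loss:.4f}/{val_acc:.4f}')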
Code Example #5
    def loss_and_acc_test(self, data_loader):
        mean_loss = 0
        mean_accuracy = 0

        for sample_id, batch in tqdm(enumerate(data_loader)):
            inputs, _ = self.process_batch_data(batch)
            inputs = to_cuda_variable(inputs)
            # compute forward pass
            outputs, _, _, _, _ = self.model(inputs)
            # compute loss
            recons_loss = self.reconstruction_loss(inputs, outputs,
                                                   self.dec_dist)
            loss = recons_loss
            # compute mean loss and accuracy
            mean_loss += to_numpy(loss.mean())
            accuracy = self.mean_accuracy(weights=torch.sigmoid(outputs),
                                          targets=inputs)
            mean_accuracy += to_numpy(accuracy)
        mean_loss /= len(data_loader)
        mean_accuracy /= len(data_loader)
        return (mean_loss, mean_accuracy)
Code Example #6
    def loss_and_acc_on_epoch(self, data_loader, epoch_num=None, train=True):
        """
        Computes the loss and accuracy for an epoch
        :param data_loader: torch dataloader object
        :param epoch_num: int, used to change training schedule
        :param train: bool, if True, performs the backward pass and optimizer step
        :return: loss values and accuracy percentages
        """
        mean_loss = 0
        mean_accuracy = 0
        mean_D_loss = 0
        mean_D_accuracy = 0
        for batch_num, batch in tqdm(enumerate(data_loader)):
            # update training scheduler
            if train:
                self.update_scheduler(epoch_num)

            # process batch data
            batch_1, batch_2 = self.process_batch_data(batch)

            # zero the gradients
            self.zero_grad()

            # compute loss for batch
            vae_loss, accuracy, D_z = self.loss_and_acc_for_batch(batch_1,
                                                                  epoch_num,
                                                                  batch_num,
                                                                  train=train)

            # compute backward and step if train
            if train:
                vae_loss.backward(retain_graph=True)
                # self.plot_grad_flow()
                self.step()

            # compute Discriminator loss
            D_loss, D_acc = self.loss_and_acc_for_batch_D(batch_2,
                                                          D_z,
                                                          epoch_num,
                                                          batch_num,
                                                          train=train)

            if train:
                self.D_optim.zero_grad()
                D_loss.backward()
                self.D_optim.step()

            # log batch_wise:
            self.writer.add_scalar('batch_wise/vae_loss', vae_loss.item(),
                                   self.global_iter)
            self.writer.add_scalar('batch_wise/D_loss', D_loss.item(),
                                   self.global_iter)

            # compute mean loss and accuracy
            mean_loss += to_numpy(vae_loss.mean())
            if accuracy is not None:
                mean_accuracy += to_numpy(accuracy)
            mean_D_loss += to_numpy(D_loss.mean())
            mean_D_accuracy += to_numpy(D_acc.mean())

            if train:
                self.global_iter += 1

        mean_loss /= len(data_loader)
        mean_accuracy /= len(data_loader)
        mean_D_loss /= len(data_loader)
        mean_D_accuracy /= len(data_loader)
        return (mean_loss, mean_accuracy), (mean_D_loss, mean_D_accuracy)
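
This variant interleaves two optimizers: the VAE backward pass runs first with retain_graph=True so that the autograd graph behind D_z, produced in the same forward pass, is still available when the discriminator loss is backpropagated in the subsequent zero_grad/backward/step cycle on self.D_optim. A sketch of how a caller might consume the two returned pairs; `trainer`, `train_loader`, and `epoch` are placeholders:

    # Hypothetical call site; `trainer`, `train_loader`, `epoch` are placeholders.
    (vae_loss, vae_acc), (d_loss, d_acc) = trainer.loss_and_acc_on_epoch(
        train_loader, epoch_num=epoch, train=True
    )
    print(f'VAE: {vae_loss:.4f}/{vae_acc:.4f} | D: {d_loss:.4f}/{d_acc:.4f}')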