import torch
import torch.nn as nn

def unseen_phase(self):
    # Evaluate on the held-out split; the dataloader below is an assumed
    # attribute, since `dl` was undefined in the original.
    self.model.eval()
    correct = []  # 1/0 per sample, so the mean below is an accuracy
    preds = torch.empty(0, 9).to(self.device)        # softmax scores (float)
    acts = torch.empty(0, 9).long().to(self.device)  # one-hot targets
    softmax = nn.Softmax(dim=1)
    for i, batch in enumerate(self.test_dl):
        with torch.no_grad():
            inputs = batch['image'].to(self.device)
            labels = batch['labels'].long().view(-1, 9).to(self.device)
            outputs = softmax(self.model(inputs))
            preds = torch.cat((preds, outputs), 0)
            acts = torch.cat((acts, labels), 0)
            # Count a hit when the arg-max prediction matches the target.
            matches = outputs.argmax(dim=1) == labels.argmax(dim=1)
            correct.extend(matches.long().tolist())
        del inputs, labels, batch
        torch.cuda.empty_cache()
    epoch_acc = (sum(correct) * 100.0) / len(correct)
    return epoch_acc, preds, acts
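A minimal, self-contained sketch (not from the original source) of how the preds/acts tensors returned by unseen_phase can be broken down into per-class accuracy; the dummy tensors below stand in for real model output.

import torch

preds = torch.rand(4, 9).softmax(dim=1)                  # stand-in softmax scores
acts = torch.eye(9)[torch.randint(0, 9, (4,))].long()    # stand-in one-hot labels

pred_idx = preds.argmax(dim=1)
true_idx = acts.argmax(dim=1)
for c in range(9):
    mask = true_idx == c
    if mask.any():
        acc_c = (pred_idx[mask] == true_idx[mask]).float().mean().item()
        print(f"class {c}: {acc_c * 100:.1f}%")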
def train_phase(self, tr='train'):
    if tr == 'train':
        self.model.train()
        dl = self.train_dl
    elif tr == 'val':
        self.model.eval()
        dl = self.val_dl
    losses = []
    for i, batch in enumerate(dl):
        inputs = batch['image'].to(self.device)
        labels = batch['labels'].long().view(-1, 9).to(self.device)
        labels = torch.argmax(labels, 1)  # one-hot rows -> class indices
        self.optimizer.zero_grad()
        # Proxy-based losses (normsoftmax, panc, pnca) carry learnable
        # parameters of their own, hence the second optimizer.
        if self.loss in ['normsoftmax', 'panc', 'pnca']:
            self.loss_optimizer.zero_grad()
        embeddings = self.model(inputs)
        indices_tuple = self.mining_func(embeddings, labels)  # mined pairs/triplets
        eloss = self.loss_function(embeddings, labels, indices_tuple)
        losses.append(eloss.item())
        if tr == 'train':  # guard added: the original also stepped on 'val'
            eloss.backward()
            if self.loss in ['normsoftmax', 'panc', 'pnca']:
                self.loss_optimizer.step()
            self.optimizer.step()
        del inputs, labels, embeddings
        torch.cuda.empty_cache()
    epoch_loss = sum(losses) / len(losses)
    return epoch_loss
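A hedged sketch of how the mining and loss attributes used above could be wired up with the pytorch-metric-learning package, whose proxy-based losses (the normsoftmax/panc/pnca branch) hold learnable proxies and therefore need their own optimizer; the hyper-parameters and sizes here are assumptions, not values from the source.

import torch
from pytorch_metric_learning import losses, miners

embedding_size, num_classes = 128, 9             # assumed sizes
mining_func = miners.MultiSimilarityMiner()      # yields an indices_tuple
loss_function = losses.ProxyAnchorLoss(num_classes, embedding_size)  # 'panc'
# The proxies inside the loss are trainable parameters, so they get
# their own optimizer (move the loss to the model's device in real use).
loss_optimizer = torch.optim.SGD(loss_function.parameters(), lr=1e-4)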
Example #3
def train_phase(self, tr='train'):
    if tr == 'train':
        self.model.train()
        dl = self.train_dl
    elif tr == 'val':
        self.model.eval()
        dl = self.val_dl
    losses = []
    for i, batch in enumerate(dl):
        inputs = batch['image'].to(self.device)
        labels = batch['labels'].long().view(-1, self.hubble_classes).to(self.device)
        self.optimizer.zero_grad()
        outputs = self.model(inputs)
        # torch.max(labels, 1)[1] turns one-hot rows into class indices.
        loss = self.criterion(outputs, torch.max(labels, 1)[1])
        losses.append(loss.item())   # log the raw criterion value
        loss = torch.sqrt(loss)      # backpropagate through the square root
        if tr == 'train':  # guard added: the original also stepped on 'val'
            loss.backward()
            self.optimizer.step()
        del batch, inputs, labels
        torch.cuda.empty_cache()
    epoch_loss = sum(losses) / len(losses)
    return epoch_loss
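A self-contained illustration (an assumed example, not from the source) of the sqrt-before-backward pattern above: the raw criterion value is what gets logged, while gradients flow through torch.sqrt, which rescales them by 1 / (2 * sqrt(loss)). nn.CrossEntropyLoss is used here to match the class-index targets.

import torch
import torch.nn as nn

criterion = nn.CrossEntropyLoss()
logits = torch.randn(4, 9, requires_grad=True)   # stand-in model outputs
targets = torch.randint(0, 9, (4,))              # stand-in class indices

raw = criterion(logits, targets)
sqrt_loss = torch.sqrt(raw)    # gradients flow through the square root
sqrt_loss.backward()
print(f"logged: {raw.item():.4f}, backpropagated: {sqrt_loss.item():.4f}")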
def train_phase(self, tr='train'):
    if tr == 'train':
        self.model.train()
        dl = self.train_dl
    elif tr == 'val':
        self.model.eval()
        dl = self.val_dl
    losses = []
    for i, batch in enumerate(dl):
        inputs = batch['image'].to(self.device)
        labels = batch['labels'].long().view(-1, 9).to(self.device)
        self.optimizer.zero_grad()  # 1. Zero the parameter gradients
        outputs = self.model(inputs)
        # The class-index target suggests self.criterion is a cross-entropy
        # loss, which applies softmax internally; the unused nn.Softmax
        # instance from the original was removed.
        loss = self.criterion(outputs, torch.max(labels, 1)[1])
        losses.append(loss.item())
        if tr == 'train':  # guard added: the original also stepped on 'val'
            loss.backward()
            self.optimizer.step()
        del inputs, labels, outputs
        torch.cuda.empty_cache()
    epoch_loss = sum(losses) / len(losses)
    return epoch_loss
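A quick sanity check (an assumed example, not from the source) of why the explicit Softmax was dropped: PyTorch's nn.CrossEntropyLoss expects raw logits and applies log-softmax itself, so feeding it pre-softmaxed scores silently changes the loss.

import torch
import torch.nn as nn

criterion = nn.CrossEntropyLoss()
logits = torch.randn(4, 9)
targets = torch.randint(0, 9, (4,))

print(criterion(logits, targets).item())                 # correct: raw logits
print(criterion(logits.softmax(dim=1), targets).item())  # double softmax, wrong scale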