Example #1
def confidnet_score(model, test_loader, args):
    model.eval()  # eval mode (batchnorm uses moving mean/variance instead of mini-batch mean/variance)
    with torch.no_grad():
        if args.adv == False:
            correct, total = 0, 0
            uncertainties, pred, groundtruth = list(), list(), list()
            for i, (x, y) in enumerate(Bar(test_loader)):
                x, y = x.to(args.device), y.to(args.device, dtype=torch.long)                   
                outputs, uncertainty = model(x)
                pred.append(outputs)
                groundtruth.append(y)
                uncertainties.append(uncertainty)            
            pred = torch.cat(pred).cpu().detach().numpy()
            predict_label = np.argmax(pred, axis=1)
            groundtruth = torch.cat(groundtruth).cpu().detach().numpy()
            uncertainties = torch.cat(uncertainties).cpu().detach().numpy().flatten().tolist()

            binary_predicted_true = convert_predict_and_true_to_binary(predicted=predict_label, true=groundtruth)           
            binary_predicted_true = [True if b == 1 else False for b in binary_predicted_true]        
            return uncertainties, binary_predicted_true
        
        if args.adv == True:            
            uncertainties = list()
            for x, _ in Bar(test_loader):                
                x = x.to(args.device)
                outputs, uncertainty = model(x)                
                uncertainties.append(uncertainty)              
            uncertainties = torch.cat(uncertainties).cpu().detach().numpy().flatten().tolist()            
            return uncertainties
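Every snippet on this page wraps its iterable or DataLoader in Bar; in these repos that is typically the Keras-style progress-bar iterator from the barbar package (from barbar import Bar), although the imports are omitted from the extracts. A minimal, self-contained sketch of the pattern, using a hypothetical toy dataset rather than any of the loaders above:

# Minimal sketch of the Bar progress-bar pattern; assumes the barbar package is
# installed. The toy dataset below is a stand-in, not taken from any example here.
import torch
from torch.utils.data import DataLoader, TensorDataset
from barbar import Bar

toy_data = TensorDataset(torch.randn(256, 10), torch.randint(0, 2, (256,)))
loader = DataLoader(toy_data, batch_size=32, shuffle=True)

for i, (x, y) in enumerate(Bar(loader)):
    # Bar yields each batch unchanged while printing progress to stdout.
    pass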
Example #2
def train2(_model, _criterion, _optimizer, _eval_data, full_adj, full_rel,
           _device, args):
    eval_loss = 0
    eval_pred, eval_true = np.zeros(0), np.zeros(0)
    data = Rating(_eval_data)
    data_loader = DataLoader(data, batch_size=args.batch_size, shuffle=True)
    _model.train()
    for data in Bar(data_loader):
        users, items, labels = data['user'].to(_device), data['item'].to(
            _device), data['label'].type(torch.float32).to(_device)
        _optimizer.zero_grad()
        outputs = _model(users,
                         items,
                         adj=full_adj,
                         rel=full_rel,
                         train_mode=False)
        loss = _criterion(outputs, labels)
        loss.backward()
        _optimizer.step()
        eval_loss += loss.item()
        # detach outputs and labels
        eval_pred = np.concatenate((eval_pred, outputs.detach().cpu().numpy()))
        eval_true = np.concatenate((eval_true, labels.detach().cpu().numpy()))
    logging.info(f'Eval loss: {eval_loss / len(data_loader)}')
    score = utils.auc_score(eval_pred, eval_true, 'micro')
    logging.info(f'Eval AUC : {score} micro')
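Examples #2 and #18 call a repo-local helper utils.auc_score(pred, true, 'micro') that is not shown on this page. A minimal sketch of what such a helper could look like, assuming it simply wraps sklearn.metrics.roc_auc_score (an assumption, not the repo's actual code):

# Hypothetical sketch of a utils.auc_score-style helper; the real utils module is
# not included in these extracts, so this wrapper is an assumption.
import numpy as np
from sklearn.metrics import roc_auc_score

def auc_score(pred: np.ndarray, true: np.ndarray, average: str = 'micro') -> float:
    # pred holds predicted scores, true holds binary labels, mirroring the
    # (eval_pred, eval_true, 'micro') argument order used above.
    return roc_auc_score(true, pred, average=average)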
Example #3
    def train(self):
        """Training the DAGMM model"""
        self.model = DAGMMTS(
            self.args.nin,
            self.args.nh,
            self.args.nout,
            self.args.nlayers,
            self.args.do,
            self.args.n_gmm,
            self.args.latent_dim,
            self.args.folded
            ).to(self.device)
        self.model.apply(weights_init_normal)
        optimizer = optim.Adam(self.model.parameters(), lr=self.args.lr)

        self.compute = ComputeLoss(self.model, self.args.lambda_gmm,
            self.device, self.args.n_gmm)
        self.model.train()
        for epoch in range(self.args.num_epochs):
            total_loss = 0
            recon_loss = 0
            gmm_loss = 0
            ce_loss = 0       
            for batch in Bar(self.train_loader):
                optimizer.zero_grad()
                x = batch[0].float().to(self.device)
                y = batch[1].long().to(self.device)
                m = batch[2].float().to(self.device)
                s = batch[3].float().to(self.device)
                if self.args.folded:
                    p = batch[4].float().to(self.device)
                    _, x_hat, z, gamma = self.model(x, m, s, p)
                else:
                    _, x_hat, z, gamma = self.model(x, m, s)

                loss, reconst_loss, sample_energy, cross_entropy = self.compute.forward(x, y, x_hat, z, gamma)
                loss.backward(retain_graph=True)
                # torch.nn.utils.clip_grad_norm_(self.model.parameters(), 5)
                optimizer.step()
                total_loss += loss.item()
                recon_loss += reconst_loss.item()
                gmm_loss += sample_energy.item()
                ce_loss += cross_entropy.item()

            total_loss /= len(self.train_loader)
            recon_loss /= len(self.train_loader)
            gmm_loss /= len(self.train_loader)
            ce_loss /= len(self.train_loader)

            total_loss_val = 0
            recon_loss_val = 0
            gmm_loss_val = 0
            ce_loss_val = 0
            
            print('Training DAGMMTS... Epoch: {}, Loss: {:.3f}'.format(epoch, total_loss))
            self.writer.add_scalars("total", {"train": total_loss, "val": total_loss_val}, global_step=epoch)
            self.writer.add_scalars("recon", {"train": recon_loss, "val": recon_loss_val}, global_step=epoch)
            self.writer.add_scalars("gmm", {"train": gmm_loss, "val": gmm_loss_val}, global_step=epoch)
            self.writer.add_scalars("cross_entropy", {"train": ce_loss, "val": ce_loss_val}, global_step=epoch)
        return
Example #4
    def train(self):
        """Training the autoencoder"""
        self.model = ae(self.args.latent_dim).to(self.device)
        self.model.apply(weights_init_normal)
        optimizer = optim.Adam(self.model.parameters(), lr=self.args.lr)

        scheduler = optim.lr_scheduler.MultiStepLR(
            optimizer, milestones=self.args.lr_milestones, gamma=0.1)
        self.reconst = []
        self.reconst_t = []
        for epoch in range(self.args.num_epochs):
            total_loss = 0
            self.model.train()
            for x, _, _ in Bar(self.train_loader):
                x = x.float().to(self.device)

                optimizer.zero_grad()
                x_hat, _ = self.model(x)
                reconst_loss = F.mse_loss(x_hat, x, reduction='mean')

                reconst_loss.backward()
                optimizer.step()

                total_loss += reconst_loss.item()
            scheduler.step()
            print('Training Autoencoder... Epoch: {}, Loss: {:.3f}'.format(
                epoch, total_loss / len(self.train_loader)))
            self.reconst.append(total_loss / len(self.train_loader))
            loss_test, stop = self.test(epoch)
            self.reconst_t.append(loss_test)
            if stop:
                break
        self.load_weights()
Example #5
    def train(self):
        """Training the DAGMM model"""
        self.model = DAGMM(self.args.n_gmm,
                           self.args.latent_dim).to(self.device)
        self.model.apply(weights_init_normal)
        optimizer = optim.Adam(self.model.parameters(), lr=self.args.lr)

        self.compute = ComputeLoss(self.model, self.args.lambda_energy,
                                   self.args.lambda_cov, self.device,
                                   self.args.n_gmm)
        self.model.train()
        for epoch in range(self.args.num_epochs):
            total_loss = 0
            for x, _ in Bar(self.train_loader):
                x = x.float().to(self.device)
                optimizer.zero_grad()

                _, x_hat, z, gamma = self.model(x)

                loss = self.compute.forward(x, x_hat, z, gamma)
                loss.backward(retain_graph=True)
                torch.nn.utils.clip_grad_norm_(self.model.parameters(), 5)
                optimizer.step()

                total_loss += loss.item()
            print('Training DAGMM... Epoch: {}, Loss: {:.3f}'.format(
                epoch, total_loss / len(self.train_loader)))
Example #6
    def _train_epoch(self, dataloader):
        self.train()

        t_l1, t_ssim, t_huber = 0, 0, 0
        for i, batch in enumerate(Bar(dataloader)):
            self.optimizer.zero_grad()
            spectrs, slen, phonemes, plen, text, durations = batch
            out, pred_durations = self.forward((phonemes, plen, durations))

            l1 = self.loss_l1(out, spectrs, slen)
            ssim = self.loss_ssim(out, spectrs, slen)
            durations[durations < 1] = 1  # needed to prevent log(0)
            huber = self.loss_huber(pred_durations,
                                    torch.log(durations.float()), plen)

            loss = l1 + ssim + huber
            loss.backward()
            torch.nn.utils.clip_grad_norm_(self.parameters(), self.grad_clip)
            self.optimizer.step()
            self.step += 1

            t_l1 += l1.item()
            t_ssim += ssim.item()
            t_huber += huber.item()

            self.logger.add_scalar('batch/total', loss.item(),
                                   self.epoch * len(dataloader) + i)

        # report average cost per batch
        self.logger.add_scalar('train/l1', t_l1 / i, self.epoch)
        self.logger.add_scalar('train/ssim', t_ssim / i, self.epoch)
        self.logger.add_scalar('train/log(durations)_huber', t_huber / i,
                               self.epoch)
        return t_l1, t_ssim, t_huber
Example #7
    def pretrain(self):
        """ Pretraining the weights for the deep SVDD network using autoencoder"""
        ae = autoencoder(self.args.latent_dim).to(self.device)
        ae.apply(weights_init_normal)
        optimizer = optim.Adam(ae.parameters(),
                               lr=self.args.lr_ae,
                               weight_decay=self.args.weight_decay_ae)
        scheduler = optim.lr_scheduler.MultiStepLR(
            optimizer, milestones=self.args.lr_milestones, gamma=0.1)

        ae.train()
        for epoch in range(self.args.num_epochs_ae):
            total_loss = 0
            for x, _ in Bar(self.train_loader):
                x = x.float().to(self.device)

                optimizer.zero_grad()
                x_hat = ae(x)
                reconst_loss = torch.mean(
                    torch.sum((x_hat - x)**2, dim=tuple(range(1,
                                                              x_hat.dim()))))
                reconst_loss.backward()
                optimizer.step()

                total_loss += reconst_loss.item()
            scheduler.step()
            print('Pretraining Autoencoder... Epoch: {}, Loss: {:.3f}'.format(
                epoch, total_loss / len(self.train_loader)))
        self.save_weights_for_DeepSVDD(ae, self.train_loader)
Example #8
 def pretrain(self):
     """Here we train an stacked autoencoder which will be used as the initialization for the VaDE. 
     This initialization is usefull because reconstruction in VAEs would be weak at the begining
     and the models are likely to get stuck in local minima.
     """
     ae = Autoencoder(latent_dim=self.args.latent_dim,
                      n_classes=self.n_classes).to(self.device)
     ae.apply(weights_init_normal)
     optimizer = optim.Adam(ae.parameters(), lr=self.args.lr_ae)
     scheduler = optim.lr_scheduler.MultiStepLR(optimizer,
                 milestones=self.args.lr_milestones_ae, gamma=0.1)
     
     ae.train()
     for epoch in range(self.args.num_epochs_ae):
         total_loss = 0
         for x, _, _ in Bar(self.dataloader_train):
             x = x.float().to(self.device)
             
             optimizer.zero_grad()
             x_hat = ae(x)
             reconst_loss = F.mse_loss(x_hat, x, reduction='mean')
             reconst_loss.backward()
             optimizer.step()
             
             total_loss += reconst_loss.item()
         scheduler.step()
         print('Pretraining Autoencoder... Epoch: {}, Loss: {:.3f}'.format(
                epoch, total_loss/len(self.dataloader_train)))
     torch.save({'model': ae.state_dict()}, 'vade/weights/pretrained_parameters_{}.pth'.format(
                                             self.args.anormal_class))   
Example #9
def test_model(dataloader, model, device):
    #Initialize and accumulate ground truth and predictions
    GT = np.array(0)
    Predictions = np.array(0)
    running_corrects = 0
    model.eval()
    # Iterate over data.
    with torch.no_grad():
        for idx, (inputs, labels) in enumerate(Bar(dataloader)):
            inputs = inputs.to(device)
            labels = labels.to(device)

            # forward
            outputs = model(inputs.float())
            _, preds = torch.max(outputs, 1)

            #If validation, accumulate labels for confusion matrix
            GT = np.concatenate((GT, labels.detach().cpu().numpy()), axis=None)
            Predictions = np.concatenate(
                (Predictions, preds.detach().cpu().numpy()), axis=None)

            running_corrects += torch.sum(preds == labels.data.long())

    test_acc = running_corrects.double() / len(dataloader.dataset)
    print()
    print('Test Accuracy: {:4f}'.format(test_acc))

    return GT[1:], Predictions[1:]
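Example #9 (and Example #30 below) accumulates labels and predictions by seeding each array with np.array(0) and slicing the seed off at the end; a tiny self-contained illustration of that pattern, with made-up batch labels:

import numpy as np

GT = np.array(0)  # dummy seed so np.concatenate always has an array to append to
for batch_labels in ([1, 0, 1], [0, 0]):
    GT = np.concatenate((GT, np.asarray(batch_labels)), axis=None)
print(GT[1:])  # -> [1 0 1 0 0]; the leading seed value is dropped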
Example #10
    def _train_epoch(self, dataloader):
        self.train()

        t_l1, t_att = 0, 0
        for i, batch in enumerate(Bar(dataloader)):
            self.optimizer.zero_grad()
            spectrs, slen, phonemes, plen, text = batch

            s = add_random_noise(spectrs, hp.noise)
            s = degrade_some(self, s, phonemes, plen, hp.feed_ratio, repeat=hp.feed_repeat)
            s = frame_dropout(s, hp.replace_ratio)

            out, att_weights = self.forward(phonemes, s, plen)

            l1 = self.loss_l1(out, spectrs, slen)
            l_att = self.loss_att(att_weights, slen, plen)

            loss = l1 + l_att
            loss.backward()
            torch.nn.utils.clip_grad_norm_(self.parameters(), self.grad_clip)
            self.optimizer.step()
            self.step += 1

            t_l1 += l1.item()
            t_att += l_att.item()

            self.logger.add_scalar(
                'batch/total', loss.item(), self.step
            )

        # report average cost per batch
        self.logger.add_scalar('train/l1', t_l1 / i, self.epoch)
        self.logger.add_scalar('train/guided_att', t_att / i, self.epoch)
        return t_l1 / i, t_att / i
Example #11
def train(model, dataloader, optimizer, criterion, device):
    ''' Runs one epoch of training for a model. '''

    # Prepare the model
    model.to(device)
    model.train()

    # Creates metrics recorder
    metrics = Metrics()

    # Iterates over batches
    for (id_imgs, inputs, labels) in Bar(dataloader):

        # Clean gradients in the optimizer
        optimizer.zero_grad()

        # Transforming inputs
        inputs, labels = inputs.to(device), labels.to(device)

        # Forward Pass
        outputs = model(inputs)

        # Get loss
        loss = criterion(outputs, labels)

        # Backward Pass, updates weights and optimizer
        loss.backward()
        optimizer.step()

        # Register on metrics
        _, predicted = torch.max(outputs.data, 1)
        metrics.batch(labels=labels, preds=predicted, loss=loss.item())

    # Print training metrics
    metrics.print_one_liner()
Example #12
def train_model(net):
    total = 0
    correct = 0

    for epoch in range(num_epochs):
        print(f'Epoch: {epoch+1}')
        running_loss = 0.0

        for batch_id, (inputs, labels) in enumerate(Bar(train_loader)):
            # Send input tensors to device (needed for GPU support)
            inputs, labels = inputs.to(device), labels.to(device)

            # zero the parameter gradients
            optimizer.zero_grad()

            # forward + backward + optimize
            outputs = net(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()

            running_loss += loss.item()

            # Accumulate training-set accuracy over every batch, not just the last one
            _, predicted = outputs.max(1)
            total += labels.size(0)
            correct += predicted.eq(labels).sum().item()

        # Normalizing the loss by the total number of train batches
        running_loss /= len(train_loader)

        # Calculate training set accuracy
        train_accuracy = 100.*correct/total

        print(f"Epoch: {epoch+1} | Loss: {running_loss} | Training Accuracy: {train_accuracy}")
Example #13
def validate(model, dataloader, criterion, device):
    ''' Runs one epoch of validation for a model. '''

    # Prepare the model
    model.to(device)
    model.eval()

    # Creates metrics recorder
    metrics = Metrics()

    with torch.no_grad():
        # Iterates over batches
        for (id_imgs, inputs, labels) in Bar(dataloader):

            # Transforming inputs
            inputs, labels = inputs.to(device), labels.to(device)

            # Forward Pass
            outputs = model(inputs)

            # Get loss
            loss = criterion(outputs, labels)

            # Register on metrics
            _, predicted = torch.max(outputs.data, 1)
            metrics.batch(labels=labels, preds=predicted, loss=loss.item())

    # Print and return validation metrics
    return metrics.print_one_liner()
Example #14
 def test(self, e=None):
     self.model.eval()
     total_loss = 0
     for x, y in Bar(self.test_generator):
         y_pred = self.model(x)
         loss = self.loss_fn(y_pred, y)
         total_loss += float(loss.detach().numpy())
     self.writer.add_scalar('Loss/test', total_loss, e)
Example #15
def save_as_tensors(args, images, mean, std):
    tensor_saver = TensorSaverDataset(images, args.data_dir, mean, std,
                                      args.resize_dim)
    saver_dataloader = DataLoader(tensor_saver,
                                  batch_size=32,
                                  shuffle=False,
                                  num_workers=8)
    for idx, data in enumerate(Bar(saver_dataloader)):
        batch = data.get('image')
Example #16
def run_inference(cfg: omegaconf.DictConfig) -> None:

    logger.info(" .. Testing Will Be Starting in few seconds .. ")

    test_df = pd.read_csv(cfg.testing.test_csv)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    checkpoints = glob.glob(
        os.path.join(
            cfg.general.logs_dir, "checkpoints",
            f"{cfg.model.architecture_name}{cfg.classifiermode.num_classes}",
            "*.ckpt"))
    num_models = len(checkpoints)

    if num_models == 0:
        sys.exit()
    if cfg.classifiermode.num_classes == 1:
        test_preds = np.zeros(len(test_df))
    else:
        test_preds = np.zeros((len(test_df), 3))

    for checkpoint_id, checkpoint_path in enumerate(checkpoints):

        output_name = checkpoint_path.split("/")[2]
        seed = int(checkpoint_path.split("/")[3].split(".")[0].split("_")[1])
        utils.setup_environment(random_seed=seed,
                                gpu_list=cfg.general.gpu_list)
        model = TuniziDialectClassifier.load_from_checkpoint(checkpoint_path,
                                                             hydra_config=cfg)
        model.eval().to(device)
        test_predictions = []
        with torch.no_grad():
            for batch_idx, batch in enumerate(Bar(model.test_dataloader())):
                input_ids = batch["input_ids"]
                attention_mask = batch["attention_mask"]

                input_ids = input_ids.to(device, dtype=torch.long)
                attention_mask = attention_mask.to(device, dtype=torch.long)
                outputs = model.forward(input_ids,
                                        attention_mask=attention_mask)

                if cfg.classifiermode.num_classes == 1:
                    outputs = torch.sigmoid(outputs).detach().cpu().numpy()
                    test_predictions.append(outputs)
                else:
                    outputs = torch.softmax(outputs, 1).detach().cpu().numpy()
                    test_predictions.append(outputs)

        test_predictions = np.concatenate(test_predictions, axis=0)
        if cfg.classifiermode.num_classes == 1:
            test_predictions = test_predictions.reshape(
                test_predictions.shape[0])
        gc.collect()
        torch.cuda.empty_cache()
        utils.create_submission(test_df, output_name + str(seed),
                                test_predictions,
                                cfg.classifiermode.num_classes)
Example #17
def eval(model, test_loader, args):
    model.eval()  # eval mode (batchnorm uses moving mean/variance instead of mini-batch mean/variance)
    with torch.no_grad():
        correct, total = 0, 0    
        for i, (x, y) in enumerate(Bar(test_loader)):
            x, y = x.to(args.device), y.to(args.device, dtype=torch.long)
            outputs, uncertainty = model(x)
            _, predicted = torch.max(outputs.data, 1)
            total += y.size(0)
            correct += (predicted == y).sum().item()
        return 100 * correct / total
Example #18
def train(_model, _criterion, _optimizer, _minibatch, _train_data, _device,
          _args):
    global phase_iter
    phase_iter += 1
    logging.info(
        f'\n----- Starting training phase {phase_iter} ------ Estimator Epoch: {_minibatch.num_training_batches()}'
    )
    epoch = 0

    while not _minibatch.end():
        _model.train()
        logging.info(f'\n------------- Epoch: {epoch} -------------')
        epoch += 1
        _t0 = time.time()
        node, adj, rel = _minibatch.one_batch('train')
        node_tensor = torch.from_numpy(np.array(node)).to('cpu')
        adj = torch.from_numpy(adj).to('cpu')
        rel = torch.from_numpy(rel).to('cpu')
        reserve_node = {j: i for i, j in enumerate(node)}

        _t1 = time.time()
        logging.info(f'Sampling sub graph in {_t1-_t0: .3f} seconds')

        subgraph_rating = SubgraphRating(node, _train_data)
        data_loader = DataLoader(subgraph_rating,
                                 batch_size=_args.batch_size,
                                 drop_last=False,
                                 shuffle=True)
        _t2 = time.time()
        logging.info(f'Building DataLoader in {_t2-_t1: .3f} seconds')

        train_loss, eval_loss = 0, 0
        train_auc, eval_auc = [], []
        train_pred, train_true = np.zeros(0), np.zeros(0)
        for data in Bar(data_loader):
            users, items, labels = data['user'].to(_device), data['item'].to(
                _device), data['label'].type(torch.float32).to(_device)
            _optimizer.zero_grad()
            outputs = _model(users, items, reserve_node, node_tensor, adj, rel)
            loss = _criterion(outputs, labels)
            loss.backward()

            _optimizer.step()
            train_loss += loss.item()

            # detach outputs and labels
            train_pred = np.concatenate(
                (train_pred, outputs.detach().cpu().numpy()))
            train_true = np.concatenate(
                (train_true, labels.detach().cpu().numpy()))
        logging.info(f'Train loss: {train_loss / len(data_loader)}')
        score = utils.auc_score(train_pred, train_true, 'micro')
        logging.info(f'Train AUC : {score} micro')
Example #19
def train(args):
    check_paths(args)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.cuda:
        torch.cuda.manual_seed(args.seed)

    # construct data loader
    dataset = MyDataset(args.image_path, args.message_path)
    loader = torch.utils.data.DataLoader(dataset, batch_size=args.batch_size, num_workers=4, pin_memory=True, drop_last=False)

    # construct neural networks
    encoder = nets.CSFE()
    decoder = nets.CSFD()
    if args.cuda:
        encoder.cuda()
        decoder.cuda()

    # construct optimizer
    params = list(encoder.parameters())+list(decoder.parameters())
    optimizer = torch.optim.Adam(params, lr=args.lr, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, amsgrad=False)

    # define loss
    l2 = torch.nn.MSELoss()

    # training loop
    for e in range(args.epochs):
        
        for img, code in Bar(loader):

            # encode image and code
            enc = encoder(img, code)

            # channel
            #nothing for now

            # decode encoded image
            output = decoder(enc)

            # calculate loss
            ber_loss = l2(output, code)
            img_loss = l2(enc, img)
            total_loss = ber_loss + img_loss

            # occasionally print
            #print(total_loss)

            #backprop
            optimizer.zero_grad()
            total_loss.backward()
            optimizer.step()
Example #20
def train(args):
    if args.d == 'imagenet' and args.train_uncertainty:
        train_dir = '../datasets/ilsvrc2012/images/train/'    
        test_dir = '../datasets/ilsvrc2012/images/val_loader/'    

        train_loader, test_loader = load_train_and_test_loader(train_dir, test_dir, args)
        
        if args.model == 'efficientnet-b7':
            from efficientnet_pytorch_model.model import EfficientNet as efn
            model = efn.from_name(args.model).to(args.device)
            state_dict = EfficientNet.from_pretrained(args.model).to(args.device).state_dict()
            model.load_state_dict(state_dict, strict=False)
        
        # model = freeze_layers(model=model, freeze_uncertainty_layers=False)  

        # for param in model.named_parameters():
        #     print(param[0], param[1].requires_grad)
        # exit()
        # accuracy = eval(model, test_loader, args)
        # print('Accuracy on testing data: %.2f' % (accuracy))

        model = freeze_layers(model=model, freeze_uncertainty_layers=False)
        # for param in model.named_parameters():
        #     print(param[0], param[1].requires_grad)
        # exit()

        # Loss and optimizer
        optimizer = torch.optim.Adam(model.parameters(), lr=0.00001)
        for epoch in range(args.epoch):
            total_loss = 0
            for i, (x, y) in enumerate(Bar(train_loader)):
                x, y = x.to(args.device), y.to(args.device, dtype=torch.long)                        
                
                # Backward and optimize
                optimizer.zero_grad()

                # Forward pass
                outputs, uncertainty = model(x)                    
                loss = confid_mse_loss((outputs, uncertainty), y, args=args)
                loss.backward()
                total_loss += loss.item()
                optimizer.step()
                # break
            print('Running evaluation for uncertainty')
            accuracy, roc_score = eval_uncertainty(model=model, test_loader=test_loader, args=args)
            print('Epoch %i / %i -- Total loss: %f -- Accuracy on testing data: %.2f -- AUC on testing data: %.2f' % (epoch, args.epoch, total_loss, accuracy, roc_score))            
                        
            path_save = './model_confidnet/%s_%s/train_uncertainty/' % (args.d, args.model)
            if not os.path.exists(path_save):
                os.makedirs(path_save)
            torch.save(model.state_dict(), path_save + 'epoch_%i_acc-%.2f_auc-%.2f.pt' % (epoch, accuracy, roc_score))        
Example #21
def eval_model(model, data_loader, loss_fn, device, n_examples):
    model = model.eval()
    losses = []
    correct_predictions = 0
    with torch.no_grad():
        for d in Bar(data_loader):
            input_ids = d["input_ids"].to(device)
            attention_mask = d["attention_mask"].to(device)
            targets = d["targets"].to(device)
            outputs = model(input_ids=input_ids, attention_mask=attention_mask)
            _, preds = torch.max(outputs, dim=1)
            loss = loss_fn(outputs, targets)
            correct_predictions += torch.sum(preds == targets)
            losses.append(loss.item())
    return correct_predictions.double() / n_examples, np.mean(losses)
Example #22
    def test(self, epoch):
        self.model.eval()

        total_loss = 0
        with torch.no_grad():
            for x, _, _ in Bar(self.val_loader):
                x = x.float().to(self.device)
                x_hat, _ = self.model(x)
                reconst_loss = F.mse_loss(x_hat, x, reduction='mean')
                total_loss += reconst_loss.item()
        loss = total_loss / len(self.val_loader)
        print('Testing Autoencoder... Epoch: {}, Loss: {:.3}'.format(
            epoch, loss))
        stop = self.es.count(loss, self.model)
        return loss, stop
Example #23
def save_alignments_as_fertilities(model, dataloader, folder, durations_filename):
    """Save extracted alignments as durations
    
    Use the duration_Extraction model checkpoint to extract alignments and convert them into durations.
    For dataloader, use get_dataloader(64, 'cuda', start_idx=0, end_idx=13099, shuffle=False, sampler=SequentialSampler)
    """

    with open(os.path.join(folder, durations_filename), 'w') as file:
        for i, batch in enumerate(Bar(dataloader)):
            spectrs, slen, phonemes, plen, text = batch
            # supervised generation to get more reliable alignments
            out, alignment = model.generate(phonemes, plen, window=1, spectrograms=spectrs)
            fert = get_fertilities(alignment.cpu(), plen, slen)
            for f in fert:
                file.write(', '.join(str(x) for x in f) + '\n')
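A hypothetical usage sketch for save_alignments_as_fertilities, assuming the get_dataloader call recommended in its docstring and a duration-extraction model already instantiated elsewhere; model, get_dataloader, the folder and the filename are placeholders, not the repo's actual entry point:

# Placeholder usage: build the sequential dataloader the docstring recommends and
# write one comma-separated duration line per utterance into durations.txt.
from torch.utils.data import SequentialSampler

dataloader = get_dataloader(64, 'cuda', start_idx=0, end_idx=13099,
                            shuffle=False, sampler=SequentialSampler)
save_alignments_as_fertilities(model, dataloader, 'alignments', 'durations.txt')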
Example #24
def test_result(model, test_loader, device):
    # testing the model by turning model "Eval" mode
    model.eval()
    preds = []
    names = []
    for data, name in Bar(test_loader):
        # move-tensors-to-GPU
        data = data.to(device)
        output = model(data)
        pred = torch.sigmoid(output)
        names.extend(list(name))
        preds.extend(pred.tolist())

    test_result = pd.DataFrame({'image_name': names, 'target': preds})
    test_result.to_csv('testing.csv')
    return (test_result)
Example #25
    def train(self):
        self.model.train()
        for e in range(self.n_epochs):
            print("Epoch: {} of {}".format(e, self.n_epochs))
            total_loss = 0
            for x, y in Bar(self.training_generator):
                self.model.zero_grad()
                y_pred = self.model(x)
                loss = self.loss_fn(y_pred, y)
                loss.backward()
                self.optimizer.step()
                total_loss += float(loss.detach().numpy())

            self.writer.add_scalar('Loss/train', total_loss, e)
            if e % 10 == 0:
                self.test(e)
Example #26
    def test(self, epoch):
        self.net.eval()

        total_loss = 0
        with torch.no_grad():
            for x, _, _ in Bar(self.dataloader_val):
                x = x.float().to(self.device)
                z = self.net(x)
                loss = torch.mean(torch.sum((z - self.c)**2, dim=1))

                total_loss += loss.item()
        loss = total_loss / len(self.dataloader_val)
        print('Testing Deep SVDD... Epoch: {}, Loss: {:.3}'.format(
            epoch, loss))
        stop = self.es.count(loss, self.net, self.c, self.args)
        return loss, stop
Example #27
def test_model(net):    
    print("Testing the model...")
    global best_acc
    net.eval()
    correct = 0
    total = 0
    with torch.no_grad():
        for batch_id, (inputs, targets) in enumerate(Bar(test_loader)):
            # Send input tensors to device (needed for GPU support)
            inputs, targets = inputs.to(device), targets.to(device)

            outputs = net(inputs)
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += (predicted == targets).sum().item()
    acc = 100.*correct/total
    print(f'The test accuracy of the model is: {acc}')
Example #28
def train_epoch(model, data_loader, loss_fn, optimizer, device, scheduler,
                n_examples):
    model = model.train()
    losses = []
    correct_predictions = 0
    for d in Bar(data_loader):
        input_ids = d["input_ids"].to(device)
        attention_mask = d["attention_mask"].to(device)
        targets = d["targets"].to(device)
        outputs = model(input_ids=input_ids, attention_mask=attention_mask)
        _, preds = torch.max(outputs, dim=1)
        loss = loss_fn(outputs, targets)
        correct_predictions += torch.sum(preds == targets)
        losses.append(loss.item())
        loss.backward()
        nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
        optimizer.step()
        scheduler.step()
        optimizer.zero_grad()
    return correct_predictions.double() / n_examples, np.mean(losses)
Example #29
    def train(self):
        """Training the ClasSVDD model"""
        if self.args.pretrain == True:
            self.load_pretrained_weights()
        else:
            self.net.apply(weights_init_normal)
            self.c = torch.randn(self.args.latent_dim).to(self.device)

        self.es = EarlyStopping(patience=self.args.patience)

        optimizer = optim.Adam(self.net.parameters(),
                               lr=self.args.lr,
                               weight_decay=self.args.weight_decay)
        scheduler = optim.lr_scheduler.MultiStepLR(
            optimizer, milestones=self.args.lr_milestones, gamma=0.1)
        self.loss = []
        self.loss_t = []
        for epoch in range(self.args.num_epochs):
            total_loss = 0
            self.net.train()
            for x, y, _ in Bar(self.dataloader_train):
                x = x.float().to(self.device)
                y = y.long()

                optimizer.zero_grad()
                z = self.net(x)
                loss = torch.mean(torch.sum((z - self.c[y])**2, dim=1))
                loss.backward()
                optimizer.step()

                total_loss += loss.item()
            scheduler.step()
            print('Training ClasSVDD... Epoch: {}, Loss: {:.3f}'.format(
                epoch, total_loss / len(self.dataloader_train)))

            self.loss.append(total_loss / len(self.dataloader_train))
            loss_test, stop = self.test(epoch)
            self.loss_t.append(loss_test)
            if stop:
                break
        self.load_weights()
Example #30
def test_model(dataloader, model, device):
    #Initialize and accumulate ground truth, predictions, and image indices
    GT = np.array(0)
    Predictions = np.array(0)
    Index = np.array(0)

    running_corrects = 0
    model.eval()

    # Iterate over data
    print('Testing Model...')
    with torch.no_grad():
        for idx, (inputs, labels, index) in enumerate(Bar(dataloader)):
            inputs = inputs.to(device)
            labels = labels.to(device)
            index = index.to(device)

            # forward
            outputs = model(inputs)
            _, preds = torch.max(outputs, 1)

            #If test, accumulate labels for confusion matrix
            GT = np.concatenate((GT, labels.detach().cpu().numpy()), axis=None)
            Predictions = np.concatenate(
                (Predictions, preds.detach().cpu().numpy()), axis=None)
            Index = np.concatenate((Index, index.detach().cpu().numpy()),
                                   axis=None)

            running_corrects += torch.sum(preds == labels.data)

    test_acc = running_corrects.double() / (len(dataloader.sampler))
    print('Test Accuracy: {:4f}'.format(test_acc))

    test_dict = {
        'GT': GT[1:],
        'Predictions': Predictions[1:],
        'Index': Index[1:],
        'test_acc': np.round(test_acc.cpu().numpy() * 100, 2)
    }

    return test_dict