Example #1
    def __init__(self, input_channels, n_classes):
        super(SCNN, self).__init__()
        self.input_channels = input_channels
        self.n_classes = n_classes
        self.features_size = 256 * 15 * 15  # 512*13*13

        self.loss_func = nn.CrossEntropyLoss()
        self.train_accuracy = torchmetrics.Accuracy()
        self.val_accuracy = torchmetrics.Accuracy()

        # self.auroc = torchmetrics.AUROC(num_classes=n_classes)

        self.conv1 = nn.Sequential(nn.Conv2d(1, 96, (6, 6), stride=(2, 2)),
                                   nn.BatchNorm2d(96), nn.LeakyReLU(),
                                   nn.MaxPool2d((2, 2)))

        self.conv2 = nn.Sequential(nn.Conv2d(96, 256, (3, 3), stride=(2, 2)),
                                   nn.BatchNorm2d(256), nn.LeakyReLU(),
                                   nn.MaxPool2d((2, 2)))

        self.conv3 = nn.Sequential(nn.Conv2d(256, 512, (3, 3), stride=(1, 1)),
                                   nn.LeakyReLU())

        self.fc = nn.Sequential(nn.Linear(self.features_size, 1024),
                                nn.LeakyReLU(), nn.Dropout(p=0.5),
                                nn.Linear(1024, self.n_classes))

        self.apply(self.weight_init)
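
The snippet calls self.apply(self.weight_init) but the initializer itself is not shown; a minimal sketch of a Kaiming-style initializer that would fit here (an assumption, not the original code):

    @staticmethod
    def weight_init(m):
        # Kaiming initialization pairs well with the LeakyReLU activations above.
        if isinstance(m, (nn.Conv2d, nn.Linear)):
            nn.init.kaiming_normal_(m.weight)
            if m.bias is not None:
                nn.init.zeros_(m.bias)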
Example #2
    def __init__(self,
                 pretrained_bert=None,
                 target_classes={},
                 steps_train=None,
                 weights=None):
        super().__init__()
        self.steps_train = steps_train
        self.target_classes = target_classes  # key: name of the target layer, value: number of classes for the target
        self.weights = weights
        self.bert = transformers.BertModel.from_pretrained(
            pretrained_bert, output_hidden_states=True, return_dict=True)
        self.tokenizer = transformers.BertTokenizerFast.from_pretrained(
            pretrained_bert)

        # layers for different label_sets (upos, feats ...)
        self.prediction_layers = torch.nn.ModuleDict()
        self.accuracies = torch.nn.ModuleDict()
        self.val_accuracies = torch.nn.ModuleDict()
        for target_name, num_classes in self.target_classes.items():
            logging.info(
                f"Creating prediction layer for '{target_name}' with {num_classes} classes"
            )
            self.prediction_layers[target_name] = TaggerOutput(
                self.bert.config.hidden_size, num_classes)

            self.accuracies[target_name] = torchmetrics.Accuracy()
            self.val_accuracies[target_name] = torchmetrics.Accuracy()
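
TaggerOutput is project-specific and not shown; a minimal sketch of what such a per-target prediction head might look like (hypothetical):

    class TaggerOutput(torch.nn.Module):
        def __init__(self, hidden_size, num_classes):
            super().__init__()
            self.out = torch.nn.Linear(hidden_size, num_classes)

        def forward(self, hidden_states):
            return self.out(hidden_states)  # per-token class logits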
Example #3
    def __init__(self, input_channels, n_classes):
        super(SSNet, self).__init__()
        self.input_channels = input_channels
        self.n_classes = n_classes

        self.features_size = 8000

        self.loss_func = nn.CrossEntropyLoss()
        self.train_accuracy = torchmetrics.Accuracy()
        self.val_accuracy = torchmetrics.Accuracy()

        self.conv1 = nn.Sequential(nn.Conv3d(1, 8, (7, 3, 3), stride=1),
                                   nn.LeakyReLU(), nn.MaxPool3d(2))

        self.conv2 = nn.Sequential(nn.Conv3d(8, 16, (5, 3, 3), stride=1),
                                   nn.LeakyReLU(), nn.MaxPool3d(2))

        self.conv3 = nn.Sequential(nn.Conv3d(16, 32, (3, 3, 3), stride=1),
                                   nn.LeakyReLU(),
                                   nn.AdaptiveMaxPool3d((10, 5, 5)))

        # self.conv4 = nn.Sequential(
        #                 nn.Conv2d(32, 64, (3, 3), stride=1),
        #                 nn.LeakyReLU())

        self.fc1 = nn.Sequential(nn.Linear(self.features_size, 256),
                                 nn.LeakyReLU(), nn.Dropout(0.5))

        self.fc2 = nn.Sequential(nn.Linear(256, 128), nn.LeakyReLU(),
                                 nn.Dropout(0.5),
                                 nn.Linear(128, self.n_classes))
Example #4
    def _set_metrics(self):
        num_classes = self.num_classes

        # Train
        self.train_acc = torchmetrics.Accuracy()
        self.train_precision = torchmetrics.Precision()
        self.train_recall = torchmetrics.Recall()
        self.train_f1 = torchmetrics.F1(
            num_classes=num_classes) if num_classes else None
        self.train_auc = torchmetrics.AUROC(
            num_classes=num_classes) if num_classes else None

        # Validation
        self.validation_acc = torchmetrics.Accuracy()
        self.validation_precision = torchmetrics.Precision()
        self.validation_recall = torchmetrics.Recall()
        self.validation_f1 = torchmetrics.F1(
            num_classes=num_classes) if num_classes else None
        self.validation_auc = torchmetrics.AUROC(
            num_classes=num_classes) if num_classes else None

        # Test
        self.test_acc = torchmetrics.Accuracy()
        self.test_precision = torchmetrics.Precision()
        self.test_recall = torchmetrics.Recall()
        self.test_f1 = torchmetrics.F1(
            num_classes=num_classes) if num_classes else None
        self.test_auc = torchmetrics.AUROC(
            num_classes=num_classes) if num_classes else None
Example #5
    def __init__(
        self,
        encoder: nn.Module,
        clf: nn.Module,
        lr: float,
        weight_decay: float,
        fairness: FairnessType,
        mixup_lambda: Optional[float] = None,
        alpha: float = 1.0,
        lr_initial_restart: int = 10,
        lr_restart_mult: int = 2,
        lr_sched_interval: TrainingMode = TrainingMode.epoch,
        lr_sched_freq: int = 1,
    ) -> None:
        super().__init__(
            lr=lr,
            weight_decay=weight_decay,
            lr_initial_restart=lr_initial_restart,
            lr_restart_mult=lr_restart_mult,
            lr_sched_interval=lr_sched_interval,
            lr_sched_freq=lr_sched_freq,
        )
        self.encoder = encoder
        self.clf = clf
        self.net = nn.Sequential(self.encoder, self.clf)
        self.fairness = fairness
        self.mixup_lambda = mixup_lambda
        self.alpha = alpha

        self._loss_fn = CrossEntropyLoss(reduction=ReductionType.mean)

        self.test_acc = torchmetrics.Accuracy()
        self.train_acc = torchmetrics.Accuracy()
        self.val_acc = torchmetrics.Accuracy()
Example #6
    def __init__(self,
                 config_backbone,
                 config_training,
                 num_classes,
                 learning_rate=None):
        super().__init__()

        self.config_training = config_training

        if learning_rate:
            self.learning_rate = learning_rate
        else:
            self.learning_rate = self.config_training.lr

        in_features = self.get_in_features(config_backbone.name)

        self.feature_extractor = timm.create_model(config_backbone.name,
                                                   pretrained=True,
                                                   num_classes=0)
        self.classifier = nn.Sequential(
            nn.Dropout(0.3),
            nn.Linear(in_features, num_classes),
        )

        self.train_accuracy = torchmetrics.Accuracy()
        self.val_accuracy = torchmetrics.Accuracy()
        self.test_accuracy = torchmetrics.Accuracy()
Example #7
    def __init__(self, batch_size, lr_scheduler_milestones, lr_gamma, nclass, nfeatures, length, lr=1e-2, L2_reg=1e-3, top_acc=1, loss=torch.nn.CrossEntropyLoss()):
        super().__init__()

        self.batch_size = batch_size
        self.nclass = nclass
        self.nfeatures = nfeatures
        self.length = length

        self.loss = loss
        self.lr = lr
        self.lr_scheduler_milestones = lr_scheduler_milestones
        self.lr_gamma = lr_gamma
        self.L2_reg = L2_reg

        # Log hyperparams (all arguments are logged by default)
        self.save_hyperparameters(
            'length',
            'nfeatures',
            'L2_reg',
            'lr',
            'lr_gamma',
            'lr_scheduler_milestones',
            'batch_size',
            'nclass'
        )

        # Metrics to log
        if not top_acc < nclass:
            raise ValueError('`top_acc` must be strictly smaller than `nclass`.')
        self.train_acc = torchmetrics.Accuracy(top_k=top_acc)
        self.val_acc = torchmetrics.Accuracy(top_k=top_acc)
        self.train_f1 = torchmetrics.F1(nclass, average='macro')
        self.val_f1 = torchmetrics.F1(nclass, average='macro')

        self.features = nn.Sequential(
            nn.Conv2d(in_channels=1, out_channels=20, kernel_size=(3,5), stride=1, padding=(1,2)),
            nn.BatchNorm2d(20),
            nn.ReLU(True),
            nn.Conv2d(in_channels=20, out_channels=20, kernel_size=(3,5), stride=1, padding=(1,2)),
            nn.BatchNorm2d(20),
            nn.ReLU(True),
            nn.Conv2d(in_channels=20, out_channels=20, kernel_size=(3,5), stride=1, padding=(1,2)),
            nn.BatchNorm2d(20),
            nn.ReLU(True),
            nn.Conv2d(in_channels=20, out_channels=20, kernel_size=(3,5), stride=1, padding=(1,2)),
            nn.BatchNorm2d(20),
            nn.ReLU(True),
            nn.Conv2d(in_channels=20, out_channels=20, kernel_size=(3,3), stride=1, padding=(1,1)),
            nn.BatchNorm2d(20),
            nn.ReLU(True),
            nn.Conv2d(in_channels=20, out_channels=nfeatures, kernel_size=(3,3), stride=1, padding=(1,1)),
            nn.BatchNorm2d(nfeatures),
            nn.ReLU(True)
        )
        self.pool = nn.AvgPool2d(kernel_size=(2, self.length))
        self.classifier = nn.Sequential(
            nn.Linear(1*nfeatures, nclass),  # 1 because global pooling reduces the feature length to 1
            # nn.Softmax(1)  # not needed: nn.CrossEntropyLoss applies log-softmax internally
        )
Example #8
    def __init__(self, num_classes, lr, lr_milestones):

        super().__init__()
        self.save_hyperparameters()

        self.model = torchvision.models.resnet18(pretrained=False,
                                                 num_classes=num_classes)
        self.train_acc = torchmetrics.Accuracy()
        self.val_acc = torchmetrics.Accuracy()
Example #9
    def __init__(self, cfg, *args, **kwargs):
        super().__init__()
        self.cfg = cfg
        self.save_hyperparameters(cfg)

        self.model = srn.resnet18(False, False, num_classes=cfg.model.n_classes)

        self.criterion = nn.CrossEntropyLoss()

        self.train_accuracy = torchmetrics.Accuracy()
        self.val_accuracy = torchmetrics.Accuracy()
Example #10
    def __init__(self, model, lr: float = 1e-4, augmentations: Optional[nn.Module] = None):
        super().__init__()
        self.model = model
        self.arch = self.model.arch
        self.num_classes = self.model.num_classes
        self.train_accuracy = torchmetrics.Accuracy()
        self.train_f1_score = torchmetrics.F1(self.num_classes, average='weighted')
        self.val_accuracy = torchmetrics.Accuracy()
        self.val_f1_score = torchmetrics.F1(self.num_classes, average='weighted')
        self.learn_rate = lr
        self.aug = augmentations
Example #11
    def __init__(self, backbone, final_layer):
        super().__init__()

        # lightning modules are best structured as systems
        self.backbone = backbone 
        self.linear = final_layer        
        self.save_hyperparameters()
        
        # torchmetrics stuff
        self.train_acc = torchmetrics.Accuracy()
        self.valid_acc = torchmetrics.Accuracy()
Example #12
    def __init__(self, backbone, num_classes):
        super().__init__()
        self.backbone = backbone
        # Dummy forward pass to read off the backbone's output channel count
        output = self.backbone(torch.Tensor(1, 3, 64, 64))
        linear_inplanes = output.shape[1]
        self.adaptive_pool = nn.AdaptiveAvgPool2d(1)
        self.flatten = nn.Flatten()
        self.dropout = nn.Dropout(0.2)
        self.linear = nn.Linear(linear_inplanes, num_classes)
        self.loss = nn.CrossEntropyLoss()
        self.train_accuracy = torchmetrics.Accuracy()
        self.val_accuracy = torchmetrics.Accuracy()
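
The dummy forward pass above infers the linear layer's input width from the backbone. A forward pass consistent with these layers might look like this (a sketch; the original is not shown):

    def forward(self, x):
        x = self.backbone(x)       # (B, C, H, W) feature map
        x = self.adaptive_pool(x)  # (B, C, 1, 1)
        x = self.flatten(x)        # (B, C)
        x = self.dropout(x)
        return self.linear(x)      # (B, num_classes) logits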
Example #13
    def __init__(self,
                 model: nn.Module,
                 num_classes: int = 2,
                 lr: float = 1e-4,
                 **kwargs):
        super().__init__()
        self.model = model
        self.lr = lr
        self.loss = CrossEntropyLoss()
        self.train_acc = torchmetrics.Accuracy()
        self.val_acc = torchmetrics.Accuracy()
        self.save_hyperparameters()
Example #14
def valid_epoch(model, valid_loader, criterion, epoch):
    model.eval()

    total_loss = AverageMeter()
    
    manual_top1 = AverageMeter()
    manual_top5 = AverageMeter()
    torch_top1 = torchmetrics.Accuracy()
    torch_top5 = torchmetrics.Accuracy(top_k=5)
    torch_f1 = torchmetrics.F1(num_classes=312)

    with torch.no_grad():
        for batch in tqdm(valid_loader):
            images = batch["image"].to(device)
            elas = batch["ela"].to(device)
            target_labels = batch["label"].to(device)
            
            out_logits, _ = model(images, elas)

            loss = criterion(out_logits, target_labels)
            
            #---------------------Batch Loss Update-------------------------
            total_loss.update(loss.item(), valid_loader.batch_size)
                    
            # Metrics: already inside torch.no_grad(), so no inner context manager is needed
            out_logits = out_logits.detach().cpu()
            target_labels = target_labels.detach().cpu()

            topk = topk_accuracy(out_logits, target_labels, topk=(1, 5))
            manual_top1.update(topk[0].item(), valid_loader.batch_size)
            manual_top5.update(topk[1].item(), valid_loader.batch_size)

            probs = torch.softmax(out_logits, dim=-1)
            torch_top1.update(probs, target_labels)
            torch_top5.update(probs, target_labels)
            torch_f1.update(probs, target_labels)


    valid_metrics = {
        "valid_loss": total_loss.avg,
        "valid_acc1_manual": manual_top1.avg,
        "valid_acc5_manual": manual_top5.avg,
        "valid_acc1_torch": torch_top1.compute().item(),
        "valid_acc_5_torch": torch_top5.compute().item(),
        "valid_f1": torch_f1.compute().item(),
        "epoch": epoch
    }
    wandb.log(valid_metrics)

    return valid_metrics
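
Note that torchmetrics objects accumulate state across update() calls; this works here because fresh metric instances are created on every call to valid_epoch. A metric reused across epochs would need an explicit reset(), as in this minimal illustration (using the pre-0.11 torchmetrics API seen throughout these examples):

    import torch
    import torchmetrics

    acc = torchmetrics.Accuracy()
    for epoch in range(2):
        acc.reset()  # clear the state accumulated during the previous epoch
        preds = torch.softmax(torch.randn(8, 5), dim=-1)
        target = torch.randint(0, 5, (8,))
        acc.update(preds, target)
        print(f"epoch {epoch}: {acc.compute():.3f}")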
Example #15
    def __init__(
        self,
        model,
        loss_fn=None,
        train_acc=None,
        valid_acc=None,
        lr: float = 0.001,
    ):
        super(ClassificationEngine, self).__init__()
        self.model = model
        self.scaler = None
        # Instantiate defaults per instance: module defaults in the signature
        # would share one (stateful) metric object across all engine instances.
        self.loss_function = loss_fn if loss_fn is not None else nn.CrossEntropyLoss()
        self.train_acc = train_acc if train_acc is not None else torchmetrics.Accuracy()
        self.valid_acc = valid_acc if valid_acc is not None else torchmetrics.Accuracy()
        self.lr = lr
Example #16
def train(model, dataloader, optimizer, loss_func, device, start_epoch,
          scheduler, e):
    print(f'EPOCH[{e+1}/{start_epoch+opt.epoch}] Training....')
    model.train()
    iter_loss = []
    corrects = 0
    data_size = 0

    train_acc1 = torchmetrics.Accuracy(num_classes=10).to(device)
    train_acc5 = torchmetrics.Accuracy(num_classes=10, top_k=5).to(device)
    # train_precision = torchmetrics.Precision(num_classes=10, multiclass=True).to(device)
    # train_recall = torchmetrics.Recall(num_classes=10, multiclass=True).to(device)

    for i, (images, labels) in enumerate(dataloader):
        start = time.time()

        images, labels = images.to(device), labels.to(device)
        data_size += images.shape[0]

        optimizer.zero_grad()
        outputs = model(images)
        loss = loss_func(outputs, labels)
        loss.backward()
        optimizer.step()

        train_acc1(outputs, labels)
        train_acc5(outputs, labels)
        # train_precision(outputs, labels)
        # train_recall(outputs, labels)

        iter_loss.append(loss.item())
        corrects += sum(outputs.argmax(axis=1) == labels).item()

        end = time.time()
        if ((i + 1) % 40 == 0) or ((i + 1) == len(dataloader)):
            times = (end - start) * 40 if (i + 1) != len(dataloader) else (end - start) * i
            print(f'Iter[{i+1}/{len(dataloader)}]'
                  f' --- Loss: {sum(iter_loss)/data_size:0.4f}'
                  # f' --- Accuracy: {corrects/data_size:0.2f}'
                  f' --- Accuracy1: {train_acc1.compute():0.2f}'
                  f' --- Accuracy5: {train_acc5.compute():0.2f}'
                  # f' --- Precision: {train_precision.compute():0.2f}'
                  # f' --- Recall: {train_recall.compute():0.2f}'
                  f' --- Time: {strftime("%H:%M:%S", gmtime(times))}'
                  f' --- LR: {scheduler.get_lr()[0]:0.4f}')

    return [sum(iter_loss) / data_size, train_acc1.compute().cpu()]
Example #17
def test(model, dataloader, loss_func, device, start_epoch, e):
    print(f'EPOCH[{e+1}/{start_epoch+opt.epoch}] Testing....')
    model.eval()
    iter_loss = []
    corrects = 0
    
    test_metrics = torchmetrics.Accuracy(num_classes=opt.num_classes).to(device)
    
    with torch.no_grad():
        data_size = 0
        for i, (images, labels) in enumerate(dataloader):
            images, labels = images.to(device), labels.to(device)
            data_size += images.shape[0]
            
            outputs = model(images)
            loss = loss_func(outputs, labels)
            
            test_metrics(outputs, labels)
            
            iter_loss.append(loss.item())
            corrects += sum(outputs.argmax(axis=1) == labels).item()
    
    print(f'Iter[{i+1}/{len(dataloader)}]'
          f' --- Loss: {sum(iter_loss)/data_size:0.4f}'
          # f' --- Accuracy: {corrects/data_size:0.2f}'
          f' --- Accuracy: {test_metrics.compute():0.4f}')
    
    return [sum(iter_loss)/data_size, test_metrics.compute().cpu()]
Example #18
    def __init__(self):
        super().__init__()
        self.model = models.resnet18(pretrained=True)
        for param in self.model.parameters():
            param.requires_grad = False
        self.model.fc = nn.Linear(512, 17)
        self.acc = torchmetrics.Accuracy()
Example #19
    def __init__(self, model: t.nn.Module, cfg: DictConfig) -> None:
        """Create a LitModuleWrapper.

        Args:
            model: Your model to be quantized. It should be a general torch.nn.Module.
            cfg: The top-level user configuration object.
        """
        super().__init__()
        self.model = model
        self.cfg = cfg

        self.criterion = t.nn.CrossEntropyLoss()

        self.train_acc = tm.Accuracy()
        self.val_acc = tm.Accuracy(dist_sync_on_step=True)
        self.val_acc5 = tm.Accuracy(dist_sync_on_step=True, top_k=5)
Example #20
    def __init__(self, hidden_size=64, learning_rate=2e-4):

        super().__init__()

        # Set our init args as class attributes
        self.hidden_size = hidden_size
        self.learning_rate = learning_rate

        # Initialize PyTorch Lightning accuracy function
        self.accuracy = torchmetrics.Accuracy()

        # Hardcode some dataset specific attributes
        self.num_classes = 10
        self.dims = (1, 28, 28)
        channels, width, height = self.dims
        self.transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307,), (0.3081,))
        ])

        # Define PyTorch model
        self.model = nn.Sequential(
            nn.Flatten(),
            nn.Linear(channels * width * height, hidden_size),
            nn.ReLU(),
            nn.Dropout(0.1),
            nn.Linear(hidden_size, hidden_size),
            nn.ReLU(),
            nn.Dropout(0.1),
            nn.Linear(hidden_size, self.num_classes)
        )
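
The snippet stops at the model definition; a hedged sketch of how self.accuracy is typically wired into a LightningModule step (hypothetical, assuming torch.nn.functional is imported as F):

    def training_step(self, batch, batch_idx):
        x, y = batch
        logits = self.model(x)
        loss = F.cross_entropy(logits, y)
        # update the metric with class probabilities and integer targets
        self.accuracy(torch.softmax(logits, dim=-1), y)
        self.log('train_acc', self.accuracy, on_step=False, on_epoch=True)
        return loss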
Example #21
    def __init__(self,
                 num_classes=5,
                 learning_rate=2e-4,  # assumed default: not given in the original snippet
                 l2_norm=1e-4):       # assumed default: not given in the original snippet
        super().__init__()
        self.save_hyperparameters()
        self.hparams.l2_norm = l2_norm
        self.hparams.lr = learning_rate
        self.ce = nn.CrossEntropyLoss()

        # Define model
        # self.model = models.resnext50_32x4d(pretrained=True)
        self.model = models.resnet18(pretrained=True)
        self.model.fc = torch.nn.Linear(self.model.fc.in_features, num_classes)

        # Define extra metrics
        self.train_accuracy = torchmetrics.Accuracy()
        self.validation_accuracy = torchmetrics.Accuracy()
Example #22
    def __init__(self, model_name_or_path: str, num_labels: int,
                 learning_rate: float, adam_epsilon: float,
                 weight_decay: float, max_len: int, warmup_steps: int,
                 gpus: int, max_epochs: int, accumulate_grad_batches: int):
        super().__init__()
        self.model_name_or_path = model_name_or_path
        self.num_labels = num_labels

        self.save_hyperparameters('learning_rate', 'adam_epsilon',
                                  'weight_decay', 'max_len', 'gpus',
                                  'accumulate_grad_batches', 'max_epochs',
                                  'warmup_steps')

        self.config = transformers.AutoConfig.from_pretrained(
            model_name_or_path, num_labels=self.num_labels)
        self.model = transformers.AutoModelForSequenceClassification.from_pretrained(
            model_name_or_path, config=self.config)
        # self.model = nn.Sequential(
        #     OrderedDict(
        #         [
        #          ('base',transformers.AutoModel.from_pretrained(model_name_or_path)),
        #          ('classifier',nn.Linear(in_features=768,out_features=self.num_labels)),
        #          ('softmax',nn.Softmax())
        #         ]
        #     )
        # )
        metrics = torchmetrics.MetricCollection([
            torchmetrics.Accuracy(),
            torchmetrics.F1(num_classes=3, average='macro')
        ])
        self.train_metrics = metrics.clone()
        self.val_metrics = metrics.clone()
Example #23
    def choose_metric(self):
        metric_map = {
            'acc': torchmetrics.Accuracy(),
            'psnr': torchmetrics.image.PSNR(),
            'psnr255': torchmetrics.image.PSNR(data_range=255),
        }
        return metric_map[self.params['metric'].lower()]
Example #24
    def __init__(
        self,
        lr: float,
        weight_decay: float,
        lr_gamma: float,
        disc_steps: int,
        fairness: str,
        recon_weight: float,
        clf_weight: float,
        adv_weight: float,
        enc: nn.Module,
        dec: nn.Module,
        adv: nn.Module,
        clf: nn.Module,
    ):
        super().__init__()
        self.enc = enc
        self.dec = dec
        self.adv = adv
        self.clf = clf

        self.laftr_params = itertools.chain([
            *self.enc.parameters(), *self.dec.parameters(),
            *self.clf.parameters()
        ])
        self.adv_params = self.adv.parameters()

        self._clf_loss = nn.BCEWithLogitsLoss(reduction="mean")
        self._recon_loss = nn.L1Loss(reduction="mean")
        self._adv_clf_loss = nn.L1Loss(reduction="mean")

        self.disc_steps = disc_steps
        self.fairness = FairnessType[fairness]
        self.lr = lr
        self.lr_gamma = lr_gamma
        self.weight_decay = weight_decay

        self.clf_weight = clf_weight
        self.adv_weight = adv_weight
        self.recon_weight = recon_weight

        self.test_acc = torchmetrics.Accuracy()
        self.train_acc = torchmetrics.Accuracy()
        self.val_acc = torchmetrics.Accuracy()

        self._target_name: str = "y"
Example #25
    def __init__(
        self,
        *,
        lr: float,
        weight_decay: float,
        disc_steps: int,
        fairness: FairnessType,
        recon_weight: float,
        clf_weight: float,
        adv_weight: float,
        enc: nn.Module,
        dec: nn.Module,
        adv: nn.Module,
        clf: nn.Module,
        lr_initial_restart: int = 10,
        lr_restart_mult: int = 2,
        lr_sched_interval: TrainingMode = TrainingMode.epoch,
        lr_sched_freq: int = 1,
    ) -> None:
        super().__init__(
            lr=lr,
            weight_decay=weight_decay,
            lr_initial_restart=lr_initial_restart,
            lr_restart_mult=lr_restart_mult,
            lr_sched_interval=lr_sched_interval,
            lr_sched_freq=lr_sched_freq,
        )
        self.enc = enc
        self.dec = dec
        self.adv = adv
        self.clf = clf

        self._clf_loss = CrossEntropyLoss(reduction=ReductionType.mean)
        self._recon_loss = nn.L1Loss(reduction="mean")
        self._adv_clf_loss = nn.L1Loss(reduction="none")

        self.disc_steps = disc_steps
        self.fairness = fairness

        self.clf_weight = clf_weight
        self.adv_weight = adv_weight
        self.recon_weight = recon_weight

        self.test_acc = torchmetrics.Accuracy()
        self.train_acc = torchmetrics.Accuracy()
        self.val_acc = torchmetrics.Accuracy()
Example #26
    def __init__(self, cfg, *args, **kwargs):
        super().__init__()
        self.cfg = cfg
        self.save_hyperparameters(cfg)

        self.model = models.resnet18(pretrained=cfg.model.pre_trained)

        for param in self.model.parameters():
            param.requires_grad = False

        self.model.fc = nn.Linear(self.model.fc.in_features,
                                  cfg.model.n_classes)

        self.criterion = nn.CrossEntropyLoss()

        self.train_accuracy = torchmetrics.Accuracy()
        self.val_accuracy = torchmetrics.Accuracy()
Example #27
def get_metric(
    metric_name: str,
    num_classes: Optional[int] = None,
    pos_label: Optional[int] = None,
):
    """
    Obtain a torchmerics.Metric from its name.
    Define a customized metric function in case that torchmetrics doesn't support some metric.

    Parameters
    ----------
    metric_name
        Name of metric.
    num_classes
        Number of classes.
    pos_label
        The label (0 or 1) of binary classification's positive class, which is used in some metrics, e.g., AUROC.

    Returns
    -------
    torchmetrics.Metric
        A torchmetrics.Metric object.
    custom_metric_func
        A customized metric function.
    """
    metric_name = metric_name.lower()
    if metric_name in [ACC, ACCURACY]:
        return torchmetrics.Accuracy(), None
    elif metric_name in [RMSE, ROOT_MEAN_SQUARED_ERROR]:
        return torchmetrics.MeanSquaredError(squared=False), None
    elif metric_name == R2:
        return torchmetrics.R2Score(), None
    elif metric_name == QUADRATIC_KAPPA:
        return (
            torchmetrics.CohenKappa(num_classes=num_classes,
                                    weights="quadratic"),
            None,
        )
    elif metric_name == ROC_AUC:
        return torchmetrics.AUROC(pos_label=pos_label), None
    elif metric_name == AVERAGE_PRECISION:
        return torchmetrics.AveragePrecision(pos_label=pos_label), None
    elif metric_name in [LOG_LOSS, CROSS_ENTROPY]:
        return torchmetrics.MeanMetric(), functools.partial(F.cross_entropy,
                                                            reduction="none")
    elif metric_name == COSINE_EMBEDDING_LOSS:
        return torchmetrics.MeanMetric(), functools.partial(
            F.cosine_embedding_loss, reduction="none")
    elif metric_name == PEARSONR:
        return torchmetrics.PearsonCorrCoef(), None
    elif metric_name == SPEARMANR:
        return torchmetrics.SpearmanCorrCoef(), None
    elif metric_name == F1:
        return CustomF1Score(num_classes=num_classes,
                             pos_label=pos_label), None
    else:
        raise ValueError(f"Unknown metric {metric_name}")
Example #28
    def __init__(self, pretrained=True, num_classes=2, lr=0.005, **kwargs):
        super().__init__()

        self.pretrained: bool = pretrained
        self.num_classes: int = num_classes
        self.learning_rate: float = lr

        self.model: nn.Module = self._load_mobile_net(pretrained=pretrained,
                                                      num_classes=num_classes)

        self.trn_acc: torchmetrics.Accuracy = torchmetrics.Accuracy()
        self.trn_loss: torchmetrics.AverageMeter = torchmetrics.AverageMeter()
        self.val_acc: torchmetrics.Accuracy = torchmetrics.Accuracy()
        self.val_loss: torchmetrics.AverageMeter = torchmetrics.AverageMeter()

        self.criterion = nn.CrossEntropyLoss()
        self.save_hyperparameters()
        self.save_hyperparameters(kwargs)
Example #29
    def __init__(self, learning_rate: float = 1e-1, learning_rate_min: float = 1e-4,
                 lr_max_epochs: int = -1, freeze: bool = True, *args: Any, **kwargs: Any) -> None:
        """
        Constructor for QuackDenseNet.

        Parameters
        ----------
        learning_rate: float
            Hyperparameter passed to pt.optim.lr_scheduler.CosineAnnealingLR
        learning_rate_min: float
            Hyperparameter passed to pt.optim.lr_scheduler.CosineAnnealingLR
        lr_max_epochs: int
            Hyperparameter passed to pt.optim.lr_scheduler.CosineAnnealingLR
        freeze: bool
            Should the image analyzing layers of the pre-trained Densenet be frozen?
        args: Any
            Passed to the parent constructor.
        kwargs: Any
            Passed to the parent constructor.
        """
        super().__init__(*args, **kwargs)
        # Load the pre-trained densenet
        pre_trained = models.densenet121(pretrained=True)
        if freeze:
            # Freeze the existing gradients.
            pre_trained.requires_grad_(False)
        # We want to replace the classifier.  New instances of models have
        # requires_grad = True by default.
        classifier_features_in = pre_trained.classifier.in_features
        pre_trained.classifier = nn.Linear(classifier_features_in, 1)
        self.__densenet = pre_trained
        self.__to_probability = nn.Sigmoid()
        self.__learning_rate_init = learning_rate
        self.__learning_rate_min = learning_rate_min
        self.__lr_max_epochs = lr_max_epochs
        self.__train_acc = tm.Accuracy()
        self.__train_f1 = tm.F1Score(num_classes=2)
        self.__val_acc = tm.Accuracy()
        self.__val_f1 = tm.F1Score(num_classes=2)
        self.__test_acc = tm.Accuracy()
        self.__test_f1 = tm.F1Score(num_classes=2)
        self.__loss_module = nn.BCEWithLogitsLoss()
        # For tuning.
        self.batch_size = 2
Example #30
    def __init__(self, hparams):
        """Defines overall computations"""
        super().__init__()

        self.hparams = hparams
        self.save_hyperparameters()

        # The hparams dictionary informs the model of the input size, the
        # number of target classes, and the learning rate.
        self.fc1 = nn.Linear(self.hparams["input_size"], 420)
        self.fc2 = nn.Linear(420, 420)
        self.fc3 = nn.Linear(420, 420)
        self.fc4 = nn.Linear(420, self.hparams["targets"])
        self.dropout = nn.Dropout(0.4)

        # Instantiate accuracy metrics for each phase
        self.train_acc = torchmetrics.Accuracy()
        self.valid_acc = torchmetrics.Accuracy()
        self.test_acc = torchmetrics.Accuracy()
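
The forward pass is not part of the snippet; a minimal sketch consistent with the layers above (an assumption, with torch.nn.functional imported as F):

    def forward(self, x):
        x = self.dropout(F.relu(self.fc1(x)))
        x = self.dropout(F.relu(self.fc2(x)))
        x = self.dropout(F.relu(self.fc3(x)))
        return self.fc4(x)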