Example 1
def get_metrics_collections_base(
    prefix: str,
    is_regressor: bool = True
    # device="cuda" if torch.cuda.is_available() else "cpu",
):
    if is_regressor:
        metrics = MetricCollection(
            {
                "MeanAbsoluteError": MeanAbsoluteError(),
                "MeanSquaredError": MeanSquaredError(),
                "SpearmanCorrcoef": SpearmanCorrcoef(),
                "PearsonCorrcoef": PearsonCorrcoef()
            },
            prefix=prefix)
    else:
        metrics = MetricCollection(
            {
                "Accuracy": Accuracy(),
                "Top_3": Accuracy(top_k=3),
                # "Top_5" :Accuracy(top_k=5),
                # "Precision_micro":Precision(num_classes=NUM_CLASS,average="micro"),
                # "Precision_macro":Precision(num_classes=NUM_CLASS,average="macro"),
                # "Recall_micro":Recall(num_classes=NUM_CLASS,average="micro"),
                # "Recall_macro":Recall(num_classes=NUM_CLASS,average="macro"),
                # "F1_micro":torchmetrics.F1(NUM_CLASS,average="micro"),
                # "F1_macro":torchmetrics.F1(NUM_CLASS,average="micro"),
            },
            prefix=prefix)
    return metrics
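A minimal usage sketch for this factory (values are illustrative; it assumes a pre-0.11 torchmetrics, where these metric classes take no task argument and MetricCollection accepts a prefix):

import torch
from torchmetrics import (MetricCollection, MeanAbsoluteError,
                          MeanSquaredError, SpearmanCorrcoef, PearsonCorrcoef)

# Regression branch: keys come back prefixed, e.g. "train_MeanAbsoluteError"
metrics = get_metrics_collections_base(prefix="train_", is_regressor=True)
preds, target = torch.randn(8), torch.randn(8)
metrics.update(preds, target)
print(metrics.compute())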
Example 2
    def __init__(
            self,
            freeze_backbone: bool,
            lr_classifier: float,
            lr_backbone: float,
            weight_decay: float,
            epochs: int,
            model_path: Optional[str] = None,
            repo_or_dir: Optional[str] = 'facebookresearch/barlowtwins:main',
            model_name: Optional[str] = 'resnet50'):
        super().__init__()
        self.save_hyperparameters()

        self.backbone = self._load_backbone(repo_or_dir=repo_or_dir,
                                            model_name=model_name,
                                            model_path=model_path)
        self.backbone.fc = nn.Identity()

        if self.hparams.freeze_backbone:
            self.backbone.requires_grad_(False)

        self.classifier = nn.Linear(2048, 1000)
        self.classifier.weight.data.normal_(mean=0.0, std=0.01)
        self.classifier.bias.data.zero_()
        self.criterion = nn.CrossEntropyLoss()

        self.accuracy_top_k_one = Accuracy(top_k=1)
        self.accuracy_top_k_five = Accuracy(top_k=5)
Example 3
def test_negative_ignore_index(preds, target, ignore_index, result):
    # Subtract one class to account for the ignored index
    num_classes = len(target.unique()) - 1

    # Test class
    acc = Accuracy(num_classes=num_classes, ignore_index=ignore_index)
    acc_score = acc(preds, target)
    assert torch.allclose(acc_score, result)
    # Test functional metrics
    acc_score = accuracy(preds,
                         target,
                         num_classes=num_classes,
                         ignore_index=ignore_index)
    assert torch.allclose(acc_score, result)

    # If the ignore index is not set properly, we expect to see an error
    ignore_index = None
    # Test class
    acc = Accuracy(num_classes=num_classes, ignore_index=ignore_index)
    with pytest.raises(
            ValueError,
            match="The `target` has to be a non-negative tensor"):
        acc_score = acc(preds, target)

    # Test functional
    with pytest.raises(
            ValueError,
            match="The `target` has to be a non-negative tensor"):
        acc_score = accuracy(preds,
                             target,
                             num_classes=num_classes,
                             ignore_index=ignore_index)
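For concreteness, a standalone call showing the behaviour this test parametrizes (example values are mine; the pre-0.11 torchmetrics API is assumed):

import torch
from torchmetrics.functional import accuracy

target = torch.tensor([0, 1, 1, 2])  # samples with target 2 will be ignored
preds = torch.tensor([0, 1, 0, 1])
# The ignored sample is dropped, leaving 2 of 3 predictions correct
print(accuracy(preds, target, num_classes=3, ignore_index=2))  # tensor(0.6667)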
Example 4
def test_wrong_params(average, mdmc_average, num_classes, inputs, ignore_index, top_k, threshold):
    preds, target = inputs.preds, inputs.target

    with pytest.raises(ValueError):
        acc = Accuracy(
            average=average,
            mdmc_average=mdmc_average,
            num_classes=num_classes,
            ignore_index=ignore_index,
            threshold=threshold,
            top_k=top_k
        )
        acc(preds[0], target[0])
        acc.compute()

    with pytest.raises(ValueError):
        accuracy(
            preds[0],
            target[0],
            average=average,
            mdmc_average=mdmc_average,
            num_classes=num_classes,
            ignore_index=ignore_index,
            threshold=threshold,
            top_k=top_k
        )
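One concrete invalid combination of the kind this test sweeps over (illustrative; the pre-0.11 Accuracy validates its arguments at construction time):

import pytest
from torchmetrics import Accuracy

# A non-positive top_k is rejected before any update is run
with pytest.raises(ValueError):
    Accuracy(top_k=0)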
Example 5
def classification_eval(model, dataloader, device="cuda"):
    metrics = {
        'acc': Accuracy(),
        'acc_top5': Accuracy(top_k=5),
    }

    if device == "cuda":
        model = model.cuda()
        for metric_name in metrics:
            metrics[metric_name] = metrics[metric_name].cuda()

    model.eval()
    with torch.no_grad():
        for X, y in dataloader:
            if device == "cuda":
                X = X.cuda()
                y = y.cuda()
            else:
                X = X.cpu()
                y = y.cpu()

            pred = model(X)
            for metric_name, metric_fn in metrics.items():
                metric_fn.update(pred, y)

    for metric_name in metrics:
        metrics[metric_name] = metrics[metric_name].compute()
    return metrics
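A CPU smoke test for this helper with random tensors (illustrative; torchvision's resnet18 stands in for any 1000-class model, and a torchmetrics version that accepts raw logits is assumed):

import torch
from torch.utils.data import DataLoader, TensorDataset
from torchvision.models import resnet18

X = torch.randn(8, 3, 224, 224)   # fake image batch
y = torch.randint(0, 1000, (8,))  # fake labels
loader = DataLoader(TensorDataset(X, y), batch_size=4)
print(classification_eval(resnet18(), loader, device="cpu"))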
Example 6
 def __init__(self):
     super().__init__()
     self.back_bone = nn.Sequential(nn.Conv2d(3, 32, (7, 7), stride=(2, 2)),
                                    ResidualBlock(32, 32),
                                    ResidualBlock(32, 64),
                                    ResidualBottleneck(64, 2),
                                    ResidualBlock(64, 64),
                                    ResidualBlock(64, 128),
                                    ResidualBottleneck(128, 2),
                                    ResidualBlock(128, 128),
                                    ResidualBlock(128, 256),
                                    ResidualBottleneck(256, 2),
                                    ResidualBlock(256, 256), nn.Flatten(),
                                    nn.Linear(256 * 25 * 25, 1),
                                    nn.Sigmoid())
     self.criterion = torch.nn.BCELoss()
     self.train_metrics = MetricCollection({
         'train_accuracy': Accuracy(compute_on_step=False),
         'train_precision': Precision(compute_on_step=False),
         'train_recall': Recall(compute_on_step=False),
     })
     self.val_metrics = MetricCollection({
         'val_accuracy': Accuracy(compute_on_step=False),
         'val_precision': Precision(compute_on_step=False),
         'val_recall': Recall(compute_on_step=False),
     })
Example 7
    def __init__(
        self,
        backbone: str = "resnet50",
        train_bn: bool = True,
        milestones: tuple = (5, 10),
        batch_size: int = 32,
        lr: float = 1e-2,
        lr_scheduler_gamma: float = 1e-1,
        num_workers: int = 6,
        **kwargs,
    ) -> None:
        """
        Args:
            dl_path: Path where the data will be downloaded
        """
        super().__init__()
        self.backbone = backbone
        self.train_bn = train_bn
        self.milestones = milestones
        self.batch_size = batch_size
        self.lr = lr
        self.lr_scheduler_gamma = lr_scheduler_gamma
        self.num_workers = num_workers

        self.__build_model()

        self.train_acc = Accuracy()
        self.valid_acc = Accuracy()
        self.save_hyperparameters()
Example 8
    def __init__(self, model, args, length):
        super().__init__()

        self.model = model
        # dim_mlp = self.model.fc[0].in_features
        # self.model.fc = nn.Identity()
        # self.model.train()
        # NOTE: `embed_dim` (the encoder's output size) is assumed to be
        # defined at module level in the original source
        self.fc = nn.Linear(embed_dim, 10)

        self.train_acc = Accuracy()
        self.valid_acc = Accuracy()

        self.optim = torch.optim.Adam(self.fc.parameters(),
                                      args.lr,
                                      betas=(0.9, 0.999),
                                      eps=1e-08,
                                      weight_decay=args.weight_decay,
                                      amsgrad=False)
        # self.optim = LARS(self.optim, eps=0.0)

        # sched = torch.optim.lr_scheduler.CosineAnnealingLR(self.optim, T_max=length, eta_min=0, last_epoch=-1)
        # w = scheduler.LinearWarmup(self.optim, warmup_steps=args.warmup, last_epoch=-1)
        # sched = scheduler.Scheduler(sched, w)
        # sched.optimizer = self.optim
        # self.scheduler = sched

        self.criterion = nn.CrossEntropyLoss()
Example 9
    def __init__(
        self,
        backbone: str = "x3d_s",
        train_bn: bool = False,
        milestone: int = 5,
        lr: float = 1e-3,
        lr_scheduler_gamma: float = 1e-1,
        **kwargs,
    ) -> None:
        """TransferLearningModel.

        Args:
            backbone: Name of the feature extractor in Torch Hub
            train_bn: Whether the BatchNorm layers should be trainable
            milestone: Epoch at which the learning rate is reduced
            lr: Initial learning rate
            lr_scheduler_gamma: Factor by which the learning rate is reduced at each milestone
        """
        super().__init__()
        self.backbone = backbone
        self.train_bn = train_bn
        self.lr = lr
        self.lr_scheduler_gamma = lr_scheduler_gamma
        self.milestone = milestone

        self.__build_model()

        self.train_acc = Accuracy()
        self.valid_acc = Accuracy()
        self.soft = nn.Softmax(dim=1)
        self.save_hyperparameters()
Example 10
 def __init__(
     self,
     data_path: str,
     arch: str = "resnet18",
     pretrained: bool = False,
     lr: float = 0.1,
     momentum: float = 0.9,
     weight_decay: float = 1e-4,
     batch_size: int = 256,
     workers: int = 4,
 ):
     super().__init__()
     self.arch = arch
     self.pretrained = pretrained
     self.lr = lr
     self.momentum = momentum
     self.weight_decay = weight_decay
     self.data_path = data_path
     self.batch_size = batch_size
     self.workers = workers
     self.model = models.__dict__[self.arch](pretrained=self.pretrained)
     self.train_dataset: Optional[Dataset] = None
     self.eval_dataset: Optional[Dataset] = None
     self.train_acc1 = Accuracy(top_k=1)
     self.train_acc5 = Accuracy(top_k=5)
     self.eval_acc1 = Accuracy(top_k=1)
     self.eval_acc5 = Accuracy(top_k=5)
Example 11
    def __init__(self, lr: float = 0.01, num_blocks: int = 5):
        super().__init__()
        self.lr = lr
        self.num_blocks = num_blocks

        self.train_acc = Accuracy()
        self.valid_acc = Accuracy()
        self.test_acc = Accuracy()
Example 12
    def __init__(self, in_channels, out_channels):
        super().__init__()
        from torchmetrics import Accuracy

        self.lin = torch.nn.Linear(in_channels, out_channels)

        self.train_acc = Accuracy()
        self.val_acc = Accuracy()
Example 13
 def __init__(self, lite, args, model, dataloader):
     super().__init__()
     self.lite = lite
     self.args = args
     self.model = model
     self.dataloader = dataloader
     self.dataloader_iter = None
     self.accuracy = Accuracy().to(lite.device)
     self.test_loss = 0
Example 14
class IrisClassification(pl.LightningModule):
    def __init__(self, **kwargs):
        super().__init__()

        self.train_acc = Accuracy()
        self.val_acc = Accuracy()
        self.test_acc = Accuracy()
        self.args = kwargs

        self.fc1 = nn.Linear(4, 10)
        self.fc2 = nn.Linear(10, 10)
        self.fc3 = nn.Linear(10, 3)
        self.cross_entropy_loss = nn.CrossEntropyLoss()

        self.lr = kwargs.get("lr", 0.01)
        self.momentum = kwargs.get("momentum", 0.9)
        self.weight_decay = kwargs.get("weight_decay", 0.1)

    def forward(self, x):
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)  # no activation on the logits fed to cross-entropy
        return x

    def configure_optimizers(self):
        return torch.optim.SGD(self.parameters(),
                               lr=self.lr,
                               momentum=self.momentum,
                               weight_decay=self.weight_decay)

    def training_step(self, batch, batch_idx):
        x, y = batch
        logits = self.forward(x)
        loss = self.cross_entropy_loss(logits, y)
        self.train_acc(torch.argmax(logits, dim=1), y)
        # Log the metric object itself so Lightning computes and resets it per epoch
        self.log("train_acc", self.train_acc, on_step=False, on_epoch=True)
        self.log("loss", loss)
        return {"loss": loss}

    def validation_step(self, batch, batch_idx):
        x, y = batch
        logits = self.forward(x)
        loss = F.cross_entropy(logits, y)
        self.val_acc(torch.argmax(logits, dim=1), y)
        self.log("val_acc", self.val_acc)
        self.log("val_loss", loss, sync_dist=True)

    def test_step(self, batch, batch_idx):
        x, y = batch
        logits = self.forward(x)
        loss = F.cross_entropy(logits, y)
        self.test_acc(torch.argmax(logits, dim=1), y)
        self.log("test_loss", loss)
        self.log("test_acc", self.test_acc)
Example 15
    def __init__(self, in_channels: int, out_channels: int,
                 hidden_channels: int = 256, num_layers: int = 2,
                 dropout: float = 0.5):
        super().__init__()
        self.gnn = GraphSAGE(in_channels, hidden_channels, num_layers,
                             out_channels, dropout=dropout,
                             norm=BatchNorm1d(hidden_channels))

        self.train_acc = Accuracy()
        self.val_acc = Accuracy()
        self.test_acc = Accuracy()
Example 16
 def __init__(self, in_feats, n_hidden, n_classes):
     super().__init__()
     self.save_hyperparameters()
     self.layers = nn.ModuleList()
     self.layers.append(dglnn.SAGEConv(in_feats, n_hidden, 'mean'))
     self.layers.append(dglnn.SAGEConv(n_hidden, n_hidden, 'mean'))
     self.layers.append(dglnn.SAGEConv(n_hidden, n_classes, 'mean'))
     self.dropout = nn.Dropout(0.5)
     self.n_hidden = n_hidden
     self.n_classes = n_classes
     self.train_acc = Accuracy()
     self.val_acc = Accuracy()
Example 17
    def __init__(self, lr=0.01):
        super().__init__()

        self.lr = lr
        for i in range(3):
            setattr(self, f"layer_{i}", nn.Linear(32, 32))
            setattr(self, f"layer_{i}a", torch.nn.ReLU())
        setattr(self, "layer_end", nn.Linear(32, 3))

        self.train_acc = Accuracy()
        self.valid_acc = Accuracy()
        self.test_acc = Accuracy()
Example 18
    def __init__(self, num_features=24, num_classes=3, lr=0.01):
        super().__init__()
        self.save_hyperparameters()

        self.lr = lr
        for i in range(3):
            setattr(self, f"layer_{i}", nn.Linear(num_features, num_features))
            setattr(self, f"layer_{i}a", torch.nn.ReLU())
        setattr(self, "layer_end", nn.Linear(num_features, num_classes))

        self.train_acc = Accuracy()
        self.valid_acc = Accuracy()
        self.test_acc = Accuracy()
Example 19
    def __init__(self,
                 model: torch.nn.Module,
                 freeze_layers: List = [],
                 loss: str = 'ce',
                 max_epochs: int = 100,
                 optimizer: str = 'adam',
                 learning_rate: float = 1e-3,
                 weight_decay: float = 1e-5,
                 lr_scheduler: str = 'cosine',
                 lr_decay_steps: List = [60, 80],
                 lr_decay_rate: float = 0.1,
                 final_lr: float = 0.,
                 nesterov: bool = False,
                 **kwargs):
        super().__init__()
        self.save_hyperparameters(ignore='model')

        if freeze_layers:
            if 'ExcludeFC' in freeze_layers:
                for name, param in model.named_parameters():
                    if not any(layer in name
                               for layer in ['classifier', 'fc']):
                        param.requires_grad = False

                if hasattr(model, 'fc'):
                    model.fc.weight.data.normal_(mean=0.0, std=0.01)
                    model.fc.bias.data.zero_()
                elif hasattr(model, 'classifier'):
                    model.classifier.weight.data.normal_(mean=0.0, std=0.01)
                    model.classifier.bias.data.zero_()
                print('Re-initialized fc/classifier with normal(0, 0.01)\n')
            else:
                for name, param in model.named_parameters():
                    if any(layer in name for layer in freeze_layers):
                        param.requires_grad = False

        self.learning_rate = learning_rate
        self.model = model
        self.configure_loss()

        # metrics | Overall accuracy
        num_classes = kwargs.get('num_classes', None)
        self.train_acc = Accuracy(num_classes=num_classes)
        self.val_acc = Accuracy(num_classes=num_classes, compute_on_step=False)
        self.val_scores = ClassifyScore(num_classes,
                                        kwargs.get('classes', None))
        self.test_acc = Accuracy(num_classes=num_classes,
                                 compute_on_step=False)

        self.val_verbose = False
        self.final_val = False
Example 20
def test_average_accuracy_bin(preds, target, num_classes, exp_result, average, multiclass):
    acc = Accuracy(num_classes=num_classes, average=average, multiclass=multiclass)

    for batch in range(preds.shape[0]):
        acc(preds[batch], target[batch])

    assert (acc.compute() == tensor(exp_result)).all()

    # Test functional
    total_samples = target.shape[0] * target.shape[1]

    preds = preds.view(total_samples, -1)
    target = target.view(total_samples, -1)
    acc_score = accuracy(preds, target, num_classes=num_classes, average=average, multiclass=multiclass)
    assert (acc_score == tensor(exp_result)).all()
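The same class-versus-functional pairing with concrete binary values (illustrative; pre-0.11 torchmetrics with the default 0.5 threshold):

import torch
from torchmetrics import Accuracy
from torchmetrics.functional import accuracy

preds = torch.tensor([0.9, 0.1, 0.8, 0.3])  # probabilities, thresholded at 0.5
target = torch.tensor([1, 0, 1, 1])
acc = Accuracy()
print(acc(preds, target))       # class API -> tensor(0.7500)
print(accuracy(preds, target))  # functional API -> tensor(0.7500)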
Example 21
    def __init__(
        self,
        n_channel: int = 1,
        n_class: int = 2,
        learning_rate: float = 1e-4,
        class_weight: Optional[List[float]] = None,
        backbone: Union[str, nn.Module] = "simple-cnn",
        backbone_output_size: int = 0,
        n_hidden: int = 512,
        dropout: float = 0.2,
        lr_scheduler: bool = False,
        lr_scheduler_warmup_steps: int = 100,
        lr_scheduler_total_steps: int = 0,
        **kwargs,
    ):
        super().__init__()

        self.save_hyperparameters()

        if isinstance(backbone, str):
            self.backbone, backbone_output_size = get_backbone(
                backbone,
                channels=n_channel,
                dropout=dropout,
                **kwargs,
            )
        else:
            # a ready-made nn.Module backbone was passed in directly
            self.backbone = backbone

        self.classifier = Classifier(backbone_output_size, n_class, n_hidden,
                                     dropout)

        if class_weight is not None:
            class_weight = torch.tensor(class_weight, dtype=torch.float)
        self.loss_fn = nn.CrossEntropyLoss(weight=class_weight)

        self.train_accuracy = Accuracy()
        self.val_accuracy = Accuracy()
        self.test_metrics = MetricCollection([
            Accuracy(),
            F1(num_classes=self.hparams.n_class, average="macro"),
            Recall(num_classes=self.hparams.n_class,
                   average="macro"),  # balanced acc.
            StatScores(
                num_classes=self.hparams.n_class
                if self.hparams.n_class > 2 else 1,
                reduce="micro",
                multiclass=self.hparams.n_class > 2,
            ),
        ])
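A standalone sketch exercising just this test_metrics collection in the binary case (illustrative values; assumes a torchmetrics old enough that F1 and StatScores carry these signatures):

import torch
from torchmetrics import Accuracy, F1, MetricCollection, Recall, StatScores

n_class = 2
test_metrics = MetricCollection([
    Accuracy(),
    F1(num_classes=n_class, average="macro"),
    Recall(num_classes=n_class, average="macro"),
    StatScores(num_classes=1, reduce="micro", multiclass=False),
])
preds = torch.randint(0, n_class, (32,))
target = torch.randint(0, n_class, (32,))
test_metrics.update(preds, target)
print(test_metrics.compute())  # one entry per metric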