Example #1
    def __init__(
        self,
        num_classes: int,
        effdet_backbone: str = "tf_efficientdet_d4",
        strides: List[int] = [8, 16, 32, 64, 128],
        sizes: List[Tuple[int, int]] = [(-1, 64), (64, 128), (128, 256), (256, 512), (512, 10000000)],
        threshold: Optional[float] = None,
        nms_threshold: Optional[float] = None,
        *args,
        **kwargs,
    ):
        super().__init__(*args, **kwargs)
        self.save_hyperparameters()
        self.num_classes = int(num_classes)
        self.strides = [int(x) for x in strides]
        self.sizes = [(int(x), int(y)) for x, y in sizes]

        # TODO train this from scratch using combustion EfficientDet
        # self._model = EffDetFCOS.from_predefined(
        #    compound_coeff, self.num_classes, fpn_levels=[3, 5, 7, 8, 9], strides=self.strides
        # )

        self._model = create_model(effdet_backbone, pretrained=True)
        del self._model.box_net
        del self._model.class_net

        fpn_filters = self._model.config.fpn_channels
        num_repeats = 4

        self.fcos = FCOSDecoder(fpn_filters, self.num_classes, num_repeats, self.strides)

        self.threshold = float(threshold) if threshold is not None else 0.05
        self.nms_threshold = float(nms_threshold) if nms_threshold is not None else 0.1
        self._criterion = FCOSLoss(self.strides, self.num_classes, radius=1, interest_range=self.sizes)

        # metrics
        metrics = MetricCollection({
            f"ap{thresh}": BoxAveragePrecision(iou_threshold=thresh / 100, compute_on_step=True)
            for thresh in (25, 50, 75)
        })
        self.val_metrics = metrics.clone(prefix="val/")
        self.test_metrics = metrics.clone(prefix="test/")

        # freeze backbone
        for param in self._model.backbone.parameters():
            param.requires_grad = False
Example #2
    def __init__(self):
        super().__init__()
        self.metrics_list = ModuleList([DummyMetric() for _ in range(2)])
        self.metrics_dict = ModuleDict({"a": DummyMetric(), "b": DummyMetric()})
        self.metrics_collection_dict = MetricCollection({"a": DummyMetric(), "b": DummyMetric()})
        self.metrics_collection_dict_nested = ModuleDict(
            {"a": ModuleList([ModuleDict({"b": DummyMetric()}), DummyMetric()])}
        )
Example #3
    def __init__(self, feature_size, lstm_hidden_size, lstm_num_layers, n_tags,
                 max_len):
        super().__init__()
        self.padding = 0
        self.bi_lstm = nn.LSTM(feature_size,
                               lstm_hidden_size,
                               num_layers=lstm_num_layers,
                               bidirectional=True)
        self.attention = DeepAnalyzeAttention(lstm_hidden_size * 2, n_tags,
                                              max_len)
        self.crf = CRF(n_tags)
        self.lstm_dropout = nn.Dropout(0.25)

        self.train_metrics = MetricCollection(
            [Precision(), Recall(), TopkAccuracy(3)])
        self.val_metrics = MetricCollection(
            [Precision(), Recall(), TopkAccuracy(3)])
Example #4
def get_metric_AUROC(NUM_CLASS):
    metrics = MetricCollection(
        {
            "AUROC_macro": torchmetrics.AUROC(num_classes=NUM_CLASS),
        }
    )
    return metrics
Example #5
    def __init__(self, metrics: List = None):
        """Constructor class for MetricContainer.

        Args:
            metrics: The list of metrics
        """
        super(MetricCallback, self).__init__(CallbackOrder.METRICS)
        metrics = MetricCollection(metrics)
        self.metrics = {
            "train": metrics.clone(prefix="train_"),
            "eval": metrics.clone(prefix="val_"),
        }
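A minimal sketch of the clone-with-prefix pattern used above, outside any framework. It assumes only torch and an older torchmetrics release where Accuracy() needs no task argument; clone() yields independent state, and the prefix only changes the keys returned by compute().

import torch
from torchmetrics import Accuracy, MetricCollection

metrics = MetricCollection([Accuracy()])
train_metrics = metrics.clone(prefix="train_")
eval_metrics = metrics.clone(prefix="val_")

# each clone accumulates its own state
train_metrics.update(torch.tensor([0, 1, 1]), torch.tensor([0, 1, 0]))
eval_metrics.update(torch.tensor([1, 1]), torch.tensor([1, 0]))
print(train_metrics.compute())  # {'train_Accuracy': tensor(0.6667)}
print(eval_metrics.compute())   # {'val_Accuracy': tensor(0.5000)}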
Example #6
def test_raises_error_on_wrong_input():
    """Make sure that input type errors are raised on the wrong input."""
    with pytest.raises(TypeError, match="Metric arg need to be an instance of a .*"):
        MetricTracker([1, 2, 3])

    with pytest.raises(ValueError, match="Argument `maximize` should either be a single bool or list of bool"):
        MetricTracker(MeanAbsoluteError(), maximize=2)

    with pytest.raises(
        ValueError, match="The len of argument `maximize` should match the length of the metric collection"
    ):
        MetricTracker(MetricCollection([MeanAbsoluteError(), MeanSquaredError()]), maximize=[False, False, False])
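For contrast with the error cases above, a minimal sketch of valid MetricTracker usage; the method names are per recent torchmetrics releases, and maximize=False because a lower MAE is better.

import torch
from torchmetrics import MeanAbsoluteError, MetricTracker

tracker = MetricTracker(MeanAbsoluteError(), maximize=False)
for epoch in range(3):
    tracker.increment()  # start tracking a new step/epoch
    tracker.update(torch.randn(50), torch.randn(50))
print(tracker.compute_all())  # one MAE value per tracked epoch
best_value, best_epoch = tracker.best_metric(return_step=True)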
Example #7
    def __init__(self, dataloader_kNN, img_size=32):
        super().__init__(dataloader_kNN)
        # create a ViT backbone and remove the classification head
        vit = VisionTransformerGenerator(img_size=img_size)
        self.backbone = nn.Sequential(
            *list(vit.children())[:-1],
            # nn.AdaptiveAvgPool2d(1),
        )
        # resnet = lightly.models.ResNetGenerator('resnet-18')
        # self.backbone = nn.Sequential(
        #     *list(resnet.children())[:-1],
        #     nn.AdaptiveAvgPool2d(1),
        # )
        # create a SimSiam model based on the ViT backbone
        self.model = lightly.models.SimSiam(self.backbone, num_ftrs=768, num_mlp_layers=2)
        self.criterion = lightly.loss.SymNegCosineSimilarityLoss()
        self.metric = MetricCollection({"CollapseLevel": CollapseLevel()})
Example #8
    def __init__(self):
        super().__init__()

        # create a ResNet backbone and remove the classification head
        resnet = lightly.models.ResNetGenerator('resnet-18', 1, num_splits=8)
        backbone = nn.Sequential(
            *list(resnet.children())[:-1],
            nn.AdaptiveAvgPool2d(1),
        )
        self.metric = MetricCollection({"CollapseLevel": CollapseLevel()})
        # create a moco based on ResNet
        self.resnet_moco = \
            lightly.models.MoCo(backbone, num_ftrs=512, m=0.99, batch_shuffle=True)

        # create our loss with the optional memory bank
        self.criterion = lightly.loss.NTXentLoss(
            temperature=0.1, memory_bank_size=memory_bank_size)
Example #9
    def __init__(
        self,
        model_config: DictConfig,
        optimizer_config: DictConfig,
        vocabulary: Vocabulary,
        teacher_forcing: float = 0.0,
    ):
        super().__init__()
        self.save_hyperparameters()
        self._optim_config = optimizer_config
        self._vocabulary = vocabulary

        if vocabulary.SOS not in vocabulary.label_to_id:
            raise ValueError(f"Can't find SOS token in label to id vocabulary")

        self.__pad_idx = vocabulary.label_to_id[vocabulary.PAD]
        eos_idx = vocabulary.label_to_id[vocabulary.EOS]
        ignore_idx = [
            vocabulary.label_to_id[vocabulary.SOS],
            vocabulary.label_to_id[vocabulary.UNK]
        ]
        metrics: Dict[str, Metric] = {
            f"{holdout}_f1": SequentialF1Score(pad_idx=self.__pad_idx,
                                               eos_idx=eos_idx,
                                               ignore_idx=ignore_idx)
            for holdout in ["train", "val", "test"]
        }
        id2label = {v: k for k, v in vocabulary.label_to_id.items()}
        metrics.update({
            f"{holdout}_chrf": ChrF(id2label,
                                    ignore_idx + [self.__pad_idx, eos_idx])
            for holdout in ["val", "test"]
        })
        self.__metrics = MetricCollection(metrics)

        self._encoder = self._get_encoder(model_config)
        decoder_step = LSTMDecoderStep(model_config,
                                       len(vocabulary.label_to_id),
                                       self.__pad_idx)
        self._decoder = Decoder(decoder_step, len(vocabulary.label_to_id),
                                vocabulary.label_to_id[vocabulary.SOS],
                                teacher_forcing)

        self.__loss = SequenceCrossEntropyLoss(self.__pad_idx,
                                               reduction="batch-mean")
Example #10
    class TestModel(BoringModel):
        def __init__(self):
            super().__init__()
            self.metric = MetricCollection([SumMetric(), DiffMetric()])
            self.sum = 0.0
            self.diff = 0.0

        def training_step(self, batch, batch_idx):
            x = batch
            metric_vals = self.metric(x.sum())
            self.sum += x.sum()
            self.diff -= x.sum()
            self.log_dict({f"{k}_step": v for k, v in metric_vals.items()})
            return self.step(x)

        def training_epoch_end(self, outputs):
            metric_vals = self.metric.compute()
            self.log_dict({f"{k}_epoch": v for k, v in metric_vals.items()})
Example #11
def get_metrics_collections_base(NUM_CLASS, prefix):

    metrics = MetricCollection(
        {
            "Accuracy": Accuracy(),
            "Top_3": Accuracy(top_k=3),
            "Top_5": Accuracy(top_k=5),
            "Precision_micro": Precision(num_classes=NUM_CLASS,
                                         average="micro"),
            "Precision_macro": Precision(num_classes=NUM_CLASS,
                                         average="macro"),
            "Recall_micro": Recall(num_classes=NUM_CLASS, average="micro"),
            "Recall_macro": Recall(num_classes=NUM_CLASS, average="macro"),
            "F1_micro": torchmetrics.F1(NUM_CLASS, average="micro"),
            "F1_macro": torchmetrics.F1(NUM_CLASS, average="micro"),
        },
        prefix=prefix)

    return metrics
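A hedged usage sketch of the function above (hypothetical batch shapes; assumes a torchmetrics version that still exposes torchmetrics.F1). The prefix is prepended to every key returned by compute().

import torch

train_metrics = get_metrics_collections_base(NUM_CLASS=10, prefix="train_")
preds = torch.randn(8, 10).softmax(dim=-1)   # class probabilities
target = torch.randint(0, 10, (8,))
train_metrics.update(preds, target)
print(train_metrics.compute())  # keys like 'train_Accuracy', 'train_Top_3', ...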
Example #12
    def __init__(self,
                 hparams,
                 train_subjects,
                 validate_subjects,
                 class_weights=None):
        super(TrainRTBENE, self).__init__()
        assert class_weights is not None, "Class Weights can't be None"

        self.model = MODELS[hparams.model_base]()
        self._criterion = torch.nn.BCEWithLogitsLoss(
            pos_weight=torch.Tensor([class_weights[1]]))
        self._train_subjects = train_subjects
        self._validate_subjects = validate_subjects
        self._metrics = MetricCollection(
            [Accuracy(),
             F1(), Precision(),
             Recall(), Specificity()])
        self.save_hyperparameters(
            hparams,
            ignore=["train_subjects", "validate_subjects", "class_weights"])
Example #13
        def test_metrics_w_likelihood(self):
            metric = MeanAbsolutePercentageError()
            metric_collection = MetricCollection(
                [MeanAbsolutePercentageError(),
                 MeanAbsoluteError()])

            # test single metric
            model = RNNModel(
                12,
                "RNN",
                10,
                10,
                n_epochs=1,
                likelihood=GaussianLikelihood(),
                torch_metrics=metric,
            )
            model.fit(self.series)

            # test metric collection
            model = RNNModel(
                12,
                "RNN",
                10,
                10,
                n_epochs=1,
                likelihood=GaussianLikelihood(),
                torch_metrics=metric_collection,
            )
            model.fit(self.series)

            # test multivariate series
            model = RNNModel(
                12,
                "RNN",
                10,
                10,
                n_epochs=1,
                likelihood=GaussianLikelihood(),
                torch_metrics=metric_collection,
            )
            model.fit(self.multivariate_series)
Example #14
def get_metrics_collections_base(NUM_CLASS,
                                 # device="cuda" if torch.cuda.is_available() else "cpu",
                                 ):
    metrics = MetricCollection(
        {
            "Accuracy": Accuracy(),
            "Top_3": Accuracy(top_k=3),
            "Top_5": Accuracy(top_k=5),
            "Precision_micro": Precision(num_classes=NUM_CLASS, average="micro"),
            "Precision_macro": Precision(num_classes=NUM_CLASS, average="macro"),
            "Recall_micro": Recall(num_classes=NUM_CLASS, average="micro"),
            "Recall_macro": Recall(num_classes=NUM_CLASS, average="macro"),
            "F1_micro": torchmetrics.F1(NUM_CLASS, average="micro"),
            "F1_macro": torchmetrics.F1(NUM_CLASS, average="macro"),
        }
    )
    return metrics
Example #15
    def __init__(self, model_config: DictConfig, optimizer_config: DictConfig,
                 vocabulary: Vocabulary):
        super().__init__()
        self.save_hyperparameters()
        self._optim_config = optimizer_config

        self._encoder = PathEncoder(
            model_config,
            len(vocabulary.token_to_id),
            vocabulary.token_to_id[Vocabulary.PAD],
            len(vocabulary.node_to_id),
            vocabulary.node_to_id[Vocabulary.PAD],
        )

        self._classifier = Classifier(model_config,
                                      len(vocabulary.label_to_id))

        metrics: Dict[str, Metric] = {
            f"{holdout}_acc": Accuracy(num_classes=len(vocabulary.label_to_id))
            for holdout in ["train", "val", "test"]
        }
        self.__metrics = MetricCollection(metrics)
Example #16
def get_metrics(metric_threshold, monitor_metrics, num_classes):
    macro_prec = Precision(num_classes, metric_threshold, average='macro')
    macro_recall = Recall(num_classes, metric_threshold, average='macro')
    another_macro_f1 = 2 * (macro_prec * macro_recall) / (macro_prec +
                                                          macro_recall + 1e-10)
    metrics = {
        'Micro-Precision':
        Precision(num_classes, metric_threshold, average='micro'),
        'Micro-Recall':
        Recall(num_classes, metric_threshold, average='micro'),
        'Micro-F1':
        F1(num_classes, metric_threshold, average='micro'),
        'Macro-F1':
        F1(num_classes, metric_threshold, average='macro'),
        # The f1 value of macro_precision and macro_recall. This variant of
        # macro_f1 is less preferred but is used in some works. Please
        # refer to Opitz et al. 2019 [https://arxiv.org/pdf/1911.03347.pdf]
        'Another-Macro-F1':
        another_macro_f1,
    }
    for metric in monitor_metrics:
        if isinstance(metric, Metric):  # customized metric
            metrics[type(metric).__name__] = metric
        elif re.match(r'P@\d+', metric):
            metrics[metric] = Precision(num_classes,
                                        average='samples',
                                        top_k=int(metric[2:]))
        elif re.match(r'R@\d+', metric):
            metrics[metric] = Recall(num_classes,
                                     average='samples',
                                     top_k=int(metric[2:]))
        elif metric not in [
                'Micro-Precision', 'Micro-Recall', 'Micro-F1', 'Macro-F1',
                'Another-Macro-F1'
        ]:
            raise ValueError(f'Invalid metric: {metric}')

    return MetricCollection(metrics)
Example #17
 def __init__(self, config=None, pretrained_word_embedding=None):
     super(LSTUR, self).__init__()
     self.config = config
     self.news_encoder = NewsEncoder(config, pretrained_word_embedding)
     self.user_encoder = UserEncoder(config)
     assert int(config.num_filters * 0.5) == config.num_filters * 0.5
     self.user_embedding = nn.Embedding(
         config.num_users,
         config.num_filters if config.long_short_term_method == 'ini'
         else int(config.num_filters * 0.5),
         padding_idx=0)
     # val metrics
     self.val_performance_metrics = MetricCollection({
         'val_auc': AUC(),
         'val_mrr': MRR(),
         'val_ndcg@5': NDCG(k=5),
         'val_ndcg@10': NDCG(k=10)
     })
     self.val_sentiment_diversity_metrics_vader = MetricCollection({
         'val_senti_mrr_vader': SentiMRR(),
         'val_senti@5_vader': Senti(k=5),
         'val_senti@10_vader': Senti(k=10)
     })
     self.val_sentiment_diversity_metrics_bert = MetricCollection({
         'val_senti_mrr_bert': SentiMRR(),
         'val_senti@5_bert': Senti(k=5),
         'val_senti@10_bert': Senti(k=10)
     })
     # test metrics
     self.test_performance_metrics = MetricCollection({
         'test_auc': AUC(),
         'test_mrr': MRR(),
         'test_ndcg@5': NDCG(k=5),
         'test_ndcg@10': NDCG(k=10)
     })
     self.test_sentiment_diversity_metrics_vader = MetricCollection({
         'test_senti_mrr_vader': SentiMRR(),
         'test_senti@5_vader': Senti(k=5),
         'test_senti@10_vader': Senti(k=10)
     })
     self.test_sentiment_diversity_metrics_bert = MetricCollection({
         'test_senti_mrr_bert': SentiMRR(),
         'test_senti@5_bert': Senti(k=5),
         'test_senti@10_bert': Senti(k=10)
     })
     self.test_topic_diversity_metrics = MetricCollection({
         'test_topic_mrr': TopicMRR(),
         'test_topic_div@5': Topic(k=5),
         'test_topic_div@10': Topic(k=10)
     })
     self.test_ils_senti_metrics_vader = MetricCollection({
         'test_ils_senti@5_vader': ILS_Senti(k=5),
         'test_ils_senti@10_vader': ILS_Senti(k=10) 
     })
     self.test_ils_senti_metrics_bert = MetricCollection({
         'test_ils_senti@5_bert': ILS_Senti(k=5),
         'test_ils_senti@10_bert': ILS_Senti(k=10) 
     })  
     self.test_ils_topic_metrics = MetricCollection({
         'test_ils_topic@5': ILS_Topic(k=5),
         'test_ils_topic@10': ILS_Topic(k=10) 
     })
Example #18
class ImageRegression(BaseModel):
    """
    Model for image regression.
    This is a configurable class composed by a backbone (see solarnet.models.backbone.py) and
    a regressor head (actually a classifier with 1 output).
    It is also a LightningModule and nn.Module.
    """
    def __init__(
        self,
        n_channel: int = 1,
        learning_rate: float = 1e-4,
        backbone: Union[str, nn.Module] = "simple-cnn",
        backbone_output_size: int = 0,
        n_hidden: int = 512,
        dropout: float = 0.2,
        loss_fn: str = "mse",
        lr_scheduler: bool = False,
        lr_scheduler_warmup_steps: int = 100,
        lr_scheduler_total_steps: int = 0,
        **kwargs,
    ):
        super().__init__()

        self.save_hyperparameters()

        if isinstance(backbone, str):
            self.backbone, backbone_output_size = get_backbone(
                backbone, channels=n_channel, dropout=dropout, **kwargs)

        self.regressor = Classifier(backbone_output_size, 1, n_hidden, dropout)

        if loss_fn == "mse":
            self.loss_fn = nn.MSELoss()
        elif loss_fn == "mae":
            self.loss_fn = nn.L1Loss()  # MAE
        else:
            raise RuntimeError("Undefined loss function")

        self.test_metrics = MetricCollection([
            MeanAbsoluteError(),
            MeanSquaredError(),
        ])

    @property
    def backbone_name(self) -> str:
        if isinstance(self.hparams.backbone, str):
            return self.hparams.backbone
        else:
            return type(self.hparams.backbone).__name__

    @property
    def output_size(self) -> int:
        return 1

    @auto_move_data
    def forward(self, image):
        return self.regressor(self.backbone(image))

    def training_step(self, batch, batch_id):
        return self.step(batch, step_type="train")

    def validation_step(self, batch, batch_id):
        return self.step(batch, step_type="val")

    def step(self, batch, step_type: str):
        image, y = batch
        y_pred = self(image)
        y_pred = torch.flatten(y_pred)
        y = y.float()
        loss = self.loss_fn(y_pred, y)

        self.log(f"{step_type}_loss", loss, prog_bar=True, sync_dist=True)

        return loss

    def test_step(self, batch, batch_idx):
        image, y = batch
        y_pred = self(image)
        y_pred = torch.flatten(y_pred)
        y = log_min_max_inverse_scale(y)
        y_pred = log_min_max_inverse_scale(y_pred)

        self.test_metrics(y_pred, y)

    def test_epoch_end(self, outs):
        test_metrics = self.test_metrics.compute()
        self.log("test_mae", test_metrics["MeanAbsoluteError"])
        self.log("test_mse", test_metrics["MeanSquaredError"])

    def configure_optimizers(self):
        logger.info(f"configure_optimizers lr={self.hparams.learning_rate}")

        optimizer = optim.Adam(
            filter(lambda p: p.requires_grad, self.parameters()),
            lr=self.hparams.learning_rate,
        )

        if not self.hparams.lr_scheduler:
            return optimizer

        scheduler = optim.lr_scheduler.LambdaLR(
            optimizer,
            linear_warmup_decay(self.hparams.lr_scheduler_warmup_steps,
                                self.hparams.lr_scheduler_total_steps,
                                cosine=True),
        )

        return ({
            "optimizer": optimizer,
            "lr_scheduler": {
                "scheduler": scheduler,
                "interval": "step",
                "frequency": 1,
            },
        }, )
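A minimal usage sketch of the class above (hypothetical image size; assumes "simple-cnn" is a backbone known to get_backbone and that torch is importable).

import torch

model = ImageRegression(n_channel=1, backbone="simple-cnn", loss_fn="mae")
dummy_images = torch.randn(4, 1, 64, 64)  # (batch, channels, height, width)
predictions = model(dummy_images)         # one regressed value per image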
Example #19
class TrainingModule(pl.LightningModule):
    def __init__(self, tagger: LstmTagger):
        super().__init__()
        self.tagger = tagger

        self.train_metrics = MetricCollection([Precision(), Recall(), TopkAccuracy(1)])
        self.val_metrics = MetricCollection([Precision(), Recall(), TopkAccuracy(1)])

        self.softmax = torch.nn.Softmax(dim=-1)
        self.celoss = SequenceCrossEntropyLoss(reduction="batch-mean", pad_idx=2)

    def training_step(self, batch, batch_idx):
        reports, target, masks = batch
        mask = torch.cat(masks, dim=1)

        if self.tagger.with_crf:
            emissions = torch.cat([self.tagger.calc_emissions(report, mask) for report, mask in zip(reports, masks)],
                                  dim=1)
            loss = -self.tagger.crf(emissions, target, mask)
        else:
            scores = self.tagger.forward(reports, masks)
            loss = self.celoss(scores, target)

        with torch.no_grad():
            scores = self.tagger.forward(reports, masks)
            preds = scores.argmax(dim=-1)

        scores = self.softmax(scores)
        self.train_metrics.update(preds, target, mask, scores=scores)

        self.log("train_loss", loss)

        return loss

    def validation_step(self, batch, *args):
        reports, target, masks = batch
        mask = torch.cat(masks, dim=1)
        if self.tagger.with_crf:
            emissions = torch.cat([self.tagger.calc_emissions(report, mask) for report, mask in zip(reports, masks)],
                                  dim=1)
            loss = -self.tagger.crf(emissions, target, mask)
        else:
            scores = self.tagger.forward(reports, masks)
            loss = self.celoss(scores, target)

        with torch.no_grad():
            scores = self.tagger.forward(reports, masks)
            preds = scores.argmax(dim=-1)

        scores = self.softmax(scores)
        self.val_metrics.update(preds, target, mask, scores=scores)

        return loss

    def validation_epoch_end(self, outputs: List[Any]) -> None:
        super().validation_epoch_end(outputs)
        self.log("val_metrics", self.val_metrics.compute())
        print(self.val_metrics.compute())
        self.val_metrics.reset()

    def training_epoch_end(self, outputs: List[Any]) -> None:
        super().training_epoch_end(outputs)
        self.log("train_metrics", self.train_metrics.compute())
        self.train_metrics.reset()

    def configure_optimizers(self):
        return Adam(self.parameters(), lr=1e-4, weight_decay=1e-5)
Example #20
 def __init__(self, config=None, pretrained_word_embedding=None):
     super(SENTIREC, self).__init__()
     self.config = config
     news_encoder = NewsEncoder(config, pretrained_word_embedding)
     self.news_encoder = TimeDistributed(news_encoder, batch_first=True)
     self.user_encoder = UserEncoder(config)
     self.sentiment_predictor = nn.Linear(config.word_embedding_dim, 1)
     # val metrics
     self.val_performance_metrics = MetricCollection({
         'val_auc':
         AUC(),
         'val_mrr':
         MRR(),
         'val_ndcg@5':
         NDCG(k=5),
         'val_ndcg@10':
         NDCG(k=10)
     })
     self.val_sentiment_diversity_metrics_vader = MetricCollection({
         'val_senti_mrr_vader':
         SentiMRR(),
         'val_senti@5_vader':
         Senti(k=5),
         'val_senti@10_vader':
         Senti(k=10)
     })
     self.val_sentiment_diversity_metrics_bert = MetricCollection({
         'val_senti_mrr_bert':
         SentiMRR(),
         'val_senti@5_bert':
         Senti(k=5),
         'val_senti@10_bert':
         Senti(k=10)
     })
     # test metrics
     self.test_performance_metrics = MetricCollection({
         'test_auc':
         AUC(),
         'test_mrr':
         MRR(),
         'test_ndcg@5':
         NDCG(k=5),
         'test_ndcg@10':
         NDCG(k=10)
     })
     self.test_sentiment_diversity_metrics_vader = MetricCollection({
         'test_senti_mrr_vader':
         SentiMRR(),
         'test_senti@5_vader':
         Senti(k=5),
         'test_senti@10_vader':
         Senti(k=10)
     })
     self.test_sentiment_diversity_metrics_bert = MetricCollection({
         'test_senti_mrr_bert':
         SentiMRR(),
         'test_senti@5_bert':
         Senti(k=5),
         'test_senti@10_bert':
         Senti(k=10)
     })
     self.test_topic_diversity_metrics = MetricCollection({
         'test_topic_mrr':
         TopicMRR(),
         'test_topic_div@5':
         Topic(k=5),
         'test_topic_div@10':
         Topic(k=10)
     })
     self.test_ils_senti_metrics_vader = MetricCollection({
         'test_ils_senti@5_vader':
         ILS_Senti(k=5),
         'test_ils_senti@10_vader':
         ILS_Senti(k=10)
     })
     self.test_ils_senti_metrics_bert = MetricCollection({
         'test_ils_senti@5_bert':
         ILS_Senti(k=5),
         'test_ils_senti@10_bert':
         ILS_Senti(k=10)
     })
     self.test_ils_topic_metrics = MetricCollection({
         'test_ils_topic@5':
         ILS_Topic(k=5),
         'test_ils_topic@10':
         ILS_Topic(k=10)
     })
Example #21
        Returns a copy of the input model.
        Note: the model should have been pruned beforehand so that its pruning buffers (masks) exist and can be copied.
    """
    new_model = create_model(model.__class__, device)
    source_params = dict(model.named_parameters())
    source_buffer = dict(model.named_buffers())
    for name, param in new_model.named_parameters():
        param.data.copy_(source_params[name].data)
    for name, buffer_ in new_model.named_buffers():
        buffer_.data.copy_(source_buffer[name].data)
    return new_model


metrics = MetricCollection([
    Accuracy(),
    Precision(),
    Recall(),
    F1(),
])


def train(model: nn.Module,
          train_dataloader: DataLoader,
          lr: float = 1e-3,
          device: str = 'cuda:0',
          fast_dev_run=False,
          verbose=True) -> Dict[str, torch.Tensor]:

    optimizer = torch.optim.Adam(lr=lr, params=model.parameters())
    loss_fn = nn.CrossEntropyLoss()
    num_batch = len(train_dataloader)
    global metrics
Example #22
class ClassifierBackBone(pl.LightningModule):
    def __init__(self):
        super(ClassifierBackBone, self).__init__()
        self.back_bone = nn.Sequential(nn.Conv2d(3, 32, (7, 7), stride=(2, 2)),
                                       ResidualBlock(32, 32),
                                       ResidualBlock(32, 64),
                                       ResidualBottleneck(64, 2),
                                       ResidualBlock(64, 64),
                                       ResidualBlock(64, 128),
                                       ResidualBottleneck(128, 2),
                                       ResidualBlock(128, 128),
                                       ResidualBlock(128, 256),
                                       ResidualBottleneck(256, 2),
                                       ResidualBlock(256, 256), nn.Flatten(),
                                       nn.Linear(256 * 25 * 25, 1),
                                       nn.Sigmoid())
        self.criterion = torch.nn.BCELoss()
        self.train_metrics = MetricCollection({
            'train_accuracy':
            Accuracy(compute_on_step=False),
            'train_precision':
            Precision(compute_on_step=False),
            'train_recall':
            Recall(compute_on_step=False),
        })
        self.val_metrics = MetricCollection({
            'val_accuracy':
            Accuracy(compute_on_step=False),
            'val_precision':
            Precision(compute_on_step=False),
            'val_recall':
            Recall(compute_on_step=False)
        })

    def forward(self, x):
        return self.back_bone(x)

    def configure_optimizers(self):
        optimizer_func = torch.optim.Adam(self.parameters(), lr=1e-2)
        return optimizer_func

    def training_step(self, train_batch, batch_idx):
        inp, label = train_batch
        out = self.back_bone(inp)
        loss = self.criterion(out, label)
        out = torch.round(out).to(int).to('cpu')
        label = label.to(int).to('cpu')
        self.train_metrics(out, label)
        self.log("train_loss", loss.item())
        return loss

    def validation_step(self, val_batch, batch_idx):
        inp, label = val_batch
        out = self.back_bone(inp)
        loss = self.criterion(out, label)
        out = torch.round(out).to(int).to('cpu')
        label = label.to(int).to('cpu')
        self.val_metrics(out, label)
        self.log("val_loss", loss.item(), prog_bar=True)
        return loss

    def on_train_epoch_end(self, outputs):
        metrics = self.train_metrics.compute()
        self.logger.experiment.add_scalars('Train', metrics)

    def on_validation_epoch_end(self):
        metrics = self.val_metrics.compute()
        self.logger.experiment.add_scalars('Validation', metrics)
Example #23
        if method_input is not None:
            getattr(tracker, method)(*method_input)
        else:
            getattr(tracker, method)()


@pytest.mark.parametrize(
    "base_metric, metric_input, maximize",
    [
        (Accuracy(num_classes=10), (torch.randint(10, (50,)), torch.randint(10, (50,))), True),
        (Precision(num_classes=10), (torch.randint(10, (50,)), torch.randint(10, (50,))), True),
        (Recall(num_classes=10), (torch.randint(10, (50,)), torch.randint(10, (50,))), True),
        (MeanSquaredError(), (torch.randn(50), torch.randn(50)), False),
        (MeanAbsoluteError(), (torch.randn(50), torch.randn(50)), False),
        (
            MetricCollection([Accuracy(num_classes=10), Precision(num_classes=10), Recall(num_classes=10)]),
            (torch.randint(10, (50,)), torch.randint(10, (50,))),
            True,
        ),
        (
            MetricCollection([Accuracy(num_classes=10), Precision(num_classes=10), Recall(num_classes=10)]),
            (torch.randint(10, (50,)), torch.randint(10, (50,))),
            [True, True, True],
        ),
        (MetricCollection([MeanSquaredError(), MeanAbsoluteError()]), (torch.randn(50), torch.randn(50)), False),
        (
            MetricCollection([MeanSquaredError(), MeanAbsoluteError()]),
            (torch.randn(50), torch.randn(50)),
            [False, False],
        ),
    ],
Example #24
 def __init__(self):
     super().__init__()
     self.metric = MetricCollection([SumMetric(), DiffMetric()])
     self.sum = 0.0
     self.diff = 0.0
Example #25
class DeepAnalyze(pl.LightningModule):
    def __init__(self, feature_size, lstm_hidden_size, lstm_num_layers, n_tags,
                 max_len):
        super().__init__()
        self.padding = 0
        self.bi_lstm = nn.LSTM(feature_size,
                               lstm_hidden_size,
                               num_layers=lstm_num_layers,
                               bidirectional=True)
        self.attention = DeepAnalyzeAttention(lstm_hidden_size * 2, n_tags,
                                              max_len)
        self.crf = CRF(n_tags)
        self.lstm_dropout = nn.Dropout(0.25)

        self.train_metrics = MetricCollection(
            [Precision(), Recall(), TopkAccuracy(3)])
        self.val_metrics = MetricCollection(
            [Precision(), Recall(), TopkAccuracy(3)])

    def forward(self, inputs, mask):
        seq_len, batch_size = mask.shape

        x, _ = self.bi_lstm(inputs)
        x = self.lstm_dropout(x)
        x = self.attention(x, mask)
        preds = self.crf.decode(x, mask)

        preds = [pred + [0] * (seq_len - len(pred)) for pred in preds]
        preds = torch.tensor(preds).transpose(0, 1).to(inputs.device)

        return preds

    def training_step(self, batch, batch_idx):
        inputs, labels, mask = batch
        x, _ = self.bi_lstm(inputs)
        x = self.lstm_dropout(x)
        emissions = self.attention(x, mask)

        loss = -self.crf(emissions, labels, mask)

        with torch.no_grad():
            preds = self.forward(inputs, mask)

        self.train_metrics.update(preds,
                                  labels,
                                  mask,
                                  scores=get_label_scores(
                                      self.crf, emissions, preds, mask))

        self.log("train_loss", loss)

        return loss

    def validation_step(self, batch, *args):
        inputs, labels, mask = batch
        x, _ = self.bi_lstm(inputs)
        x = self.lstm_dropout(x)
        emissions = self.attention(x, mask)
        loss = -self.crf(emissions, labels, mask)
        with torch.no_grad():
            preds = self.forward(inputs, mask)

        self.val_metrics.update(preds,
                                labels,
                                mask,
                                scores=get_label_scores(
                                    self.crf, emissions, preds, mask))
        return loss

    def validation_epoch_end(self, outputs: List[Any]) -> None:
        super().validation_epoch_end(outputs)
        self.log("val_metrics", self.val_metrics.compute())
        self.val_metrics.reset()

    def training_epoch_end(self, outputs: List[Any]) -> None:
        super().training_epoch_end(outputs)
        self.log("train_metrics", self.train_metrics.compute())
        self.train_metrics.reset()

    def configure_optimizers(self):
        return Adam(self.parameters(), lr=1e-3)
Example #26
@pytest.mark.parametrize(
    "base_metric, metric_input, maximize",
    [
        (Accuracy(num_classes=10),
         (torch.randint(10, (50, )), torch.randint(10, (50, ))), True),
        (Precision(num_classes=10),
         (torch.randint(10, (50, )), torch.randint(10, (50, ))), True),
        (Recall(num_classes=10),
         (torch.randint(10, (50, )), torch.randint(10, (50, ))), True),
        (MeanSquaredError(), (torch.randn(50), torch.randn(50)), False),
        (MeanAbsoluteError(), (torch.randn(50), torch.randn(50)), False),
        (
            MetricCollection([
                Accuracy(num_classes=10),
                Precision(num_classes=10),
                Recall(num_classes=10)
            ]),
            (torch.randint(10, (50, )), torch.randint(10, (50, ))),
            True,
        ),
        (
            MetricCollection([
                Accuracy(num_classes=10),
                Precision(num_classes=10),
                Recall(num_classes=10)
            ]),
            (torch.randint(10, (50, )), torch.randint(10, (50, ))),
            [True, True, True],
        ),
        (MetricCollection([MeanSquaredError(),
Example #27
def get_metrics(metric_threshold, monitor_metrics, num_classes):
    """Map monitor metrics to the corresponding classes defined in `torchmetrics.Metric`
    (https://torchmetrics.readthedocs.io/en/latest/references/modules.html).

    Args:
        metric_threshold (float): Threshold to monitor for metrics.
        monitor_metrics (list): Metrics to monitor while validating.
        num_classes (int): Total number of classes.

    Raises:
        ValueError: The metric is invalid if:
            (1) It is not one of 'P@k', 'R@k', 'RP@k', 'nDCG@k', 'Micro-Precision',
                'Micro-Recall', 'Micro-F1', 'Macro-F1', 'Another-Macro-F1', or a
                `torchmetrics.Metric`.
            (2) Metric@k: k is greater than `num_classes`.

    Returns:
        torchmetrics.MetricCollection: A collections of `torchmetrics.Metric` for evaluation.
    """
    if monitor_metrics is None:
        monitor_metrics = []

    metrics = dict()
    for metric in monitor_metrics:
        if isinstance(metric, Metric):  # customized metric
            metrics[type(metric).__name__] = metric
            continue

        match_top_k = re.match(r'\b(P|R|RP|nDCG)\b@(\d+)', metric)
        match_metric = re.match(r'\b(Micro|Macro)\b-\b(Precision|Recall|F1)\b',
                                metric)

        if match_top_k:
            metric_abbr = match_top_k.group(1)  # P, R, RP, or nDCG
            top_k = int(match_top_k.group(2))
            if top_k >= num_classes:
                raise ValueError(
                    f'Invalid metric: {metric}. {top_k} is greater than {num_classes}.'
                )
            if metric_abbr == 'P':
                metrics[metric] = Precision(num_classes,
                                            average='samples',
                                            top_k=top_k)
            elif metric_abbr == 'R':
                metrics[metric] = Recall(num_classes,
                                         average='samples',
                                         top_k=top_k)
            elif metric_abbr == 'RP':
                metrics[metric] = RPrecision(top_k=top_k)
            elif metric_abbr == 'nDCG':
                metrics[metric] = NDCG(top_k=top_k)
                # The implementation in torchmetrics stores the prediction/target of all batches,
                # which can lead to CUDA out of memory.
                # metrics[metric] = RetrievalNormalizedDCG(k=top_k)
        elif metric == 'Another-Macro-F1':
            metrics[metric] = MacroF1(num_classes,
                                      metric_threshold,
                                      another_macro_f1=True)
        elif metric == 'Macro-F1':
            metrics[metric] = MacroF1(num_classes, metric_threshold)
        elif match_metric:
            average_type = match_metric.group(1).lower()  # micro or macro
            metric_type = match_metric.group(2)  # Precision, Recall, or F1
            metrics[metric] = getattr(torchmetrics.classification,
                                      metric_type)(num_classes,
                                                   metric_threshold,
                                                   average=average_type)
        else:
            raise ValueError(
                f'Invalid metric: {metric}. Make sure the metric is in the right format: Macro/Micro-Precision/Recall/F1 (ex. Micro-F1)'
            )

    return MetricCollection(metrics)
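A hedged usage sketch of get_metrics; the metric names follow the formats described in the docstring, and the call assumes the project's custom RPrecision/NDCG/MacroF1 classes referenced above are importable.

monitor = ["P@5", "nDCG@10", "Micro-F1", "Another-Macro-F1"]
collection = get_metrics(metric_threshold=0.5, monitor_metrics=monitor, num_classes=100)
print(list(collection.keys()))  # the four requested metrics, keyed by name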
Example #28
def main(csv_file, data_path):
    gpu = 'cuda:0' if torch.cuda.is_available() else 'cpu'

    data_set = CSVDataset(csv_file, data_path)
    tb_logger = TensorBoardLogger(save_dir='./logs/')

    data_len = len(data_set)
    train_len, val_len = int(0.6 * data_len), int(0.2 * data_len)
    test_len = data_len - (train_len + val_len)

    train_set, val_set, test_set = random_split(
        data_set, (train_len, val_len, test_len)
    )

    train_loader = DataLoader(train_set, batch_size=16, shuffle=True)
    val_loader = DataLoader(val_set, batch_size=16)
    test_loader = DataLoader(test_set, batch_size=16)

    model = ClassifierBackBone().to(gpu)
    optimizer = torch.optim.Adam(model.parameters(), lr=0.1)
    criterion = torch.nn.BCELoss()

    writer = tensorboard.SummaryWriter('./logs')
    writer.add_graph(model, input_to_model=torch.randn(1, 3, 400, 400))

    train_collection = MetricCollection([Accuracy(compute_on_step=False)])
    val_collection = MetricCollection([Accuracy(compute_on_step=False)])

    for ep in range(1, 1000):
        loss_val = 0
        with tqdm.tqdm(train_loader, unit="batch") as train_epoch:
            for idx, (inp, label) in enumerate(train_epoch):
                train_epoch.set_description(f'Train: {ep}')
                inp, label = inp.to(gpu), label.to(gpu)
                optimizer.zero_grad()
                out = model(inp)
                loss = criterion(out, label)
                loss.backward()
                optimizer.step()
                loss_val += loss.item()
                out, label = torch.round(out).to(int).to('cpu'), label.to(int).to('cpu')
                train_collection(out, label)
                train_epoch.set_postfix(loss=loss_val / (idx + 1))
            writer.add_scalars('Training', train_collection.compute(), ep)
            train_collection.reset()

        val_loss_val = 0
        with tqdm.tqdm(val_loader, unit="batch") as val_epoch:
            for idx, (inp, label) in enumerate(val_epoch):
                with torch.no_grad():
                    inp, label = inp.to(gpu), label.to(gpu)
                    out = model(inp)
                    loss = criterion(out, label)
                    val_loss_val += loss.item()
                    out, label = torch.round(out).to(int).to('cpu'), label.to(int).to('cpu')
                    val_collection(out, label)
                    val_epoch.set_postfix(loss=val_loss_val / (idx + 1))
        writer.add_scalars('Validation', val_collection.compute(), ep)
        val_collection.reset()
        writer.add_scalars('Loss', {
            'training': loss_val / len(train_loader),
            'validation': val_loss_val / len(val_loader)
        }, ep)
        break
Example #29
class Ranker(LightningModule):
    """Base class for rankers. Implements AP, RR and nDCG for validation and testing.
    This class needs to be extended and the following methods must be implemented:
        * forward
        * configure_optimizers (alternatively, this can be implemented in the data module)
    """
    def __init__(
        self,
        training_mode: TrainingMode = TrainingMode.POINTWISE,
        pairwise_loss_margin: float = 1.0,
    ) -> None:
        """Constructor.

        Args:
            training_mode (TrainingMode, optional): How to train the model. Defaults to TrainingMode.POINTWISE.
            pairwise_loss_margin (float, optional): Margin used in pairwise loss. Defaults to 1.0.
        """
        super().__init__()
        self.training_mode = training_mode
        self.pairwise_loss_margin = pairwise_loss_margin
        self.bce = torch.nn.BCEWithLogitsLoss()

        metrics = [RetrievalMAP, RetrievalMRR, RetrievalNormalizedDCG]
        self.val_metrics = MetricCollection(
            [M(compute_on_step=False) for M in metrics],
            prefix="val_",
        )
        self.test_metrics = MetricCollection(
            [M(compute_on_step=False) for M in metrics],
            prefix="test_",
        )

    def training_step(
        self,
        batch: Union[PointwiseTrainingBatch, PairwiseTrainingBatch],
        batch_idx: int,
    ) -> torch.Tensor:
        """Train a single batch.

        Args:
            batch (Union[PointwiseTrainingBatch, PairwiseTrainingBatch]): A training batch.
            batch_idx (int): Batch index.

        Returns:
            torch.Tensor: Training loss.
        """
        if self.training_mode == TrainingMode.POINTWISE:
            model_batch, labels, _ = batch
            loss = self.bce(self(model_batch).flatten(), labels.flatten())
        elif self.training_mode == TrainingMode.PAIRWISE:
            pos_model_batch, neg_model_batch, _ = batch
            pos_outputs = torch.sigmoid(self(pos_model_batch))
            neg_outputs = torch.sigmoid(self(neg_model_batch))
            loss = torch.mean(
                torch.clamp(self.pairwise_loss_margin - pos_outputs +
                            neg_outputs,
                            min=0))

        self.log("train_loss", loss)
        return loss

    def validation_step(self, batch: ValTestBatch,
                        batch_idx: int) -> Dict[str, torch.Tensor]:
        """Process a validation batch. The returned query IDs are internal IDs.

        Args:
            batch (ValTestBatch): A validation batch.
            batch_idx (int): Batch index.

        Returns:
            Dict[str, torch.Tensor]: Query IDs, scores and labels.
        """
        model_batch, q_ids, labels = batch
        return {
            "q_ids": q_ids,
            "scores": self(model_batch).flatten(),
            "labels": labels
        }

    def validation_step_end(self, step_results: Dict[str,
                                                     torch.Tensor]) -> None:
        """Update the validation metrics.

        Args:
            step_results (Dict[str, torch.Tensor]): Results from a validation step.
        """
        self.val_metrics(
            step_results["scores"],
            step_results["labels"],
            indexes=step_results["q_ids"],
        )

    def validation_epoch_end(
            self, val_results: Iterable[Dict[str, torch.Tensor]]) -> None:
        """Compute validation metrics.

        Args:
            val_results (Iterable[Dict[str, torch.Tensor]]): Results of the validation steps.
        """
        for metric, value in self.val_metrics.compute().items():
            self.log(metric, value, sync_dist=True)
        self.val_metrics.reset()

    def test_step(self, batch: ValTestBatch,
                  batch_idx: int) -> Dict[str, torch.Tensor]:
        """Process a test batch. The returned query IDs are internal IDs.

        Args:
            batch (ValTestBatch): A test batch.
            batch_idx (int): Batch index.

        Returns:
            Dict[str, torch.Tensor]: Query IDs, scores and labels.
        """
        model_batch, q_ids, labels = batch
        return {
            "q_ids": q_ids,
            "scores": self(model_batch).flatten(),
            "labels": labels
        }

    def test_step_end(self, step_results: Dict[str, torch.Tensor]) -> None:
        """Update the test metrics.

        Args:
            step_results (Dict[str, torch.Tensor]): Results from a test step.
        """
        self.test_metrics(
            step_results["scores"],
            step_results["labels"],
            indexes=step_results["q_ids"],
        )

    def test_epoch_end(
            self, test_results: Iterable[Dict[str, torch.Tensor]]) -> None:
        """Compute test metrics.

        Args:
            test_results (Iterable[Dict[str, torch.Tensor]]): Results of the test steps.
        """
        for metric, value in self.test_metrics.compute().items():
            self.log(metric, value, sync_dist=True)
        self.test_metrics.reset()

    def predict_step(self, batch: PredictionBatch,
                     batch_idx: int) -> Dict[str, torch.Tensor]:
        """Compute scores for a prediction batch.

        Args:
            batch (PredictionBatch): Inputs.
            batch_idx (int): Batch index.

        Returns:
            Dict[str, torch.Tensor]: Indices and scores.
        """
        indices, model_inputs = batch
        return {"indices": indices, "scores": self(model_inputs).flatten()}
Example #30
class ImageClassification(BaseModel):
    """
    Model for image classification.
    This is a configurable class composed by a backbone (see solarnet.models.backbone.py) and
    a classifier.
    It is also a LightningModule and nn.Module.
    """
    def __init__(
        self,
        n_channel: int = 1,
        n_class: int = 2,
        learning_rate: float = 1e-4,
        class_weight: List[float] = None,
        backbone: Union[str, nn.Module] = "simple-cnn",
        backbone_output_size: int = 0,
        n_hidden: int = 512,
        dropout: float = 0.2,
        lr_scheduler: bool = False,
        lr_scheduler_warmup_steps: int = 100,
        lr_scheduler_total_steps: int = 0,
        **kwargs,
    ):
        super().__init__()

        self.save_hyperparameters()

        if isinstance(backbone, str):
            self.backbone, backbone_output_size = get_backbone(
                backbone,
                channels=n_channel,
                dropout=dropout,
                **kwargs,
            )

        self.classifier = Classifier(backbone_output_size, n_class, n_hidden,
                                     dropout)

        if class_weight is not None:
            class_weight = torch.tensor(class_weight, dtype=torch.float)
        self.loss_fn = nn.CrossEntropyLoss(weight=class_weight)

        self.train_accuracy = Accuracy()
        self.val_accuracy = Accuracy()
        self.test_metrics = MetricCollection([
            Accuracy(),
            F1(num_classes=self.hparams.n_class, average="macro"),
            Recall(num_classes=self.hparams.n_class,
                   average="macro"),  # balanced acc.
            StatScores(
                num_classes=self.hparams.n_class
                if self.hparams.n_class > 2 else 1,
                reduce="micro",
                multiclass=self.hparams.n_class > 2,
            ),
        ])

    @property
    def backbone_name(self) -> str:
        if isinstance(self.hparams.backbone, str):
            return self.hparams.backbone
        else:
            return type(self.hparams.backbone).__name__

    @property
    def output_size(self) -> int:
        return self.hparams.n_class

    @auto_move_data
    def forward(self, image):
        return self.classifier(self.backbone(image))

    def predict_step(self, batch, batch_idx: int, dataloader_idx: int = None):
        image, _ = batch
        return self(image)

    def training_step(self, batch, batch_id):
        return self.step(batch, step_type="train")

    def validation_step(self, batch, batch_id):
        return self.step(batch, step_type="val")

    def step(self, batch, step_type: str):
        image, y = batch
        y_pred = self(image)
        loss = self.loss_fn(y_pred, y)

        self.log(f"{step_type}_loss", loss, prog_bar=True, sync_dist=True)

        # Compute accuracy
        y_pred = F.softmax(y_pred, dim=1)
        self.__getattr__(f"{step_type}_accuracy")(y_pred, y)
        self.log(f"{step_type}_accuracy",
                 self.__getattr__(f"{step_type}_accuracy"),
                 on_step=False,
                 on_epoch=True)

        return loss

    def test_step(self, batch, batch_idx):
        image, y = batch
        y_pred = self(image)
        y_pred = F.softmax(y_pred, dim=1)

        self.test_metrics(y_pred, y)

    def test_epoch_end(self, outs):
        test_metrics = self.test_metrics.compute()

        tp, fp, tn, fn, _ = test_metrics.pop("StatScores")
        self.log("test_tp", tp)
        self.log("test_fp", fp)
        self.log("test_tn", tn)
        self.log("test_fn", fn)

        for key, value in test_metrics.items():
            self.log(f"test_{key.lower()}", value)

    def configure_optimizers(self):
        logger.info(f"configure_optimizers lr={self.hparams.learning_rate}")

        optimizer = optim.Adam(
            filter(lambda p: p.requires_grad, self.parameters()),
            lr=self.hparams.learning_rate,
        )

        if not self.hparams.lr_scheduler:
            return optimizer

        scheduler = optim.lr_scheduler.LambdaLR(
            optimizer,
            linear_warmup_decay(self.hparams.lr_scheduler_warmup_steps,
                                self.hparams.lr_scheduler_total_steps,
                                cosine=True),
        )

        return ({
            "optimizer": optimizer,
            "lr_scheduler": {
                "scheduler": scheduler,
                "interval": "step",
                "frequency": 1,
            },
        }, )