def test_v1_5_metric_classif_mix():
    """Assert that the deprecated classification metrics / functionals still warn about their v1.5.0 removal."""
    # Class-based metrics: reset the one-shot warning flag, then expect the deprecation warning on construction.
    deprecated_classes = (
        (ConfusionMatrix, {"num_classes": 1}),
        (FBeta, {"num_classes": 1}),
        (F1, {"num_classes": 1}),
        (HammingDistance, {}),
        (StatScores, {}),
    )
    for metric_cls, kwargs in deprecated_classes:
        metric_cls.__init__._warned = False
        with pytest.deprecated_call(match="It will be removed in v1.5.0"):
            metric_cls(**kwargs)

    # Functional API: each call must both warn and still compute the documented value.
    target = torch.tensor([1, 1, 0, 0])
    preds = torch.tensor([0, 1, 0, 0])
    confusion_matrix._warned = False
    with pytest.deprecated_call(match="It will be removed in v1.5.0"):
        expected = torch.tensor([[2.0, 0.0], [1.0, 1.0]])
        assert torch.equal(confusion_matrix(preds, target, num_classes=2).float(), expected)

    target = torch.tensor([0, 1, 2, 0, 1, 2])
    preds = torch.tensor([0, 2, 1, 0, 0, 1])
    fbeta._warned = False
    with pytest.deprecated_call(match="It will be removed in v1.5.0"):
        score = fbeta(preds, target, num_classes=3, beta=0.5)
        assert torch.allclose(score, torch.tensor(0.3333), atol=1e-4)

    f1._warned = False
    with pytest.deprecated_call(match="It will be removed in v1.5.0"):
        assert torch.allclose(f1(preds, target, num_classes=3), torch.tensor(0.3333), atol=1e-4)

    target = torch.tensor([[0, 1], [1, 1]])
    preds = torch.tensor([[0, 1], [0, 1]])
    hamming_distance._warned = False
    with pytest.deprecated_call(match="It will be removed in v1.5.0"):
        assert hamming_distance(preds, target) == torch.tensor(0.25)

    preds = torch.tensor([1, 0, 2, 1])
    target = torch.tensor([1, 1, 2, 0])
    stat_scores._warned = False
    with pytest.deprecated_call(match="It will be removed in v1.5.0"):
        assert torch.equal(stat_scores(preds, target, reduce="micro"), torch.tensor([2, 2, 6, 2, 4]))
def __init__(self):
    """Build the ConvNet classifier, its focal loss, config-driven hyper-parameters, and binary metrics."""
    super().__init__()
    self.cnn = ConvNet()
    # Focal loss to down-weight easy examples (alpha/gamma fixed here).
    self.criterion = FocalLoss(alpha=0.5, gamma=2.0, reduction='mean')
    # Training hyper-parameters are read from the external classifier config.
    self.config = load_classifier_config()
    training_cfg = self.config['training']
    self.learning_rate = training_cfg['learning_rates']
    self.batch_size = training_cfg['batch_sizes']
    # Metric objects for a 2-class problem.
    self.accuracy = Accuracy(num_classes=2)
    self.f1 = F1()
    self.recall = Recall()
    self.precision = Precision()
def __init__(
    self,
    tasks: list,
    dm: DataModule,
    n_c: int,  # number of convolutions
    latent_dim: int,
    n_final_convolutions: int = 1,
    params_scarcity: int = 0,
    gamma: float = 0.98,
    lr: float = 1e-3,
):
    """Set up the trajectory encoder, per-task output heads/losses, and task-specific metrics."""
    super().__init__()
    self.encoder = TrajsEncoder(n_c=n_c, latent_dim=latent_dim, dm=dm)
    # Record hyper-parameters for checkpointing/logging (one call per name, as before).
    self.save_hyperparameters("gamma")
    self.save_hyperparameters("lr")
    self.save_hyperparameters("tasks")
    self.save_hyperparameters("latent_dim")
    self.save_hyperparameters("n_c")
    self.save_hyperparameters(dm.ds_params)
    self.save_hyperparameters(dm.graph_info)

    outputs = self.get_output_modules(
        tasks, latent_dim, dm.ds_params["dim"], dm.ds_params["RW_types"]
    )
    # Split each (network, target, loss) triple into the three per-task registries.
    self.out_networks = {}
    self.losses = {}
    self.targets = {}
    for task_name, (net, target, loss) in outputs.items():
        self.out_networks[task_name] = net
        self.targets[task_name] = target
        self.losses[task_name] = loss
    # Wrap the head networks so their parameters are registered with the module.
    self.out_networks = nn.ModuleDict(self.out_networks)

    self.loss_scale = {}
    self.set_loss_scale()

    # Metrics are only instantiated for the tasks actually requested.
    if "alpha" in tasks:
        self.MAE = MAE()
    if "model" in tasks:
        self.F1 = F1(len(dm.ds_params["RW_types"]))
    if "drift_norm" in tasks:
        self.EV = EV()
def __init__(self, hparams: dict):
    """Fine-tune a pretrained Wide-ResNet-50-2 for binary classification."""
    super().__init__()
    # Backbone choice — alternatives tried previously:
    # self.model = models.resnet50(pretrained=True)
    # self.model = models.densenet201(pretrained=True)
    self.model = models.wide_resnet50_2(pretrained=True)
    # Replace the final fully-connected layer with a 2-way head.
    self.model.fc = torch.nn.Linear(self.model.fc.in_features, 2)
    # self.model.classifier = torch.nn.Linear(self.model.classifier.in_features, 2)
    self.criterion = torch.nn.CrossEntropyLoss()
    # self.softmax = torch.nn.Softmax()

    # Hyper-parameter setup.
    # NOTE(review): "hparmas" is a typo for "hparams", but renaming would break any
    # caller reading self.hparmas (and could collide with Lightning's hparams property).
    self.hparmas = hparams
    self.init_lr = hparams['optimizer_lr']
    print(hparams)

    # Binary metrics (single positive class).
    self.accuracy = Accuracy()
    self.prec = Precision(num_classes=1, is_multiclass=False)
    self.rc = Recall(num_classes=1, is_multiclass=False)
    self.f1 = F1(num_classes=1, multilabel=False)
    # self.confmat = ConfusionMatrix(num_classes=1)

    # Accumulators for predictions / ground truths across steps.
    self.preds = []
    self.gts = []
def __init__(self, threshold=0.5, device='cuda') -> None:
    """Hold a weighted binary F1 metric, moved onto the given device."""
    super().__init__()
    metric = F1(num_classes=1, average='weighted', threshold=threshold)
    self.f1 = metric.to(device)