Example 1
 def prepare(self):
     (self.train_transforms,
      self.infer_transforms) = self._build_transforms(
          **self.config.transforms)
     (
         self._train_dataloader,
         self._val_dataloader,
         self._test_dataloader,
     ) = self._build_dataloaders(**self.config.data)
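     # The fitted transforms' vocabularies fix the label-space sizes used by
     # the joint model and the metrics below.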
     num_slots = len(
         self.train_transforms["utterance&slots"][0].vocab.idx.keys())
     num_intents = len(self.train_transforms["intent"][0].vocab.idx.keys())
     add_feat_len = 0
     if self.config.model.use_intent:
         add_feat_len += num_intents
     self.joint_model = build_intent_joint_model(
         **self.config.model,
         num_slots=num_slots,
         num_intents=num_intents,
         vocab=self.train_transforms["utterance"][1].vocab,
         add_feat_len=add_feat_len,
     )
     self.slot_accuracy_metric = Accuracy(num_classes=num_slots)
     self.doc_accuracy_metric = Accuracy(num_classes=num_intents)
     self.precision_metric = AveragePrecision()
     self.slot_f1_metric = F1(num_classes=num_slots)
     self.doc_f1_metric = F1(num_classes=num_intents)
     self.optimizer = FairSeqAdam(self.joint_model.parameters(),
                                  **self.config.optimizer)
Example 2
 def __init__(self, model, criterion, config: dict, loaders: dict):
     super().__init__()
     self.model = model
     self.criterion = criterion
     self.config = config
     self.loaders = loaders
     self.metrics = {'acc': Accuracy(), 'f1': F1()}
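
In use, a metrics dict like this is applied once per batch and logged by name. A minimal sketch of that pattern, assuming the class is a pytorch_lightning LightningModule (the step method below is illustrative, not part of the original source):

 def validation_step(self, batch, batch_idx):
     x, y = batch
     logits = self.model(x)
     preds = logits.argmax(dim=-1)
     for name, metric in self.metrics.items():
         # Each metric object accumulates its own state across batches.
         self.log(f'val_{name}', metric(preds, y))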
Example 3
def test_f1(num_classes):
    f1 = F1(num_classes=num_classes)
    assert f1.name == 'f1'

    score = f1(pred=torch.tensor([[0, 1, 1], [1, 0, 1]]),
               target=torch.tensor([[0, 0, 1], [1, 0, 1]]))
    assert isinstance(score, torch.Tensor)
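
The same check ported to the current torchmetrics API, where the class was renamed F1Score and takes an explicit task argument; a sketch assuming multilabel inputs like the tensors above:

import torch
from torchmetrics import F1Score

f1 = F1Score(task='multilabel', num_labels=3)
score = f1(torch.tensor([[0, 1, 1], [1, 0, 1]]),
           torch.tensor([[0, 0, 1], [1, 0, 1]]))
assert isinstance(score, torch.Tensor)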
Example 4
 def __init__(self):
     super().__init__()
     self.G = gen()
     self.D = disc()
     self.C = clf()
     self.ce_loss = torch.nn.CrossEntropyLoss()
     self.f1 = F1(num_classes=n_labels)
Example 5
    def __init__(self,
                 model: torch.nn.Module,
                 train_data: str,
                 val_data: str,
                 test_data: str,
                 batch_size: int,
                 num_classes: int = 2,
                 num_workers: int = 1):
        super().__init__()
        self.num_workers = num_workers
        self.train_data = train_data
        self.val_data = val_data
        self.test_data = test_data
        self.batch_size = batch_size

        self.model = model

        self.criterion = torch.nn.BCELoss()

        self.collate_fn = PaddingCollateFn(150)

        self.train_metrics = []
        self.val_metrics = [
            Accuracy(num_classes=num_classes),
            F1(num_classes=num_classes),
            Precision(num_classes=num_classes),
            Recall(num_classes=num_classes)
        ]
Example 6
 def __init__(self, backbone, feature_indices, feature_channels):
     super().__init__()
     self.model = get_segmentation_model(backbone, feature_indices,
                                         feature_channels)
     self.criterion = BCEWithLogitsLoss()
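     # One foreground class: per-pixel probabilities are binarized at 0.5
     # before true/false positive counts are accumulated.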
     self.prec = Precision(num_classes=1, threshold=0.5)
     self.rec = Recall(num_classes=1, threshold=0.5)
     self.f1 = F1(num_classes=1, threshold=0.5)
Example 7
    def __init__(
        self,
        regression_task,
        classification_task,
        input_size,
        hidden_size,
        learning_rate,
        classifier_lambda,
        tanh_loss,
        fill_missing_regression,
        regressor_activation,
        classifier_loss_weights=None,
        **kwargs
    ):
        super().__init__()

        self.save_hyperparameters()

        # Sanity checks
        assert 0 <= classifier_lambda <= 1

        # Layers
        self.hidden_fc = nn.Linear(
            in_features=input_size, out_features=hidden_size, bias=True
        )
        
        if classification_task:
            self.classifier_hidden_fc = nn.Linear(
                in_features=hidden_size,
                out_features=hidden_size,
                bias=True
            )
            self.classifier_fc = nn.Linear(
                in_features=hidden_size,
                out_features=2,  # It's a binary classification
                bias=True,
            )
            self._build_classifier_loss_weights(classifier_loss_weights)

        if regression_task:
            self.regressor_hidden_fc = nn.Linear(
                in_features=hidden_size,
                out_features=hidden_size,
                bias=True,
            )
            self.regressor_fc = nn.Linear(
                in_features=hidden_size, out_features=1, bias=True  # It's a regression
            )

        # Metrics
        if classification_task:
            self.f1 = F1()
            self.precision = Precision()
            self.recall = Recall()
        if regression_task:
            self.mse = MSE()
Example 8
    def __init__(self, loss, weight=None, num_classes=2, T=0.5):
        super().__init__()
        self.efficient_net = EfficientNet.from_pretrained(
            'efficientnet-b1', num_classes=num_classes)
        in_features = self.efficient_net._fc.in_features
        self.efficient_net._fc = nn.Linear(in_features, num_classes)
        with torch.no_grad():
            if weight is not None:
                self.efficient_net._fc.bias.data = weight
        self.num_classes = num_classes
        self.criterion_labeled = loss
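        # MixMatch-style semi-supervised objective: the labeled criterion is
        # combined with an MSE consistency term over K=2 augmentations of the
        # unlabeled data, with a softmax applied to the outputs first.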
        self.loss = get_mixmatch_loss(criterion_labeled=self.criterion_labeled,
                                      output_transform=nn.Softmax(dim=-1),
                                      K=2,
                                      weight_unlabeled=1.0,
                                      criterion_unlabeled=nn.MSELoss())

        self.train_metric = F1(num_classes=self.num_classes, average="none")
        self.val_metric = F1(num_classes=self.num_classes, average="none")
Example 9
    def __init__(self, hparams: dict):
        super().__init__()
        self.hparams = hparams
        m = hparams['model']
        model = model_zoo[m['arch']]
        self.model = model(num_classes=m['num_classes'])

        # configure metrics
        self.metrics = {
            'acc': partial(utils.accuracy, topk=(1, 3)),
            'f1': F1(num_classes=m['num_classes'])
        }
Example 10
    def __init__(self, loss, weight=None, num_classes=2):
        super().__init__()
        self.efficient_net = EfficientNet.from_pretrained(
            'efficientnet-b1', num_classes=num_classes)
        in_features = self.efficient_net._fc.in_features
        self.efficient_net._fc = nn.Linear(in_features, num_classes)
        with torch.no_grad():
            if weight is not None:
                self.efficient_net._fc.bias.data = weight
        self.num_classes = num_classes
        self.loss = loss

        self.metric = F1(num_classes=self.num_classes, average="none")
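        # clone() gives train and val their own metric instances, so the
        # accumulated true/false positive state is not shared between phases.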
        self.train_metric = self.metric.clone()
        self.val_metric = self.metric.clone()
Example 11
 def __init__(self, out_classes: int = 3, lr_bert: float = 1e-5, lr_class: float = 1e-4,
              weight_decay: float = 1e-2, freeze_base: bool = False, train_steps: int = 100):
     super(SentBert, self).__init__()
     self.lr_bert = lr_bert
     self.lr_class = lr_class
     self.weight_decay = weight_decay
     self.train_steps = train_steps
     self.save_hyperparameters()
     self.bert = BertForSequenceClassification.from_pretrained(
         'bert-base-uncased', num_labels=out_classes, return_dict=True)
     if freeze_base:
         for param in self.bert.base_model.parameters():
             param.requires_grad = False
     self.f1 = F1(num_classes=out_classes, average='macro')
     self.train_acc = Accuracy()
     self.val_acc = Accuracy()
Example 12
    def __init__(self, config):
        super().__init__()
        self.train_config = config

        self.roberta = RobertaForMaskedLM.from_pretrained('roberta-base')
        _ = self.roberta.eval()
        for param in self.roberta.parameters():
            param.requires_grad = False

        self.pred_model = self.roberta.roberta
        self.enc_model = self.pred_model.embeddings.word_embeddings
        self.proj_head = DVProjectionHead_EmbActi()

        self.lossfunc = nn.BCEWithLogitsLoss()

        # threshold=0.0 binarizes raw logits at zero, equivalent to cutting
        # sigmoid probabilities at 0.5, matching the BCEWithLogitsLoss above.
        self.acc = Accuracy(threshold=0.0)
        self.f1 = F1(threshold=0.0)
Example 13
def get_metrics(network, loader, weights, device, name, mode='full'):
    print('Start Evaluation')
    correct = 0
    strict_correct = 0
    total = 0
    strict_total = 0
    weights_offset = 0
    ys = []
    ps = []
    sigmoid = nn.Sigmoid()

    network.eval()
    with torch.no_grad():
        t = tqdm(iter(loader), leave=False, total=len(loader))
        for i, data in enumerate(t):
            x, y = data
            if mode == 'skip' and i >= 100:
                break
            x = x.to(device)
            y = y.to(device)
            p = sigmoid(network.predict(x))
            ys.append(y)
            ps.append(p)
            if weights is None:
                batch_weights = torch.ones(len(x))
            else:
                batch_weights = weights[weights_offset:weights_offset + len(x)]
                weights_offset += len(x)
            batch_weights = batch_weights.to(device)
            # all(dim=1): a sample counts as strictly correct only if every
            # one of its labels is predicted correctly.
            strict_correct += ((p.gt(.5) == y).all(dim=1, keepdim=True).float() *
                               batch_weights.reshape((-1, 1))).sum().item()
            correct += ((p.gt(.5) == y).float() * batch_weights.reshape(
                (-1, 1))).sum().item()
            total += p.size(0) * p.size(1)
            strict_total += batch_weights.sum().item()
        ps = torch.cat(ps).to(device)
        ys = torch.cat(ys).to(device)
        eces = get_ece(ps, ys).item()
        # micro_f1 = f1_score(ps.gt(.5).float(), ys, num_classes=None, class_reduction='micro').item()
        # macro_f1 = f1_score(ps.gt(.5).float(), ys, num_classes=None, class_reduction='macro').item()
        aucs = []
        micro_f1 = []
        macro_f1 = []
        for d in range(ps.size(1)):
            micro = F1(num_classes=2, average='micro')
            macro = F1(num_classes=2, average='macro')
            micro_f1.append(
                micro(ps[:, d].gt(.5).cpu().long(), ys[:, d].cpu().long()).item())
            macro_f1.append(
                macro(ps[:, d].gt(.5).cpu().long(), ys[:, d].cpu().long()).item())
            aucs.append(auroc(ps[:, d], ys[:, d]).item())
    network.train()
    results = {
        f'{name}_acc': correct / total,
        f'{name}_strict_acc': strict_correct / strict_total,
        f'{name}_auc': aucs,
        f'{name}_micro_f1': micro_f1,
        f'{name}_macro_f1': macro_f1,
        f'{name}_eces': eces
    }
    return results
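
A hypothetical call, with network and val_loader standing in for a trained multilabel model (exposing predict) and its evaluation loader:

results = get_metrics(network, val_loader, weights=None,
                      device='cuda', name='val', mode='full')
print(results['val_macro_f1'])  # one macro-F1 value per label dimension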
Example 14
 def __init__(self):
     super().__init__()
     self.G = gen()
     self.D = disc()
     self.f1 = F1(num_classes=2)
Example 15
                                  shell=True)
    return tag


if __name__ == '__main__':

    with open("params.yaml", 'r') as fd:
        params = yaml.safe_load(fd)

    MODEL_PATH = params['test']['model_path']

    module = TrainingModule.load_from_checkpoint(MODEL_PATH).to('cuda')
    module.eval()

    acc = Accuracy()
    f1 = F1()
    precision = Precision()
    recall = Recall()

    mlflow.set_tracking_uri('file-plugin:/content/NLP_Emotions/mlruns')
    if get_closest_gittag() == 'v2.0':
        mlflow.set_experiment('SGD')
        mlflow.set_tag('Version', 'SGD')
        mlflow.set_tag('Stage', 'test')
        mlflow.set_tag('Commit', get_commit())
        mlflow.set_tag('Time', get_commit_time())
        mlflow.set_tag('Model', module.model_name)
        mlflow.log_params({
            'batch_size': module.hparams.batch_size,
            'epochs': module.hparams.epochs,
            'learning_rate': module.hparams.lr