Exemplo n.º 1
0
 def __init__(self, config):
     """Build a two-layer MLP classifier plus its evaluation metrics.

     Args:
         config: object exposing ``input_size``, ``hidden_size`` and
             ``num_classes`` attributes used to size the layers.
     """
     super().__init__(config)
     in_dim, hid_dim, n_cls = (config.input_size, config.hidden_size,
                               config.num_classes)
     # Feed-forward stack: linear -> ReLU -> linear.
     self.fc1 = torch.nn.Linear(in_dim, hid_dim)
     self.relu = torch.nn.ReLU()
     self.fc2 = torch.nn.Linear(hid_dim, n_cls)
     # Separate accuracy trackers per split; the test metric only
     # accumulates state instead of returning a value each step.
     self.val_acc = metrics.Accuracy()
     self.test_acc = metrics.Accuracy(compute_on_step=False)
Exemplo n.º 2
0
    def __init__(self, sign_loss=1, signer_loss=1, signer_loss_patience=0):
        """Store loss weights and create one accuracy metric per phase.

        Args:
            sign_loss: weight applied to the sign-classification loss.
            signer_loss: weight applied to the signer-classification loss.
            signer_loss_patience: patience value gating the signer loss
                (presumably in epochs — TODO confirm against training code).
        """
        super().__init__()

        self.sign_loss = sign_loss
        self.signer_loss = signer_loss
        self.signer_loss_patience = signer_loss_patience

        # NOTE(review): a plain dict of metric modules is not registered as
        # submodules, so ``.to(device)`` on the model will not move them —
        # confirm this is intentional (nn.ModuleDict would register them).
        self.metrics = {
            phase: metrics.Accuracy()
            for phase in ("training", "validation", "test")
        }
Exemplo n.º 3
0
Arquivo: task.py Projeto: jeanm/text
 def __init__(
     self,
     datamodule: DocClassificationDataModule,
     model: nn.Module,
     optimizer: Optimizer,
 ):
     """Wire the document-classification task together.

     Args:
         datamodule: provides the text/label transforms reused here so
             training and evaluation preprocessing stay in sync.
         model: the network to train.
         optimizer: optimizer instance used for training.
     """
     super().__init__()
     self.model = model
     self.optimizer = optimizer
     # Reuse the datamodule's transforms rather than rebuilding them.
     self.text_transform = datamodule.text_transform
     self.label_transform = datamodule.label_transform
     self.loss = torch.nn.CrossEntropyLoss()
     # One accuracy tracker per evaluation split.
     self.valid_acc = metrics.Accuracy()
     self.test_acc = metrics.Accuracy()
Exemplo n.º 4
0
    def __init__(self,
                 input_size=INPUT_SIZE,
                 hidden_size=HIDDEN_SIZE,
                 num_layers=NUM_LAYERS,
                 dropout=DROPOUT):
        """Build the embedding / GRU / linear classifier stack.

        Parameters
        ----------
        input_size : int
            Size of each input sample fed to the GRU.
        hidden_size : int
            Number of features in the GRU hidden state.
        num_layers : int
            Number of stacked recurrent layers in the GRU.
        dropout : float
            Dropout probability shared by every dropout layer and the GRU.
        """
        super().__init__()

        self.in_size = input_size
        self.accuracy = metrics.Accuracy()

        # NOTE(review): the embedding and first two linear layers use
        # hard-coded sizes (12/50, 200/100, 100/10) that do not derive from
        # input_size or hidden_size — confirm these are intentional.
        self.embedding_layer = nn.Embedding(12, 50)
        self.dropout_layer_1 = nn.Dropout(dropout)
        self.linear_layer_1 = nn.Linear(200, 100)
        self.dropout_layer_2 = nn.Dropout(dropout)
        self.linear_layer_2 = nn.Linear(100, 10)
        self.dropout_layer_3 = nn.Dropout(dropout)
        # Bidirectional, batch-first GRU.
        self.gru_layer = nn.GRU(input_size, hidden_size,
                                num_layers=num_layers, batch_first=True,
                                bidirectional=True, dropout=dropout)
        self.__init_gru()  # custom GRU weight init, defined elsewhere in the class
        # 2 * hidden_size because the GRU is bidirectional.
        self.linear_layer_3 = nn.Linear(2 * hidden_size, 5)
Exemplo n.º 5
0
    def validation_step(self, batch: T.List[torch.Tensor], batch_idx: int,
                        dataloader_idx: int):
        """Run one validation step and log per-dataloader metrics.

        Args:
            batch: list of tensors — queries, labels, then one support
                tensor per class (``len(supports)`` defines the class count).
            batch_idx: index of the current batch (unused here).
            dataloader_idx: which validation dataloader produced this batch;
                each dataloader lazily gets its own set of metric modules.
        """
        queries, labels, *supports = batch
        logits = self(queries, *supports)

        key = f"dl_{dataloader_idx}"
        # BUG FIX: the membership test previously checked the bare integer
        # ``dataloader_idx`` while entries are stored under ``f"dl_{idx}"``,
        # so it never matched and the evaluators (and their accumulated
        # state) were recreated on every step. Test and store under the
        # same string key.
        if key not in self.evaluators:
            eval_n_classes = len(supports)
            self.evaluators[key] = nn.ModuleDict({
                "accuracy": plmc.Accuracy(),
                "precision": plmc.Precision(num_classes=eval_n_classes),
                "recall": plmc.Recall(num_classes=eval_n_classes),
                "fbeta": plmc.FBeta(num_classes=eval_n_classes),
                "f1": plmc.F1(num_classes=eval_n_classes),
                "confmat": plmc.ConfusionMatrix(num_classes=eval_n_classes),
            }).to(device=self.device)

        evaluators = self.evaluators[key]
        for category, evaluator in evaluators.items():
            self.log(f"metrics/{category}", evaluator(logits, labels))
Exemplo n.º 6
0
 def __init__(self,
              net,
              cfg,
              criterion,
              optimizer,
              scheduler=None,
              experiment=None):
     """Collect the training components for this Lightning system.

     Parameters
     ----------
     net : torch.nn.Module
         Model to train.
     cfg : DictConfig
         Experiment configuration.
     criterion : callable
         Loss function.
     optimizer : torch.optim
         Optimizer instance.
     scheduler : torch.optim.lr_scheduler, optional
         Learning-rate scheduler.
     experiment : comet_ml.experiment, optional
         Comet-ML logger.
     """
     super().__init__()
     self.net = net
     self.cfg = cfg
     self.experiment = experiment
     self.criterion = criterion
     self.optimizer = optimizer
     self.scheduler = scheduler
     # Running "best so far" trackers updated during training.
     self.best_loss = 1e+9
     self.best_acc = 0
     self.epoch_num = 0
     self.acc_fn = metrics.Accuracy()
Exemplo n.º 7
0
 def compute_result(self, loss, preds, targets, stage):
     """Bundle loss and accuracy into an EvalResult for the given stage.

     Args:
         loss: scalar loss tensor, also used as the checkpoint monitor.
         preds: model predictions passed to ``compute_acc``.
         targets: ground-truth labels passed to ``compute_acc``.
         stage: split name used to prefix the logged metric keys.
     """
     # Lazily create the accuracy meter on first call.
     # NOTE(review): ``_acc_meter`` is never read in this method —
     # presumably ``compute_acc`` uses it; confirm before removing.
     if not hasattr(self, "_acc_meter"):
         self._acc_meter = metrics.Accuracy(self.num_classes)
     acc = self.compute_acc(preds, targets)
     result = pl.EvalResult(checkpoint_on=loss)
     for name, value in ((f"{stage}_loss", loss), (f"{stage}_acc", acc)):
         result.log(name, value, prog_bar=True)
     return result
Exemplo n.º 8
0
    def __init__(self, args, encoder, output_dim):
        """Attach a resized classification head to ``encoder`` and set up
        loss and evaluation metrics.

        Args:
            args: hyperparameters, persisted via ``save_hyperparameters``.
            encoder: backbone network exposing a ``fc`` head layer.
            output_dim: number of output units for the classification head.
        """
        import torch  # local import: this snippet's file-level imports are not visible

        super().__init__()
        self.save_hyperparameters(args)
        self.encoder = encoder

        # BUG FIX: the original assigned ``self.encoder.fc.out_features =
        # output_dim``, which only mutates the attribute — the layer's
        # weight matrix keeps its old shape, so the head was never actually
        # resized. Replace the layer with a correctly-sized one instead.
        self.encoder.fc = torch.nn.Linear(self.encoder.fc.in_features,
                                          output_dim)
        self.output_dim = output_dim
        self.model = self.encoder
        self.criterion = self.configure_criterion()

        self.accuracy = metrics.Accuracy()
        # ROC / AP treat label 1 as the positive class.
        self.roc = metrics.ROC(pos_label=1)
        self.average_precision = metrics.AveragePrecision(pos_label=1)
Exemplo n.º 9
0
    def __init__(self, args, encoder, hidden_dim, output_dim):
        """Build a linear head over an encoder's features.

        Args:
            args: hyperparameter namespace stored on ``self.hparams``.
            encoder: backbone producing ``hidden_dim``-sized features.
            hidden_dim: dimensionality of the encoder output.
            output_dim: number of prediction targets.
        """
        super().__init__()

        # NOTE(review): direct assignment to ``hparams`` is deprecated in
        # recent PyTorch Lightning; ``save_hyperparameters`` is preferred.
        self.hparams = args
        # self.save_hyperparameters()

        self.encoder = encoder
        self.hidden_dim = hidden_dim
        self.output_dim = output_dim
        # Single linear layer mapping encoder features to outputs.
        self.model = nn.Sequential(nn.Linear(self.hidden_dim, self.output_dim))
        self.criterion = self.configure_criterion()

        self.accuracy = metrics.Accuracy()
        # Average precision treats label 1 as the positive class.
        self.average_precision = metrics.AveragePrecision(pos_label=1)
Exemplo n.º 10
0
 def __init__(self, W, batch_size, train_dataset):
     """Build a width-``W`` MLP feature extractor plus SVM/loss/metric.

     Args:
         W: hidden width of every layer in the feature extractor.
         batch_size: stored as a hyperparameter.
         train_dataset: dataset object providing ``get_input_dim()``.
     """
     super().__init__()
     self._set_hparams({'batch_size': batch_size, 'learning_rate': 5e-3})
     self.train_dataset = train_dataset
     input_dim = self.train_dataset.get_input_dim()
     # input -> W, then five more W -> W blocks, each followed by an
     # in-place ReLU (same module sequence as writing them out by hand).
     layers = [nn.Linear(input_dim, W), nn.ReLU(True)]
     for _ in range(5):
         layers.append(nn.Linear(W, W))
         layers.append(nn.ReLU(True))
     self.phi = nn.Sequential(*layers)
     self.svm = LinearSVC()
     self.loss = nn.BCELoss()
     self.accuracy = metrics.Accuracy()
Exemplo n.º 11
0
    def __init__(self, n_classes, dataloader, split, device, expt_logdir):
        """Set up per-metric modules and value histories for evaluation.

        Args:
            n_classes: number of target classes.
            dataloader: dataloader this evaluator will consume.
            split: name of the data split being evaluated.
            device: torch device the metric modules are moved to.
            expt_logdir: directory where evaluation output is written.
        """
        self.dataloader = dataloader
        self.device = device

        # Instantiate every metric module on the target device.
        modules = {
            'accuracy': metrics.Accuracy().to(self.device),
            'iou': metrics.IoU(num_classes=n_classes).to(self.device),
            'dice': Dice().to(self.device),
            'sensitivity': metrics.Recall(num_classes=n_classes,
                                          average='macro',
                                          mdmc_average='global').to(self.device),
            'auroc': metrics.ROC(num_classes=n_classes,
                                 dist_sync_on_step=True).to(self.device),
        }
        # Pair each metric with a running history of its computed values.
        self.eval_metrics = {
            name: {'module': module, 'values': []}
            for name, module in modules.items()
        }
        self.softmax = nn.Softmax(dim=1)
        self.expt_logdir = expt_logdir
        self.split = split
# Training-loop setup: model, optimizer, loss, and early-stopping state.
min_loss = np.inf
metric_history = {"train": [], "val": []}
cur_patience = 0
cnn_model = GenreClassifierCNN(emb_dim=vector_size, out_channels=64, kernel_sizes=[3, 4, 5], embedding_weights=embedding_weights, dropout=0.5).to(device)
optimizer = torch.optim.Adam(cnn_model.parameters())
max_epochs = 5
loss_func = nn.CrossEntropyLoss()
patience = 5
max_acc = 0.0
train_acc = 0.0
val_acc = 0.0

for epoch in range(1, max_epochs + 1):
    train_loss = 0.0
    # Fresh accuracy accumulator per epoch.
    train_accuracy = metrics.Accuracy().to(device)
    cnn_model.train()
    pbar = tqdm(enumerate(train_dataloader), total=len(train_dataloader), leave=False)
    pbar.set_description(f"Epoch {epoch}")
    for it, batch in pbar:
        optimizer.zero_grad()
        features = batch["features"].to(device)
        # BUG FIX: CrossEntropyLoss applies log-softmax internally, so it
        # must receive raw logits; the original softmax-ed first (double
        # softmax), which flattens gradients. Probabilities are still used
        # for the accuracy metric, as before.
        logits = cnn_model(features)
        targets = batch["targets"].squeeze().long().to(device)
        batch_acc = train_accuracy(F.softmax(logits, dim=1), targets)
        loss = loss_func(logits, targets)
        loss.backward()
        optimizer.step()
        train_loss += loss.item()
        train_acc += batch_acc.item()
Exemplo n.º 13
0
    def __init__(self, model):
        """Wrap ``model`` and create train/validation accuracy trackers.

        Args:
            model: the network this module delegates to.
        """
        super().__init__()

        self.model = model
        # One metric per phase so their accumulated state never mixes.
        # NOTE(review): ``num_classes=1`` is unusual for Accuracy — confirm
        # it matches the intended (presumably binary) task.
        for attr in ("train_acc", "val_acc"):
            setattr(self, attr, metrics.Accuracy(num_classes=1))