Example #1
    def more_metrics(self, metrics_: OrderedDict):
        metrics_['loss'] = metrics.Loss(nn.CrossEntropyLoss())
        metrics_['accuracy'] = metrics.Accuracy()
        metrics_['recall'] = metrics.Recall()
        metrics_['precision'] = metrics.Precision()
        # 8-class confusion matrix; with average='recall' the diagonal holds
        # per-class recall.
        metrics_['confusion_matrix'] = metrics.ConfusionMatrix(
            8, average='recall')
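The `more_metrics` hook above only fills an `OrderedDict`; the dict still has to be handed to an evaluator somewhere. A minimal sketch of that wiring, assuming `metrics` is `ignite.metrics` and `nn` is `torch.nn`, with `model` and `val_loader` as placeholder names not taken from the original:

from collections import OrderedDict

import torch.nn as nn
from ignite import metrics
from ignite.engine import create_supervised_evaluator

metrics_ = OrderedDict()
metrics_['loss'] = metrics.Loss(nn.CrossEntropyLoss())
metrics_['accuracy'] = metrics.Accuracy()

# Each key becomes an entry in evaluator.state.metrics after a run.
evaluator = create_supervised_evaluator(model, metrics=dict(metrics_))
state = evaluator.run(val_loader)
print(state.metrics['loss'], state.metrics['accuracy'])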
Example #2
    def __init__(self, beta: int):
        def f_beta(p, r, beta):
            return torch.mean(
                (1 + beta**2) * p * r / (beta**2 * p + r + 1e-20)).item()

        super(FBetaMacro, self).__init__(f_beta, metrics.Precision(),
                                         metrics.Recall(), beta)
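The `super().__init__(f_beta, metrics.Precision(), metrics.Recall(), beta)` call matches the signature of `ignite.metrics.MetricsLambda(f, *args)`, so `FBetaMacro` presumably subclasses `MetricsLambda`: the unaveraged per-class precision and recall tensors are combined by `f_beta` and then averaged. A minimal sketch of the same composition written directly against `MetricsLambda` (variable names here are illustrative):

import torch
from ignite import metrics


def f_beta(p, r, beta):
    # Macro F-beta: combine per-class precision/recall, then average.
    return torch.mean((1 + beta**2) * p * r / (beta**2 * p + r + 1e-20)).item()


# Precision()/Recall() default to average=False, i.e. per-class tensors.
f2 = metrics.MetricsLambda(f_beta, metrics.Precision(), metrics.Recall(), 2)

# Attaching a MetricsLambda to an engine also attaches its dependencies:
# f2.attach(evaluator, 'f2')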
Example #3
    def __init__(self, is_multilabel=True):
        self.is_multilabel = is_multilabel
        # Create underlying metrics
        self.__precision = ig_metrics.Precision(
            average=False, is_multilabel=self.is_multilabel)

        self.__recall = ig_metrics.Recall(
            average=False, is_multilabel=self.is_multilabel)

        num = self.__precision * self.__recall * 2
        denom = self.__precision + self.__recall + 1e-20
        f1 = num / denom
        self.__metric = ig_metrics.MetricsLambda(
            lambda t: t.mean().item(), f1)
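Arithmetic on Ignite metrics, as in `num / denom` above, yields `MetricsLambda` objects, so `self.__metric` is itself a metric that recomputes F1 from the underlying precision and recall. The rest of such a wrapper class is not shown here; it usually just forwards the `Metric` interface, with the important detail that `MetricsLambda` holds no state of its own, so updates must go to the leaf metrics. A hypothetical, self-contained version of that delegation (the method bodies below are an assumption, not the original code):

from ignite import metrics as ig_metrics


class MacroF1:
    """Hypothetical standalone wrapper; only __init__ appears in the original."""

    def __init__(self, is_multilabel=True):
        self.__precision = ig_metrics.Precision(average=False,
                                                is_multilabel=is_multilabel)
        self.__recall = ig_metrics.Recall(average=False,
                                          is_multilabel=is_multilabel)
        f1 = self.__precision * self.__recall * 2 / (
            self.__precision + self.__recall + 1e-20)
        self.__metric = ig_metrics.MetricsLambda(lambda t: t.mean().item(), f1)

    def reset(self):
        self.__precision.reset()
        self.__recall.reset()

    def update(self, output):
        # output is the (y_pred, y) pair; only the leaf metrics hold state.
        self.__precision.update(output)
        self.__recall.update(output)

    def compute(self):
        return self.__metric.compute()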
Example #4
        def get_metrics_fn() -> Dict[str, _metrics.Metric]:
            def rounded_transform(output):
                y_pred, y = output
                return torch.round(y_pred), y

            transform = rounded_transform
            accuracy = _metrics.Accuracy(transform, device=self.device)
            precision = _metrics.Precision(transform, device=self.device)
            recall = _metrics.Recall(transform, device=self.device)
            f1 = precision * recall * 2 / (precision + recall + 1e-20)
            return {
                'loss': _metrics.Loss(loss_fn),
                'accuracy': accuracy,
                'precision': precision,
                'recall': recall,
                'f1': f1
            }
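`rounded_transform` is an `output_transform`: Ignite calls it with the evaluator's `(y_pred, y)` output and hands the returned pair to the metric, which is how probabilities from a sigmoid head become hard 0/1 predictions before Accuracy, Precision and Recall see them. A tiny standalone illustration of the transform itself (the tensors are made up):

import torch


def rounded_transform(output):
    y_pred, y = output
    return torch.round(y_pred), y


y_pred = torch.tensor([0.1, 0.7, 0.4, 0.9])
y = torch.tensor([0, 1, 1, 1])
print(rounded_transform((y_pred, y)))
# (tensor([0., 1., 0., 1.]), tensor([0, 1, 1, 1]))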
Example #5
    def __init__(self, is_multilabel=True):
        self.is_multilabel = is_multilabel
        self.__metric = ig_metrics.Precision(
            average=True, is_multilabel=self.is_multilabel)
Example #6
    logging.info('creating trainer and evaluator engines')
    loss_fn = LossWithAux(nn.BCEWithLogitsLoss())
    trainer = engine.create_supervised_trainer(
        model=model,
        optimizer=optimizer,
        loss_fn=loss_fn,
        device='cuda',
        non_blocking=True,
    )

    evaluator = engine.create_supervised_evaluator(
        model,
        metrics={
            'loss': metrics.Loss(nn.BCELoss()),
            'precision':
            metrics.Precision(thresholded_transform(threshold=0.5)),
            'recall': metrics.Recall(thresholded_transform(threshold=0.5)),
            'iou@0.3': IoUMetric(thresholded_transform(threshold=0.3)),
            'iou@0.5': IoUMetric(thresholded_transform(threshold=0.5)),
        },
        device='cuda',
        non_blocking=True,
        output_transform=lambda x, y, y_pred:
        (torch.sigmoid(y_pred['out']), y),
    )

    logging.info(f'creating summary writer with tag {args.model_tag}')
    writer = tensorboard.SummaryWriter(log_dir=f'logs/{args.model_tag}')

    logging.info('attaching lr scheduler')
    lr_scheduler = optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.9)
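The snippet stops right after building the `ExponentialLR` scheduler; with Ignite, a plain PyTorch scheduler is usually stepped from an event handler, and the evaluator and `SummaryWriter` above are typically driven from the same place. A minimal sketch of that wiring, assuming a `val_loader` (placeholder name) alongside the `trainer`, `evaluator`, `lr_scheduler` and `writer` objects defined above:

from ignite.engine import Events


@trainer.on(Events.EPOCH_COMPLETED)
def step_scheduler(engine):
    # Decay the learning rate once per epoch.
    lr_scheduler.step()


@trainer.on(Events.EPOCH_COMPLETED)
def run_validation(engine):
    evaluator.run(val_loader)
    writer.add_scalar('val/loss', evaluator.state.metrics['loss'],
                      engine.state.epoch)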
Example #7
    def more_metrics(self, metrics_: OrderedDict):
        metrics_['loss'] = metrics.Loss(nn.CrossEntropyLoss())
        metrics_['accuracy'] = metrics.Accuracy()
        metrics_['recall'] = metrics.Recall()
        metrics_['precision'] = metrics.Precision()
Example #8
# tn, to, im and tud presumably alias torch.nn, torch.optim, ignite.metrics
# and torch.utils.data respectively.
transforms = None
DSet = NpyClfDatasets(CCSN, MSS, CHIRP, DSIR, transform=transforms)
train_l, val_l = DSet.train_test_split(random_state=24, test_size=0.25)
t_DataLoader = tud.DataLoader(DSet, sampler=train_l, batch_size=10, pin_memory=True)
v_DataLoader = tud.DataLoader(DSet, sampler=val_l, batch_size=10, pin_memory=True)
#########################
DESC = "Epoch {} - loss {:.2f}"
PBAR = tqdm(initial=0, leave=False, total=len(t_DataLoader), desc=DESC.format(0, 0))
CLF = CNN_ONE(idx=50)
LFN = tn.CrossEntropyLoss()
OPM = to.Adam(CLF.parameters(), lr=1e-3)
VAL_METRICS = {
    'loss': im.Loss(LFN),
    'acc': im.Accuracy(),
    'recall': im.Recall(),
    'precision': im.Precision(),
    'cfm': im.ConfusionMatrix(3),
}
L_TRAIN = []
L_EVAL = []
L_ACC = []
L_PRE = []
L_REC = []
L_CFM = []
#########################
def train_step(engine, batch):
    CLF.train()
    OPM.zero_grad()
    x, y = batch['payload'], batch['target']
    ypred = CLF(x)
    loss = LFN(ypred, y.squeeze(1))