Example #1
def validate(model, val_set, params):
    val_data = tqdm(DataLoader(val_set,
                               batch_size=params.batch_size,
                               collate_fn=KeyphraseData.collate_fn),
                    total=(len(val_set) // params.batch_size))
    metrics = Metrics()
    loss_avg = RunningAverage()
    with torch.no_grad():
        model.eval()
        for data, labels, mask in val_data:

            data = data.to(params.device)
            labels = labels.to(params.device)
            mask = mask.to(params.device)

            loss, logits = model(data, attention_mask=mask, labels=labels)

            predicted = logits.max(2)[1]
            metrics.update(batch_pred=predicted.cpu().numpy(),
                           batch_true=labels.cpu().numpy(),
                           batch_mask=mask.cpu().numpy())
            loss_avg.update(torch.mean(loss).item())
            val_data.set_postfix(type='VAL',
                                 loss='{:05.3f}'.format(loss_avg()))

    metrics.loss = loss_avg()
    return metrics
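Note: `RunningAverage` is not defined in these snippets. A minimal sketch consistent with how Examples #1 and #2 call it (`update(value)` to accumulate, `loss_avg()` to read the mean); the actual class in the source project may differ:

class RunningAverage:
    """Keeps the running mean of a stream of scalar values."""

    def __init__(self):
        self.total = 0.0
        self.steps = 0

    def update(self, value):
        # Accumulate one scalar, e.g. a batch loss.
        self.total += value
        self.steps += 1

    def __call__(self):
        # Mean of everything seen so far.
        return self.total / max(self.steps, 1)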
Example #2
def test(model, dataloader, params):
    val_data = tqdm(dataloader.data_iterator(data_type='test',
                                             batch_size=params.batch_size),
                    total=(dataloader.size()[0] // params.batch_size))
    metrics = Metrics()
    loss_avg = RunningAverage()
    model.eval()  # switch once before iterating, not on every batch
    with torch.no_grad():
        for data, labels in val_data:
            data = torch.tensor(data, dtype=torch.long).to(params.device)
            labels = torch.tensor(labels, dtype=torch.long).to(params.device)

            batch_masks = data != 0

            loss, logits = model(data,
                                 attention_mask=batch_masks,
                                 labels=labels)

            predicted = logits.max(2)[1]
            metrics.update(batch_pred=predicted.cpu().numpy(),
                           batch_true=labels.cpu().numpy(),
                           batch_mask=batch_masks.cpu().numpy())
            loss_avg.update(torch.mean(loss).item())
            val_data.set_postfix(type='TEST',
                                 loss='{:05.3f}'.format(loss_avg()))
    metrics.loss = loss_avg()
    return metrics
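The `Metrics` object in Examples #1 and #2 receives masked token-level predictions. A hypothetical sketch matching the `update(batch_pred=..., batch_true=..., batch_mask=...)` signature above; token accuracy stands in for whatever scores (e.g. keyphrase F1) the original class actually computes:

class Metrics:
    """Accumulates token-level predictions, ignoring padded positions."""

    def __init__(self):
        self.correct = 0
        self.total = 0
        self.loss = 0.0  # set by the caller after the loop

    def update(self, batch_pred, batch_true, batch_mask):
        # Compare predictions and labels only where the mask is set.
        mask = batch_mask.astype(bool)
        self.correct += int((batch_pred[mask] == batch_true[mask]).sum())
        self.total += int(mask.sum())

    @property
    def accuracy(self):
        return self.correct / max(self.total, 1)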
Example #3
    def train_epoch(self, epoch):
        """Train an epoch."""
        self.model.train()  # Set model to training mode
        losses = Metrics()
        total_iter = len(self.train_data_loader.dataset) // self.train_data_loader.batch_size

        for idx, (x, y) in enumerate(self.train_data_loader):
            s = time.monotonic()

            x = x.to(self.device)
            y = y.to(self.device)
            y_pred = self.model(x)

            self.optimizer.zero_grad()
            loss = self.criterion(y_pred, y)
            loss.backward()
            self.optimizer.step()

            losses.update(loss.item(), x.size(0))

            self.writer.add_scalar('train/current_loss', losses.val, self.train_step)
            self.writer.add_scalar('train/avg_loss', losses.avg, self.train_step)
            self.train_step += 1

            e = time.monotonic()
            if idx % self.print_freq == 0:
                iter_time = e - s  # seconds for the last iteration
                eta = ((total_iter - idx) * iter_time) / 60.0  # minutes remaining
                print(f'Epoch {epoch} [{idx}/{total_iter}], loss={loss.item():.3f}, time={iter_time:.2f}, ETA={eta:.2f}')

        return losses.avg
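`Metrics` here (and in Examples #5 and #6 below) behaves like the common AverageMeter: `update(val, n)` adds a value with weight `n`, and `.val` / `.avg` expose the latest value and the weighted mean. A minimal sketch along those lines, assuming nothing beyond that interface:

class Metrics:
    """Average meter: tracks the latest value and a sample-weighted mean."""

    def __init__(self):
        self.val = 0.0   # most recent value
        self.sum = 0.0   # weighted sum of all values
        self.count = 0   # total weight (e.g. number of samples)
        self.avg = 0.0   # weighted mean

    def update(self, val, n=1):
        # val is typically a per-batch loss, n the batch size.
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / max(self.count, 1)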
Example #4
def test(exp_name):
    print('loading data......')
    test_data = getattr(datasets, opt.dataset)(opt.root,
                                               opt.test_data_dir,
                                               mode='test',
                                               size=opt.testsize)
    test_dataloader = DataLoader(test_data,
                                 batch_size=1,
                                 shuffle=False,
                                 num_workers=opt.num_workers)
    total_batch = len(test_data)  # batch_size is 1
    model, _, _ = generate_model(opt)

    model.eval()

    # metrics_logger initialization
    metrics = Metrics([
        'recall', 'specificity', 'precision', 'F1', 'F2', 'ACC_overall',
        'IoU_poly', 'IoU_bg', 'IoU_mean'
    ])

    logger = get_logger('./results/' + exp_name + '.log')
    with torch.no_grad():
        for i, data in enumerate(test_dataloader):
            img, gt = data['image'], data['label']

            if opt.use_gpu:
                img = img.cuda()
                gt = gt.cuda()

            output = model(img)
            _recall, _specificity, _precision, _F1, _F2, \
            _ACC_overall, _IoU_poly, _IoU_bg, _IoU_mean = evaluate(output, gt)

            metrics.update(recall=_recall,
                           specificity=_specificity,
                           precision=_precision,
                           F1=_F1,
                           F2=_F2,
                           ACC_overall=_ACC_overall,
                           IoU_poly=_IoU_poly,
                           IoU_bg=_IoU_bg,
                           IoU_mean=_IoU_mean)

    metrics_result = metrics.mean(total_batch)

    print("Test Result:")
    logger.info(
        'recall: %.4f, specificity: %.4f, precision: %.4f, F1: %.4f, F2: %.4f, '
        'ACC_overall: %.4f, IoU_poly: %.4f, IoU_bg: %.4f, IoU_mean: %.4f' %
        (metrics_result['recall'], metrics_result['specificity'],
         metrics_result['precision'], metrics_result['F1'],
         metrics_result['F2'], metrics_result['ACC_overall'],
         metrics_result['IoU_poly'], metrics_result['IoU_bg'],
         metrics_result['IoU_mean']))
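Examples #4 and #7 use yet another `Metrics` variant: it is constructed with a list of metric names, `update(**kwargs)` accumulates one value per name, and `mean(total_batch)` divides the running sums by the number of batches. A sketch consistent with that usage, not the original implementation:

class Metrics:
    """Accumulates named metric values and reports their per-batch mean."""

    def __init__(self, metric_names):
        self.sums = {name: 0.0 for name in metric_names}

    def update(self, **kwargs):
        # Each keyword is one metric value for the current batch.
        for name, value in kwargs.items():
            self.sums[name] += float(value)

    def mean(self, total_batch):
        # Average every accumulated metric over the batch count.
        return {name: s / total_batch for name, s in self.sums.items()}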
Example #5
    def validate_epoch(self):
        """Validate after training an epoch."""
        self.model.eval()  # Set model to evaluate mode
        losses = Metrics()

        with torch.no_grad():
            for idx, (x, y) in enumerate(self.val_data_loader):
                x = x.to(self.device)
                y = y.to(self.device)
                y_pred = self.model(x)
                loss = self.criterion(y_pred, y)
                losses.update(loss.item(), x.size(0))

                self.writer.add_scalar('val/current_loss', losses.val, self.val_step)
                self.writer.add_scalar('val/avg_loss', losses.avg, self.val_step)
                self.val_step += 1

        return losses.avg
Example #6
    def test(self):
        self.model.eval()
        losses = Metrics()
        # accuracy = Metrics()

        with torch.no_grad():
            for idx, (x, y) in enumerate(self.test_data_loader):
                x = x.to(self.device)
                y = y.to(self.device)
                y_pred = self.model(x)

                loss = self.criterion(y_pred, y)
                losses.update(loss.item(), x.size(0))

                # predict = 1 if get_mean_score(y_pred.cpu().numpy()[0]) > 5 else 0
                # target = 1 if get_mean_score(y.cpu().numpy()[0]) > 5 else 0
                #
                # accuracy.update(1 if predict == target else 0)

        logger.info(f'test loss={losses.avg}')
        print(losses.avg)
        return losses.avg
Example #7
def valid(model, valid_dataloader, total_batch):
    model.eval()

    # Metrics_logger initialization
    metrics = Metrics([
        'recall', 'specificity', 'precision', 'F1', 'F2', 'ACC_overall',
        'IoU_poly', 'IoU_bg', 'IoU_mean'
    ])

    with torch.no_grad():
        bar = tqdm(enumerate(valid_dataloader), total=total_batch)
        for i, data in bar:
            img, gt = data['image'], data['label']

            if opt.use_gpu:
                img = img.cuda()
                gt = gt.cuda()

            output = model(img)
            _recall, _specificity, _precision, _F1, _F2, \
            _ACC_overall, _IoU_poly, _IoU_bg, _IoU_mean = evaluate(output, gt)

            metrics.update(recall=_recall,
                           specificity=_specificity,
                           precision=_precision,
                           F1=_F1,
                           F2=_F2,
                           ACC_overall=_ACC_overall,
                           IoU_poly=_IoU_poly,
                           IoU_bg=_IoU_bg,
                           IoU_mean=_IoU_mean)

    metrics_result = metrics.mean(total_batch)
    model.train()

    return metrics_result
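`evaluate(output, gt)` is also not shown. One plausible implementation for binary segmentation, assuming `output` holds logits that are thresholded at 0.5 after a sigmoid and `gt` is a binary mask; the formulas are the standard confusion-matrix definitions, with a small `eps` guarding against division by zero:

import torch

def evaluate(output, gt, eps=1e-7):
    # Binarize the prediction and the ground truth.
    pred = (torch.sigmoid(output) > 0.5).float()
    gt = (gt > 0.5).float()

    tp = (pred * gt).sum()
    tn = ((1 - pred) * (1 - gt)).sum()
    fp = (pred * (1 - gt)).sum()
    fn = ((1 - pred) * gt).sum()

    recall = tp / (tp + fn + eps)
    specificity = tn / (tn + fp + eps)
    precision = tp / (tp + fp + eps)
    f1 = 2 * precision * recall / (precision + recall + eps)
    f2 = 5 * precision * recall / (4 * precision + recall + eps)
    acc_overall = (tp + tn) / (tp + tn + fp + fn + eps)
    iou_poly = tp / (tp + fp + fn + eps)
    iou_bg = tn / (tn + fp + fn + eps)
    iou_mean = (iou_poly + iou_bg) / 2

    return (recall, specificity, precision, f1, f2,
            acc_overall, iou_poly, iou_bg, iou_mean)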