Example #1
    def optimization_step(i, img, mask, lbl, save=False):
        # Gradient accumulation: reset gradients only at the start of each
        # virtual batch of batch_multiplier mini-batches.
        if i % batch_multiplier == 0:
            optimizer.zero_grad()

        if torch.cuda.is_available():
            img = img.cuda()
            mask = mask.cuda()
            lbl = lbl.cuda()

        prediction = network(img)
        loss = loss_fn(prediction, mask, lbl)
        loss.backward()

        # Apply the accumulated update once batch_multiplier mini-batches
        # have contributed their gradients.
        if (i+1) % batch_multiplier == 0:
            optimizer.step()

        if save:
            # Log one sample, cropped to the prediction's spatial size.
            pred = torch.sigmoid(prediction)
            data = cut_to_match(pred, img)
            gt = cut_to_match(pred, lbl)
            writer.add_image('Train/prediction', pred[0], epoch)
            writer.add_image('Train/image', data[0, :3], epoch)
            writer.add_image('Train/ground_truth', gt[0], epoch)

        return loss
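Example #1 accumulates gradients over batch_multiplier mini-batches before stepping the optimizer. Below is a self-contained sketch of that pattern; the toy model, data, and the batch_multiplier value are illustrative assumptions, not taken from the original project.

    import torch
    import torch.nn as nn

    model = nn.Linear(8, 1)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    loss_fn = nn.MSELoss()
    batch_multiplier = 4

    for i in range(16):
        # Clear accumulated gradients at the start of every virtual batch.
        if i % batch_multiplier == 0:
            optimizer.zero_grad()

        x = torch.randn(2, 8)
        y = torch.randn(2, 1)

        # Gradients from each mini-batch add up in .grad until step() is called.
        loss = loss_fn(model(x), y)
        loss.backward()

        # Apply the update once batch_multiplier mini-batches have contributed.
        if (i + 1) % batch_multiplier == 0:
            optimizer.step()

A common variant divides the loss by batch_multiplier so the accumulated gradient matches the average over the effective batch; the sketch mirrors Example #1 and leaves the loss unscaled.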
Example #2
    def __call__(self, prediction, mask, target):
        prediction = prediction[0]
        mask = cut_to_match(prediction, mask[0], n_pref=0)
        target = cut_to_match(prediction, target[0], n_pref=0)

        ious = process_one_img(prediction, mask, target, self.thresholds)
        self.iou += ious
        self.n_processed += 1
        return 0
Example #3
File: unet.py  Project: jatentaki/harm2d
    def forward(self, bot: ['b', 'fb', 'hb', 'wb'],
                hor: ['b', 'fh', 'hh', 'wh']) -> ['b', 'fo', 'ho', 'wo']:

        bot_big = self.upsample(bot)
        hor = cut_to_match(bot_big, hor, n_pref=2)
        combined = torch.cat([bot_big, hor], dim=1)

        return self.seq(combined)
Example #4
    def forward(self, bot: [2, 'b', 'fb', 'hb', 'wb'],
                hor: [2, 'b', 'fh', 'hh', 'wh']) -> [2, 'b', 'fo', 'ho', 'wo']:

        bot_big = self.upsample(bot)
        hor = cut_to_match(bot_big, hor, n_pref=3)
        combined = d2.cat2d(bot_big, self.bottom_repr, hor,
                            self.horizontal_repr)

        return self.seq(combined)
Example #5
def test(network, dataset, loss_fn, criteria, epoch, writer, early_stop=None):
    network.eval()
    loss_meter = AvgMeter()

    progress = tqdm(total=len(dataset), dynamic_ncols=True)
    with progress, torch.no_grad():
        for i, (img, mask, lbl) in enumerate(dataset):
            # Optionally truncate evaluation after early_stop batches.
            if i == early_stop:
                break

            if torch.cuda.is_available():
                img = img.cuda()
                mask = mask.cuda()
                lbl = lbl.cuda()

            prediction = network(img)
            loss = loss_fn(prediction, mask, lbl).item()

            if i == 0:
                pred = torch.sigmoid(prediction)
                img = cut_to_match(pred, img)
                lbl = cut_to_match(pred, lbl)
                writer.add_image('Test/prediction', pred[0], epoch)
                writer.add_image('Test/image', img[0, :3], epoch)
                writer.add_image('Test/ground_truth', lbl[0], epoch)

            loss_meter.update(loss)

            writer.add_scalar('Test/loss', loss, epoch)
            for criterion in criteria:
                value = criterion(prediction, mask, lbl)
                value = value.item() if isinstance(value, torch.Tensor) else value
                writer.add_scalar(f'Test/{criterion.name}', value, epoch)
            progress.update(1)

    writer.add_scalar('Test/loss_mean', loss_meter.avg, epoch)
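Example #5 relies on an AvgMeter helper to track the mean loss, but its definition does not appear on this page. A minimal sketch of what such a running-average meter typically looks like; the update and avg names follow the usage above, everything else is an assumption:

    class AvgMeter:
        """Running average of scalar values (sketch, not the project's class)."""
        def __init__(self):
            self.sum = 0.0
            self.count = 0

        def update(self, value, n=1):
            # Accumulate a value observed n times.
            self.sum += value * n
            self.count += n

        @property
        def avg(self):
            # Mean of all values seen so far; 0 if nothing was recorded.
            return self.sum / self.count if self.count else 0.0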
Example #6
    def __call__(self, prediction, mask, target):
        prediction = prediction[0]
        mask = cut_to_match(prediction, mask[0], n_pref=0)
        target = cut_to_match(prediction, target[0], n_pref=0)
        self.process_one_img(prediction, mask, target)
        return 0
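All six examples call cut_to_match(reference, tensor, n_pref=...) to trim the second tensor so its trailing dimensions match the first, as is typical in U-Nets built from valid (unpadded) convolutions. The function's actual definition is not reproduced on this page; the following is only a sketch inferred from the call sites above, and the default n_pref=2 is a guess based on the four-dimensional tensors in Examples #1 and #5:

    import torch

    def cut_to_match(reference, tensor, n_pref=2):
        """Center-crop `tensor` so dims after the first `n_pref` match `reference`.

        Sketch only: inferred from the call sites above, not the original code.
        """
        slices = [slice(None)] * n_pref
        for ref_size, size in zip(reference.shape[n_pref:], tensor.shape[n_pref:]):
            excess = size - ref_size
            assert excess >= 0, "tensor must be at least as large as reference"
            start = excess // 2
            slices.append(slice(start, start + ref_size))
        return tensor[tuple(slices)]

With this reading, hor = cut_to_match(bot_big, hor, n_pref=2) in Example #3 trims the skip connection's spatial extent down to the upsampled bottom features so the two can be concatenated along the channel dimension.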