Example 1
    def validate(self, epoch):

        start = time.time()
        self.net.eval()
        val_batch_size = min(ARGS['batch_size'], len(self.val_dataset))
        val_dataloader = DataLoader(self.val_dataset,
                                    batch_size=val_batch_size)
        epoch_loss = 0.
        for batch_index, items in enumerate(val_dataloader):
            images, labels, edges = items['image'], items['label'], items[
                'edge']
            # cast to the dtypes the network and losses expect, as in train()
            images = images.float()
            labels = labels.long()
            edges = edges.long()
            # print('label min:', labels[0].min(), ' max: ', labels[0].max())
            # print('edge min:', edges[0].min(), ' max: ', edges[0].max())

            if ARGS['gpu']:
                labels = labels.cuda()
                images = images.cuda()
                edges = edges.cuda()

            print('image shape:', images.size())

            with torch.no_grad():
                outputs_edge, outputs = self.net(images)
                loss_edge = lovasz_softmax(outputs_edge, edges)  # Lovasz-Softmax loss on the edge map
                loss_seg = lovasz_softmax(outputs, labels)  # Lovasz-Softmax loss on the segmentation map
                loss = (ARGS['combine_alpha'] * loss_seg +
                        (1 - ARGS['combine_alpha']) * loss_edge)

            pred = torch.max(outputs, dim=1)[1]  # predicted class index per pixel
            # batch IoU for binary (0/1) masks via bitwise intersection over union
            iou = torch.sum(pred & labels) / (torch.sum(pred | labels) + 1e-6)

            print(
                'Validating Epoch: {epoch} [{val_samples}/{total_samples}]\tLoss: {:0.4f}\tIoU: {:0.4f}'
                .format(loss.item(),
                        iou.item(),
                        epoch=epoch,
                        val_samples=batch_index * val_batch_size,
                        total_samples=len(val_dataloader.dataset)))

            epoch_loss += loss.item()  # accumulate as a Python float, matching train()

            # update training loss for each iteration
            # self.writer.add_scalar('Train/loss', loss.item(), n_iter)

        epoch_loss /= len(val_dataloader)
        self.writer.add_scalar('Val/loss', epoch_loss, epoch)

        finish = time.time()

        print('epoch {} validation time consumed: {:.2f}s'.format(
            epoch, finish - start))
Example 2
    def forward(self, output, target):
        probas = F.softmax(output, dim=1)  # lovasz_softmax expects class probabilities, not raw logits
        loss = lovasz_softmax(probas, target, ignore=self.ignore_index)
        return loss
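A minimal, self-contained sketch of how this forward might be wrapped and called. The class name LovaszLoss, its constructor, the ignore_index default, and the import path lovasz_losses are assumptions not shown in the example; only the forward body above comes from the source.

import torch
import torch.nn as nn
import torch.nn.functional as F
from lovasz_losses import lovasz_softmax  # reference Lovasz-Softmax implementation (assumed import path)


class LovaszLoss(nn.Module):  # hypothetical wrapper; the example only shows its forward
    def __init__(self, ignore_index=255):
        super().__init__()
        self.ignore_index = ignore_index

    def forward(self, output, target):
        probas = F.softmax(output, dim=1)  # convert raw scores to per-class probabilities
        return lovasz_softmax(probas, target, ignore=self.ignore_index)


# Usage on dummy data: raw scores [B, C, H, W] and integer labels [B, H, W].
criterion = LovaszLoss(ignore_index=255)
scores = torch.randn(2, 4, 32, 32, requires_grad=True)
labels = torch.randint(0, 4, (2, 32, 32))
loss = criterion(scores, labels)
loss.backward()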
Example 3
    def lovasz_softmax(self, probas, labels, classes='present'):

        out = F.softmax(probas, dim=1)  # convert raw scores to per-class probabilities
        loss = L.lovasz_softmax(out, labels, classes=classes)
        return loss
Example 4
def criterion_reg(pred, label):
    # binary cross-entropy plus Lovasz-Softmax restricted to the foreground class (index 1)
    return nn.BCELoss()(pred, label) + lovasz_softmax(pred, label, classes=[1])
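A hedged usage sketch for criterion_reg, assuming binary segmentation with sigmoid probabilities of shape [B, H, W] and the reference lovasz_softmax that accepts 3-D sigmoid outputs; the tensor names and shapes below are illustrative only, not part of the example.

import torch

logits = torch.randn(2, 64, 64, requires_grad=True)  # assumed raw network output [B, H, W]
probs = torch.sigmoid(logits)                         # nn.BCELoss expects probabilities in [0, 1]
mask = torch.randint(0, 2, (2, 64, 64)).float()       # binary ground-truth mask

loss = criterion_reg(probs, mask)  # BCE + Lovasz-Softmax on the foreground class
loss.backward()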
Example 5
    def train(self, epoch):

        start = time.time()
        self.net.train()
        train_dataloader = DataLoader(self.train_dataset,
                                      batch_size=ARGS['batch_size'],
                                      shuffle=False)
        epoch_loss = 0.
        for batch_index, items in enumerate(train_dataloader):
            images, labels, edges = items['image'], items['label'], items[
                'edge']
            images = images.float()
            labels = labels.long()
            edges = edges.long()

            if ARGS['gpu']:
                labels = labels.cuda()
                images = images.cuda()
                edges = edges.cuda()

            self.optimizer.zero_grad()
            outputs_edge, outputs = self.net(images)
            # print('output edge min:', outputs_edge[0, 1].min(), ' max: ', outputs_edge[0, 1].max())
            # plt.imshow(outputs_edge[0, 1].detach().cpu().numpy() * 255, cmap='gray')
            # plt.show()
            loss_edge = lovasz_softmax(outputs_edge, edges)  # Lovasz-Softmax loss on the edge map
            loss_seg = lovasz_softmax(outputs, labels)  # Lovasz-Softmax loss on the segmentation map
            loss = (ARGS['combine_alpha'] * loss_seg +
                    (1 - ARGS['combine_alpha']) * loss_edge)
            loss.backward()
            self.optimizer.step()
            self.lr_scheduler.step()

            n_iter = (epoch - 1) * len(train_dataloader) + batch_index + 1

            pred = torch.max(outputs, dim=1)[1]  # predicted class index per pixel
            # batch IoU for binary (0/1) masks via bitwise intersection over union
            iou = torch.sum(pred & labels) / (torch.sum(pred | labels) + 1e-6)

            # print('edge min:', edges.min(), ' max: ', edges.max())
            # print('output edge min:', outputs_edge.min(), ' max: ', outputs_edge.max())

            print(
                'Training Epoch: {epoch} [{trained_samples}/{total_samples}]\tL_edge: {:0.4f}\tL_seg: {:0.4f}\tL_all: {:0.4f}\tIoU: {:0.4f}\tLR: {:0.4f}'
                .format(loss_edge.item(),
                        loss_seg.item(),
                        loss.item(),
                        iou.item(),
                        self.optimizer.param_groups[0]['lr'],
                        epoch=epoch,
                        trained_samples=batch_index * ARGS['batch_size'],
                        total_samples=len(train_dataloader.dataset)))

            epoch_loss += loss.item()

            # update training loss for each iteration
            # self.writer.add_scalar('Train/loss', loss.item(), n_iter)

        # log a histogram of every parameter, splitting names like 'conv1.weight' into layer/attribute
        for name, param in self.net.named_parameters():
            layer, attr = os.path.splitext(name)
            attr = attr[1:]
            self.writer.add_histogram("{}/{}".format(layer, attr), param,
                                      epoch)

        epoch_loss /= len(train_dataloader)
        self.writer.add_scalar('Train/loss', epoch_loss, epoch)
        finish = time.time()

        print('epoch {} training time consumed: {:.2f}s'.format(
            epoch, finish - start))
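A minimal sketch of how these train/validate methods might be driven, assuming they live on a trainer object that already holds net, optimizer, lr_scheduler, writer, and the datasets; the Trainer name, its constructor arguments, and ARGS['epochs'] are assumptions, not shown in the examples.

trainer = Trainer(train_dataset, val_dataset)  # hypothetical construction

for epoch in range(1, ARGS['epochs'] + 1):
    trainer.train(epoch)     # one pass over the training set, logs 'Train/loss'
    trainer.validate(epoch)  # one pass over the validation set, logs 'Val/loss'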