Code example #1
    def validating(self, epoch):
        self.model.eval()  # switch to evaluation mode (affects Dropout and BatchNorm)
        conf_mat = np.zeros((num_classes, num_classes)).astype(np.int64)
        tbar = tqdm(val_loader)
        with torch.no_grad():
            for imgs, target in tbar:
                # assert data[0].size()[2:] == data[1].size()[1:]
                imgs = Variable(imgs)
                target = Variable(target)
                imgs = imgs.cuda()
                target = target.cuda()

                self.optimizer.zero_grad()
                outputs = self.model(imgs)
                _, preds = torch.max(outputs, 1)
                preds = preds.data.cpu().numpy().squeeze().astype(np.uint8)
                target = target.data.cpu().numpy().squeeze().astype(np.uint8)
                score = _.data.cpu().numpy()
                conf_mat += metric.confusion_matrix(pred=preds.flatten(),
                                                    label=target.flatten(),
                                                    num_classes=num_classes)
        print(conf_mat)
        val_acc, val_acc_per_class, val_acc_cls, val_IoU, val_FWIoU, val_kappa = metric.evaluate(
            conf_mat)
        writer.add_scalars(main_tag='val_single_acc',
                           tag_scalar_dict={
                               class_names[i]: val_acc_per_class[i]
                               for i in range(len(class_names))
                           },
                           global_step=epoch,
                           walltime=None)
        writer.add_scalars(main_tag='val_single_iou',
                           tag_scalar_dict={
                               class_names[i]: val_IoU[i]
                               for i in range(len(class_names))
                           },
                           global_step=epoch,
                           walltime=None)
        writer.add_scalar('val_acc', val_acc, epoch)
        writer.add_scalar('val_acc_cls', val_acc_cls, epoch)
        writer.add_scalar('val_FWIoU', val_FWIoU, epoch)
        writer.add_scalar('val_kappa', val_kappa, epoch)
        model_name = 'epoch_%d_acc_%.5f_kappa_%.5f' % (epoch, val_acc,
                                                       val_kappa)
        if val_kappa > 0:
            torch.save(self.model.state_dict(),
                       os.path.join(directory, model_name + '.pth'))
            best_kappa = val_kappa
        table = PrettyTable(["index", "class name", "acc", "IoU"])
        for i in range(num_classes):
            table.add_row(
                [i, class_names[i], val_acc_per_class[i], val_IoU[i]])
        print(table)
        print("val_acc:", val_acc)
        print("val_FWIoU:", val_FWIoU)
        print("kappa:", val_kappa)
Code example #2
    def validating(self, epoch):
        self.model.eval()  # switch to evaluation mode (affects Dropout and BatchNorm)
        conf_mat = np.zeros(
            (self.num_classes, self.num_classes)).astype(np.int64)
        tbar = tqdm(self.val_loader)
        for index, data in enumerate(tbar):
            # assert data[0].size()[2:] == data[1].size()[1:]
            imgs = Variable(data[0])
            masks = Variable(data[1])

            if self.args.use_cuda:
                imgs = imgs.cuda()
                masks = masks.cuda()
            self.optimizer.zero_grad()
            outputs = self.model(imgs)
            _, preds = torch.max(outputs, 1)
            preds = preds.data.cpu().numpy().squeeze().astype(np.uint8)
            masks = masks.data.cpu().numpy().squeeze().astype(np.uint8)
            score = _.data.cpu().numpy()
            val_visual = []
            for i in range(score.shape[0]):
                num_score = np.sum(score[i] > 0.9)
                if num_score > 0.9 * (512 * 512):
                    img_pil = self.resore_transform(data[0][i])
                    preds_pil = Image.fromarray(preds[i].astype(
                        np.uint8)).convert('L')
                    pred_vis_pil = colorize_mask(preds[i])
                    gt_vis_pil = colorize_mask(data[1][i].numpy())

                    val_visual.extend([
                        self.visualize(img_pil.convert('RGB')),
                        self.visualize(gt_vis_pil.convert('RGB')),
                        self.visualize(pred_vis_pil.convert('RGB'))
                    ])

                    dir_list = ['rgb', 'label', 'vis_label', 'gt']
                    rgb_save_path = os.path.join(self.save_pseudo_data_path,
                                                 dir_list[0], str(epoch))
                    label_save_path = os.path.join(self.save_pseudo_data_path,
                                                   dir_list[1], str(epoch))
                    vis_save_path = os.path.join(self.save_pseudo_data_path,
                                                 dir_list[2], str(epoch))
                    gt_save_path = os.path.join(self.save_pseudo_data_path,
                                                dir_list[3], str(epoch))

                    path_list = [
                        rgb_save_path, label_save_path, vis_save_path,
                        gt_save_path
                    ]
                    for save_path in path_list:
                        if not os.path.exists(save_path):
                            os.makedirs(save_path)
                    img_pil.save(
                        os.path.join(path_list[0],
                                     'img_batch_%d_%d.jpg' % (index, i)))
                    preds_pil.save(
                        os.path.join(path_list[1],
                                     'label_%d_%d.png' % (index, i)))
                    pred_vis_pil.save(
                        os.path.join(path_list[2],
                                     'vis_%d_%d.png' % (index, i)))
                    gt_vis_pil.save(
                        os.path.join(path_list[3],
                                     'gt_%d_%d.png' % (index, i)))
            if val_visual:
                val_visual = torch.stack(val_visual, 0)
                val_visual = torchvision.utils.make_grid(tensor=val_visual,
                                                         nrow=3,
                                                         padding=5,
                                                         normalize=False,
                                                         range=None,
                                                         scale_each=False,
                                                         pad_value=0)
                writer.add_image(tag='pres&GTs',
                                 img_tensor=val_visual,
                                 global_step=None,
                                 walltime=None)
            conf_mat += metric.confusion_matrix(pred=preds.flatten(),
                                                label=masks.flatten(),
                                                num_classes=self.num_classes)
        val_acc, val_acc_per_class, val_acc_cls, val_IoU, val_mean_IoU, val_kappa = metric.evaluate(
            conf_mat)
        writer.add_scalars(
            main_tag='val_single_acc',
            tag_scalar_dict={
                self.train_dataset.class_names[i]: val_acc_per_class[i]
                for i in range(len(self.train_dataset.class_names))
            },
            global_step=epoch,
            walltime=None)
        writer.add_scalars(
            main_tag='val_single_iou',
            tag_scalar_dict={
                self.train_dataset.class_names[i]: val_IoU[i]
                for i in range(len(self.train_dataset.class_names))
            },
            global_step=epoch,
            walltime=None)
        writer.add_scalar('val_acc', val_acc, epoch)
        writer.add_scalar('val_acc_cls', val_acc_cls, epoch)
        writer.add_scalar('val_mean_IoU', val_mean_IoU, epoch)
        writer.add_scalar('val_kappa', val_kappa, epoch)
        model_name = 'epoch_%d_acc_%.5f_kappa_%.5f' % (epoch, val_acc,
                                                       val_kappa)
        if val_kappa > self.args.best_kappa:
            torch.save(self.model.state_dict(),
                       os.path.join(self.args.directory, model_name + '.pth'))
            self.args.best_kappa = val_kappa
        table = PrettyTable(["index", "class name", "acc", "IoU"])
        for i in range(self.num_classes):
            table.add_row([
                i, self.train_dataset.class_names[i], val_acc_per_class[i],
                val_IoU[i]
            ])
        print(table)
        print("val_acc:", val_acc)
        print("val_mean_IoU:", val_mean_IoU)
        print("kappa:", val_kappa)
Code example #3
    def training(self, epoch):
        self.model.train()  # switch to training mode (affects Dropout and BatchNorm)

        train_loss = average_meter.AverageMeter()

        curr_iter = epoch * len(self.train_loader)
        lr = self.args.base_lr * (1 - float(curr_iter) / self.max_iter)**0.9
        conf_mat = np.zeros(
            (self.num_classes, self.num_classes)).astype(np.int64)
        tbar = tqdm(self.train_loader)
        for index, data in enumerate(tbar):
            # assert data[0].size()[2:] == data[1].size()[1:]
            # data = self.mixup_transform(data, epoch)
            imgs = Variable(data[0])
            masks = Variable(data[1])

            if self.args.use_cuda:
                imgs = imgs.cuda()
                masks = masks.cuda()
            self.optimizer.zero_grad()
            outputs = self.model(imgs)
            # torch.max(tensor, dim) returns (max values, argmax indices) along dim
            _, preds = torch.max(outputs, 1)
            preds = preds.data.cpu().numpy().squeeze().astype(np.uint8)

            loss = self.criterion(outputs, masks)

            train_loss.update(loss.item(), self.args.train_batch_size)  # .item() avoids retaining the autograd graph
            writer.add_scalar('train_loss', train_loss.avg, curr_iter)
            loss.backward()
            self.optimizer.step()

            tbar.set_description(
                'epoch {}, training loss {}, with learning rate {}.'.format(
                    epoch, train_loss.avg, lr))
            masks = masks.data.cpu().numpy().squeeze().astype(np.uint8)
            conf_mat += metric.confusion_matrix(pred=preds.flatten(),
                                                label=masks.flatten(),
                                                num_classes=self.num_classes)
        train_acc, train_acc_per_class, train_acc_cls, train_IoU, train_mean_IoU, train_kappa = metric.evaluate(
            conf_mat)
        writer.add_scalar(tag='train_loss_per_epoch',
                          scalar_value=train_loss.avg,
                          global_step=epoch,
                          walltime=None)
        writer.add_scalar(tag='train_acc',
                          scalar_value=train_acc,
                          global_step=epoch,
                          walltime=None)
        writer.add_scalar(tag='train_kappa',
                          scalar_value=train_kappa,
                          global_step=epoch,
                          walltime=None)
        table = PrettyTable(["index", "class name", "acc", "IoU"])
        for i in range(self.num_classes):
            table.add_row([
                i, self.train_dataset.class_names[i], train_acc_per_class[i],
                train_IoU[i]
            ])
        print(table)
        print("train_acc:", train_acc)
        print("train_mean_IoU:", train_mean_IoU)
        print("kappa:", train_kappa)
Code example #4
    def training(self, epoch):
        self.model.train()  # switch to training mode (affects Dropout and BatchNorm)

        train_loss = average_meter.AverageMeter()

        curr_iter = epoch * len(train_loader)
        #lr = 0.1 * (1 - float(curr_iter) / self.max_iter) ** 0.9
        lr = self.scheduler.get_lr()
        conf_mat = np.zeros((num_classes, num_classes)).astype(np.int64)
        tbar = tqdm(train_loader)

        for imgs, target in tbar:
            # assert data[0].size()[2:] == data[1].size()[1:]
            # data = self.mixup_transform(data, epoch)
            imgs = Variable(imgs)
            target = Variable(target)
            imgs = imgs.cuda()
            target = target.cuda()

            self.optimizer.zero_grad()
            outputs = self.model(imgs)
            # torch.max(tensor, dim) returns (max values, argmax indices) along dim
            _, preds = torch.max(outputs, 1)
            preds = preds.data.cpu().numpy().squeeze().astype(np.uint8)

            loss = self.criterion(outputs, target)

            train_loss.update(loss.item(), 64)  # .item() avoids retaining the autograd graph; 64 is the hard-coded batch size
            writer.add_scalar('train_loss', train_loss.avg, curr_iter)
            loss.backward()
            self.optimizer.step()
            self.scheduler.step()
            tbar.set_description(
                'epoch {}, training loss {}, with learning rate {}.'.format(
                    epoch, train_loss.avg, lr))
            target = target.data.cpu().numpy().squeeze().astype(np.uint8)
            conf_mat += metric.confusion_matrix(pred=preds.flatten(),
                                                label=target.flatten(),
                                                num_classes=num_classes)

        train_acc, train_acc_per_class, train_acc_cls, train_IoU, train_FWIoU, train_kappa = metric.evaluate(
            conf_mat)
        writer.add_scalar(tag='train_loss_per_epoch',
                          scalar_value=train_loss.avg,
                          global_step=epoch,
                          walltime=None)
        writer.add_scalar(tag='train_acc',
                          scalar_value=train_acc,
                          global_step=epoch,
                          walltime=None)
        writer.add_scalar(tag='train_kappa',
                          scalar_value=train_kappa,
                          global_step=epoch,
                          walltime=None)
        table = PrettyTable(["index", "class name", "acc", "IoU"])
        for i in range(num_classes):
            table.add_row(
                [i, class_names[i], train_acc_per_class[i], train_IoU[i]])
        print(table)
        print("train_acc:", train_acc)
        print("train_FWIoU:", train_FWIoU)
        print("kappa:", train_kappa)
    def validating(self, epoch):
        self.model.eval()  # switch to evaluation mode (affects Dropout and BatchNorm)
        conf_mat = np.zeros(
            (self.num_classes, self.num_classes)).astype(np.int64)
        tbar = tqdm(self.val_loader)
        for index, data in enumerate(tbar):
            # assert data[0].size()[2:] == data[1].size()[1:]
            imgs = Variable(data[0])
            masks = Variable(data[1])

            if self.args.use_cuda:
                imgs = imgs.cuda()
                masks = masks.cuda()
            self.optimizer.zero_grad()
            outputs = self.model(imgs)
            _, preds = torch.max(outputs, 1)
            preds = preds.data.cpu().numpy().squeeze().astype(np.uint8)
            masks = masks.data.cpu().numpy().squeeze().astype(np.uint8)
            score = _.data.cpu().numpy()
            for i in range(score.shape[0]):
                num_score = np.sum(score[i] > 0.8)
                if num_score > 0.9 * (128 * 128):
                    img_pil = self.resore_transform(data[0][i])
                    preds_pil = Image.fromarray(preds[i].astype(
                        np.uint8)).convert('L')
                    pred_vis_pil = colorize_mask(preds[i])
                    gt_vis_pil = colorize_mask(data[1][i].numpy())

                    dir_list = ['rgb', 'label', 'vis_label', 'gt']
                    rgb_save_path = os.path.join(self.save_pseudo_data_path,
                                                 dir_list[0], str(epoch))
                    label_save_path = os.path.join(self.save_pseudo_data_path,
                                                   dir_list[1], str(epoch))
                    vis_save_path = os.path.join(self.save_pseudo_data_path,
                                                 dir_list[2], str(epoch))
                    gt_save_path = os.path.join(self.save_pseudo_data_path,
                                                dir_list[3], str(epoch))

                    path_list = [
                        rgb_save_path, label_save_path, vis_save_path,
                        gt_save_path
                    ]
                    for save_path in path_list:
                        if not os.path.exists(save_path):
                            os.makedirs(save_path)
                    img_pil.save(
                        os.path.join(path_list[0],
                                     'img_batch_%d_%d.jpg' % (index, i)))
                    preds_pil.save(
                        os.path.join(path_list[1],
                                     'label_%d_%d.png' % (index, i)))
                    pred_vis_pil.save(
                        os.path.join(path_list[2],
                                     'vis_%d_%d.png' % (index, i)))
                    gt_vis_pil.save(
                        os.path.join(path_list[3],
                                     'gt_%d_%d.png' % (index, i)))
            conf_mat += metric.confusion_matrix(pred=preds.flatten(),
                                                label=masks.flatten(),
                                                num_classes=self.num_classes)
        val_acc, val_acc_per_class, val_acc_cls, val_IoU, val_mean_IoU, val_kappa = metric.evaluate(
            conf_mat)
        model_name = 'epoch_%d_acc_%.5f_kappa_%.5f' % (epoch, val_acc,
                                                       val_kappa)
        if val_kappa > self.args.best_kappa:
            torch.save(self.model.state_dict(),
                       os.path.join(self.args.directory, model_name + '.pth'))
            self.args.best_kappa = val_kappa
        table = PrettyTable(["index", "class name", "acc", "IoU"])
        for i in range(self.num_classes):
            table.add_row(
                [i, gid_classes()[i], val_acc_per_class[i], val_IoU[i]])
        print(table)
        print("val_acc:", val_acc)
        print("val_mean_IoU:", val_mean_IoU)
        print("kappa:", val_kappa)