Example No. 1
def test(**kwargs):
    # `opts`, `datasets`, `ext_model`, `utils` and `untransform` come from the surrounding project.
    opts.parse_kwargs(**kwargs)
    print("test begin")
    dataset = datasets.VOC2012ClassSeg("./datasets", train=False)
    test_loader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=False)
    label_names = test_loader.dataset.class_names
    result = "./result"
    # start from an empty output directory
    if os.path.exists(result):
        shutil.rmtree(result)
    os.mkdir(result)
    net = ext_model.fcn8s.FCN8sAtOnce(21)
    device = torch.device("cuda:0")
    net.load_state_dict(torch.load(opts.load_model_path))
    net.to(device)
    net.eval()  # disable dropout and use running BatchNorm statistics at test time
    for batch_idx, (data, target) in tqdm.tqdm(enumerate(test_loader, 1),
                                               total=len(test_loader),
                                               desc="process: testing", ncols=100):
        data, target = data.to(device), target.to(device)
        with torch.no_grad():  # no gradients needed during inference
            scores = net(data)
        # segmentation: argmax over the class dimension gives the predicted label map
        imgs = data.detach().cpu().numpy()
        lbl_pred = scores.max(1)[1].cpu().numpy()
        lbl_true = target.detach().cpu().numpy()
        for img, lt, lp in zip(imgs, lbl_true, lbl_pred):
            img, lt = untransform(img, lt)
            viz_images = utils.visualize_segmentation(
                img=img, lbl_true=lt, lbl_pred=lp,
                n_class=len(label_names), label_names=label_names)
            viz_images = cv2.cvtColor(viz_images, cv2.COLOR_RGB2BGR)
            cv2.imwrite(os.path.join(result, "{}.jpg".format(batch_idx)), viz_images)
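The `untransform` call above reverses the dataset's input preprocessing before visualization. Its definition is not shown here; a minimal sketch, assuming the dataset subtracted a per-channel mean and transposed images to CHW (the mean values below are placeholders, not the project's actual constants):

import numpy as np

# Placeholder mean; the real dataset class defines its own constant.
MEAN_RGB = np.array([122.675, 116.669, 104.008])

def untransform(img, lbl):
    img = img.transpose(1, 2, 0)          # CHW -> HWC
    img = (img + MEAN_RGB).clip(0, 255)   # undo mean subtraction
    return img.astype(np.uint8), lbl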
Example No. 2
def main():
    args = get_args()

    size = args.size
    shift = args.shift

    output_dir = os.path.join(args.output, f"{size}/{shift}")
    mkdir(output_dir)

    with open('./info/project-info.csv', 'r') as csvfile:
        f_csv = csv.reader(csvfile, delimiter=',', quotechar='|')
        next(f_csv)  # skip the header row

        for row in f_csv:
            tissue = row[1]
            dye = row[2]
            original_name = row[6]
            scale = row[7]

            path_to_image = os.path.join(
                Paths.PATH_TO_IMAGES, tissue, scale, original_name)

            extension = "jpg"
            if not os.path.exists(f"{path_to_image}.{extension}"):
                extension = "png"
            path_to_image = f"{path_to_image}.{extension}"

            output_filename = os.path.join(
                output_dir, tissue + "&" + dye + ".jpg")

            print(output_filename)
            if os.path.exists(output_filename):
                print(f"{output_filename} already exists")
                continue

            visualize_segmentation(
                path_to_image, output_filename, size, shift)
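`mkdir` and `Paths` are assumed to come from the project's utility module; a minimal stand-in for `mkdir` that tolerates an existing directory:

import os

def mkdir(path):
    # Create the directory (and missing parents); no error if it already exists.
    os.makedirs(path, exist_ok=True)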
Example No. 3
    def validate(self):

        visualizations = []
        val_metrics = runningScore(self.n_classes)
        val_loss_meter = averageMeter()

        with torch.no_grad():
            self.model.eval()
            for rgb, ir, target in tqdm.tqdm(
                    self.val_loader, total=len(self.val_loader),
                    desc=f'Valid epoch={self.epoch}', ncols=80, leave=False):

                rgb, ir, target = rgb.to(self.device), ir.to(self.device), target.to(self.device)

                score = self.model(rgb, ir)

                weight = self.val_loader.dataset.class_weight
                if weight:
                    weight = torch.Tensor(weight).to(self.device)

                loss = CrossEntropyLoss(score, target, weight=weight, reduction='mean', ignore_index=-1)
                loss_data = loss.item()
                if np.isnan(loss_data):
                    raise ValueError('loss is nan while validating')

                val_loss_meter.update(loss_data)

                rgbs = rgb.data.cpu()
                irs = ir.data.cpu()

                if isinstance(score, (tuple, list)):
                    lbl_pred = score[0].data.max(1)[1].cpu().numpy()
                else:
                    lbl_pred = score.data.max(1)[1].cpu().numpy()
                lbl_true = target.data.cpu()

                # renamed so the per-sample arrays don't shadow the batch tensors above
                for rgb_img, ir_img, lt, lp in zip(rgbs, irs, lbl_true, lbl_pred):
                    rgb_img, ir_img, lt = self.val_loader.dataset.untransform(rgb_img, ir_img, lt)
                    val_metrics.update(lt, lp)
                    if len(visualizations) < 9:
                        viz = visualize_segmentation(
                            lbl_pred=lp, lbl_true=lt, img=rgb_img, ir=ir_img,
                            n_classes=self.n_classes, dataloader=self.train_loader)
                        visualizations.append(viz)

        acc, acc_cls, mean_iou, fwavacc, cls_iu = val_metrics.get_scores()
        metrics = [acc, acc_cls, mean_iou, fwavacc]

        print(f'\nEpoch: {self.epoch}', f'loss: {val_loss_meter.avg}, mIoU: {mean_iou}')

        out = osp.join(self.out, 'visualization_viz')
        if not osp.exists(out):
            os.makedirs(out)
        out_file = osp.join(out, 'epoch{:0>5d}.jpg'.format(self.epoch))
        # scipy.misc.imsave was removed in SciPy 1.3; imageio.imwrite is a drop-in alternative.
        scipy.misc.imsave(out_file, get_tile_image(visualizations))

        with open(osp.join(self.out, 'log.csv'), 'a') as f:
            elapsed_time = (
                datetime.datetime.now(pytz.timezone('UTC')) -
                self.timestamp_start).total_seconds()
            log = [self.epoch] + [''] * 5 + \
                  [val_loss_meter.avg] + metrics + [elapsed_time]
            log = map(str, log)
            f.write(','.join(log) + '\n')

        mean_iu = metrics[2]
        is_best = mean_iu > self.best_mean_iu
        if is_best:
            self.best_mean_iu = mean_iu
        torch.save({
            'epoch': self.epoch,
            'arch': self.model.__class__.__name__,
            'optim_state_dict': self.optim.state_dict(),
            'model_state_dict': self.model.state_dict(),
            'best_mean_iu': self.best_mean_iu,
        }, osp.join(self.out, 'checkpoint.pth.tar'))
        if is_best:
            shutil.copy(osp.join(self.out, 'checkpoint.pth.tar'),
                        osp.join(self.out, 'model_best.pth.tar'))

        val_loss_meter.reset()
        val_metrics.reset()

        class_names = self.val_loader.dataset.class_names
        if class_names is not None:
            for index, value in enumerate(cls_iu.values()):
                print(f'{class_names[index]:<20}{value * 100:.2f}')
        else:
            print('\nclass_names not specified; printing class indices instead')
            for key, value in cls_iu.items():
                print(key, f'{value * 100:.2f}')
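`runningScore` and `averageMeter` are standard pytorch-semseg-style metric helpers rather than part of this snippet. `averageMeter`, for example, is typically just a running mean; a sketch consistent with the calls above (`update`, `avg`, `reset`), though not necessarily the project's exact class:

class averageMeter:
    """Running average of a scalar, e.g. the validation loss."""

    def __init__(self):
        self.reset()

    def reset(self):
        self.val, self.sum, self.count, self.avg = 0.0, 0.0, 0, 0.0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count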
Example No. 4
def main():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--model', type=str, default='deeplab-largefov')
    parser.add_argument(
        '--model_file',
        type=str,
        default=
        '/home/ecust/lx/Semantic-Segmentation-PyTorch/logs/deeplab-largefov_20190417_230357/model_best.pth.tar',
        help='Model path')
    parser.add_argument('--dataset_type',
                        type=str,
                        default='voc',
                        help='type of dataset')
    parser.add_argument(
        '--dataset',
        type=str,
        default='/home/ecust/Datasets/PASCAL VOC/VOCdevkit/VOC2012',
        help='path to dataset')
    # type=tuple would split the argument string into characters; take two ints instead
    parser.add_argument('--img_size',
                        type=int,
                        nargs=2,
                        default=None,
                        help='resize images using bilinear interpolation')
    parser.add_argument('--crop_size',
                        type=int,
                        nargs=2,
                        default=None,
                        help='crop images')
    parser.add_argument('--n_classes',
                        type=int,
                        default=21,
                        help='number of classes')
    # type=bool would treat any non-empty string (including "False") as True
    parser.add_argument('--pretrained',
                        type=lambda s: s.lower() in ('true', '1'),
                        default=True,
                        help='should be set the same as train.py')
    args = parser.parse_args()

    model_file = args.model_file
    root = args.dataset
    n_classes = args.n_classes

    crop = None
    # crop = Compose([RandomCrop(args.crop_size)])
    loader = get_loader(args.dataset_type)
    val_loader = DataLoader(loader(root,
                                   n_classes=n_classes,
                                   split='val',
                                   img_size=args.img_size,
                                   augmentations=crop,
                                   pretrained=args.pretrained),
                            batch_size=1,
                            shuffle=False,
                            num_workers=4)

    model, _, _ = Models.model_loader(args.model, n_classes, resume=None)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = model.to(device)

    print('==> Loading {} model file: {}'.format(model.__class__.__name__,
                                                 model_file))

    model_data = torch.load(model_file)

    try:
        model.load_state_dict(model_data)
    except Exception:
        # some checkpoints wrap the weights in a dict under 'model_state_dict'
        model.load_state_dict(model_data['model_state_dict'])
    model.eval()

    print('==> Evaluating with {} dataset'.format(args.dataset_type))
    visualizations = []
    metrics = runningScore(n_classes)

    for data, target in tqdm.tqdm(val_loader,
                                  total=len(val_loader),
                                  ncols=80,
                                  leave=False):
        data, target = data.to(device), target.to(device)
        with torch.no_grad():  # evaluation only; skip autograd bookkeeping
            score = model(data)

        imgs = data.data.cpu()
        lbl_pred = score.data.max(1)[1].cpu().numpy()
        lbl_true = target.data.cpu()
        for img, lt, lp in zip(imgs, lbl_true, lbl_pred):
            img, lt = val_loader.dataset.untransform(img, lt)
            metrics.update(lt, lp)
            if len(visualizations) < 9:
                viz = visualize_segmentation(lbl_pred=lp,
                                             lbl_true=lt,
                                             img=img,
                                             n_classes=n_classes,
                                             dataloader=val_loader)
                visualizations.append(viz)
    acc, acc_cls, mean_iu, fwavacc, cls_iu = metrics.get_scores()
    print('''
Accuracy:       {0:.2f}
Accuracy Class: {1:.2f}
Mean IoU:       {2:.2f}
FWAV Accuracy:  {3:.2f}'''.format(acc * 100, acc_cls * 100, mean_iu *
                                  100, fwavacc * 100) + '\n')

    class_names = val_loader.dataset.class_names
    if class_names is not None:
        for index, value in enumerate(cls_iu.values()):
            print(f'{class_names[index]:<20}{value * 100:.2f}')
    else:
        print('\nclass_names not specified; printing class indices instead')
        for key, value in cls_iu.items():
            print(key, f'{value * 100:.2f}')

    viz = get_tile_image(visualizations)
    # img = Image.fromarray(viz)
    # img.save('viz_evaluate.png')
    scipy.misc.imsave('viz_evaluate.png', viz)
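`scipy.misc.imsave` was deprecated in SciPy 1.0 and removed in 1.3, so the save calls in these examples fail on current SciPy. `imageio` provides the usual drop-in replacement:

import imageio

# given `viz` from get_tile_image(visualizations) above,
# this is equivalent to scipy.misc.imsave('viz_evaluate.png', viz)
imageio.imwrite('viz_evaluate.png', viz)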
Example No. 5
def main():
    args = argparser()

    model_file = '/home/ecust/lx/Multimodal/logs/resnet_20190916_093026/model_best.pth.tar'
    root = args.dataset_root

    crop = None
    # crop = Compose([RandomCrop(args.crop_size)])
    loader = get_loader(args.dataset)
    val_loader = DataLoader(
        loader(root, split='val', base_size=args.base_size, augmentations=crop),
        batch_size=1, shuffle=False, num_workers=4)
    args.n_classes = loader.NUM_CLASS

    model = Models.model_loader(args.model, args.n_classes,
                                backbone=args.backbone, norm_layer=nn.BatchNorm2d,
                                multi_grid=args.multi_grid,
                                multi_dilation=args.multi_dilation)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = model.to(device)

    print('==> Loading {} model file: {}'.format(model.__class__.__name__, model_file))

    model_data = torch.load(model_file)

    try:
        model.load_state_dict(model_data)
    except Exception:
        model.load_state_dict(model_data['model_state_dict'])
    model.eval()

    print('==> Evaluating with {} dataset'.format(args.dataset))
    visualizations = []
    metrics = runningScore(args.n_classes)

    i = 0
    for rgb, ir, target in tqdm.tqdm(val_loader, total=len(val_loader), ncols=80, leave=False):
        rgb, ir, target = rgb.to(device), ir.to(device), target.to(device)
        with torch.no_grad():  # evaluation only; skip autograd bookkeeping
            score = model(rgb, ir)

        rgbs = rgb.data.cpu()
        irs = ir.data.cpu()
        lbl_pred = score[0].data.max(1)[1].cpu().numpy()
        lbl_true = target.data.cpu()
        # renamed so the per-sample arrays don't shadow the batch tensors above
        for rgb_img, ir_img, lt, lp in zip(rgbs, irs, lbl_true, lbl_pred):
            rgb_img, ir_img, lt = val_loader.dataset.untransform(rgb_img, ir_img, lt)
            metrics.update(lt, lp)

            # visualize every fifth sample, up to nine tiles
            i += 1
            if i % 5 == 0 and len(visualizations) < 9:
                viz = visualize_segmentation(
                    lbl_pred=lp, lbl_true=lt, img=rgb_img, ir=ir_img,
                    n_classes=args.n_classes, dataloader=val_loader)
                visualizations.append(viz)

    acc, acc_cls, mean_iu, fwavacc, cls_iu = metrics.get_scores()
    print('''
Accuracy:       {0:.2f}
Accuracy Class: {1:.2f}
Mean IoU:       {2:.2f}
FWAV Accuracy:  {3:.2f}'''.format(acc * 100,
                                  acc_cls * 100,
                                  mean_iu * 100,
                                  fwavacc * 100) + '\n')

    class_names = val_loader.dataset.class_names
    if class_names is not None:
        for index, value in enumerate(cls_iu.values()):
            print(f'{class_names[index]:<20}{value * 100:.2f}')
    else:
        print('\nclass_names not specified; printing class indices instead')
        for key, value in cls_iu.items():
            print(key, f'{value * 100:.2f}')

    viz = get_tile_image(visualizations)
    # img = Image.fromarray(viz)
    # img.save('viz_evaluate.png')
    scipy.misc.imsave('viz_evaluate.png', viz)
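`get_tile_image` arranges the collected per-sample visualizations into a single grid image; it belongs to the same utility family as `visualize_segmentation`. A simplified sketch of the idea, assuming equal-sized HWC tiles:

import math
import numpy as np

def get_tile_image(images):
    # Lay equal-sized images out on a near-square grid.
    n = len(images)
    cols = math.ceil(math.sqrt(n))
    rows = math.ceil(n / cols)
    h, w, c = images[0].shape
    canvas = np.zeros((rows * h, cols * w, c), dtype=images[0].dtype)
    for idx, img in enumerate(images):
        r, col = divmod(idx, cols)
        canvas[r * h:(r + 1) * h, col * w:(col + 1) * w] = img
    return canvas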
Example No. 6
    def validate(self):

        visualizations = []
        val_metrics = runningScore(self.n_classes)
        val_loss_meter = averageMeter()

        with torch.no_grad():
            self.model.eval()
            for data, target in tqdm.tqdm(self.val_loader,
                                          total=len(self.val_loader),
                                          desc=f'Valid epoch={self.epoch}',
                                          ncols=80,
                                          leave=False):

                data, target = data.to(self.device), target.to(self.device)

                score = self.model(data)

                weight = self.val_loader.dataset.class_weight
                if weight:
                    weight = torch.Tensor(weight).to(self.device)

                # target = resize_labels(target, (score.size()[2], score.size()[3]))
                # target = target.to(self.device)
                loss = CrossEntropyLoss(score,
                                        target,
                                        weight=weight,
                                        reduction='mean',
                                        ignore_index=-1)
                loss_data = loss.item()
                if np.isnan(loss_data):
                    raise ValueError('loss is nan while validating')

                val_loss_meter.update(loss_data)

                imgs = data.data.cpu()
                if isinstance(score, tuple):
                    lbl_pred = score[-1].data.max(1)[1].cpu().numpy()
                else:
                    lbl_pred = score.data.max(1)[1].cpu().numpy()
                lbl_true = target.data.cpu()
                for img, lt, lp in zip(imgs, lbl_true, lbl_pred):
                    img, lt = self.val_loader.dataset.untransform(img, lt)
                    val_metrics.update(lt, lp)
                    if len(visualizations) < 9:
                        viz = visualize_segmentation(
                            lbl_pred=lp,
                            lbl_true=lt,
                            img=img,
                            n_classes=self.n_classes,
                            dataloader=self.train_loader)
                        visualizations.append(viz)

        acc, acc_cls, mean_iou, fwavacc, _ = val_metrics.get_scores()
        metrics = [acc, acc_cls, mean_iou, fwavacc]

        print(f'\nEpoch: {self.epoch}',
              f'loss: {val_loss_meter.avg}, mIoU: {mean_iou}')

        out = osp.join(self.out, 'visualization_viz')
        if not osp.exists(out):
            os.makedirs(out)
        out_file = osp.join(out, 'epoch{:0>5d}.jpg'.format(self.epoch))
        scipy.misc.imsave(out_file, get_tile_image(visualizations))

        with open(osp.join(self.out, 'log.csv'), 'a') as f:
            elapsed_time = (datetime.datetime.now(pytz.timezone('UTC')) -
                            self.timestamp_start).total_seconds()
            log = [self.epoch] + [''] * 5 + \
                  [val_loss_meter.avg] + metrics + [elapsed_time]
            log = map(str, log)
            f.write(','.join(log) + '\n')

        mean_iu = metrics[2]
        is_best = mean_iu > self.best_mean_iu
        if is_best:
            self.best_mean_iu = mean_iu
        torch.save(
            {
                'epoch': self.epoch,
                'arch': self.model.__class__.__name__,
                'optim_state_dict': self.optim.state_dict(),
                'model_state_dict': self.model.state_dict(),
                'best_mean_iu': self.best_mean_iu,
            }, osp.join(self.out, 'checkpoint.pth.tar'))
        if is_best:
            shutil.copy(osp.join(self.out, 'checkpoint.pth.tar'),
                        osp.join(self.out, 'model_best.pth.tar'))

        val_loss_meter.reset()
        val_metrics.reset()
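The checkpoint dict written above can be restored symmetrically when resuming training. A minimal sketch using the keys this `validate` saves (the `resume` helper itself is hypothetical):

import os.path as osp
import torch

def resume(trainer, out_dir):
    # Load the state written by validate() into an already-constructed trainer.
    checkpoint = torch.load(osp.join(out_dir, 'checkpoint.pth.tar'))
    trainer.model.load_state_dict(checkpoint['model_state_dict'])
    trainer.optim.load_state_dict(checkpoint['optim_state_dict'])
    trainer.epoch = checkpoint['epoch']
    trainer.best_mean_iu = checkpoint['best_mean_iu']
    return trainer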