Example #1
class CalculateMIoU(Callback):
    def __init__(self, nb_class):
        self.nb_class = nb_class

    def _setup_graph(self):
        self.pred = self.trainer.get_predictor(['image'], ['prob'])

    def _before_train(self):
        pass

    def _trigger(self):
        global args
        self.val_ds = get_data('val', args.base_dir, args.meta_dir, 1)
        self.val_ds.reset_state()

        self.stat = MIoUStatistics(self.nb_class)

        for image, label in tqdm(self.val_ds.get_data()):
            label = np.squeeze(label)
            image = np.squeeze(image)
            prediction = predict_scaler(image,
                                        self.pred,
                                        scales=[0.5, 0.75, 1, 1.25, 1.5],
                                        classes=CLASS_NUM,
                                        tile_size=CROP_SIZE,
                                        is_densecrf=False)
            prediction = np.argmax(prediction, axis=2)
            self.stat.feed(prediction, label)

        self.trainer.monitors.put_scalar("mIoU", self.stat.mIoU)
        self.trainer.monitors.put_scalar("mean_accuracy",
                                         self.stat.mean_accuracy)
        self.trainer.monitors.put_scalar("accuracy", self.stat.accuracy)

    def _trigger(self):
        global args
        self.val_ds = get_data('val', args.base_dir, args.meta_dir, 1)
        self.val_ds.reset_state()

        self.stat = MIoUStatistics(self.nb_class)

        def mypredictor(input_img):
            # input image: 1*H*W*3
            # output : H*W*C
            output = self.pred(input_img)
            return output[0][0]

        for image, label in tqdm(self.val_ds.get_data()):
            label = np.squeeze(label)
            image = np.squeeze(image)
            prediction = predict_scaler(image,
                                        mypredictor,
                                        scales=[0.5, 0.75, 1, 1.25, 1.5],
                                        classes=CLASS_NUM,
                                        tile_size=CROP_SIZE,
                                        is_densecrf=False)
            prediction = np.argmax(prediction, axis=2)
            self.stat.feed(prediction, label)

        self.trainer.monitors.put_scalar("mIoU", self.stat.mIoU)
        self.trainer.monitors.put_scalar("mean_accuracy",
                                         self.stat.mean_accuracy)
        self.trainer.monitors.put_scalar("accuracy", self.stat.accuracy)
def get_validation_miou(model_g, model_f1, model_f2, quick_test=1e10):
    if is_debug == 1:
        quick_test = 2

    logger.info("proceed test on cityscapes val set...")
    model_g.eval()
    model_f1.eval()
    model_f2.eval()

    val_img_transform = Compose([
        Scale(train_img_shape, Image.BILINEAR),
        ToTensor(),
        Normalize([.485, .456, .406], [.229, .224, .225]),
    ])
    val_label_transform = Compose([
        Scale(cityscapes_image_shape, Image.NEAREST),
        # ToTensor()
    ])

    # Note: the training and validation image sizes differ here; this is easy to get wrong.

    target_loader = data.DataLoader(
        get_dataset(dataset_name="city16", split="val",
                    img_transform=val_img_transform,
                    label_transform=val_label_transform,
                    test=True, input_ch=3),
        batch_size=1, pin_memory=True)

    from tensorpack.utils.stats import MIoUStatistics
    stat = MIoUStatistics(args.n_class)

    interp = torch.nn.Upsample(size=(cityscapes_image_shape[1], cityscapes_image_shape[0]), mode='bilinear')

    for index, (origin_imgs, labels, paths) in tqdm(enumerate(target_loader)):
        if index > quick_test: break
        path = paths[0]
        imgs = Variable(origin_imgs.cuda(), volatile=True)

        feature = model_g(imgs)
        outputs = model_f1(feature)

        if args.use_f2:
            outputs += model_f2(feature)

        pred = interp(outputs)[0, :].data.max(0)[1].cpu()

        feed_predict = np.squeeze(np.uint8(pred.numpy()))
        feed_label = np.squeeze(np.asarray(labels.numpy()))

        stat.feed(feed_predict, feed_label)

    logger.info("tensorpack IoU16: {}".format(stat.mIoU_beautify))
    logger.info("tensorpack mIoU16: {}".format(stat.mIoU))
    model_g.train()
    model_f1.train()
    model_f2.train()

    return stat.mIoU
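All of these evaluation routines delegate the metric to tensorpack's MIoUStatistics (feed one prediction/label pair per image, then read mIoU, mean_accuracy, accuracy). As a rough reference only, a self-contained numpy sketch of the underlying confusion-matrix computation might look as follows; the ignore-label handling (255) is an assumption and may differ from tensorpack's actual implementation.

import numpy as np

def confusion_matrix(pred, label, nb_class, ignore_label=255):
    # flatten both maps, drop ignored pixels, then bin (label, pred) pairs
    pred, label = pred.flatten(), label.flatten()
    keep = label != ignore_label
    idx = label[keep].astype(np.int64) * nb_class + pred[keep].astype(np.int64)
    return np.bincount(idx, minlength=nb_class ** 2).reshape(nb_class, nb_class)

def miou_from_confusion(conf):
    inter = np.diag(conf).astype(np.float64)      # per-class true positives
    union = conf.sum(0) + conf.sum(1) - inter     # pred + gt - intersection
    iou = inter / np.maximum(union, 1)
    return iou[union > 0].mean()                  # average over classes that appear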
def proceed_test(model_g, model_f1, model_f2, quick_test=1e10):
    logger.info("proceed test on cityscapes val set...")
    model_g.eval()
    model_f1.eval()
    model_f2.eval()

    test_img_shape = (2048, 1024)

    val_img_transform = Compose([
        Scale(test_img_shape, Image.BILINEAR),
        ToTensor(),
        Normalize([.485, .456, .406], [.229, .224, .225]),
    ])
    val_label_transform = Compose([
        # labels are resized with nearest-neighbour so integer class ids are not blended
        Scale(test_img_shape, Image.NEAREST),
        # ToTensor()
    ])

    target_loader = data.DataLoader(get_dataset(
        dataset_name="city16",
        split="val",
        img_transform=val_img_transform,
        label_transform=val_label_transform,
        test=True,
        input_ch=3),
                                    batch_size=1,
                                    pin_memory=True)

    from tensorpack.utils.stats import MIoUStatistics
    stat = MIoUStatistics(args.n_class)

    for index, (origin_imgs, labels, paths) in tqdm(enumerate(target_loader)):
        if index > quick_test: break
        path = paths[0]
        # if index > 10: break
        imgs = Variable(origin_imgs.cuda(), volatile=True)

        feature = model_g(imgs)
        outputs = model_f1(feature)

        if args.use_f2:
            outputs += model_f2(feature)

        pred = outputs[0, :].data.max(0)[1].cpu()

        feed_predict = np.squeeze(np.uint8(pred.numpy()))
        feed_label = np.squeeze(np.asarray(labels.numpy()))

        stat.feed(feed_predict, feed_label)

    logger.info("tensorpack mIoU: {}".format(stat.mIoU))
    logger.info("tensorpack mean_accuracy: {}".format(stat.mean_accuracy))
    logger.info("tensorpack accuracy: {}".format(stat.accuracy))
    model_g.train()
    model_f1.train()
    model_f2.train()
def proceed_validation(args, is_save=True, is_densecrf=False):
    import cv2
    #name = "ningbo_val"
    name = "val"
    ds = dataset.PSSD(args.base_dir, args.meta_dir, name)
    ds = BatchData(ds, 1)

    pred_config = PredictConfig(model=Model(),
                                session_init=get_model_loader(args.load),
                                input_names=['image'],
                                output_names=['prob'])
    predictor = OfflinePredictor(pred_config)
    from tensorpack.utils.fs import mkdir_p
    result_dir = "result/pssd_apr26"
    #result_dir = "ningbo_validation"
    mkdir_p(result_dir)
    i = 1
    stat = MIoUStatistics(CLASS_NUM)
    logger.info("start validation....")
    for image, label in tqdm(ds.get_data()):
        label = np.squeeze(label)
        image = np.squeeze(image)

        def mypredictor(input_img):
            # input image: 1*H*W*3
            # output: H*W*C
            output = predictor(input_img)
            return output[0][0]

        prediction = predict_scaler(image,
                                    mypredictor,
                                    scales=[0.5, 0.75, 1, 1.25, 1.5],
                                    classes=CLASS_NUM,
                                    tile_size=CROP_SIZE,
                                    is_densecrf=is_densecrf)
        prediction = np.argmax(prediction, axis=2)
        stat.feed(prediction, label)

        if is_save:
            cv2.imwrite(
                os.path.join(result_dir, "{}.png".format(i)),
                np.concatenate((image, visualize_label(label),
                                visualize_label(prediction)),
                               axis=1))
            #imwrite_grid(image,label,prediction, border=512, prefix_dir=result_dir, imageId = i)
        i += 1

    logger.info("mIoU: {}".format(stat.mIoU))
    logger.info("mean_accuracy: {}".format(stat.mean_accuracy))
    logger.info("accuracy: {}".format(stat.accuracy))
def proceed_test(model, input_size, quick_test=1e10):
    logger.info("proceed test on cityscapes val set...")
    model.eval()
    model.cuda()
    testloader = data.DataLoader(cityscapesDataSet(crop_size=input_size,
                                                   mean=IMG_MEAN,
                                                   scale=False,
                                                   mirror=False,
                                                   set="val"),
                                 batch_size=1,
                                 shuffle=False,
                                 pin_memory=True)

    interp = nn.Upsample(size=(cityscape_image_size[1],
                               cityscape_image_size[0]),
                         mode='bilinear')

    from tensorpack.utils.stats import MIoUStatistics
    stat = MIoUStatistics(NUM_CLASSES)

    for index, batch in tqdm(enumerate(testloader), desc="validation"):
        if index > quick_test: break

        image, label, _, name = batch
        image, label = Variable(image, volatile=True), Variable(label)

        output2 = model(image.cuda())  #(1,19,129,257)
        output = interp(output2).cpu().data[0].numpy()
        output = output.transpose(1, 2, 0)
        output = np.asarray(np.argmax(output, axis=2), dtype=np.uint8)
        stat.feed(output, label.data.cpu().numpy().squeeze())

    miou16 = np.sum(stat.IoU) / 16
    print("tensorpack class16 mIoU: {}".format(miou16))
    model.train()
    return miou16
Example #7
def main():
    """Create the model and start the evaluation process."""
    import warnings
    if not sys.warnoptions:
        warnings.simplefilter("ignore")

    args = get_arguments()

    gpu0 = args.gpu

    if not os.path.exists(args.save):
        os.makedirs(args.save)

    model = Res_Deeplab(num_classes=args.num_classes)
    #from pytorchgo.model.MyFCN8s import MyFCN8s
    #model = MyFCN8s(n_class=NUM_CLASSES)

    if args.restore_from[:4] == 'http' :
        saved_state_dict = model_zoo.load_url(args.restore_from)
    else:
        saved_state_dict = torch.load(args.restore_from)
    model.load_state_dict(saved_state_dict['model_state_dict'])

    model.eval()
    model.cuda(gpu0)

    image_size = (1024, 512)  # alternatives: (1280, 720), (2048, 1024)
    cityscape_image_size = (2048, 1024)

    print ("evaluating {}".format(args.restore_from))
    print ("************ best mIoU:{} *******".format(saved_state_dict['best_mean_iu']))
    print("evaluation image size: {}, please make sure this image size is equal to your training image size, this is important for your final mIoU!".format(image_size))

    testloader = data.DataLoader(
        cityscapesDataSet(crop_size=(image_size[0], image_size[1]),
                          mean=IMG_MEAN, scale=False, mirror=False,
                          set=args.set),
        batch_size=1, shuffle=False, pin_memory=True)

    interp = nn.Upsample(size=(cityscape_image_size[1], cityscape_image_size[0]), mode='bilinear')

    from tensorpack.utils.stats import MIoUStatistics
    stat = MIoUStatistics(NUM_CLASSES)

    for index, batch in tqdm(enumerate(testloader)):
        image,label, _, name = batch
        image, label = Variable(image, volatile=True), Variable(label)

        #output2 = model(image.cuda(gpu0))
        output1, output2 = model(image.cuda(gpu0))
        output = interp(output2).cpu().data[0].numpy()

        output = output.transpose(1,2,0)
        output = np.asarray(np.argmax(output, axis=2), dtype=np.uint8)

        output_col = colorize_mask(output)

        name = name[0].split('/')[-1]
        output_col.save('%s/%s_color.png' % (args.save, name.split('.')[0]))

        stat.feed(output, label.data.cpu().numpy().squeeze())

    print("tensorpack IoU: {}".format(stat.mIoU_beautify))
    print("tensorpack class16 IoU: {}".format(np.sum(stat.IoU)/16))
    print("tensorpack mIoU: {}".format(stat.mIoU))
    print("tensorpack mean_accuracy: {}".format(stat.mean_accuracy))
    print("tensorpack accuracy: {}".format(stat.accuracy))
Example #8
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataroot',
                        default='/home/hutao/lab/pytorchgo/example/ROAD/data',
                        help='Path to source dataset')
    parser.add_argument(
        '--model_file',
        default=
        'train_log/reborn.vgg16.lr1e-5.w1_10_1.sm_bugfix.class16.adapSegnet_DC.1024x512.wgan.d_mse.dstep1/model_best.pth.tar',
        help='Model path')
    parser.add_argument('--gpu', type=int, default=3)
    parser.add_argument('--method',
                        default='LSD',
                        help="Method to use for training | LSD, sourceonly")
    args = parser.parse_args()

    os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)
    model_file = args.model_file

    image_size = [2048, 1024]
    dset = 'cityscapes'
    val_loader = torch.utils.data.DataLoader(torchfcn.datasets.CityScapes(
        dset,
        args.dataroot,
        split='val',
        transform=True,
        image_size=image_size),
                                             batch_size=1,
                                             shuffle=False)

    # Defining and loading model

    n_class = 16
    if args.method == 'sourceonly':
        model = torchfcn.models.FCN8s_sourceonly(n_class=n_class)
    elif args.method == 'LSD':
        model = torchfcn.models.Seg_model(n_class=n_class)
    else:
        raise ValueError(
            'Invalid argument for method specified - Should be LSD or sourceonly'
        )

    if torch.cuda.is_available():
        model = model.cuda()
    print('==> Loading %s model file: %s' %
          (model.__class__.__name__, model_file))

    model_data = torch.load(args.model_file)
    print "best mean iou in training: {}".format(model_data['best_mean_iu'])
    try:
        model.load_state_dict(model_data)
    except Exception:
        model.load_state_dict(model_data['model_state_dict'])
    model.eval()

    # Evaluation

    print('==> Evaluating with CityScapes validation')
    visualizations = []
    from tensorpack.utils.stats import MIoUStatistics
    stat = MIoUStatistics(n_class)

    label_trues, label_preds = [], []
    for batch_idx, (data, target) in tqdm.tqdm(enumerate(val_loader),
                                               total=len(val_loader),
                                               ncols=80,
                                               leave=False):
        if torch.cuda.is_available():
            data, target = data.cuda(), target.cuda()
        data, target = Variable(data, volatile=True), Variable(target)
        target_np = target.data.cpu().numpy()
        if args.method == 'sourceonly':
            score = model(data)
        elif args.method == 'LSD':
            score = model(data)
        lbl_pred = score.data.max(1)[1].cpu().numpy()[:, :, :].squeeze()

        if dset == 'cityscapes':
            lbl_pred_new = transform_label(lbl_pred, (2048, 1024))

        label_trues.append(target.data.cpu().numpy().squeeze())
        label_preds.append(lbl_pred_new.squeeze())
        stat.feed(label_preds[-1], label_trues[-1])

    print("tensorpack mIoU: {}".format(stat.mIoU))
    print("tensorpack mean_accuracy: {}".format(stat.mean_accuracy))
    print("tensorpack accuracy: {}".format(stat.accuracy))

    # Computing mIoU
    json_path = osp.join(args.dataroot, 'cityscapes_info.json')
    compute_mIoU(label_preds, label_trues, json_path)
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--dataroot',
        default='/home/hutao/lab/pytorchgo/example/LSD-seg/data',
        help='Path to source dataset')
    parser.add_argument('--model_file',
                        default='train_log/train.class16/model_best.pth.tar',
                        help='Model path')
    parser.add_argument('--gpu', type=int, default=4)
    parser.add_argument('--method',
                        default='LSD',
                        help="Method to use for training | LSD, sourceonly")
    args = parser.parse_args()

    os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)
    #torch.cuda.set_device(args.gpu)

    model_file = args.model_file

    n_class = 16
    print(("warning: n_class is {}; if you trained with train.class16, "
           "make sure the class count here is also 16").format(n_class))

    image_size = [640, 320]
    print("image size: {}".format(image_size))
    dset = 'cityscapes'
    val_loader = torch.utils.data.DataLoader(torchfcn.datasets.CityScapes(
        dset,
        args.dataroot,
        class_num=n_class,
        split='val',
        transform=True,
        image_size=image_size),
                                             batch_size=1,
                                             shuffle=False)

    # Defining and loading model

    if args.method == 'sourceonly':
        model = torchfcn.models.FCN8s_sourceonly(n_class=n_class)
    elif args.method == 'LSD':
        model = torchfcn.models.FCN8s_LSD(n_class=n_class)
    else:
        raise ValueError(
            'Invalid argument for method specified - Should be LSD or sourceonly'
        )

    if torch.cuda.is_available():
        model = model.cuda()
    print('==> Loading %s model file: %s' %
          (model.__class__.__name__, model_file))

    model_data = torch.load(args.model_file)
    try:
        model.load_state_dict(model_data)
    except Exception:
        print("model load exception..")
        model.load_state_dict(model_data['model_state_dict'])
    model.eval()

    # Evaluation
    print("best_mean_iu: {}".format(model_data['best_mean_iu']))

    print('==> Evaluating with CityScapes validation')
    visualizations = []
    from tensorpack.utils.stats import MIoUStatistics
    stat = MIoUStatistics(n_class)

    label_trues, label_preds = [], []
    for batch_idx, (data, target) in tqdm.tqdm(
            enumerate(val_loader),
            total=len(val_loader),
    ):
        if torch.cuda.is_available():
            data, target = data.cuda(), target.cuda()
        data, target = Variable(data, volatile=True), Variable(target)
        target_np = target.data.cpu().numpy()
        if args.method == 'sourceonly':
            score = model(data)
        elif args.method == 'LSD':
            score, __, __, __ = model(data)
        lbl_pred = score.data.max(1)[1].cpu().numpy()[:, :, :].squeeze()

        if dset == 'cityscapes':
            lbl_pred_new = transform_label(lbl_pred, (2048, 1024))

        label_trues.append(target.data.cpu().numpy().squeeze())
        label_preds.append(lbl_pred_new.squeeze())
        stat.feed(label_preds[-1], label_trues[-1])

    print("tensorpack mIoU16: {}".format(np.sum(stat.IoU) / 16))
    print("tensorpack mean_accuracy: {}".format(stat.mean_accuracy))
    print("tensorpack accuracy: {}".format(stat.accuracy))

    # Computing mIoU
    json_path = osp.join(args.dataroot, 'cityscapes_info.json')
    compute_mIoU(label_preds, label_trues, json_path)
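transform_label, used in both main() functions to map the network prediction back onto the full 2048x1024 Cityscapes label grid before feeding the statistics, is also external to this listing. A plausible nearest-neighbour sketch (an assumption, not the project's actual implementation):

from PIL import Image
import numpy as np

def transform_label_sketch(label_map, size):
    # nearest-neighbour resize keeps integer class ids intact
    img = Image.fromarray(label_map.astype(np.uint8))
    return np.asarray(img.resize(size, Image.NEAREST))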
Example #10
if args.use_f2:
    F2.load_state_dict(checkpoint['f2_state_dict'])
print("=> loaded checkpoint '{}' (epoch {})".format(args.trained_checkpoint,
                                                    checkpoint['epoch']))

G.eval()
F1.eval()
F2.eval()

if torch.cuda.is_available():
    G.cuda()
    F1.cuda()
    F2.cuda()

from tensorpack.utils.stats import MIoUStatistics
stat = MIoUStatistics(nb_classes=16, ignore_label=255)
for index, (origin_imgs, labels, paths) in tqdm(enumerate(target_loader)):
    path = paths[0]
    #if index > 10: break

    imgs = Variable(origin_imgs)
    if torch.cuda.is_available():
        imgs = imgs.cuda()

    feature = G(imgs)
    outputs = F1(feature)

    if args.use_f2:
        outputs += F2(feature)

    if args.saves_prob: