def proceed_validation(args, is_save=True, is_densecrf=False):
    """Evaluate the loaded model on the PSSD validation split.

    Runs multi-scale sliding-window inference over every validation image,
    accumulates mIoU / accuracy statistics, and optionally saves a
    side-by-side visualization (input | ground truth | prediction) per sample.

    Args:
        args: namespace providing base_dir, meta_dir and load (checkpoint).
        is_save: write visualization PNGs to result_dir when True.
        is_densecrf: forwarded to predict_scaler (CRF post-processing).
    """
    import cv2
    #name = "ningbo_val"
    name = "val"
    val_stream = BatchData(dataset.PSSD(args.base_dir, args.meta_dir, name), 1)

    predictor = OfflinePredictor(
        PredictConfig(model=Model(),
                      session_init=get_model_loader(args.load),
                      input_names=['image'],
                      output_names=['prob']))

    from tensorpack.utils.fs import mkdir_p
    result_dir = "result/pssd_apr26"
    #result_dir = "ningbo_validation"
    mkdir_p(result_dir)

    stat = MIoUStatistics(CLASS_NUM)
    logger.info("start validation....")
    sample_idx = 1
    for image, label in tqdm(val_stream.get_data()):
        label = np.squeeze(label)
        image = np.squeeze(image)

        def run_net(batched_img):
            # input: 1*H*W*3 batch; output: H*W*C probability map
            return predictor(batched_img)[0][0]

        prediction = predict_scaler(image,
                                    run_net,
                                    scales=[0.5, 0.75, 1, 1.25, 1.5],
                                    classes=CLASS_NUM,
                                    tile_size=CROP_SIZE,
                                    is_densecrf=is_densecrf)
        prediction = np.argmax(prediction, axis=2)
        stat.feed(prediction, label)

        if is_save:
            cv2.imwrite(
                os.path.join(result_dir, "{}.png".format(sample_idx)),
                np.concatenate((image, visualize_label(label),
                                visualize_label(prediction)),
                               axis=1))
        sample_idx += 1

    logger.info("mIoU: {}".format(stat.mIoU))
    logger.info("mean_accuracy: {}".format(stat.mean_accuracy))
    logger.info("accuracy: {}".format(stat.accuracy))
# --- Example #2 (scraped example-site marker; vote count 0) ---
def generate_trimap_pascal(rador=1, debug=False):
    """Generate trimap ground truth for the PASCAL VOC 2012 train split.

    For every training label image, pixels within `rador` of a class edge
    (as decided by the module-level `is_edge`) are relabelled 255 (the
    ignore band). Each widened label is written to
    <main_img_dir>/trimap_gt<rador>/ and an index file
    <meta_txt>/train_tripmap<rador>.txt mapping image -> trimap is produced.

    Args:
        rador: half-width in pixels of the ignore band around edges.
        debug: when True, interactively display each original/new label pair
            (blocks on a key press per image). Default False so the function
            runs unattended.
    """
    #main_img_dir = "/data_a/dataset/cityscapes"
    #meta_txt = "cityscapes"

    main_img_dir = "/data_a/dataset/pascalvoc2012/VOC2012trainval/VOCdevkit/VOC2012"
    meta_txt = "pascalvoc12"

    from tensorpack.utils.fs import mkdir_p
    trimap_dir = os.path.join(main_img_dir, "trimap_gt{}".format(rador))
    mkdir_p(trimap_dir)
    print(trimap_dir)
    f = open(os.path.join(meta_txt, "train.txt"))
    result_f = open(
        os.path.join(meta_txt, "train_tripmap{}.txt".format(rador)), "w")
    lines = f.readlines()
    from tqdm import tqdm
    for l in tqdm(lines):
        l = l.strip("\n")
        img_dir, label_dir = l.split(" ")
        img = cv2.imread(os.path.join(main_img_dir, img_dir))
        label = cv2.imread(os.path.join(main_img_dir, label_dir), 0)
        new_label = label.copy()
        basename = os.path.basename(label_dir)
        rows, cols = label.shape
        for x in range(rows):
            for y in range(cols):
                if is_edge(x, y, label):
                    # BUGFIX: clamp slice starts at 0 -- a negative start
                    # (x < rador or y < rador) would wrap around and paint
                    # the ignore band on the opposite border of the image.
                    new_label[max(0, x - rador):x + rador,
                              max(0, y - rador):y + rador] = 255

        tripmap_name = os.path.join(trimap_dir, basename)

        if debug:
            # interactive inspection of the widened labels
            cv2.imshow("im", img / 255.0)
            cv2.imshow("raw-originlabel", label)
            cv2.imshow("color-originlabel", visualize_label(label))
            cv2.imshow("raw-newlabel", new_label)
            cv2.imshow("color-newlabel", visualize_label(new_label))
            cv2.waitKey(0)

        # BUGFIX: actually write the trimap; previously the imwrite was
        # commented out while the index file still referenced the
        # (never-created) trimap paths.
        cv2.imwrite(tripmap_name, new_label)
        result_f.write("{} {}\n".format(img_dir, tripmap_name))
    f.close()
    result_f.close()
def view_data(base_dir, meta_dir, batch_size):
    """Interactively preview training samples.

    Iterates the (infinitely repeated) training stream and shows each image
    together with its raw and color-coded label; blocks on a key press per
    sample and never returns on its own.
    """
    stream = RepeatedData(get_data('train', base_dir, meta_dir, batch_size), -1)
    stream.reset_state()
    for batch_imgs, batch_labels in stream.get_data():
        for img, lbl in zip(batch_imgs, batch_labels):
            cv2.imshow("im", img / 255.0)
            cv2.imshow("raw-label", lbl)
            cv2.imshow("color-label", visualize_label(lbl))
            cv2.waitKey(0)
# --- Example #4 (scraped example-site marker; vote count 0) ---
def proceed(detection_json):
    """Convert COCO instance annotations into VOC-style segmentation masks.

    For every COCO image containing at least one VOC category, decode all of
    its instance masks (polygon or RLE form), paint them into a
    single-channel label image with the mapped VOC class id, write the image
    and mask to disk, and append an "image mask" line to the module-level
    index file `f`.

    Args:
        detection_json: path to a COCO-format annotation JSON file.

    NOTE(review): relies on module-level names voc_ids_list, voc_ids_set,
    coco_to_voc_dict, val_dir, pasalvoc_root, train_prefix_dir,
    val_prefix_dir and an already-open file handle `f` -- confirm against
    the surrounding script.
    """
    _coco = COCO(detection_json)
    catToImgs = _coco.catToImgs
    img_ids_set = set()
    for coco_id in voc_ids_list:
        img_ids_set |= set(catToImgs[coco_id])

    # BUGFIX: iterate every image; the old code sliced [1:] and silently
    # dropped one arbitrary image (set iteration order is unspecified).
    for img_id in tqdm(list(img_ids_set)):
        img = _coco.loadImgs(img_id)[0]
        origin_img = cv2.imread(os.path.join(val_dir, img['file_name']))
        annIds = _coco.getAnnIds(imgIds=img_id)
        img_mask = np.zeros((img['height'], img['width'], 1), dtype=np.uint8)

        for annId in annIds:
            ann = _coco.loadAnns(annId)[0]
            if ann['category_id'] not in voc_ids_set:
                continue
            if type(ann['segmentation']) == list:
                # polygon annotation. BUGFIX: convert ALL polygon parts in
                # one call -- the old per-part loop overwrote `rle` and only
                # the last part of a multi-polygon instance was decoded.
                rle = mask.frPyObjects(ann['segmentation'], img['height'],
                                       img['width'])
            elif type(ann['segmentation']['counts']) == list:
                # uncompressed RLE (mostly aeroplane)
                rle = mask.frPyObjects([ann['segmentation']], img['height'],
                                       img['width'])
            else:
                # already-compressed RLE
                rle = [ann['segmentation']]
            m = mask.decode(rle)  # H x W x n binary masks
            if m.ndim == 3:
                # collapse per-part channels into a single H x W x 1 mask
                m = m.max(axis=2, keepdims=True)
            img_mask[np.where(
                m == 1)] = coco_to_voc_dict[ann['category_id']]

        f.write("{} {}\n".format(
            os.path.join(train_prefix_dir, img['file_name']),
            os.path.join(val_prefix_dir,
                         img['file_name'].replace("jpg", "png"))))

        cv2.imwrite(
            os.path.join(pasalvoc_root, train_prefix_dir, img['file_name']),
            origin_img)
        cv2.imwrite(
            os.path.join(pasalvoc_root, val_prefix_dir,
                         img['file_name'].replace("jpg", "png")),
            img_mask[:, :, 0])  #single channel
# --- Example #5 (scraped example-site marker; vote count 0) ---
def vis_seg(imgs, labels, waitkey=10000):
    """Show a batch of images and their color-coded labels in OpenCV windows.

    Args:
        imgs: batch of CHW image tensors (torch DataLoader output).
        labels: batch of HW label tensors.
        waitkey: milliseconds to wait for a key press (default 10 s).
    """
    import cv2
    from tensorpack.utils.segmentation.segmentation import predict_slider, visualize_label, predict_scaler

    img_grid = np.transpose(torchvision.utils.make_grid(imgs).numpy(),
                            (1, 2, 0))
    # RGB->BGR for OpenCV; +128 presumably undoes mean subtraction -- confirm
    img_grid = img_grid[:, :, ::-1] + 128

    label_grid = np.transpose(
        torchvision.utils.make_grid(labels.unsqueeze(1)).numpy(), (1, 2, 0))

    cv2.imshow("source image", img_grid.astype(np.uint8))
    cv2.imshow("source label", visualize_label(label_grid[:, :, 0]))
    cv2.waitKey(waitkey)
def proceed_test_dir(args):
    """Run multi-scale inference on every file in args.test_dir.

    Writes raw class-index predictions to test-from-dir/final and
    image|colored-prediction composites to test-from-dir/visualization,
    wiping any previous results first.
    """
    import cv2
    filenames = os.listdir(args.test_dir)

    predictor = OfflinePredictor(
        PredictConfig(model=Model(),
                      session_init=get_model_loader(args.load),
                      input_names=['image'],
                      output_names=['prob']))

    from tensorpack.utils.fs import mkdir_p
    result_dir = "test-from-dir"
    visual_dir = os.path.join(result_dir, "visualization")
    final_dir = os.path.join(result_dir, "final")
    import shutil
    shutil.rmtree(result_dir, ignore_errors=True)
    for d in (result_dir, visual_dir, final_dir):
        mkdir_p(d)

    logger.info("start validation....")

    def run_net(single_img):
        # expand H*W*3 to a 1*H*W*3 batch; return the H*W*C probability map
        return predictor(single_img[np.newaxis, :, :, :])[0][0]

    for filename in tqdm(filenames):
        image = cv2.imread(os.path.join(args.test_dir, filename))
        prediction = predict_scaler(image,
                                    run_net,
                                    scales=[0.5, 0.75, 1, 1.25, 1.5],
                                    classes=CLASS_NUM,
                                    tile_size=CROP_SIZE,
                                    is_densecrf=False)
        prediction = np.argmax(prediction, axis=2)
        cv2.imwrite(os.path.join(final_dir, "{}".format(filename)), prediction)
        cv2.imwrite(
            os.path.join(visual_dir, "{}".format(filename)),
            np.concatenate((image, visualize_label(prediction)), axis=1))
def view_data(base_dir, meta_dir, batch_size):
    """Dump training samples as image|colored-label composites to result/view.

    Iterates the (infinitely repeated) training stream, writing one PNG per
    sample; intended for manual data inspection, so it never returns on its
    own.

    NOTE(review): this name collides with another view_data defined earlier
    in the file; whichever is defined last wins -- confirm which is intended.
    """
    ds = RepeatedData(get_data('train', base_dir, meta_dir, batch_size), -1)
    ds.reset_state()
    from tensorpack.utils.fs import mkdir_p
    result_dir = "result/view"
    #result_dir = "ningbo_validation"
    mkdir_p(result_dir)
    i = 0
    for ims, labels in ds.get_data():
        for im, label in zip(ims, labels):
            cv2.imwrite(os.path.join(result_dir, "{}.png".format(i)),
                        np.concatenate((im, visualize_label(label)), axis=1))
            i += 1
            # BUGFIX: `print i` is Python 2 statement syntax and a
            # SyntaxError under Python 3; use the function form like the
            # rest of the file.
            print(i)
        image -= self.mean
        image = image.transpose((2, 0, 1))

        return image.copy(), label.copy(), np.array(size), name


if __name__ == '__main__':
    # Visual sanity check: load SYNTHIA batches and display image/label grids.
    import matplotlib.pyplot as plt
    dst = SynthiaDataSet(
        root="../data/SYNTHIA",
        list_path="synthia_list/SYNTHIA_imagelist_train.txt",
        label_list_path="synthia_list/SYNTHIA_labellist_train.txt")
    trainloader = data.DataLoader(dst, batch_size=4)

    # BUGFIX: the loop variable was named `data`, shadowing the `data`
    # module used for data.DataLoader above; renamed to `batch`
    # (the enumerate index was unused and has been dropped).
    for batch in trainloader:
        imgs, labels, size, name = batch

        import cv2
        from tensorpack.utils.segmentation.segmentation import predict_slider, visualize_label, predict_scaler
        img = torchvision.utils.make_grid(imgs).numpy()
        img = np.transpose(img, (1, 2, 0))
        # RGB->BGR for OpenCV; +128 presumably undoes mean subtraction -- confirm
        img = img[:, :, ::-1] + 128

        label = torchvision.utils.make_grid(labels.unsqueeze(1)).numpy()
        label = np.transpose(label, (1, 2, 0))
        cv2.imshow("source image", img.astype(np.uint8))
        cv2.imshow("source label", visualize_label(label[:, :, 0]))
        cv2.waitKey(10000)
# --- Example #9 (scraped example-site marker; vote count 0) ---
        else:
            im = im.resize((data_size[0], data_size[1]), Image.LANCZOS)
            im_ = np.array(im, dtype=np.float64)
            label_= np.array(label, dtype=np.int32)

        return im_, label_



if __name__ == '__main__':
    # Visual sanity check: side-by-side preview of source (SYNTHIA) and
    # target (CityScapes) samples.
    from tensorpack.utils.segmentation.segmentation import predict_slider, visualize_label, predict_scaler

    def get_data(ds, idx):
        # Load one (image, label) pair from dataset `ds` at index `idx`.
        data_file = ds.files[ds.split][idx]
        img, label = ds.image_label_loader(data_file['img'], data_file['lbl'], ds.image_size, random_crop=True)
        return img, label

    dataset = SYNTHIA('SYNTHIA', '/home/hutao/lab/pytorchgo/example/LSD-seg/data', split='train', transform=True, image_size=[640, 320])
    cs = CityScapes('cityscapes', '/home/hutao/lab/pytorchgo/example/LSD-seg/data', split='train', transform=True,
                      image_size=[640, 320])

    for i in range(len(dataset)):
        img, label = get_data(dataset, i)
        cs_img, cs_label = get_data(cs, i)
        # BUGFIX: `print np.unique(label)` is Python 2 statement syntax and
        # a SyntaxError under Python 3; use the function form.
        print(np.unique(label))
        cv2.imshow("source image", img.astype(np.uint8))
        cv2.imshow("source label", visualize_label(label))
        cv2.imshow("target image", cs_img.astype(np.uint8))
        cv2.imshow("target label", visualize_label(cs_label))
        cv2.waitKey(10000)