Example no. 1
def test_get_color_bar():
    import boxx

    # get_colors / get_color_bar are assumed to come from the module under test
    colors = get_colors([1, 2, 3, 4, 5])
    size = (40, 200)
    is_vertical = False
    img = get_color_bar(colors, size, is_vertical)
    boxx.show(img)
Example no. 2
    def forward(self, images, features, targets=None):
        """
        Arguments:
            images (ImageList): images for which we want to compute the predictions
            features (list[Tensor]): features computed from the images that are
                used for computing the predictions. Each tensor in the list
                corresponds to a different feature level
            targets (list[BoxList]): ground-truth boxes present in the image (optional)

        Returns:
            boxes (list[BoxList]): the predicted boxes from the RPN, one BoxList per
                image.
            losses (dict[Tensor]): the losses for the model during training. During
                testing, it is an empty dict.
        """
        semantics, instances = self.head(features)

        # debug: visualize the first image of the batch with boxx
        from boxx import show
        show(images.tensors.permute(0, 2, 3,
                                    1)[0].int().cpu().numpy()[:, :, ::-1],
             figsize=(10, 10))

        if self.training:
            return self._forward_train(semantics, instances, targets)
        else:
            return semantics, instances
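The debug call above is a common boxx.show pattern: pull one image out of a BCHW batch for display. A standalone sketch of the same conversion, using a dummy tensor (channel order assumed BGR, as the reversal in the snippet suggests):

import torch

batch = torch.rand(2, 3, 480, 640) * 255      # dummy BCHW batch
img = batch.permute(0, 2, 3, 1)[0]            # BCHW -> BHWC, take the first image
img = img.int().cpu().numpy()[:, :, ::-1]     # to numpy, reverse BGR -> RGB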
Example no. 3
def test_overlay_bboxes():
    import boxx
    import numpy as np
    img = np.random.random((200, 200))
    bbox = [[10, 10, 40, 40], [20, 100, 50, 150]]
    labels = [1, 3]

    img = overlay_bboxes(img, bbox, labels=labels, colorbar=2)
    boxx.show(img)
Example no. 4
def _test_psf():
    import boxx
    import numpy as np
    s = 10
    size = (s, s, s)
    pts_list = [(i, i, i) for i in range(s)]
    for kernel in range(3):
        img = np.zeros(size)
        pts = psf(pts_list, kernel, size=size, as_tuple=True)
        img[pts] = 1
        boxx.show(img)
Example no. 5
def cropMinAreaRect(img, rect, borderValue=None):
    import cv2
    import numpy as np
    # rotate img
    angle = rect[2]
    rows, cols = img.shape[:2]
    M = cv2.getRotationMatrix2D((cols / 2, rows / 2), angle, 1)
    img_rot = cv2.warpAffine(img, M, (cols, rows), borderValue=borderValue)
    
    # rotate bounding box
#    rect0 = (rect[0], rect[1], 0.0)
    box = cv2.boxPoints(rect)
    pts = cv2.transform(np.array([box]), M)[0].astype(int)  # np.int0 was removed in NumPy 2.0
    pts[pts < 0] = 0

    # crop (relies on the corner ordering returned by cv2.boxPoints)
    img_crop = img_rot[pts[1][1]:pts[0][1],
                       pts[1][0]:pts[2][0]]
    return img_crop

    # unreachable demo code below: kept by the author behind a constant-false guard
    if 0 == 'test':
        # generate image
        img = np.zeros((1000, 1000), dtype=np.uint8)
        img = cv2.line(img, (400, 400), (511, 511), 1, 120)
        img = cv2.line(img, (300, 300), (700, 500), 1, 120)
        img = cv2.line(img, (0, 0), (1000, 100)[::-1], 2, 200)
        
        # find contours / rectangle
        # OpenCV 3 API; in OpenCV 4, findContours returns (contours, hierarchy)
        _, contours, _ = cv2.findContours(img, 1, 1)
        
        # rect = ((middle_point), (w, h), angle)
        rect = cv2.minAreaRect(contours[0])
    #    rect = ((100.0, 499.5), (1999.0, 200.0), -90.0)
        print(rect)
        from boxx import timeit, show
        # crop
        with timeit():
            img_cropped = cropMinAreaRect(img, rect)
        show(img)
        show(img_cropped)
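A note on the rect convention used above (my own minimal illustration, with a hypothetical rectangle): cv2.minAreaRect returns ((cx, cy), (w, h), angle), and cv2.boxPoints expands that tuple into the four corner points that the crop indexing relies on.

import cv2

# hypothetical rotated rect: center (100, 50), size 40x20, rotated 30 degrees
rect = ((100.0, 50.0), (40.0, 20.0), 30.0)
corners = cv2.boxPoints(rect)  # -> 4x2 float32 array of corner coordinates
print(corners)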
Example no. 6
        input = torch.cat((lidar, img), 1).to(device)
        output = model(input)
        depth_est, _, _, _ = output[0], output[1], output[2], output[3]

        depth_est = torch.clamp(depth_est, 0, args.max_depth)
        torch.cuda.synchronize()  # finish pending GPU work so the timing is accurate
        torch.cuda.empty_cache()
        time_use = time.perf_counter() - time_a

        valid_mask = get_valid_mask(depth_gt, args)
        metrics_tmp, min_is_best_mask, metrics_names = compute_errors_9(
            depth_gt, depth_est, valid_mask)
        # metrics_tmp, min_is_best_mask, metrics_names = compute_errors_3(depth_gt, depth_est, valid_mask)

        print(time_use)
        show(depth_est)

        depth_est = (depth_est * 100).detach().cpu().numpy().squeeze().astype(
            np.uint16)
        depth_gt = (depth_gt * 100).detach().cpu().numpy().squeeze().astype(
            np.uint16)
        lidar_in = (lidar_in * 100).detach().cpu().numpy().squeeze().astype(
            np.uint16)
        torchvision.utils.save_image(img_in,
                                     os.path.join(args.save_dir, 'rgb.jpg'),
                                     normalize=True,
                                     scale_each=True)
        cv2.imwrite(os.path.join(args.save_dir, 'depth-est.png'), depth_est)
        cv2.imwrite(os.path.join(args.save_dir, 'depth-gt.png'), depth_gt)
        cv2.imwrite(os.path.join(args.save_dir, 'lidar.png'), lidar_in)
    torch.cuda.empty_cache()
Example no. 7
data_dir = 'hymenoptera_data'
data_dir = '/home/yanglei/tutorial/pytorch/hymenoptera_data'

image_datasets = {
    x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x])
    for x in ['train', 'val']
}
dataloaders = {
    x: torch.utils.data.DataLoader(image_datasets[x],
                                   batch_size=2,
                                   shuffle=True,
                                   num_workers=0)
    for x in ['train', 'val']
}
dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}
class_names = image_datasets['train'].classes

use_gpu = torch.cuda.is_available()

dataset = image_datasets['train']
dataloader = dataloaders['train']

if __name__ == '__main__':
    from boxx.ylth import *
    from boxx import tree, show
    # boxx "magic" operator syntax: tree - x is shorthand for tree(x)
    tree - dataloaders
    show - dataloaders

    from boxx import torgb
    show(image_datasets, torgb)
Example no. 8
File: show.py Project: zbzstar/boxx
import numpy as np
from skimage.io import imread

img = imread('../../test/imgForTest/Lenna.jpg')

from boxx import show
show(img)
imgs = np.array([img, img, img])
show(imgs)
complex_struct = [dict(img=img, imgs=imgs)]
show(complex_struct)

from torch_data import dataloader
dataloader  # a bare expression: only meaningful in an interactive session
show(dataloader)

from boxx import loga, torgb
# torgb is a function that tries to
# convert a tensor to a normalized RGB image
batch = next(iter(dataloader))[0]
loga(batch)  # loga prints an array's shape, dtype, max, min and mean
loga - torgb(batch)

show(dataloader, torgb)
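For intuition, a rough sketch of what a torgb-style conversion does (my own illustration, not boxx's actual implementation): min-max normalize and move channels last.

import numpy as np

def torgb_sketch(tensor):
    # tensor: CHW float array -> HWC uint8 image
    arr = np.asarray(tensor, dtype=np.float64)
    arr = (arr - arr.min()) / (arr.max() - arr.min() + 1e-12)  # min-max normalize to [0, 1]
    return (arr.transpose(1, 2, 0) * 255).astype(np.uint8)     # CHW -> HWC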
Example no. 9
def main():
    parser = argparse.ArgumentParser(
        description="PyTorch Object Detection Webcam Demo")
    parser.add_argument(
        "--config-file",
        default="configs/osis/osis_R_50_FPN_1x.yaml",
        metavar="FILE",
        help="path to config file",
    )
    parser.add_argument(
        "--weights",
        default="models/distributed_test/model_final.pth",
        metavar="FILE",
        help="path to the trained model",
    )
    parser.add_argument(
        "--images-dir",
        default="demo/images",
        metavar="DIR",
        help="path to demo images directory",
    )
    parser.add_argument(
        "--min-image-size",
        type=int,
        default=800,
        help="Smallest size of the image to feed to the model. "
        "Model was trained with 800, which gives best results",
    )
    parser.add_argument(
        "opts",
        help="Modify model config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )

    args = parser.parse_args()

    # load config from file and command-line arguments
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.MODEL.WEIGHT = args.weights

    cfg.freeze()

    # The following per-class thresholds are computed by maximizing
    # per-class f-measure in their precision-recall curve.
    # Please see compute_thresholds_for_classes() in coco_eval.py for details.
    thresholds_for_classes = [
        0.49211737513542175, 0.49340692162513733, 0.510103702545166,
        0.4707475006580353, 0.5197340250015259, 0.5007652044296265,
        0.5611110329627991, 0.4639902412891388, 0.4778415560722351,
        0.43332818150520325, 0.6180170178413391, 0.5248752236366272,
        0.5437473654747009, 0.5153843760490417, 0.4194680452346802,
        0.5640717148780823, 0.5087228417396545, 0.5021755695343018,
        0.5307778716087341, 0.4920770823955536, 0.5202335119247437,
        0.5715234279632568, 0.5089765191078186, 0.5422378778457642,
        0.45138806104660034, 0.49631351232528687, 0.4388565421104431,
        0.47193753719329834, 0.47037890553474426, 0.4791252017021179,
        0.45699411630630493, 0.48658522963523865, 0.4580649137496948,
        0.4603237509727478, 0.5243804454803467, 0.5235602855682373,
        0.48501554131507874, 0.5173789858818054, 0.4978085160255432,
        0.4626562297344208, 0.48144686222076416, 0.4889853894710541,
        0.4749937951564789, 0.42273756861686707, 0.47836390137672424,
        0.48752328753471375, 0.44069987535476685, 0.4241463541984558,
        0.5228247046470642, 0.4834112524986267, 0.4538525640964508,
        0.4730372428894043, 0.471712201833725, 0.5180512070655823,
        0.4671719968318939, 0.46602892875671387, 0.47536996006965637,
        0.487352192401886, 0.4771934747695923, 0.45533207058906555,
        0.43941256403923035, 0.5910647511482239, 0.554875910282135,
        0.49752360582351685, 0.6263655424118042, 0.4964958727359772,
        0.5542593002319336, 0.5049241185188293, 0.5306999087333679,
        0.5279538035392761, 0.5708096623420715, 0.524990975856781,
        0.5187852382659912, 0.41242220997810364, 0.5409807562828064,
        0.48504579067230225, 0.47305455803871155, 0.4814004898071289,
        0.42680642008781433, 0.4143834114074707
    ]

    demo_im_names = os.listdir(args.images_dir)

    # prepare the object that runs inference and overlays predictions on the image
    coco_demo = COCODemo(
        cfg,
        confidence_thresholds_for_classes=thresholds_for_classes,
        min_image_size=args.min_image_size)

    for im_name in demo_im_names[:1]:
        img = cv2.imread(os.path.join(args.images_dir, im_name))
        if img is None:
            continue
        start_time = time.time()

        semantics, instances = coco_demo.my_run(img)
        show(img[:, :, ::-1], figsize=(10, 10))

    print("Press any keys to exit ...")

    g()  # boxx.g: dump the local variables into the interactive namespace for debugging
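The thresholds above are described as maximizing per-class F-measure on a precision-recall curve. A minimal sketch of that idea (using scikit-learn here; the project's compute_thresholds_for_classes in coco_eval.py is the authoritative version):

import numpy as np
from sklearn.metrics import precision_recall_curve

def f1_maximizing_threshold(y_true, scores):
    # choose the score threshold with the highest F1 = 2PR / (P + R)
    precision, recall, thresholds = precision_recall_curve(y_true, scores)
    f1 = 2 * precision * recall / (precision + recall + 1e-12)
    return thresholds[np.argmax(f1[:-1])]  # the last P/R point has no threshold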
Example no. 10
    '''Convert a multispectral RGN image into multiple vegetation indices'''
    img = np.array(img)
    vis = []
    for vi_type in vi_types:
        vi = rgn2vi(img, vi_type)
        vis.append(vi)
    vis = np.array(vis)
    vis = vis.transpose((1, 2, 0))
    return vis


if __name__ == '__main__':
    from PIL import Image
    from boxx import show

    #    path = r'E:\pic\jiansanjiang\contrast\RGB\data\test\img\0a54a1b8-b743-4824-8b5e-8e64893b7d64.jpg'
    #    path = r'E:\pic\jiansanjiang\contrast\RGB\img\rgb.jpg'
    path = r'D:\pic\jiansanjiang\contrast\RGN\img\rgn.jpg'
    img = Image.open(path)

    #    vi_types = ['G-R','ExG','ExG2','MExG','ExR','ExR2','VDVI','NGBDI','NGRDI',
    #                'RGRI','GRRI','GBRI','BRRI','RGBVI','ExGR','ExGR2',
    #                'CIVE','CIVE2','VEG','COM','COM2']
    #    vi_types = ['ExG','ExR','VDVI','NGRDI','RGRI','ExGR']
    #    vis = rgb2vis(img, vi_types)
    #    show(vis)

    vi_types = ['NDVI', 'RVI', 'NDWI', 'DVI', 'PVI', 'SAVI']
    vis2 = rgn2vis(img, vi_types)
    show(vis2.transpose((2, 0, 1)))
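For reference, one of the indices listed above, NDVI, computed directly from an RGN image (a minimal sketch assuming channel order R, G, NIR; the actual rgn2vi may differ):

import numpy as np

def ndvi_from_rgn(rgn):
    # rgn: HWC array with channels (R, G, NIR)
    rgn = np.asarray(rgn, dtype=np.float64)
    r, nir = rgn[..., 0], rgn[..., 2]
    return (nir - r) / (nir + r + 1e-12)  # NDVI = (NIR - R) / (NIR + R)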
# depth_proj = depth


crop_h = 352
crop_w = 1216

seq1 = iaa.Sequential([
    iaa.CenterPadToFixedSize(height=crop_h, width=crop_w),  # ensure the image is large enough to crop
    iaa.CenterCropToFixedSize(height=crop_h, width=crop_w),
], random_order=True)

seq2 = iaa.Sequential([
    # iaa.CoarseDropout(0.19, size_px=200),
    iaa.Dropout(1 - 0.05),  # drop 95% of pixels, e.g. to mimic a sparse lidar map
], random_order=True)
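A quick self-contained check of the two pipelines above, with dummy arrays standing in for the real img and depth_proj:

import numpy as np

_img = np.zeros((300, 1000, 3), dtype=np.uint8)   # dummy RGB image
_depth = np.zeros((300, 1000), dtype=np.uint16)   # dummy dense depth map
assert seq1(image=_img).shape[:2] == (crop_h, crop_w)
assert seq1(image=seq2(image=_depth)).shape == (crop_h, crop_w)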

img_aug = seq1(image=img)
# depth_aug = seq1(image=depth_proj)
depth_aug = seq1(image=seq2(image=depth_proj))
show(img_aug)
show(depth)
show(depth_aug)

name = '\\nyu-1-kitti.png'
cv2.imwrite(r'D:\pic\KITTI\persudo\data_depth\test_depth_completion_anonymous\image'+name, cv2.cvtColor(img_aug, cv2.COLOR_RGB2BGR))
cv2.imwrite(r'D:\pic\KITTI\persudo\data_depth\test_depth_completion_anonymous\velodyne_raw'+name, depth_aug)
cv2.imwrite(r'D:\pic\KITTI\persudo\data_depth\test_depth_completion_anonymous\groundtruth_depth'+name, depth)

# alternative save; note that d_a is not defined in this snippet
cv2.imwrite(r'D:\pic\KITTI\persudo\data_depth\test_depth_completion_anonymous\image'+name, cv2.cvtColor(img_aug, cv2.COLOR_RGB2BGR))
cv2.imwrite(r'D:\pic\KITTI\persudo\data_depth\test_depth_completion_anonymous\velodyne_raw'+name, d_a)
cv2.imwrite(r'D:\pic\KITTI\persudo\data_depth\test_depth_completion_anonymous\groundtruth_depth'+name, depth_aug)