import os

import cv2
import kornia as K


def load_timg(file_name):
    """Loads an image with OpenCV and converts it to a torch.Tensor."""
    assert os.path.isfile(file_name), f"Invalid file {file_name}"  # nosec
    # load the image with OpenCV (BGR, HxWxC, uint8)
    img = cv2.imread(file_name, cv2.IMREAD_COLOR)
    # convert to a 1xCxHxW float tensor in [0, 1]
    tensor = K.image_to_tensor(img, keepdim=False).float() / 255.
    return K.color.bgr_to_rgb(tensor)
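A minimal usage sketch (the file path below is a placeholder, not part of the original snippet):

timg = load_timg('path/to/image.jpg')
print(timg.shape)  # torch.Size([1, 3, H, W]), RGB order, values in [0, 1]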
Example #2
def my_app(args):
    # select the device
    device = torch.device('cpu')
    if args.cuda and torch.cuda.is_available():
        device = torch.device('cuda:0')

    # load the image and scale
    img_raw = cv2.imread(args.image_file, cv2.IMREAD_COLOR)
    img_raw = scale_image(img_raw, args.image_size)

    # preprocess
    img = K.image_to_tensor(img_raw, keepdim=False).to(device)
    img = K.color.bgr_to_rgb(img.float())

    # create the detector and find the faces!
    face_detection = FaceDetector().to(device)

    with torch.no_grad():
        dets = face_detection(img)
    dets = [FaceDetectorResult(o) for o in dets]

    # draw the detections over a copy of the image
    img_vis = img_raw.copy()

    for b in dets:
        if b.score < args.vis_threshold:
            continue

        # draw face bounding box
        img_vis = cv2.rectangle(img_vis,
                                b.top_left.int().tolist(),
                                b.bottom_right.int().tolist(), (0, 255, 0), 4)

        if args.blur_faces:
            apply_blur_face(img, img_vis, b)

        if args.vis_keypoints:
            # draw facial keypoints
            img_vis = draw_keypoint(img_vis, b, FaceKeypoint.EYE_LEFT)
            img_vis = draw_keypoint(img_vis, b, FaceKeypoint.EYE_RIGHT)
            img_vis = draw_keypoint(img_vis, b, FaceKeypoint.NOSE)
            img_vis = draw_keypoint(img_vis, b, FaceKeypoint.MOUTH_LEFT)
            img_vis = draw_keypoint(img_vis, b, FaceKeypoint.MOUTH_RIGHT)

            # draw the text score
            cx = int(b.xmin)
            cy = int(b.ymin + 12)
            img_vis = cv2.putText(img_vis, f"{b.score:.2f}", (cx, cy),
                                  cv2.FONT_HERSHEY_DUPLEX, 0.5,
                                  (255, 255, 255))

    # save and show image
    cv2.imwrite(args.image_out, img_vis)

    cv2.namedWindow('face_detection', cv2.WINDOW_NORMAL)
    cv2.imshow('face_detection', img_vis)
    cv2.waitKey(0)
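The function assumes a parsed `args` namespace; a plausible argparse wiring (only the attribute names come from the code above, the defaults are guesses):

if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(description='face detection demo')
    parser.add_argument('--image_file', required=True, help='input image path')
    parser.add_argument('--image_out', required=True, help='where to save the result')
    parser.add_argument('--image_size', type=int, default=320, help='rescale size')
    parser.add_argument('--vis_threshold', type=float, default=0.8)
    parser.add_argument('--vis_keypoints', action='store_true')
    parser.add_argument('--blur_faces', action='store_true')
    parser.add_argument('--cuda', action='store_true')
    my_app(parser.parse_args())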
Example #3
    def generate_patch(self, size=(300, 400)):
        """Generate a starting patch for optimization (loaded from an image;
        a random initialization is kept commented out)."""
        # adv_patch_cpu = (mu + sigma * torch.randn(3, size[0], size[1])).clamp(0, 255)
        adv_patch_cpu = cv2.resize(cv2.imread('data/patchnew0.jpg'),
                                   (size[1], size[0]))  # cv2.resize takes (W, H)
        adv_patch_cpu = kornia.image_to_tensor(adv_patch_cpu).to(torch.float)

        return adv_patch_cpu
    def _process_img(self, image):
        # resized_image = cv2.resize(image, (self.m.width, self.m.height))
        img = kornia.image_to_tensor(image).to(self.device).type(self.dtype)
        img = kornia.bgr_to_rgb(img)
        img = img.div(255.0).unsqueeze(0)

        resized_image = F.interpolate(img,
                                      size=(self.m.width, self.m.height),
                                      mode='nearest')
        return resized_image
Example #5
    def __getitem__(self, idx):

        # Horizontal flip (until Kornia supports video tensors)
        hflip = random.random() < 0.5 if self.opt.hflip else False

        images = self.generate_image(self.opt.scale_idx)
        images = K.image_to_tensor(images).float()
        images = images / 255  # Set range [0, 1]
        images_transformed = self._get_transformed_images(images, hflip)

        # Also generate the zero-scale (level-0) images
        if self.opt.scale_idx > 0:
            images_zero_scale = self.generate_image(0)
            images_zero_scale = K.image_to_tensor(images_zero_scale).float()
            images_zero_scale = images_zero_scale / 255
            images_zero_scale_transformed = self._get_transformed_images(images_zero_scale, hflip)

            return [images_transformed, images_zero_scale_transformed]

        return images_transformed
Example #6
def load_data_to_gpu(batch_dict):
    for key, val in batch_dict.items():
        if not isinstance(val, np.ndarray):
            continue
        if key in ['frame_id', 'metadata', 'calib']:
            continue
        if key in ['images']:
            batch_dict[key] = kornia.image_to_tensor(val).float().cuda()
        elif key in ['image_shape']:
            batch_dict[key] = torch.from_numpy(val).int().cuda()
        else:
            batch_dict[key] = torch.from_numpy(val).float().cuda()
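A toy invocation (assumes a CUDA device is available; the keys mirror the branches above, and 'voxels' is just an illustrative extra key):

batch_dict = {
    'frame_id': np.array(['000001']),                    # skipped by the key filter
    'images': np.zeros((375, 1242, 3), dtype=np.uint8),  # -> kornia image tensor
    'image_shape': np.array([[375, 1242]]),              # -> int tensor
    'voxels': np.zeros((100, 5, 4), dtype=np.float32),   # -> float tensor
}
load_data_to_gpu(batch_dict)
print(batch_dict['images'].shape, batch_dict['images'].device)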
Example #7
    def test_rgb_to_hls(self, device):

        data = torch.rand(3, 5, 5).to(device)

        # OpenCV
        data_cv = kornia.tensor_to_image(data.clone())

        expected = cv2.cvtColor(data_cv, cv2.COLOR_RGB2HLS)
        expected = kornia.image_to_tensor(expected, True).to(device)
        expected[0] = 2 * math.pi * expected[0] / 360.

        f = kornia.color.RgbToHls()
        assert_allclose(f(data), expected)
Example #8
def tracker_init(im, target_pos, target_sz, model, device='cpu'):
    state = dict()
    state['im_h'] = im.shape[0]
    state['im_w'] = im.shape[1]
 
    # initialize the exemplar
    model.template(kornia.image_to_tensor(im).to(device).float(), 
                   torch.from_numpy(target_pos).to(device),
                   torch.from_numpy(target_sz).to(device) )
    
    state['target_pos'] = target_pos
    state['target_sz'] = target_sz

    return state
Example #9
    def gen_xcrop(self):
        state = self.state
        pert = state['pert']
        pert_sz_ratio = state['pert_sz_ratio']
        p = state['p']
        device = state['device']

        im_shape = state['im'].shape[0:2]
        bbox_pert_xcrop = scale_bbox(state['gts'][state['n_frame']],
                                     pert_sz_ratio)
        mask_xcrop = get_bbox_mask(shape=im_shape,
                                   bbox=bbox_pert_xcrop,
                                   mode='tensor').to(device)

        bbox_pert_xcrop = torch.tensor(bbox_pert_xcrop).unsqueeze(
            dim=0).to(device)
        im_xcrop = kornia.image_to_tensor(state['im']).to(
            torch.float).unsqueeze(dim=0).to(device)
        patch_warped_search = warp_patch(pert, im_xcrop, bbox_pert_xcrop)
        patch_search = torch.where(mask_xcrop == 1, patch_warped_search,
                                   im_xcrop)
        x_crop = test.get_subwindow_tracking_(patch_search,
                                              state['target_pos'],
                                              p.instance_size, round(self.s_x),
                                              0)

        cv2.imshow('template',
                   kornia.tensor_to_image(state['pert_template'].byte()))
        cv2.imshow('x_crop', kornia.tensor_to_image(x_crop.byte()))
        cv2.waitKey(1)

        return state['pert_template'], x_crop
Example #10
def get_local_descriptors(img, cv2_sift_kpts, kornia_descriptor, aff):
    # We will not train anything, so let's save time and memory with no_grad()
    with torch.no_grad():
        timg = K.color.rgb_to_grayscale(K.image_to_tensor(img, False)) / 255.
        timg = timg.cuda()
        lafs = laf_from_opencv_SIFT_kpts(cv2_sift_kpts).cuda()
        angles = KF.laf.get_laf_orientation(lafs)
        # We estimate the affine shape of each feature and re-orient the keypoints with OriNet
        lafs_new = aff(lafs, timg)
        patches = KF.extract_patches_from_pyramid(timg, lafs_new, 32)
        B, N, CH, H, W = patches.size()
        # Descriptor accepts standard tensor [B, CH, H, W], while patches are [B, N, CH, H, W] shape
        # So we need to reshape a bit :)
        descs = kornia_descriptor(patches.view(B * N, CH, H,
                                               W)).view(B * N, -1)
    return descs.detach().cpu().numpy()
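A hedged end-to-end call for the helper above (the detector and descriptor choices are assumptions; `laf_from_opencv_SIFT_kpts` comes from kornia_moons, and a CUDA device is required since the helper calls .cuda()):

import cv2
import kornia.feature as KF

sift = cv2.SIFT_create()
img = cv2.cvtColor(cv2.imread('image.png'), cv2.COLOR_BGR2RGB)
kpts = sift.detect(img, None)
aff = KF.LAFAffNetShapeEstimator(True).eval().cuda()
descriptor = KF.HardNet(True).eval().cuda()
descs = get_local_descriptors(img, kpts, descriptor, aff)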
Example #11
    def test_batch_rgb_to_hls(self, device):

        data = torch.rand(3, 5, 5).to(device)

        # OpenCV
        data_cv = kornia.tensor_to_image(data.clone())

        expected = cv2.cvtColor(data_cv, cv2.COLOR_RGB2HLS)
        expected = kornia.image_to_tensor(expected, False).to(device)
        expected[:, 0] = 2 * math.pi * expected[:, 0] / 360.

        # Kornia
        f = kornia.color.RgbToHls()

        data = data.repeat(2, 1, 1, 1)  # 2x3x5x5
        expected = expected.repeat(2, 1, 1, 1)  # 2x3x5x5
        assert_allclose(f(data), expected)
Example #12
def main():
    try:
        img_bgr: np.ndarray = cv2.imread('model8.png', cv2.IMREAD_COLOR)
        x_bgr: torch.Tensor = kornia.image_to_tensor(img_bgr)
        x_rgb: torch.Tensor = kornia.bgr_to_rgb(x_bgr)
        x_rgb = x_rgb.expand(2, -1, -1, -1)
        x_rgb = x_rgb.float() / 255.

        imshow(x_rgb)
        # Box Blur
        x_blur: torch.Tensor = kornia.box_blur(x_rgb, (9, 9))
        imshow(x_blur)
        # Median Blur
        x_blur: torch.Tensor = kornia.median_blur(x_rgb, (5, 5))
        imshow(x_blur)
        # Gaussian Blur
        x_blur: torch.Tensor = kornia.gaussian_blur2d(x_rgb, (11, 11), (11., 11.))
        imshow(x_blur)

    except Exception as e:
        print(f"Error found: {e}")
Example #13

    def extract_features(self, im):
        kpts = self.det.detect(im, None)
        # We will not train anything, so let's save time and memory with no_grad()
        with torch.no_grad():
            timg = K.image_to_tensor(im, False).float() / 255.
            timg = timg.to(self.device)
            if timg.shape[1] == 3:
                timg_gray = K.rgb_to_grayscale(timg)
            else:
                timg_gray = timg
            # kornia expects keypoints in the local affine frame (LAF) format;
            # kornia_moons provides a conversion function
            lafs = laf_from_opencv_SIFT_kpts(kpts, device=self.device)
            lafs_new = self.aff(lafs, timg_gray)
            patches = KF.extract_patches_from_pyramid(timg_gray, lafs_new, 32)
            B, N, CH, H, W = patches.size()
            # the descriptor accepts a standard [B, CH, H, W] tensor, while the
            # patches are [B, N, CH, H, W], so reshape before and after
            descs = self.desc(patches.view(B * N, CH, H, W))
            descs = descs.view(B * N, -1).detach().cpu().numpy()
        kpts = np.array([[kp.pt[0], kp.pt[1]] for kp in kpts])
        return kpts, descs
Example #14
    def test_rgb_to_hsv(self, device):

        data = torch.rand(3, 5, 5).to(device)

        # OpenCV
        data_cv = kornia.tensor_to_image(data.clone())
        expected = cv2.cvtColor(data_cv, cv2.COLOR_RGB2HSV)
        expected = kornia.image_to_tensor(expected, True).to(device)

        h_expected = 2 * math.pi * expected[0] / 360.
        s_expected = expected[1]
        v_expected = expected[2]

        f = kornia.color.RgbToHsv()
        result = f(data)

        h = result[0, :, :]
        s = result[1, :, :]
        v = result[2, :, :]

        assert_allclose(h, h_expected)
        assert_allclose(s, s_expected)
        assert_allclose(v, v_expected)
Example #15
def tracker_track(state, im, model, device='cpu', debug=False):
    target_pos = state['target_pos']
    target_sz = state['target_sz']

    p = model.p
    wc_x = target_sz[1] + p.context_amount * sum(target_sz)
    hc_x = target_sz[0] + p.context_amount * sum(target_sz)
    s_x = np.sqrt(wc_x * hc_x)
    scale_x = p.exemplar_size / s_x

    pscore, delta, pscore_size = model.track(kornia.image_to_tensor(im).unsqueeze(dim=0).to(device).float(),
                                             torch.from_numpy(target_pos).unsqueeze(dim=0).to(device),
                                             torch.from_numpy(target_sz).unsqueeze(dim=0).to(device))

    best_pscore_id = np.argmax(pscore.squeeze().detach().cpu().numpy())

    pred_in_crop = delta.squeeze().detach().cpu().numpy()[:, best_pscore_id] / scale_x
    lr = pscore_size.squeeze().detach().cpu().numpy()[best_pscore_id] * p.lr  # lr for OTB

    res_x = pred_in_crop[0] + target_pos[0]
    res_y = pred_in_crop[1] + target_pos[1]
    res_w = target_sz[0] * (1 - lr) + pred_in_crop[2] * lr
    res_h = target_sz[1] * (1 - lr) + pred_in_crop[3] * lr

    target_pos = np.array([res_x, res_y])
    target_sz = np.array([res_w, res_h])

    target_pos[0] = max(0, min(state['im_w'], target_pos[0]))
    target_pos[1] = max(0, min(state['im_h'], target_pos[1]))
    target_sz[0] = max(10, min(state['im_w'], target_sz[0]))
    target_sz[1] = max(10, min(state['im_h'], target_sz[1]))

    state['target_pos'] = target_pos
    state['target_sz'] = target_sz

    return state
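How the two tracker entry points fit together, with tracker_init from Example #8 above (a sketch; `model`, `frames`, and the initial box values are placeholders):

# initialize on the first frame, then track through the rest
state = tracker_init(frames[0], np.array([cx, cy]), np.array([w, h]), model, device='cpu')
for im in frames[1:]:
    state = tracker_track(state, im, model, device='cpu')
    print(state['target_pos'], state['target_sz'])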
Example #16
def crop_chw_torch(image, bbox, out_sz, device='cpu'):
    a = (out_sz - 1) / (bbox[2] - bbox[0])
    b = (out_sz - 1) / (bbox[3] - bbox[1])
    c = -a * bbox[0]
    d = -b * bbox[1]
    mapping = torch.tensor([[a, 0, c], [0, b, d]], device=device).unsqueeze(0)

    return kornia.warp_affine(image, mapping, dsize=(out_sz, out_sz))


if __name__ == '__main__':
    bbox = np.array([250, 250, 300, 300])
    out_size = 125

    x = np.random.randn(600, 600, 3)
    y: torch.Tensor = kornia.image_to_tensor(x, keepdim=False)  # .to('cuda')  # BxCxHxW


    # a = crop_chw(x, bbox, out_size)
    # b = crop_chw_torch(x, bbox, out_size)
    # print(a.shape)
    # print(b.shape)

    import torch.utils.benchmark as benchmark

    t0 = benchmark.Timer(
        stmt='crop_chw(x, box, 125)',
        setup='from __main__ import crop_chw',
        globals={'x': x, 'box':np.array([250, 250, 300, 300])})

    # by analogy with t0: time the torch-based crop on the tensor input
    t1 = benchmark.Timer(
        stmt='crop_chw_torch(x, box, 125)',
        setup='from __main__ import crop_chw_torch',
        globals={'x': y, 'box': np.array([250, 250, 300, 300])})
Example #17
import torch
import kornia
import cv2
import numpy as np
import torchvision

import matplotlib.pyplot as plt

# read the image with OpenCV
img: np.ndarray = cv2.imread('./download.png')
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

# convert to torch tensor
data: torch.Tensor = kornia.image_to_tensor(img, keepdim=False)  # BxCxHxW

# create the operator
gauss = kornia.filters.GaussianBlur2d((11, 11), (10.5, 10.5))

# blur the image
x_blur: torch.Tensor = gauss(data.float())

# convert back to numpy
img_blur: np.ndarray = kornia.tensor_to_image(x_blur.byte())

# Create the plot
fig, axs = plt.subplots(1, 2, figsize=(16, 10))
axs = axs.ravel()

axs[0].axis('off')
axs[0].set_title('image source')
axs[0].imshow(img)

axs[1].axis('off')
axs[1].set_title('image blurred')
axs[1].imshow(img_blur)
plt.show()
Example #18
    def gen_template(self):
        num_iters = 500
        adam_lr = 10
        mu, sigma = 127, 5
        label_thr_iou = 0.2
        pert_sz_ratio = (0.6, 0.3)

        # Load state
        state = self.state
        device = state['device']
        p = state['p']
        s_z = state['s_z']

        # Get imgs and mask tensor
        im_shape = state['im'].shape[0:2]
        bbox_pert_temp = scale_bbox(state['gts'][0], pert_sz_ratio)
        bbox_pert_xcrop = scale_bbox(state['gts'][state['n_frame']],
                                     pert_sz_ratio)
        mask_template = get_bbox_mask(shape=im_shape,
                                      bbox=bbox_pert_temp,
                                      mode='tensor').to(device)
        mask_xcrop = get_bbox_mask(shape=im_shape,
                                   bbox=bbox_pert_xcrop,
                                   mode='tensor').to(device)
        im_template = kornia.image_to_tensor(state['first_im']).to(device)
        im_xcrop = kornia.image_to_tensor(state['im']).to(device)

        # Get Label
        track_res, score_res, pscore_res = self.get_tracking_result(
            state['template'], self.x_crop)
        labels = self.get_label(track_res,
                                thr_iou=label_thr_iou,
                                need_iou=True)

        im_template = im_template.unsqueeze(dim=0).to(torch.float)
        im_xcrop = im_xcrop.unsqueeze(dim=0).to(torch.float)
        bbox_pert_temp = torch.tensor(bbox_pert_temp).unsqueeze(
            dim=0).to(device)
        bbox_pert_xcrop = torch.tensor(bbox_pert_xcrop).unsqueeze(
            dim=0).to(device)

        pert_sz = (100, 75)
        # pert = (mu + sigma * torch.randn(3, pert_sz[0], pert_sz[1])).clamp(0,255)
        pert = cv2.resize(cv2.imread('data/patchnew0.jpg'),
                          (pert_sz[1], pert_sz[0]))  # W, H
        pert = kornia.image_to_tensor(pert).to(torch.float)
        pert = pert.clone().detach().to(device).requires_grad_(True).to(
            im_template.device)  # (3, H, W)
        optimizer = torch.optim.Adam([pert], lr=adam_lr)
        for i in range(num_iters):
            patch_warped_template = warp_patch(pert, im_template,
                                               bbox_pert_temp)
            patch_warped_search = warp_patch(pert, im_xcrop, bbox_pert_xcrop)
            patch_template = torch.where(mask_template == 1,
                                         patch_warped_template, im_template)
            patch_search = torch.where(mask_xcrop == 1, patch_warped_search,
                                       im_xcrop)

            template = test.get_subwindow_tracking_(patch_template,
                                                    state['pos_z'],
                                                    p.exemplar_size,
                                                    round(state['s_z']), 0)
            x_crop = test.get_subwindow_tracking_(patch_search,
                                                  state['target_pos'],
                                                  p.instance_size,
                                                  round(self.s_x), 0)

            score, delta = self.model.track(x_crop, template)

            ###################  Show Loss and Delta Change ######################
            # if i%10==0:
            #     score_data = F.softmax(score.view(score.shape[0], 2, -1), dim=1)[:,1]
            #     delta_data = delta.view(delta.shape[0], 4, -1).data
            #     res_cx, res_cy, res_w, res_h = track_res
            #     track_res_data = (res_cx-res_w/2, res_cy-res_h/2, res_w, res_h)
            #     self.show_pscore_delta(score_data, delta_data, track_res_data)
            #     self.show_attacking(track_res, score_res, pscore_res, template, x_crop)

            loss = self.loss2(score, delta, pscore_res, labels)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            pert.data = (pert.data).clamp(0, 255)

            # fig, ax = plt.subplots(1,2,num='x_crop & template')
            # ax[0].set_title('template')
            # ax[0].imshow(kornia.tensor_to_image(template.byte()))
            # ax[1].set_title('x_crop')
            # ax[1].imshow(kornia.tensor_to_image(x_crop.byte()))
            # plt.pause(0.01)

        state['pert'] = pert.detach()
        state['pert_template'] = template.detach()
        state['pert_sz_ratio'] = pert_sz_ratio

        # self.show_label(labels, track_res)
        # self.show_attacking(track_res, score_res, pscore_res, template, x_crop)
        plt.show()

        return template, x_crop
Example #19
    bbox = [[200, 200, 207, 395], [310, 157, 220, 250]]

    cv2.namedWindow('img', cv2.WND_PROP_FULLSCREEN)
    x, y, w, h = bbox[0]
    cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 4)
    cv2.imshow('img', img)

    cv2.namedWindow('img2', cv2.WND_PROP_FULLSCREEN)
    x, y, w, h = bbox[1]
    cv2.rectangle(img2, (x, y), (x + w, y + h), (0, 255, 0), 4)
    cv2.imshow('img2', img2)

    cv2.imshow('patch1', patch1)
    cv2.imshow('patch2', patch2)

    img_tensor = kornia.image_to_tensor(img).unsqueeze(0).to(torch.float32)
    img_tensor2 = kornia.image_to_tensor(img2).unsqueeze(0).to(torch.float32)
    patch_tensor1 = kornia.image_to_tensor(patch1).to(torch.float32)
    patch_tensor2 = kornia.image_to_tensor(patch2).to(torch.float32)
    bbox = torch.tensor(bbox)
    bbox_dest = scale_bbox(bbox, (0.6, 0.3))

    mask = get_bbox_mask_tv(img_tensor.shape[-2:], bbox)
    cv2.namedWindow('mask1', cv2.WND_PROP_FULLSCREEN)
    cv2.namedWindow('mask2', cv2.WND_PROP_FULLSCREEN)
    cv2.imshow('mask1', kornia.tensor_to_image(mask[0]))
    cv2.imshow('mask2', kornia.tensor_to_image(mask[1]))

    res_img = warp_patch(torch.stack([patch_tensor1, patch_tensor2], 0),
                         torch.cat([img_tensor, img_tensor2], 0), bbox_dest)
Example #20

    def temporal_bev_image_callback(self, msg):
        # print("temporal_bev_image_callback")
        cv_temporal_bev_image = self.bridge.imgmsg_to_cv2(msg, desired_encoding='passthrough')
        tensor_bev_image = kornia.image_to_tensor(cv_temporal_bev_image, keepdim=True).float()
        self.temporal_bev_map = tensor_bev_image
        self.temporal_bev_image_callback_flag = True
Example #21
import cv2
import numpy as np
import torch
import torchvision
from matplotlib import pyplot as plt

import kornia

#############################
# We use OpenCV to load an image into memory as a numpy.ndarray
img_bgr: np.ndarray = cv2.imread('./data/simba.png', cv2.IMREAD_COLOR)

#############################
# Convert the numpy array to torch
x_bgr: torch.Tensor = kornia.image_to_tensor(img_bgr, keepdim=False)

#############################
# Using `kornia` we can easily perform color transformations in batch mode.


def hflip(input: torch.Tensor) -> torch.Tensor:
    return torch.flip(input, [-1])


def vflip(input: torch.Tensor) -> torch.Tensor:
    return torch.flip(input, [-2])


def rot180(input: torch.Tensor) -> torch.Tensor:
    return torch.flip(input, [-2, -1])
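Applied to the batch prepared above (purely illustrative):

xb = x_bgr.float() / 255.
x_mirrored = hflip(xb)   # flip along the last (width) axis
x_rotated = rot180(xb)   # equivalent to vflip followed by hflip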
Example #22
import torch
from torch import nn

if __name__ == '__main__':
    import cv2
    import kornia
    from matplotlib import pyplot as plt

    patch = cv2.imread('data/patchnew0.jpg')
    patch = kornia.image_to_tensor(patch).to(torch.float)  #.unsqueeze(dim=0)

    # ColorJitter
    # trans = kornia.augmentation.ColorJitter(0.1, 0.1, 0.1, 0.1)
    # fig, axes = plt.subplots(1,2,num=type(trans).__name__)
    # for i in range(100):
    #     res = trans(patch/255.0)
    #     ax = axes[0]
    #     ax.set_title('patch')
    #     ax.imshow(kornia.tensor_to_image(patch.byte()))
    #     ax = axes[1]
    #     ax.set_title('res')
    #     ax.imshow(kornia.tensor_to_image(res))
    #     plt.pause(0.001)

    # RandomAffine
    trans = kornia.augmentation.RandomAffine(degrees=5,
                                             translate=[0.1, 0.1],
                                             scale=[0.9, 1.1],
                                             shear=[-5, 5])
    for i in range(100):
        res = trans(patch)
Example #23
"""

import torch
import kornia
import cv2
import numpy as np

import matplotlib.pyplot as plt

# read the image with OpenCV
img: np.ndarray = cv2.imread('./data/bennett_aden.png')
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

# convert to torch tensor
data: torch.Tensor = kornia.image_to_tensor(img, keepdim=False)  # BxCxHxW

# create transformation (rotation)
alpha: float = 45.0  # in degrees
angle: torch.Tensor = torch.ones(1) * alpha

# define the rotation center
center: torch.Tensor = torch.ones(1, 2)
center[..., 0] = data.shape[3] / 2  # x
center[..., 1] = data.shape[2] / 2  # y

# define the scale factor
scale: torch.Tensor = torch.ones(1)

# compute the transformation matrix
M: torch.Tensor = kornia.get_rotation_matrix2d(center, angle, scale)
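The snippet stops after computing M; a sketch of how such a matrix is typically applied with kornia.warp_affine:

_, _, h, w = data.shape
data_warped: torch.Tensor = kornia.warp_affine(data.float(), M, dsize=(h, w))
img_warped: np.ndarray = kornia.tensor_to_image(data_warped.byte())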
Example #24

def load_img(path):
    img1 = cv2.imread(path)
    timg1 = kornia.image_to_tensor(img1, False)
    timg1 = kornia.color.bgr_to_rgb(timg1)
    return timg1
Example #25
    def __getitem__(self, item):
        img = cv2.imread(self.arg.dataset_route[self.dataset] +
                         self.list[item][-1])
        img = image_to_tensor(img)
        img = bgr_to_rgb(img)
        return img
Example #26
def test_image_to_tensor_keep_dtype(input_dtype, expected_dtype):
    image = np.ones((1, 3, 4, 5), dtype=input_dtype)
    tensor = kornia.image_to_tensor(image)
    assert tensor.dtype == expected_dtype
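The test is clearly meant to be parametrized; a plausible decorator to place directly above it (the dtype pairs follow from image_to_tensor wrapping torch.from_numpy, which preserves the numpy dtype):

import numpy as np
import pytest
import torch

@pytest.mark.parametrize('input_dtype, expected_dtype', [
    (np.uint8, torch.uint8),
    (np.float32, torch.float32),
    (np.float64, torch.float64),
])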
Example #27
    def forward(self, x: Tensor) -> Tensor:
        x = image_to_tensor(np.array(x)).float() / 255.
        assert len(x.shape) == 3, x.shape
        out = self.preprocess(x)
        return out[0]
Example #28
from matplotlib import pyplot as plt
import cv2
import numpy as np

import torch
import kornia
import torchvision

#############################
# We use OpenCV to load an image into memory as a numpy.ndarray
img_bgr: np.ndarray = cv2.imread('./data/ninja_turtles.jpg', cv2.IMREAD_COLOR)

#############################
# Convert the numpy array to torch
x_bgr: torch.Tensor = kornia.image_to_tensor(img_bgr)
x_rgb: torch.Tensor = kornia.bgr_to_rgb(x_bgr)

#############################
# Create batch and normalize
x_rgb = x_rgb.expand(4, -1, -1, -1)  # 4xCxHxW
x_rgb = x_rgb.float() / 255.


def imshow(input: torch.Tensor):
    out: torch.Tensor = torchvision.utils.make_grid(input, nrow=2, padding=5)
    out_np: np.ndarray = kornia.tensor_to_image(out)
    plt.imshow(out_np)
    plt.axis('off')
    plt.show()
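With the batch built above, one call renders the 2x2 grid:

imshow(x_rgb)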
Example #29

def create_image_tensor(image, num, device, dtype):
    return kornia.image_to_tensor(image, keepdim=False).repeat(
        num, 1, 1, 1).to(device=get_device_by_string(device),
                         dtype=get_device_by_dtype(dtype))
Example #30
"""

import cv2
import matplotlib.pyplot as plt
import numpy as np
import torch

import kornia

#############################
# We use OpenCV to load an image into memory as a numpy.ndarray
img_bgr: np.ndarray = cv2.imread('./data/arturito.jpeg')  # HxWxC

#############################
# The image is converted to a 4D torch tensor
x_bgr: torch.Tensor = kornia.image_to_tensor(img_bgr, keepdim=False)  # 1xCxHxW

#############################
# Once with a torch tensor we can use any Kornia operator
x_rgb: torch.Tensor = kornia.bgr_to_rgb(x_bgr)  # 1xCxHxW

#############################
# Convert back to numpy to visualize
img_rgb: np.ndarray = kornia.tensor_to_image(x_rgb.byte())  # HxWxC

#############################
# We use Matplotlib to visualize the results
fig, axs = plt.subplots(1, 2, figsize=(32, 16))
axs = axs.ravel()

axs[0].axis('off')