Example #1
# Render a sequence of views along a (phi, theta) camera trajectory around a
# scene and write the frames out as an AVI. The snippet relies on names
# defined at module level elsewhere: r (camera orbit radius), intensity
# (light intensity), w, h (image resolution), and the aliases lin
# (cvgutils.Linalg), mts (the Mitsuba wrapper) and im (cvgutils.Image).
def dumpCameraInfo(trajectory, shape, maskShape, fov, camLookAt, camUp, outdir,
                   nsamples):
    ps, ts = trajectory  # arrays of azimuth (phi) and polar (theta) angles
    images = []
    for i, (t, p) in enumerate(zip(ts.reshape(-1), ps.reshape(-1))):
        # camera position on a sphere of radius r; the point light is
        # co-located with the camera
        x, y, z = lin.pt2xyz(p, t, r)
        xl, yl, zl = [x, y, z]
        # near/far planes bracket the unit sphere at the origin
        near = (x**2 + y**2 + z**2)**0.5 - 1.0
        far = (x**2 + y**2 + z**2)**0.5 + 1.0
        light = mts.pointlight([xl, yl, zl], intensity)

        ext = lin.lookAt(torch.Tensor([[x, y, z]]), camLookAt[None, ...],
                         camUp[None, ...])
        camera = mts.camera([x, y, z],
                            camLookAt,
                            camUp,
                            fov,
                            ext=ext,
                            near=near,
                            far=far,
                            w=w,
                            h=h,
                            nsamples=nsamples)
        scene = mts.generateScene(shape, light, camera)
        img = mts.renderScene(scene)
        images.append(img)

    images = np.stack(images, axis=0)
    # frames go in as (N, C, H, W) at 10 fps; maskShape is unused in this
    # excerpt
    im.imageseq2avi(os.path.join(outdir, 'tst.avi'),
                    images.transpose(0, 3, 1, 2), 10)
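
A sketch of how the (ps, ts) trajectory passed to dumpCameraInfo might be built; the sampling pattern and angle conventions here are assumptions for illustration, not taken from the project:

import numpy as np

# hypothetical orbit: 36 azimuth steps at a fixed polar angle of 60 degrees
ps = np.linspace(0.0, 2.0 * np.pi, 36, endpoint=False)  # phi (azimuth)
ts = np.full_like(ps, np.pi / 3.0)                       # theta (polar)
trajectory = (ps, ts)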
Example #2
def rotateEnvmapSequence(outdir, envmap_fn, nimgs, env_w, env_h):
    # Write nimgs yaw-rotated copies of an environment map as EXR frames plus
    # gamma-encoded PNG previews. rotateEnvMap and the Im (image utilities)
    # alias are defined elsewhere in the project.
    if not os.path.exists(outdir):
        os.makedirs(outdir)

    im = cv2.imread(envmap_fn, -1)
    im = Im.antialiasResize(im, im.shape[1] // 8, im.shape[0] // 8)
    print('writing images to ', outdir)
    for i in tqdm.trange(nimgs):
        r = float(i) / nimgs * 360  # rotation angle in degrees
        im2 = rotateEnvMap(im, r)
        im2 = Im.antialiasResize(im2, env_w, env_h)
        outfn = os.path.join(outdir, '%04i.exr' % i)
        cv2.imwrite(outfn, im2)
        cv2.imwrite(outfn.replace('.exr', '.png'),
                    np.clip(im2, 0, 1)**(1 / 2.2) * 255)

    # leftover driver code: assembles a preview video from a fixed dataset path
    fns = sorted(
        glob.glob(
            '/home/mohammad/Projects/NRV/dataset/envmap-videos/pisa/*.exr'))
    out = '/home/mohammad/Projects/NRV/dataset/envmap-videos/pisa.avi'
    ims = []
    for fn in tqdm.tqdm(fns[:150]):
        im = cv2.imread(fn, -1)[:, :, ::-1].copy()  # BGR -> RGB
        im = cv2.resize(im, (im.shape[1] * 10, im.shape[0] * 10))
        ims.append(im)
    ims = torch.from_numpy(np.stack(ims, 0)).permute(0, 3, 1, 2)

    imageseq2avi(out, ims)
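
rotateEnvMap is defined elsewhere in the project. For an equirectangular map a yaw rotation is just a circular shift of columns, so a minimal stand-in (an assumption, not the project's implementation) could be:

import numpy as np

def rotateEnvMap(im, degrees):
    # yaw-rotate an equirectangular environment map by rolling its columns
    shift = int(round(degrees / 360.0 * im.shape[1]))
    return np.roll(im, shift, axis=1)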
Example #3
def renderDepth(shape, light, camera):
    import cvgutils.Image as im

    # Mitsuba AOV integrator: writes depth (dd.y) and shading normals (nn)
    # alongside the path-traced image
    Integrator = """<integrator type="aov">
    <string name="aovs" value="dd.y:depth,nn:sh_normal"/>
    <integrator type="path" name="my_image"/>
    </integrator>"""
    scene = generateScene(shape, light, camera, Integrator)
    fn = 'tmp.exr'
    renderToFile(scene, fn)
    return im.readExrImage(fn, [['dd.y']])  # read back the depth channel only
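
The AOV integrator above also writes shading normals; a sketch of reading both layers back from the temporary EXR, following the readExrImage channel-list pattern that appears in example #9:

import cvgutils.Image as im

# depth and shading normals, as named by the aovs string above
depth, normals = im.readExrImage('tmp.exr',
                                 [['dd.y'], ['nn.X', 'nn.Y', 'nn.Z']])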
Example #4
def testBackProject():
    # Render a textured sphere, back-project our own camera rays by the
    # rendered depth, and compare the point cloud against Mitsuba's.
    camOrig = torch.Tensor([0., .3, -2.])
    camLookAt = torch.Tensor([0, 0, 0])
    camUp = torch.Tensor([0., 1., 0.])
    fov = 60.0
    far = 3
    near = 1
    w = 60
    h = 64
    # normalized pixel grid over the image plane
    u, v = np.linspace(0, w - 1, w) / w, np.linspace(0, h - 1, h) / h
    u, v = np.meshgrid(u, v)
    uv = np.stack((u, v), axis=0).reshape(2, -1)
    ext = lin.lookAt(camOrig[None, ...], camLookAt[None, ...], camUp[None, ...])
    camxml = mts.camera(camOrig, camLookAt, camUp, fov, ext=ext, near=near,
                        far=far, w=w, h=h, nsamples=4)
    cam = xml.load_string(camxml)
    # ray = mts.renderRays(cam, uv)

    # create scene: a textured sphere lit by a point light at the camera
    texturefn = 'cvgutils/tests/testimages/5x5pattern.png'
    radius = 1.
    center = [0, 0, 0]
    intensity = [1.0, 1.0, 1.0]
    material = mts.diffuse(texturefn)
    shape = mts.sphere(center, radius, material)
    light = mts.pointlight(camOrig.cpu().numpy(), intensity)
    scene = mts.generateScene(shape, light, camxml)
    depth = mts.renderDepth(shape, light, camxml)[0]
    img = mts.renderScene(scene)
    ray, mtscloud, depth, renderedCloud, mask = mts.renderDepthInWorld(scene, uv)

    # back-project: scale our rays by depth and offset by the camera origin
    ourRay = lin.sampleRay(h, w, far, near, fov, torch.Tensor(uv)[None, ...], ext)
    ourRay = ourRay.reshape(-1, 3, h, w).permute(3, 2, 1, 0)[..., 0]

    ourcloud = torch.Tensor(ourRay * depth.astype(np.float32))[
        None, ...].permute(0, 3, 1, 2).float() + camOrig[None, :, None, None].float()

    # total absolute difference between the masked point clouds
    print((ourcloud * torch.Tensor(mask)[None, ...].permute(0, 3, 1, 2) -
           torch.Tensor(mtscloud * mask)[None, ...].permute(0, 3, 1, 2)).abs().sum())
    imu.depth2txt('renderout/depth1.txt',
                  ourcloud[0].permute(2, 1, 0).cpu().numpy(), img)
Example #5
def pointlight2envmap(imseq, indexfn, envmap, scale, output, w, h):
    # Image-based relighting: approximate the envmap-lit image as a weighted
    # sum of single-point-light renders, one render per envmap texel.
    envmap = cvgim.imread(envmap)
    envmap = cvgim.resize(envmap, dx=w, dy=h)
    h = envmap.shape[0]
    w = envmap.shape[1]
    envmap = envmap[:, :, :3]
    print(envmap.shape)
    index = util.loadPickle(indexfn)  # maps 'x_y' texel keys to frame numbers
    assert len(index) == h * w

    x = np.linspace(0, w - 1, w)
    y = np.linspace(0, h - 1, h)
    u = (x + 0.5) / w
    v = (y + 0.5) / h
    u, v = np.meshgrid(u, v)
    x, y = np.meshgrid(x, y)
    # sin(theta) solid-angle weight of each equirectangular texel row
    sintScaled = np.sin(v * np.pi) * scale
    sumsin = sintScaled.sum()  # unused in this excerpt
    img = cvgim.imread(imseq % int(index['0000_0000']))[:, :, :3] * 0
    for x0, y0 in tqdm.tqdm(
            zip(x.reshape(-1).astype(np.int32),
                y.reshape(-1).astype(np.int32))):
        # per-texel weight: envmap radiance times the solid-angle term
        intensity = envmap[y0, x0, None, None, :] * sintScaled[y0, x0]
        fn = imseq % int(index['%04d_%04d' % (x0, y0)])
        im = cvgim.imread(fn)[:, :, :3]
        img += im * intensity / w * h  # precedence: (im * intensity) * h / w
    cvgim.imwrite(output, img.astype(np.float32)[:, :, ::-1])
    # 16-bit gamma-encoded PNG preview alongside the EXR
    cvgim.imwrite(output.replace('exr', 'png'),
                  (np.clip(img[:, :, ::-1], 0, 1)**(1 / 2.2) *
                   (2**16 - 1)).astype(np.uint16))
    return img.astype(np.float32)[:, :, ::-1].copy()
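
The sin(v * pi) factor above is the per-row solid angle of an equirectangular texel; a self-contained check that these weights integrate to the sphere's 4*pi steradians:

import numpy as np

# each texel spans dphi x dtheta and subtends sin(theta) * dtheta * dphi
h, w = 64, 128
v = (np.arange(h) + 0.5) / h
row_weight = np.sin(v * np.pi)
total = row_weight.sum() * (np.pi / h) * (2 * np.pi / w) * w
print(total, 4 * np.pi)  # both ~12.566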
Example #6
    # Excerpt from a larger test: rasterize uniformly sampled sphere points
    # with pytorch3d, then render the same sphere with Mitsuba as a reference.
    # Assumed in scope: samples, radius, w, h, fov, near, far, camOrig,
    # camLookAt, camLookAtt, camUp, material, nsamples, outdir, outfmt,
    # outfmtTorch.
    # sphereOrig = torch.rand((1,1,3))
    cx, cy, cz = (0, 0, 0)
    sphereOrig = torch.Tensor([[[cx, cy, cz]]])
    sphereOrigt = torch.Tensor([[[cy, cx, -cz]]])  # axis-swapped copy for pytorch3d
    x, y, z = lin.sampleUniformOnSphere(samples[:, 0, :], samples[:, 1, :])
    xyz = torch.stack((x, y, z), dim=2) * radius + sphereOrigt

    # rasterization by pytorch3d
    aspect = w / h
    # R, T = torchCam.look_at_view_transform(dist=2.7, elev=0, azim=0)
    R = torchCam.look_at_rotation(camOrig, camLookAtt, camUp)
    T = torch.einsum('abc,ac->ab', R, camOrig)
    cam = torchCam.FoVPerspectiveCameras(near, far, aspect, fov, R=R, T=T)

    # project the sphere samples to screen space and splat them into an image
    uv = cam.transform_points_screen(xyz, torch.Tensor([[h, w]]))
    uv = torch.clamp(uv, 0, h - 1)
    img = torch.zeros((h, w, 3))
    img[uv[0, :, 0].long(), uv[0, :, 1].long(), :] = 1

    outfn = os.path.join(outdir, outfmtTorch % (0, 0))
    im.writePng(outfn, img)

    # render the same sphere by mitsuba
    light = mts.pointlight(camOrig)
    camera = mts.camera(camOrig, camLookAt, camUp, fov, nsamples=nsamples)
    shape = mts.sphere(sphereOrig[0], radius, material)

    scene = mts.generateScene(shape, light, camera)
    img = mts.renderScene(scene) > 0  # binary coverage mask
    outfn = os.path.join(outdir, outfmt % (0, 0))
    im.writePng(outfn, img)
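
The fragment above calls lin.sampleUniformOnSphere from cvgutils.Linalg; a standard inverse-CDF stand-in (the sample layout and axis conventions are assumptions, not the project's implementation) looks like this:

import math
import torch

def sampleUniformOnSphere(u1, u2):
    # map two uniform samples in [0, 1] to a uniform point on the unit sphere
    z = 1.0 - 2.0 * u1  # cos(theta) uniform in [-1, 1]
    r = torch.sqrt(torch.clamp(1.0 - z * z, min=0.0))
    phi = 2.0 * math.pi * u2
    return r * torch.cos(phi), r * torch.sin(phi), z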
Example #7
import numpy as np
import glob
import cvgutils.Image as cvgim
import os
import tqdm

# batch-resize 4k environment maps down to 1024x512
indir = '/home/mohammad/Projects/NRV/dataset/envmaps/*.exr'
outdir = '/home/mohammad/Projects/NRV/dataset/envmaps_512_1024'
inimgs = glob.glob(indir)
os.makedirs(outdir, exist_ok=True)
max16 = (2**16 - 1)  # used by the commented-out uint16 conversion below
for img in tqdm.tqdm(inimgs):
    im = cvgim.imread(img)
    im = cvgim.resize(im, dx=1024, dy=512)
    fn = os.path.join(outdir, os.path.basename(img).replace('4k', '1024x512'))
    # im = (im * max16).astype(np.uint16)
    cvgim.imwrite(fn, im)
Example #8
import cvgutils.Image as im
import numpy as np

if __name__ == "__main__":
    # a single random RGB frame, shaped (N, C, H, W) for imageseq2avi
    a = np.random.rand(100, 100, 3)
    a = a.transpose((2, 0, 1))[None, ...]
    im.imageseq2avi('renderout/tst.avi', a)
Example #9
import cvgutils.Image as im
import numpy as np
import cv2

if __name__ == "__main__":
    # readExrImage also handles multi-layer EXRs, e.g. for
    # 'cvgutils/tests/testimages/multichannel.exr':
    # imgs = im.readExrImage(fn, [['R','G','B','A'], ['dd.y'], ['nn.X','nn.Y','nn.Z']])
    # cv2.imwrite('renderout/rgba.exr', imgs[0][:, :, :3])
    # cv2.imwrite('renderout/depth.exr', imgs[1])
    # cv2.imwrite('renderout/nn.exr', imgs[2][:, :, ::-1])

    fn = 'cvgutils/tests/testimages/redRectangle.exr'
    imgs = im.readExrImage(fn, [['R', 'G', 'B']])
    im.writePng('out.png', imgs[0])
Example #10
File: resize.py  Project: mshafiei/cvgutils
import cvgutils.Image as img
import numpy as np
import cv2

# decode gamma to linear, resize, then re-encode for display
im = (cv2.imread('tests/testimages/highfreq.jpg', -1) / 255.0)**(2.2)
res = img.resize(im, dx=256, dy=256)
cv2.imshow('hi', (res**(1 / 2.2) * 255).astype(np.uint8))
cv2.waitKey(0)
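
The script decodes with gamma 2.2 before resizing and re-encodes afterwards; a quick self-contained check that the round-trip itself is lossless:

import numpy as np

x = np.linspace(0, 1, 5)
assert np.allclose((x ** 2.2) ** (1 / 2.2), x)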
Example #11
import cvgutils.Image as im
import cvgutils.Linalg as lin
import numpy as np
import torch
import cv2

if __name__ == "__main__":
    # resample a light probe (angular map) into an equirectangular image
    probefn = 'cvgutils/tests/testimages/grace_probe.hdr'
    w = 512
    h = 256
    probeim = cv2.imread(probefn, -1)[:, :, ::-1].copy()  # BGR -> RGB
    u = np.linspace(0, w - 1, w) / w
    v = np.linspace(0, h - 1, h) / h
    u, v = np.meshgrid(u, v)
    # equirectangular (u, v) -> spherical (phi, theta) -> direction (x, y, z)
    p, t = lin.uv2pt(u, v)
    x, y, z = lin.pt2xyz(p, t)
    # angular-map lookup: probe radius is proportional to arccos(z)
    r = 1 / np.pi * np.arccos(z) / ((x**2 + y**2)**0.5 + 1e-20)
    up, vp = x * r, y * r
    # grid_sample expects sampling coordinates normalized to [-1, 1]
    uv = np.stack((up, vp), axis=-1)[None, ...]
    eq = torch.nn.functional.grid_sample(
        torch.Tensor(probeim[None, ...]).permute(0, 3, 1, 2), torch.Tensor(uv))
    im.writePng('./renderout/eq.png', eq[0].permute(1, 2, 0))
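
A self-contained check of the angular-map formula above, which matches Debevec's light-probe mapping (probe radius = arccos(z) / pi):

import numpy as np

def probe_uv(x, y, z):
    # forward direction (z = 1) maps to the center, backward to the rim
    r = np.arccos(z) / np.pi / (np.sqrt(x**2 + y**2) + 1e-20)
    return x * r, y * r

print(probe_uv(0.0, 1.0, 0.0))    # equator: radius 0.5
print(probe_uv(1e-6, 0.0, -1.0))  # back direction: radius ~1.0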
Example #12
    def createTeaser(self,
                     im,
                     label,
                     title,
                     trnsfrm=lambda x: x,
                     dim_type='HWC',
                     mode='train',
                     text=''):
        counth, countw = 3, 4
        yoffset = 100
        showlbls = ['ambient', 'pred', 'noisy', 'flash', 'dx', 'dy']
        h, w = im['pred'].shape[1] - yoffset * 2, im['pred'].shape[2]
        font = dict(
            family="Courier New, monospace",
            size=25,
        )
        fontsize = 25
        specs = [
            [{'colspan': 2, 'rowspan': 2}, None, {'colspan': 2, 'rowspan': 2}, None],
            [None, None, None, None],
            [{'colspan': 1, 'rowspan': 1} for _ in range(4)],
        ]
        label = [
            'Ground Truth', 'Denoised', 'Noisy', 'Flash', 'Output x gradient',
            'Output y gradient'
        ]
        im = [im[i][0, yoffset:-yoffset, :, :] for i in showlbls]
        fig = make_subplots(rows=counth,
                            cols=countw,
                            specs=specs,
                            subplot_titles=label,
                            horizontal_spacing=0,
                            vertical_spacing=0.09)
        fig.update_yaxes(visible=False, showticklabels=False)
        fig.update_xaxes(visible=True, showticklabels=False)
        fig.update_layout(margin=dict(l=3, r=3, t=60, b=3))
        # panels: (a) ambient, (b) prediction, (c) noisy, (d) flash,
        # (e) output x gradient, (f) output y gradient
        cells = [(1, 1, 'a'), (1, 3, 'b'), (3, 1, 'c'), (3, 2, 'd'),
                 (3, 3, 'e'), (3, 4, 'f')]
        for img_i, (row, col, tag) in zip(im, cells):
            fig.add_trace(px.imshow(np.clip(img_i, 0, 1)).data[0],
                          row=row, col=col)
            fig.update_xaxes(title_text=tag, row=row, col=col)

        margin = 30
        fig.update_annotations(font_size=fontsize)
        fig.update_layout(height=h * counth + margin,
                          width=w * countw + margin * 2,
                          font=font)
        writer, path = self.path_parse(mode)

        im = cvgim.plotly_fig2array(fig)
        if self.ltype == 'tb':
            imshow = torch.Tensor(im[..., :3]).permute(2, 0, 1)
            writer.add_image(title.replace(' ', '_'), imshow, self.step)
        elif self.ltype == 'filesystem':
            name = os.path.join(
                path, '%010i_%s.png' % (self.step, title.replace(' ', '_')))
            cvgim.imwrite(name, im[..., :3])
Example #13
    def addImage(self,
                 im,
                 label,
                 title,
                 trnsfrm=lambda x: x,
                 dim_type='HWC',
                 mode='train',
                 text='',
                 addinset=False,
                 annotation=None,
                 l=3,
                 r=3,
                 t=60,
                 b=3,
                 margin=30,
                 ltype=None):
        """[Clips and shows a an image or a list of images]

        Args:
            im ([ndarray or list of ndarray]): []
            label ([str or list of str]): [description]
            dim_type ([str]): [arrangement of dimensions 'HWC' or 'BHWC' or 'CHW' or 'BCHW']
        """
        ann = []
        if annotation is not None:
            for k in im:
                ann.append(annotation[k] if k in annotation else None)

        # OrderedDict is a dict subclass, so one isinstance check covers both
        if isinstance(im, dict):
            if (dim_type == 'BCHW'):
                im = [trnsfrm(i).transpose([0, 2, 3, 1]) for i in im.values()]
            elif (dim_type == 'CHW'):
                im = [
                    trnsfrm(i).transpose([1, 2, 0])[None, ...]
                    for i in im.values()
                ]
            elif (dim_type == 'HWC'):
                im = [trnsfrm(i)[None, ...] for i in im.values()]
            elif (dim_type == 'BHWC'):
                im = [trnsfrm(i) for i in im.values()]
            if (addinset):
                im = [
                    cvgim.addInset(i[0, 100:-100, ...])[None, ...] for i in im
                ]
            else:
                im = [i for i in im]

            label = [i for i in label.values()]
            count, nbatch = len(im), im[0].shape[0]
            h, w = im[0].shape[1], im[0].shape[2]
            fig = make_subplots(rows=nbatch,
                                cols=count,
                                subplot_titles=label,
                                horizontal_spacing=0,
                                vertical_spacing=0)
            fig.update_yaxes(visible=False, showticklabels=False)
            fig.update_xaxes(visible=False, showticklabels=False)
            fig.update_layout(margin=dict(l=l, r=r, t=t, b=b), )
            # one subplot per (image, batch-row) cell
            for i, im_i in enumerate(im):
                for j, im_j in enumerate(im_i):
                    fig.add_trace(px.imshow(np.clip(im_j, 0, 1)).data[0],
                                  row=j + 1,
                                  col=i + 1)
            for i, an in enumerate(ann):
                if an is None:
                    continue
                fig.add_annotation(xref='x domain',
                                   yref='y domain',
                                   x=0.00,
                                   y=1.00,
                                   text=an,
                                   bgcolor="#c3c9c5",
                                   showarrow=False,
                                   row=1,
                                   col=i + 1,
                                   font=dict(
                                       color='black',
                                       family="linux libertine",
                                       size=32,
                                   ),
                                   align="left")

            fig.update_layout(height=h * nbatch + margin,
                              width=w * count + margin * 2,
                              title_text=text)
            im = cvgim.plotly_fig2array(fig)

        writer, path = self.path_parse(mode)

        if ltype == 'wandb' or self.ltype == 'wandb':
            wandb.log({title: [wandb.Image(im)]}, step=self.step)
        if ltype == 'tb' or self.ltype == 'tb':
            imshow = torch.Tensor(im[..., :3]).permute(2, 0, 1)
            writer.add_image(title.replace(' ', '_'), imshow, self.step)
        if ltype == 'filesystem' or self.ltype == 'filesystem':
            name = os.path.join(
                path, '%010i_%s.png' % (self.step, title.replace(' ', '_')))
            cvgim.imwrite(name, im[..., :3])
        if ltype == 'html' or self.ltype == 'html':
            # note: fig only exists when im was passed as a dict above
            name = os.path.join(
                path, '%010i_%s.html' % (self.step, title.replace(' ', '_')))
            fig.write_html(name, include_mathjax='cdn')
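
A minimal call sketch for addImage with hypothetical data; the logger instance and its configured ltype come from the surrounding class, so the names here are assumptions for illustration:

import numpy as np
from collections import OrderedDict

# hypothetical inputs: two 1x3x64x64 float images with matching labels
ims = OrderedDict(pred=np.random.rand(1, 3, 64, 64),
                  gt=np.random.rand(1, 3, 64, 64))
labels = OrderedDict(pred='Denoised', gt='Ground Truth')
logger.addImage(ims, labels, 'denoising result',
                dim_type='BCHW', mode='train', ltype='filesystem')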