Example #1
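All six snippets appear to come from the same chumpy/OpenDR rendering utility module. To run them stand-alone they need roughly the following module-level imports; the local geometry helper (providing Scale, Translate, RotateX and RotateZ as 4x4 chumpy matrices) is an assumption inferred from the calls below, not part of OpenDR's public API.

import numpy as np
import chumpy as ch
import opendr.geometry
from opendr.camera import ProjectPoints
import geometry  # assumed local helper: Scale, Translate, RotateX, RotateZ -> 4x4 chumpy matrices
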
def transformObject(v, vn, chScale, chObjAz, chPosition):
    # Build a 3x3 scale matrix that handles 1-, 2- and 3-element scale vectors.
    if chScale.size == 1:
        scaleMat = geometry.Scale(x=chScale[0], y=chScale[0],
                                  z=chScale[0])[0:3, 0:3]
    elif chScale.size == 2:
        scaleMat = geometry.Scale(x=chScale[0], y=chScale[0],
                                  z=chScale[1])[0:3, 0:3]
    else:
        scaleMat = geometry.Scale(x=chScale[0], y=chScale[1],
                                  z=chScale[2])[0:3, 0:3]
    chRotAzMat = geometry.RotateZ(a=chObjAz)[0:3, 0:3]
    chRotAzMatX = geometry.RotateX(a=0)[0:3, 0:3]

    # Model matrix: rotation about Z (the X rotation here is the identity)
    # composed with the scale matrix.
    transformation = ch.dot(ch.dot(chRotAzMat, chRotAzMatX), scaleMat)
    # Normals are transformed by the inverse transpose of the model matrix.
    invTranspModel = ch.transpose(ch.inv(transformation))

    vtransf = []
    vntransf = []
    for mesh_i in range(len(v)):
        # Transform the vertices and translate them to the object position.
        vtransf.append(ch.dot(v[mesh_i], transformation) + chPosition)
        # Transform the normals and renormalize them to unit length.
        vndot = ch.dot(vn[mesh_i], invTranspModel)
        vndot = vndot / ch.sqrt(ch.sum(vndot**2, 1))[:, None]
        vntransf.append(vndot)
    return vtransf, vntransf
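
A minimal usage sketch for transformObject; the single-mesh data and parameter values below are made-up placeholders, not values from the original project.

v = [ch.Ch(np.random.rand(4, 3))]               # one mesh with 4 vertices
vn = [ch.Ch(np.tile([0.0, 0.0, 1.0], (4, 1)))]  # matching per-vertex normals
chScale = ch.Ch([1.0, 1.0, 2.0])                # anisotropic x/y/z scale
chObjAz = ch.Ch(np.pi / 4)                      # azimuth about the Z axis
chPosition = ch.Ch([0.0, 0.0, 0.5])             # world-space translation
vtransf, vntransf = transformObject(v, vn, chScale, chObjAz, chPosition)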
Example #2
def transformObjectFull(v, vn, chScale, chObjAz, chObjAx, chObjAz2,
                        chPosition):
    if chScale.size == 1:
        scaleMat = geometry.Scale(x=chScale[0], y=chScale[0],
                                  z=chScale[0])[0:3, 0:3]
    elif chScale.size == 2:
        scaleMat = geometry.Scale(x=chScale[0], y=chScale[0],
                                  z=chScale[1])[0:3, 0:3]
    else:
        scaleMat = geometry.Scale(x=chScale[0], y=chScale[1],
                                  z=chScale[2])[0:3, 0:3]

    chRotAzMat = geometry.RotateZ(a=chObjAz)[0:3, 0:3]
    chRotAxMat = geometry.RotateX(a=-chObjAx)[0:3, 0:3]
    chRotAzMat2 = geometry.RotateZ(a=chObjAz2)[0:3, 0:3]

    transformation = ch.dot(
        ch.dot(ch.dot(chRotAzMat, chRotAxMat), chRotAzMat2), scaleMat)

    invTranspModel = ch.transpose(ch.inv(transformation))

    vtransf = []
    vntransf = []
    for mesh_i in range(len(v)):
        # Transform the vertices and translate them to the object position.
        vtransf.append(ch.dot(v[mesh_i], transformation) + chPosition)
        # Transform the normals and renormalize them to unit length.
        vndot = ch.dot(vn[mesh_i], invTranspModel)
        vndot = vndot / ch.sqrt(ch.sum(vndot**2, 1))[:, None]
        vntransf.append(vndot)
    return vtransf, vntransf
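
transformObjectFull generalizes Example #1 to a full Z-X-Z rotation. A usage sketch with placeholder angles, reusing v, vn, chScale and chPosition from the previous sketch:

vtransf, vntransf = transformObjectFull(v, vn, chScale,
                                        chObjAz=ch.Ch(0.3),
                                        chObjAx=ch.Ch(0.1),
                                        chObjAz2=ch.Ch(0.0),
                                        chPosition=chPosition)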
Example #3
def computeHemisphereTransformation(chAz, chEl, chDist, objCenter):

    chDistMat = geometry.Translate(x=ch.Ch(0), y=-chDist, z=ch.Ch(0))
    chToObjectTranslate = geometry.Translate(x=objCenter[0],
                                             y=objCenter[1],
                                             z=objCenter[2])

    chRotAzMat = geometry.RotateZ(a=chAz)
    chRotElMat = geometry.RotateX(a=-chEl)
    chCamModelWorld = ch.dot(chToObjectTranslate,
                             ch.dot(chRotAzMat, ch.dot(chRotElMat, chDistMat)))

    return chCamModelWorld
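
computeHemisphereTransformation composes a translation to the object centre with azimuth, elevation and distance terms into a 4x4 camera-to-world matrix. A placeholder usage sketch:

chAz = ch.Ch(np.pi / 2)                # azimuth on the hemisphere
chEl = ch.Ch(np.pi / 6)                # elevation above the ground plane
chDist = ch.Ch(0.75)                   # distance from the object centre
objCenter = ch.Ch([0.0, 0.0, 0.1])
chCamModelWorld = computeHemisphereTransformation(chAz, chEl, chDist, objCenter)
camPosition = chCamModelWorld[0:3, 3]  # camera position in world coordinates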
Example #4
def computeGlobalAndDirectionalLighting(vn, vc, chLightAzimuth,
                                        chLightElevation, chLightIntensity,
                                        chGlobalConstant):

    # Construct the directional light vector from its azimuth and elevation.
    rangeMeshes = range(len(vn))
    vc_list = []
    chRotAzMat = geometry.RotateZ(a=chLightAzimuth)[0:3, 0:3]
    chRotElMat = geometry.RotateX(a=chLightElevation)[0:3, 0:3]
    chLightVector = -ch.dot(chRotAzMat,
                            ch.dot(chRotElMat, np.array([0, 0, -1])))
    for mesh in rangeMeshes:
        # Lambertian term: clamp the normal/light dot product at zero.
        l1 = ch.maximum(ch.dot(vn[mesh], chLightVector).reshape((-1, 1)), 0.)
        vcmesh = vc[mesh] * (chLightIntensity * l1 + chGlobalConstant)
        vc_list.append(vcmesh)
    return vc_list
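
A sketch of applying the lighting term to placeholder per-mesh normals and base colours (values chosen only for illustration):

vn = [ch.Ch(np.tile([0.0, 0.0, 1.0], (4, 1)))]  # per-mesh unit normals
vc = [ch.Ch(0.8 * np.ones((4, 3)))]             # per-mesh base colours
vc_lit = computeGlobalAndDirectionalLighting(
    vn, vc,
    chLightAzimuth=ch.Ch(0.0),
    chLightElevation=ch.Ch(np.pi / 3),
    chLightIntensity=ch.Ch(0.8),
    chGlobalConstant=ch.Ch(0.3))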
Example #5
def setupCamera(v, cameraParams):

    chDistMat = geometry.Translate(x=0,
                                   y=cameraParams['Zshift'],
                                   z=cameraParams['chCamHeight'])

    chRotElMat = geometry.RotateX(a=-cameraParams['chCamEl'])

    chCamModelWorld = ch.dot(chDistMat, chRotElMat)

    flipZYRotation = np.array([[1.0, 0.0, 0.0, 0.0],
                               [0.0, 0.0, 1.0, 0.0],
                               [0.0, -1.0, 0.0, 0.0],
                               [0.0, 0.0, 0.0, 1.0]])

    chMVMat = ch.dot(chCamModelWorld, flipZYRotation)

    chInvCam = ch.inv(chMVMat)

    modelRotation = chInvCam[0:3, 0:3]

    chRod = opendr.geometry.Rodrigues(rt=modelRotation).reshape(3)
    chTranslation = chInvCam[0:3, 3]

    translation, rotation = (chTranslation, chRod)

    camera = ProjectPoints(v=v,
                           rt=rotation,
                           t=translation,
                           f=1000 * cameraParams['chCamFocalLength'] *
                           cameraParams['a'],
                           c=cameraParams['c'],
                           k=ch.zeros(5))

    flipXRotation = np.array([[1.0, 0.0, 0.0, 0.0],
                              [0.0, -1.0, 0.0, 0.0],
                              [0.0, 0.0, -1.0, 0.0],
                              [0.0, 0.0, 0.0, 1.0]])

    camera.openglMat = flipXRotation  # Needed to match OpenGL's flipped axes.

    return camera, modelRotation, chMVMat
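
A hedged sketch of the cameraParams dictionary this version of setupCamera expects. Every value is a placeholder, and the meaning of 'a' (a per-axis pixel scaling applied to the focal length) is inferred from how it is used, not documented in the source.

cameraParams = {
    'Zshift': ch.Ch(0.0),                 # placeholder camera offset
    'chCamHeight': ch.Ch(0.4),            # placeholder camera height
    'chCamEl': ch.Ch(np.pi / 8),          # placeholder camera elevation
    'chCamFocalLength': 0.05,             # scaled by 1000 * 'a' inside setupCamera
    'a': np.array([1.0, 1.0]),            # placeholder per-axis pixel factor
    'c': np.array([320.0, 240.0]),        # principal point for a 640x480 image
}
v = ch.Ch(np.random.rand(100, 3))         # placeholder vertex array
camera, modelRotation, chMVMat = setupCamera(v, cameraParams)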
Example #6
def setupCamera(v, cameraParams, is_ycb=False):
    chDistMat = geometry.Translate(x=0,
                                   y=cameraParams['Zshift'],
                                   z=cameraParams['chCamHeight'])

    chRotElMat = geometry.RotateX(a=-cameraParams['chCamEl'])

    chCamModelWorld = ch.dot(chDistMat, chRotElMat)

    flipZYRotation = np.array([[1.0, 0.0, 0.0, 0.0],
                               [0.0, 0.0, 1.0, 0.0],
                               [0.0, -1.0, 0.0, 0.0],
                               [0.0, 0.0, 0.0, 1.0]])

    chMVMat = ch.dot(chCamModelWorld, flipZYRotation)
    if is_ycb:
        chMVMat = ch.Ch(np.eye(4))

    chInvCam = ch.inv(chMVMat)

    modelRotation = chInvCam[0:3, 0:3]

    chRod = opendr.geometry.Rodrigues(rt=modelRotation).reshape(3)
    chTranslation = chInvCam[0:3, 3]

    translation, rotation = (chTranslation, chRod)

    # Camera parameter format suitable for the YCB video dataset.
    if 'a' in cameraParams:
        # NOTE: the focal length is represented in mm and 'a' is the number of
        # pixels per mm.
        _f = 1000 * cameraParams['chCamFocalLength'] * cameraParams['a']
    else:
        # NOTE: the focal length is already expressed in pixels.
        if np.any(cameraParams['chCamFocalLength'] < 1):
            import sys
            sys.exit(
                "Camera focal length 'chCamFocalLength' must be given in pixels "
                "(got a value smaller than 1).")
        _f = cameraParams['chCamFocalLength']

    if 'k' in cameraParams:
        _k = cameraParams['k']
    else:
        _k = ch.zeros(5)
    print('Using k', _k)
    camera = ProjectPoints(v=v,
                           rt=rotation,
                           t=translation,
                           f=_f,
                           c=cameraParams['c'],
                           k=_k)

    print('camera.camera_mtx', camera.camera_mtx)
    np.save('projection_matrix', camera.camera_mtx)

    flipXRotation = np.array([[1.0, 0.0, 0.0, 0.0],
                              [0.0, -1.0, 0.0, 0.0],
                              [0.0, 0.0, -1.0, 0.0],
                              [0.0, 0.0, 0.0, 1.0]])

    camera.openglMat = flipXRotation  # Needed to match OpenGL's flipped axes.

    return camera, modelRotation, chMVMat
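
For the is_ycb path the model-view matrix is forced to the identity and the intrinsics are passed directly in pixels. A hedged sketch of such a parameter dictionary, reusing v from the previous sketch; the numbers are illustrative and not guaranteed to match the published YCB-Video calibration.

cameraParams_ycb = {
    'Zshift': ch.Ch(0.0),        # still read, but chMVMat is overwritten when is_ycb=True
    'chCamHeight': ch.Ch(0.0),
    'chCamEl': ch.Ch(0.0),
    'chCamFocalLength': np.array([1066.8, 1067.5]),  # fx, fy in pixels
    'c': np.array([313.0, 241.3]),                   # principal point in pixels
    'k': ch.zeros(5),                                # no lens distortion
}
camera, modelRotation, chMVMat = setupCamera(v, cameraParams_ycb, is_ycb=True)

The 'k' entry feeds ProjectPoints' distortion-coefficient vector; leaving it at zeros gives an ideal pinhole camera.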