def computeHemisphereTransformation(chAz, chEl, chDist, objCenter):
    """Camera-to-world transform for a viewpoint on a hemisphere around objCenter.

    Args:
        chAz: azimuth angle (chumpy scalar) applied about Z.
        chEl: elevation angle (chumpy scalar) applied about X (negated).
        chDist: distance from the object center along -Y before rotation.
        objCenter: 3-element center of the object (indexable x, y, z).

    Returns:
        4x4 chumpy model-to-world matrix for the camera.
    """
    # Individual transform factors, composed right-to-left below:
    # back off along -Y, tilt by elevation, spin by azimuth, move to the object.
    backOff = geometry.Translate(x=ch.Ch(0), y=-chDist, z=ch.Ch(0))
    elevation = geometry.RotateX(a=-chEl)
    azimuth = geometry.RotateZ(a=chAz)
    toObject = geometry.Translate(x=objCenter[0], y=objCenter[1], z=objCenter[2])
    return ch.dot(toObject, ch.dot(azimuth, ch.dot(elevation, backOff)))
def setupCamera(v, cameraParams):
    """Build an OpenDR ProjectPoints camera for vertices v from cameraParams.

    NOTE(review): a later three-argument `setupCamera` definition in this file
    shadows this one at import time — confirm which version callers expect.

    Args:
        v: vertices to project.
        cameraParams: dict with keys 'Zshift', 'chCamHeight', 'chCamEl',
            'chCamFocalLength' (mm), 'a' (pixels per mm), 'c' (principal point).

    Returns:
        (camera, modelRotation, chMVMat): the ProjectPoints camera, the 3x3
        world-to-camera rotation, and the 4x4 model-view matrix.
    """
    # Camera pose in world coordinates: lift to camera height, then tilt.
    lift = geometry.Translate(x=0, y=cameraParams['Zshift'], z=cameraParams['chCamHeight'])
    tilt = geometry.RotateX(a=-cameraParams['chCamEl'])
    camModelWorld = ch.dot(lift, tilt)

    # Exchange the Y and Z axes (negating one) between conventions.
    flipZYRotation = np.array([[1.0, 0.0, 0.0, 0.0],
                               [0.0, 0.0, 1.0, 0.0],
                               [0.0, -1.0, 0.0, 0.0],
                               [0.0, 0.0, 0.0, 1.0]])
    chMVMat = ch.dot(camModelWorld, flipZYRotation)

    # Invert the pose to get the extrinsics, then split into R (as a
    # Rodrigues vector) and t for ProjectPoints.
    chInvCam = ch.inv(chMVMat)
    modelRotation = chInvCam[0:3, 0:3]
    rotation = opendr.geometry.Rodrigues(rt=modelRotation).reshape(3)
    translation = chInvCam[0:3, 3]

    camera = ProjectPoints(v=v,
                           rt=rotation,
                           t=translation,
                           f=1000 * cameraParams['chCamFocalLength'] * cameraParams['a'],
                           c=cameraParams['c'],
                           k=ch.zeros(5))

    flipXRotation = np.array([[1.0, 0.0, 0.0, 0.0],
                              [0.0, -1.0, 0.0, 0.0],
                              [0.0, 0.0, -1.0, 0.0],
                              [0.0, 0.0, 0.0, 1.0]])
    camera.openglMat = flipXRotation  # Needed to match OpenGL flipped axis.
    return camera, modelRotation, chMVMat
def setupCamera(v, cameraParams, is_ycb=False):
    """Build an OpenDR ProjectPoints camera for vertices v from cameraParams.

    NOTE(review): this definition shadows the earlier two-argument
    `setupCamera` in this file; callers always get this version — confirm
    the earlier one can be deleted.

    Args:
        v: vertices to project.
        cameraParams: dict with keys 'Zshift', 'chCamHeight', 'chCamEl',
            'chCamFocalLength', 'c', and optionally 'a' (pixels per mm,
            implies focal length is in mm) and 'k' (distortion coefficients).
        is_ycb: if True, use an identity model-view matrix (pose is assumed
            to already be expressed in the camera frame, YCB-video style).

    Returns:
        (camera, modelRotation, chMVMat): the ProjectPoints camera, the 3x3
        world-to-camera rotation, and the 4x4 model-view matrix.

    Raises:
        SystemExit: if 'a' is absent and the focal length looks like it is
            in mm (< 1) rather than pixels.
    """
    # Camera pose in world coordinates: lift to camera height, then tilt.
    chDistMat = geometry.Translate(x=0, y=cameraParams['Zshift'], z=cameraParams['chCamHeight'])
    chRotElMat = geometry.RotateX(a=-cameraParams['chCamEl'])
    chCamModelWorld = ch.dot(chDistMat, chRotElMat)

    # Exchange the Y and Z axes (negating one) between conventions.
    flipZYRotation = np.array([[1.0, 0.0, 0.0, 0.0],
                               [0.0, 0.0, 1.0, 0.0],
                               [0.0, -1.0, 0.0, 0.0],
                               [0.0, 0.0, 0.0, 1.0]])
    chMVMat = ch.dot(chCamModelWorld, flipZYRotation)
    if is_ycb:
        # Pose is already in the camera frame: no model-view transform.
        chMVMat = ch.Ch(np.eye(4))

    # Invert the pose to get the extrinsics, then split into R (as a
    # Rodrigues vector) and t for ProjectPoints.
    chInvCam = ch.inv(chMVMat)
    modelRotation = chInvCam[0:3, 0:3]
    chRod = opendr.geometry.Rodrigues(rt=modelRotation).reshape(3)
    chTranslation = chInvCam[0:3, 3]
    translation, rotation = (chTranslation, chRod)

    if 'a' in cameraParams:
        # Focal length is in mm and 'a' is the number of pixels per mm.
        _f = 1000 * cameraParams['chCamFocalLength'] * cameraParams['a']
    else:
        # Focal length is already in pixels; a value < 1 almost certainly
        # means mm were passed by mistake, so bail out loudly.
        if np.any(cameraParams['chCamFocalLength'] < 1):
            import sys
            sys.exit(
                "Camera Focal length 'chCamFocalLength' is represented in number of pixels."
            )
        _f = cameraParams['chCamFocalLength']
    # Distortion coefficients default to zero when not supplied.
    _k = cameraParams['k'] if 'k' in cameraParams else ch.zeros(5)

    camera = ProjectPoints(v=v, rt=rotation, t=translation, f=_f,
                           c=cameraParams['c'], k=_k)

    flipXRotation = np.array([[1.0, 0.0, 0.0, 0.0],
                              [0.0, -1.0, 0.0, 0.0],
                              [0.0, 0.0, -1.0, 0.0],
                              [0.0, 0.0, 0.0, 1.0]])
    camera.openglMat = flipXRotation  # Needed to match OpenGL flipped axis.
    return camera, modelRotation, chMVMat