Example No. 1
def go_render(vision, json, compute_mu, write_binary):
    position = numpy.array(json["position"])
    attitude = numpy.array(json["attitude"])

    # XXX hack to try orientation theory:
    # correct for left-handedness. A reflection would be more proper, but
    # transposing works here because the attitude is a rotation about z.
    attitude = attitude.T

    linear_velocity_body = numpy.array(json["linear_velocity_body"])
    angular_velocity_body = numpy.array(json["angular_velocity_body"])

    linear_velocity_body = cgtypes.vec3(linear_velocity_body)
    angular_velocity_body = cgtypes.vec3(angular_velocity_body)
    position = cgtypes.vec3(position)
    orientation = cgtypes.quat().fromMat(cgtypes.mat3(attitude))

    vision.step(position, orientation)

    R = vision.get_last_retinal_imageR()
    G = vision.get_last_retinal_imageG()
    B = vision.get_last_retinal_imageB()
    y = (R + G + B) / 3.0 / 255  # mean luminance, normalized to [0, 1]

    answer = {}

    answer["position"] = list(position)
    answer["orientation"] = numpy.array(orientation.toMat3()).tolist()
    answer["linear_velocity_body"] = list(linear_velocity_body)
    answer["angular_velocity_body"] = list(angular_velocity_body)

    answer["luminance"] = y.tolist()

    # Get Q_dot and mu from body velocities
    # WARNING: Getting Q_dot and mu can be very slow
    if compute_mu:
        # linear_velocity_body and angular_velocity_body were already
        # converted to cgtypes.vec3 above.
        qdot, mu = vision.get_last_retinal_velocities(linear_velocity_body,
                                                      angular_velocity_body)

        answer["nearness"] = mu
        answer["retinal_velocities"] = qdot

    if write_binary:
        what = answer["luminance"]
        n = len(what)
        # Pack the luminance samples as big-endian 32-bit floats.
        s = struct.pack(">" + "f" * n, *what)
        positive_answer({"binary_length": len(s), "mean": numpy.mean(what)},
                        "Sending binary data (n * 4 bytes)")
        sys.stdout.write(s)
        sys.stdout.flush()
    else:
        positive_answer(answer)
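
For reference, the mat3-to-quat conversion used above can be exercised in isolation. A minimal sketch, assuming only cgkit's cgtypes module; the 90-degree z-rotation is made up for illustration:

from cgkit import cgtypes

attitude = cgtypes.mat3(0, -1, 0,
                        1, 0, 0,
                        0, 0, 1)  # rotate 90 degrees about +z (row-major)
orientation = cgtypes.quat().fromMat(attitude)
print orientation           # roughly quat(0.7071, 0, 0, 0.7071)
print orientation.toMat3()  # round-trips back to the original matrix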
Example No. 2
    def rotationZ(self):
        values = nuke.thisNode().metadata("exr/cameraTransform", nuke.frame(), "view")
        if values is None:
            print "NoneType"
            return 0
        else:
            # Drop the bottom row and the translation column of the
            # row-major flattened 4x4, leaving the upper-left 3x3 entries.
            for index in (15, 14, 13, 12, 11, 7, 3):
                values.pop(index)

            M = mat3(values)
            # Swap the Y and Z axes (negating the new Y) to convert between
            # coordinate conventions before extracting Euler angles.
            swappedM = mat3(values)
            swappedM.setRow(1, M.getRow(2) * -1.0)
            swappedM.setRow(2, M.getRow(1))

            eulerAngles = swappedM.toEulerXYZ()
            return math.degrees(eulerAngles[2])
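
The chain of pop() calls above simply extracts the upper-left 3x3 block from a row-major flattened 4x4 matrix. A standalone illustration with made-up values, assuming cgkit's mat3:

from cgkit.cgtypes import mat3

m44 = [1, 2, 3, 10,
       4, 5, 6, 20,
       7, 8, 9, 30,
       0, 0, 0, 1]  # flattened 4x4, row-major; values are arbitrary

# Popping 15, 14, 13, 12, 11, 7, 3 removes the bottom row and the
# translation column, leaving the nine upper-left entries.
for index in (15, 14, 13, 12, 11, 7, 3):
    m44.pop(index)
print mat3(m44)  # rows [1, 2, 3], [4, 5, 6], [7, 8, 9]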
Example No. 3
def generate_calibration(
    n_cameras=5,
    return_full_info=False,
    radial_distortion=False,
):
    pi = numpy.pi

    sccs = []

    # 1. extrinsic parameters:
    # arrange cameras in a circle around a common point
    common_point = numpy.array((0, 0, 0), dtype=numpy.float64)
    r = 10.0

    theta = numpy.linspace(0, 2 * pi, n_cameras, endpoint=False)
    x = numpy.cos(theta)
    y = numpy.sin(theta)
    z = numpy.zeros(y.shape)

    cc = numpy.c_[x, y, z]
    cam_centers = r * cc + common_point

    # Convert each camera's position and up direction into a rotation
    # matrix via a look-at transform.
    Rs = []
    for i, th in enumerate(theta):
        pos = cam_centers[i]
        target = common_point
        up = (0, 0, 1)
        R = cgtypes.mat4().lookAt(pos, target, up)
        R = R.getMat3()
        R = numpy.asarray(R).T  # conversion between cgkit and numpy
        Rs.append(R)

    # 2. intrinsic parameters

    resolutions = {}
    for cam_no in range(n_cameras):
        cam_id = 'fake_%d' % (cam_no + 1)

        # resolution of image
        res = (1600, 1200)
        resolutions[cam_id] = res

        # principal point
        cc1 = res[0] / 2.0
        cc2 = res[1] / 2.0

        # focal length
        fc1 = 1.0
        fc2 = 1.0
        alpha_c = 0.0

        R = Rs[cam_no]
        C = cam_centers[cam_no][:, numpy.newaxis]

        # Build the 3x4 projection matrix P = K [R | t], with t = -R C.
        K = numpy.array(((fc1, alpha_c * fc1, cc1), (0, fc2, cc2), (0, 0, 1)))
        t = numpy.dot(-R, C)
        Rt = numpy.concatenate((R, t), axis=1)
        P = numpy.dot(K, Rt)

        if radial_distortion:
            f = 1000.0
            r1 = 0.8
            r2 = -0.2

            # Note: this helper is constructed but never attached to the
            # calibration built below in this snippet.
            helper = reconstruct_utils.ReconstructHelper(
                f, f,  # focal length
                cc1, cc2,  # image center
                r1, r2,  # radial distortion
                0, 0)  # tangential distortion

        scc = reconstruct.SingleCameraCalibration_from_basic_pmat(
            P,
            cam_id=cam_id,
            res=res,
        )
        sccs.append(scc)
        # Sanity check: K*R recovered from the calibration should match the
        # K*R used to construct it, possibly up to an overall sign.
        K2, R2 = scc.get_KR()
        KR2 = numpy.dot(K2, R2)
        KR = numpy.dot(K, R)
        if not numpy.allclose(KR2, KR):
            if not numpy.allclose(KR2, -KR):
                raise ValueError('expected KR2 and KR to be identical')
            else:
                print 'WARNING: weird sign error in calibration math FIXME!'
    recon = reconstruct.Reconstructor(sccs)

    full_info = {
        'reconstructor': recon,
        'center': common_point,  # where all the cameras are looking
        'camera_dist_from_center': r,
        'resolutions': resolutions,
    }
    if return_full_info:
        return full_info
    return recon
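
A possible invocation of the function above, following the full_info dict it builds (a sketch; it assumes flydra's reconstruct machinery is importable):

info = generate_calibration(n_cameras=3, return_full_info=True)
recon = info['reconstructor']
print 'cameras look at', info['center']
print 'distance from center', info['camera_dist_from_center']
print 'resolutions', info['resolutions']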
Example No. 4
def Mat3Slot(value=mat3(), flags=0):
    return _core.Mat3Slot(value, flags)
Example No. 5
    def __init__(self, proc):
        _core.Mat3Slot.__init__(self, mat3(1), 2)  # 2 = NO_INPUT_CONNECTIONS
        self._proc = proc
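
For context, a cgkit slot stores a typed value that can be read back or connected to other slots. A minimal usage sketch, assuming Mat3Slot is exposed via cgkit.all with cgkit's standard getValue/setValue slot interface:

from cgkit.all import Mat3Slot, mat3

slot = Mat3Slot(mat3(1))  # start with the identity matrix
slot.setValue(mat3(2))    # store a uniformly scaled matrix
print slot.getValue()     # -> the mat3 currently held by the slot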
Example No. 6
File: Observer.py  Project: K8H/fsee
    def get_last_retinal_velocities(self, vel_vec3, angular_vel, direction='ommatidia'):
        # NOTE: Both vel_vec3 and angular_vel should be in body frame

        x = self.last_pos_vec3  # eye origin
        q = self.last_ori_quat
        qi = q.inverse()

        vel_vecs = []
        mu_list = []
        dir_body_list = []
        dir_world_list = []

        if direction == 'ommatidia':
            dir_body_list = self.cvs.precomputed_optics_module.receptor_dirs
            for dir_body in dir_body_list:
                dir_world_quat = q * cgtypes.quat(0, dir_body.x, dir_body.y, dir_body.z) * qi
                dir_world_list.append(cgtypes.vec3(dir_world_quat.x,
                                                   dir_world_quat.y,
                                                   dir_world_quat.z))
        elif direction == 'emds':
            dir_body_quats = self.emd_dirs_unrotated_quats
            for dir_body_quat in dir_body_quats:
                dir_world_quat = q * dir_body_quat * qi  # rotate dir_body_quat by q
                dir_world_list.append(cgtypes.vec3(dir_world_quat.x,
                                                   dir_world_quat.y,
                                                   dir_world_quat.z))
                dir_body_list.append(cgtypes.vec3(dir_body_quat.x,
                                                  dir_body_quat.y,
                                                  dir_body_quat.z))
        else:
            raise ValueError("direction must be either 'ommatidia' or 'emds'")
            
        # Need a more efficient way to do this loop
        for n in range(len(dir_body_list)):
            dir_body = dir_body_list[n]
            dir_world = dir_world_list[n]

            vend = x + (dir_world * 1e5)  # XXX should figure out maximum length in OSG
            vstart = x + (dir_world * 1e-5)  # avoid intersecting with the origin
            
            rx, ry, rz, is_hit = self.sim.get_world_point(vstart, vend)

            if is_hit == 1:  # is_hit is always 1 in the presence of a skybox
                sigma = (cgtypes.vec3(rx, ry, rz) - x).length()  # distance
                mu = 1.0 / sigma  # nearness
            else:
                mu = 0.0  # objects are at infinity

            # Use the formula in Sean Humbert's paper to calculate retinal
            # velocity (note: vec3 * vec3 is the dot product in cgkit).
            vr = -angular_vel.cross(dir_body) - mu * (vel_vec3 - dir_body * (dir_body * vel_vec3))

            # Convert vr into spherical coordinates; cf.
            # http://en.wikipedia.org/wiki/Vector_fields_in_cylindrical_and_spherical_coordinates
            sx = dir_body.x
            sy = dir_body.y
            sz = dir_body.z
            phi = numpy.math.atan2(sy, sx)
            theta = numpy.math.acos(sz)

            cphi = numpy.cos(phi)
            sphi = numpy.sin(phi)
            ctheta = numpy.cos(theta)
            stheta = numpy.sin(theta)

            R = cgtypes.mat3(stheta * cphi, stheta * sphi, ctheta,
                             ctheta * cphi, ctheta * sphi, -stheta,
                             -sphi, cphi, 0)  # row-major order
            vr = R * vr
            # vr[0]: r (should be zero), vr[1]: theta, vr[2]: phi
            vel_vecs.append([vr[1], vr[2]])
            mu_list.append(mu)
                        
        return vel_vecs, mu_list
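
As a sanity check on the row-major matrix R built above: applying R to the viewing direction itself should give a purely radial result, approximately (1, 0, 0), which is why only the theta and phi components survive as retinal velocities. A minimal sketch with an arbitrary direction, assuming cgkit's cgtypes:

import math
from cgkit import cgtypes

dir_body = cgtypes.vec3(1.0, 2.0, 3.0).normalize()  # arbitrary unit direction
phi = math.atan2(dir_body.y, dir_body.x)
theta = math.acos(dir_body.z)

cphi, sphi = math.cos(phi), math.sin(phi)
ctheta, stheta = math.cos(theta), math.sin(theta)
R = cgtypes.mat3(stheta * cphi, stheta * sphi, ctheta,
                 ctheta * cphi, ctheta * sphi, -stheta,
                 -sphi, cphi, 0)  # row-major

print R * dir_body  # approximately (1, 0, 0): radial component only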