Example #1
    def get_matrix(self):
        qx = cgtypes.quat().fromAngleAxis(self.r_x * D2R,
                                          cgtypes.vec3(1, 0, 0))
        qy = cgtypes.quat().fromAngleAxis(self.r_y * D2R,
                                          cgtypes.vec3(0, 1, 0))
        qz = cgtypes.quat().fromAngleAxis(self.r_z * D2R,
                                          cgtypes.vec3(0, 0, 1))
        Rx = cgmat2np(qx.toMat3())
        Ry = cgmat2np(qy.toMat3())
        Rz = cgmat2np(qz.toMat3())
        R = np.dot(Rx, np.dot(Ry, Rz))

        t = np.array([self.tx, self.ty, self.tz], float)
        s = self.s

        T = np.zeros((4, 4), dtype=float)
        T[:3, :3] = s * R
        T[:3, 3] = t
        T[3, 3] = 1.0

        # convert bool to -1 or 1
        fx = fy = fz = 1
        if self.flip_x:
            fx = -1
        if self.flip_y:
            fy = -1
        if self.flip_z:
            fz = -1

        flip = np.array(
            [[fx, 0, 0, 0], [0, fy, 0, 0], [0, 0, fz, 0], [0, 0, 0, 1]],
            dtype=float)
        T = np.dot(flip, T)
        # T = np.dot(T,flip)
        return T
Example #2
    def __init__(self,
                 name="GLDistantLight",
                 parent=None,
                 enabled=True,
                 intensity=1.0,
                 ambient=None,
                 diffuse=None,
                 specular=None,
                 cast_shadow = False,
                 auto_insert=True,
                 **params
                 ):
        _initWorldObject(self, baseClass=_core.GLDistantLight,
                         name=name, parent=parent,
                         auto_insert=auto_insert,
                         **params)

        self.enabled = enabled
        self.intensity = intensity
        if ambient!=None:
            self.ambient = vec3(ambient)
        if diffuse!=None:
            self.diffuse = vec3(diffuse)
        if specular!=None:
            self.specular = vec3(specular)

        self.cast_shadow = cast_shadow
Example #3
    def as_dict(self):
        qx = cgtypes.quat().fromAngleAxis(self.r_x * D2R,
                                          cgtypes.vec3(1, 0, 0))
        qy = cgtypes.quat().fromAngleAxis(self.r_y * D2R,
                                          cgtypes.vec3(0, 1, 0))
        qz = cgtypes.quat().fromAngleAxis(self.r_z * D2R,
                                          cgtypes.vec3(0, 0, 1))
        Rx = cgmat2np(qx.toMat3())
        Ry = cgmat2np(qy.toMat3())
        Rz = cgmat2np(qz.toMat3())
        _R = np.dot(Rx, np.dot(Ry, Rz))

        # convert bool to -1 or 1
        fx = fy = fz = 1
        if self.flip_x:
            fx = -1
        if self.flip_y:
            fy = -1
        if self.flip_z:
            fz = -1

        flip = np.array([[fx, 0, 0], [0, fy, 0], [0, 0, fz]], dtype=float)
        _R = np.dot(flip, _R)

        s = float(self.s)
        t = [float(v) for v in (self.tx, self.ty, self.tz)]
        R = []
        for row in _R:
            R.append([float(i) for i in row])

        result = {"s": s, "t": t, "R": R}
        return result
Example #4
    def __init__(self,
                 name="GLPointLight",
                 enabled=True,
                 intensity=1.0,
                 ambient=None,
                 diffuse=None,
                 specular=None,
                 constant_attenuation=1.0,
                 linear_attenuation=0.0,
                 quadratic_attenuation=0.0,
                 cast_shadow = False,
                 parent = None,
                 auto_insert = True,
                 **params
                 ):
        exec(_preInitWorldObject)
        _core.GLPointLight.__init__(self, name)

        _initWorldObject(self, name=name, parent=parent,
                         auto_insert=auto_insert,
                         **params)

        self.enabled = enabled
        self.intensity = intensity
        if ambient!=None:
            self.ambient = vec3(ambient)
        if diffuse!=None:
            self.diffuse = vec3(diffuse)
        if specular!=None:
            self.specular = vec3(specular)
        self.constant_attenuation = constant_attenuation
        self.linear_attenuation = linear_attenuation
        self.quadratic_attenuation = quadratic_attenuation

        self.cast_shadow = cast_shadow
Example #5
    def __init__(self,
                 name="GLDistantLight",
                 parent=None,
                 enabled=True,
                 intensity=1.0,
                 ambient=None,
                 diffuse=None,
                 specular=None,
                 cast_shadow=False,
                 auto_insert=True,
                 **params):
        exec(_preInitWorldObject)
        _core.GLDistantLight.__init__(self, name)

        _initWorldObject(self,
                         name=name,
                         parent=parent,
                         auto_insert=auto_insert,
                         **params)

        self.enabled = enabled
        self.intensity = intensity
        if ambient != None:
            self.ambient = vec3(ambient)
        if diffuse != None:
            self.diffuse = vec3(diffuse)
        if specular != None:
            self.specular = vec3(specular)

        self.cast_shadow = cast_shadow
Example #6
    def __init__(self,
                 name="GLPointLight",
                 enabled=True,
                 intensity=1.0,
                 ambient=None,
                 diffuse=None,
                 specular=None,
                 constant_attenuation=1.0,
                 linear_attenuation=0.0,
                 quadratic_attenuation=0.0,
                 cast_shadow=False,
                 parent=None,
                 auto_insert=True,
                 **params):
        _initWorldObject(self,
                         baseClass=_core.GLPointLight,
                         name=name,
                         parent=parent,
                         auto_insert=auto_insert,
                         **params)

        self.enabled = enabled
        self.intensity = intensity
        if ambient != None:
            self.ambient = vec3(ambient)
        if diffuse != None:
            self.diffuse = vec3(diffuse)
        if specular != None:
            self.specular = vec3(specular)
        self.constant_attenuation = constant_attenuation
        self.linear_attenuation = linear_attenuation
        self.quadratic_attenuation = quadratic_attenuation

        self.cast_shadow = cast_shadow
Example #7
def main():
    model_path = os.path.join(
            fsee.data_dir,"models/WT1/WT1.osg") # trigger extraction

    hz = 100.0
    dt = 1/hz
    done = False
    vision = None

    results = {}

    path_maker = PathMaker(vel_0=cgtypes.vec3(100,0,0),
                           pos_0=cgtypes.vec3(000,200,150))

    if 1:
        if 1:
            fname = 'guf_output'
            print('saving', fname)
            vision = fsee.Observer.Observer(model_path=model_path,
                                            scale=1000.0, # convert model from meters to mm
                                            hz=hz,
                                            skybox_basename=None,
                                            full_spectrum=True,
                                            optics='buchner71',
                                            do_luminance_adaptation=False,
                                            )
            t = -dt
            count = 0
            while count<1000:
                t+=dt
                count += 1

                cur_pos = None
                cur_ori = None
                cur_pos, cur_ori, vel, angular_vel = path_maker.step(t)
                vision.step(cur_pos,cur_ori)

                EMDs = vision.get_last_emd_outputs()
                R = vision.get_last_retinal_imageR()
                G = vision.get_last_retinal_imageG()
                B = vision.get_last_retinal_imageB()

                vision.save_last_environment_map('envmap%03d.png'%count)

                results.setdefault('position_vec3_xyz',[]).append(
                    tuple(cur_pos))
                results.setdefault('orientation_quat_wxyz',[]).append(
                    (cur_ori.w,cur_ori.x,cur_ori.y,cur_ori.z))

                results.setdefault('R',[]).append(R)
                results.setdefault('G',[]).append(G)
                results.setdefault('B',[]).append(B)
                results.setdefault('EMDs',[]).append(EMDs)
    if 1:
        for key in results.keys():
            # make sure scipy can save it
            results[key] = numpy.asarray(results[key])#,dtype=numpy.Float64)
        scipy.io.savemat('guf_data',results)
        done = True
Example #8
 def generate(self, concrete):
     """Instantiate only 1 triangle to pass validity check"""
     parent = concrete.get_node(self._element.values.gate_id + "_impl")
     parent.add_dressing_faces(
         [cgtypes.vec3(3),
          cgtypes.vec3(4),
          cgtypes.vec3(5)], [[0, 1, 2]],
         concrete_room.get_texture_definition("../texture.png"))
Example #9
 def generate(self, concrete):
     """Instantiate only 1 triangle to pass validity check"""
     parent = concrete.get_node("parent")
     parent.add_dressing_faces(
         [cgtypes.vec3(0),
          cgtypes.vec3(1),
          cgtypes.vec3(2)], [[0, 1, 2]],
         concrete_room.get_texture_definition("../texture.png"))
Example #10
 def __init__(self,
              pos_0=cgtypes.vec3(0,0,0),
              ori_0=cgtypes.quat(1,0,0,0),
              vel_0=cgtypes.vec3(0,0,0),
              ):
     self.pos_0 = pos_0
     self.ori_0 = ori_0
     self.vel_0 = vel_0
     self.reset()
Example #11
def go_render(vision, json, compute_mu, write_binary):
    position = numpy.array(json["position"])
    attitude = numpy.array(json["attitude"])

    # XXX hack to try orientation theory
    # correcting for left-handeness
    # should use a reflection instead, but it's all good because rotz
    attitude = attitude.T

    linear_velocity_body = numpy.array(json["linear_velocity_body"])
    angular_velocity_body = numpy.array(json["angular_velocity_body"])

    linear_velocity_body = cgtypes.vec3(linear_velocity_body)
    angular_velocity_body = cgtypes.vec3(angular_velocity_body)
    position = cgtypes.vec3(position)
    orientation = cgtypes.quat.fromMat(cgtypes.quat(), cgtypes.mat3(attitude))

    vision.step(position, orientation)

    R = vision.get_last_retinal_imageR()
    G = vision.get_last_retinal_imageG()
    B = vision.get_last_retinal_imageB()
    y = (R + G + B) / 3.0 / 255

    answer = {}

    answer["position"] = list(position)
    answer["orientation"] = numpy.array(orientation.toMat3()).tolist()
    answer["linear_velocity_body"] = list(linear_velocity_body)
    answer["angular_velocity_body"] = list(angular_velocity_body)

    answer["luminance"] = y.tolist()

    # Get Q_dot and mu from body velocities
    # WARNING: Getting Q_dot and mu can be very slow
    if compute_mu:
        lvel = json["linear_velocity_body"]
        avel = json["angular_velocity_body"]
        linear_velocity_body = cgtypes.vec3(numpy.array(lvel))
        angular_velocity_body = cgtypes.vec3(numpy.array(avel))

        qdot, mu = vision.get_last_retinal_velocities(linear_velocity_body, angular_velocity_body)

        answer["nearness"] = mu
        answer["retinal_velocities"] = qdot

    if write_binary:
        what = answer["luminance"]
        n = len(what)
        s = struct.pack(">" + "f" * n, *what)
        positive_answer({"binary_length": len(s), "mean": numpy.mean(what)}, "Sending binary data (n * 4 bytes)")
        sys.stdout.write(s)
        sys.stdout.flush()
    else:
        positive_answer(answer)
Example #12
 def generate(self, concrete):
     """generate 1 structure triangle to be able to check validity"""
     logger.info("Called generate")
     parent = concrete.add_child(self.gate.values.gate_id,
                                 self.gate.values.gate_id + "_impl")
     index0 = parent.add_structure_points(
         [cgtypes.vec3(3),
          cgtypes.vec3(4),
          cgtypes.vec3(5)])
     parent.add_structure_faces(index0, [[0, 1, 2], [3, 4, 5], [6, 7, 8]],
                                [concrete_room.Node.CATEGORY_PHYSICS],
                                [concrete_room.Node.HINT_BUILDING], [0])
Example #13
    def __init__(self,
                 name="GLSpotLight",
                 parent=None,
                 enabled=True,
                 intensity=1.0,
                 ambient=None,
                 diffuse=None,
                 specular=None,
                 constant_attenuation=1.0,
                 linear_attenuation=0.0,
                 quadratic_attenuation=0.0,
                 exponent=0.0,
                 cutoff=45.0,
                 target=vec3(0, 0, 0),
                 cast_shadow=False,
                 auto_insert=True,
                 **params):

        _initWorldObject(self,
                         baseClass=_core.GLSpotLight,
                         name=name,
                         parent=parent,
                         auto_insert=auto_insert,
                         **params)

        target = vec3(target)

        # Target
        self.target_slot = Vec3Slot(target)
        self.addSlot("target", self.target_slot)

        self.enabled = enabled
        self.intensity = intensity
        if ambient != None:
            self.ambient = vec3(ambient)
        if diffuse != None:
            self.diffuse = vec3(diffuse)
        if specular != None:
            self.specular = vec3(specular)
        self.constant_attenuation = constant_attenuation
        self.linear_attenuation = linear_attenuation
        self.quadratic_attenuation = quadratic_attenuation
        self.exponent = exponent
        self.cutoff = cutoff

        self.cast_shadow = cast_shadow

        # Create the internal LookAt component
        self._lookat = lookat.LookAt()
        self._lookat.name = "GLTargetSpot_LookAt"
        self._lookat.output_slot.connect(self.rot_slot)
        self.pos_slot.connect(self._lookat.pos_slot)
        self.target_slot.connect(self._lookat.target_slot)
Example #14
    def __init__(self,
                 name="GLSpotLight",
                 parent=None,
                 enabled=True,
                 intensity=1.0,
                 ambient=None,
                 diffuse=None,
                 specular=None,
                 constant_attenuation=1.0,
                 linear_attenuation=0.0,
                 quadratic_attenuation=0.0,
                 exponent=0.0,
                 cutoff=45.0,
                 target=vec3(0,0,0),
                 cast_shadow = False,
                 auto_insert=True,
                 **params
                 ):
        exec(_preInitWorldObject)
        _core.GLSpotLight.__init__(self, name)

        _initWorldObject(self, name=name, parent=parent,
                         auto_insert=auto_insert,
                         **params)

        target = vec3(target)

        # Target
        self.target_slot = Vec3Slot(target)
        self.addSlot("target", self.target_slot)

        self.enabled = enabled
        self.intensity = intensity
        if ambient!=None:
            self.ambient = vec3(ambient)
        if diffuse!=None:
            self.diffuse = vec3(diffuse)
        if specular!=None:
            self.specular = vec3(specular)
        self.constant_attenuation = constant_attenuation
        self.linear_attenuation = linear_attenuation
        self.quadratic_attenuation = quadratic_attenuation
        self.exponent = exponent
        self.cutoff = cutoff

        self.cast_shadow = cast_shadow

        # Create the internal LookAt component
        self._lookat = lookat.LookAt()
        self._lookat.name = "GLTargetSpot_LookAt"
        self._lookat.output_slot.connect(self.rot_slot)
        self.pos_slot.connect(self._lookat.pos_slot)
        self.target_slot.connect(self._lookat.target_slot)
Example #15
def main():
    # Generate OSG model from the XML file
    stim_xml_filename = 'nopost.xml'
    stim_xml = xml_stimulus.xml_stimulus_from_filename(stim_xml_filename)
    stim_xml_osg = xml_stimulus_osg.StimulusWithOSG( stim_xml.get_root() )

    # Simulation parameters
    hz = 60.0 # fps
    dt = 1.0/hz

    # Use to generate pose and velocities
    path_maker = PathMaker(vel_0=cgtypes.vec3(0.100,0,0),
                           pos_0=cgtypes.vec3(0.000,0.000,0.150))


    with stim_xml_osg.OSG_model_path() as osg_model_path:
        vision = fsee.Observer.Observer(model_path=osg_model_path,
                                        # scale=1000.0, # from meters to mm
                                        hz=hz,
                                        skybox_basename=None,
                                        full_spectrum=True,
                                        optics='buchner71',
                                        do_luminance_adaptation=False,
                                        )
        t = -dt 
        count = 0
        y_old = None
        cur_pos = None
        cur_ori = None
 
        while count < 1000:
            t += dt
            count += 1

            cur_pos, cur_ori, vel, angular_vel = path_maker.step(t)
            print(cur_pos)

            vision.step(cur_pos,cur_ori)

            R=vision.get_last_retinal_imageR()
            G=vision.get_last_retinal_imageG()
            B=vision.get_last_retinal_imageB()
            y = (R+G+B)/3.0/255
            
            if y_old is not None:
                ydot = (y - y_old) / dt
            y_old = y
                    
            # Get Q_dot and mu from body velocities
            # WARNING: Getting Q_dot and mu can be very slow
            qdot, mu = vision.get_last_retinal_velocities(vel, angular_vel)
Example #16
 def generate(self, concrete):
     """generate 1 structure triangle to be able to check validity"""
     parent = concrete.add_child(None, "parent")
     if "pads" in self.room.values:
         for pad in self.room.values.pads:
             concrete.add_child("parent", pad.pad_id)
     index0 = parent.add_structure_points(
         [cgtypes.vec3(0),
          cgtypes.vec3(1),
          cgtypes.vec3(2)])
     parent.add_structure_faces(index0, [[0, 1, 2], [3, 4, 5], [6, 7, 8]],
                                [concrete_room.Node.CATEGORY_PHYSICS],
                                [concrete_room.Node.HINT_BUILDING], [0])
Example #17
    def __init__(self):
        object.__init__(self)

        # Handedness ('r' or 'l')
        self._handedness = "r"
        # Normalized up direction
        self._up = vec3(0, 0, 1)
        # Unit   (1 unit = unitscale [Unit])
        self._unit = "m"
        # Unit scale
        self._unitscale = 1.0

        # Global options
        self._globals = {}

        self.items = []
        #        self.items_by_name = {}

        self._worldroot = _core.WorldObject("WorldRoot")
        self._timer = timer.Timer(auto_insert=False)

        # Key: ID  Value:Joystick object
        self._joysticks = {}

        self.clear()
Example #18
    def step(self, im_getter, pos_vec3, ori_quat ):
        self.last_environment_map = None # clear cached image if it exists...

        if type(pos_vec3) != cgtypes.vec3:
            pos_vec3 = cgtypes.vec3( *pos_vec3 )

        if type(ori_quat) != cgtypes.quat:
            ori_quat = cgtypes.quat( *ori_quat )

        # get direction of each face of the environment map cube as quat
        for fn, viewdir in self.viewdirs.items():
            face_dir_quat = ori_quat*viewdir
            im=im_getter( pos_vec3, face_dir_quat )
            self.last_optical_images[fn] = im

        whole_env_mapG = numpy.concatenate( [numpy.ravel(self.last_optical_images[fn][:,:,1])
                                             for fn in self.cube_order], axis=0 )
        self.responsesG = self.rweights*whole_env_mapG # sparse matrix times vector = vector

        if self.full_spectrum:
            whole_env_mapR = numpy.concatenate( [numpy.ravel(self.last_optical_images[fn][:,:,0])
                                                 for fn in self.cube_order], axis=0 )
            self.responsesR = self.rweights * whole_env_mapR
            whole_env_mapB = numpy.concatenate( [numpy.ravel(self.last_optical_images[fn][:,:,2])
                                                 for fn in self.cube_order], axis=0 )
            self.responsesB = self.rweights * whole_env_mapB

        self.emd_sim.step( self.responsesG )
Example #19
    def step(self, pos_vec3, ori_quat):
        if type(pos_vec3) != cgtypes.vec3:
            pos_vec3 = cgtypes.vec3( *pos_vec3 )

        if type(ori_quat) != cgtypes.quat:
            ori_quat = cgtypes.quat( *ori_quat )

        # calculate photoreceptors, EMDs, etc...
        self.cvs.step( self.sim.get_flyview, pos_vec3, ori_quat )

        self.last_pos_vec3 = pos_vec3
        self.last_ori_quat = ori_quat

        if self.plot_realtime:
            if self.full_spectrum:
                R = self.get_last_retinal_imageR()
                G = self.get_last_retinal_imageG()
                B = self.get_last_retinal_imageB()
            else:
                R = self.get_last_retinal_imageG()
                G = self.get_last_retinal_imageG()
                B = self.get_last_retinal_imageG()
            A = numpy.ones( B.shape, dtype=numpy.float32 )
            RGBA = numpy.vstack([R,G,B,A]).T
            RGBA[:,:3] = RGBA[:,:3]/255.0


            for num,eye_name in enumerate(self.rp.get_eye_names()):
                slicer = self.rp.get_slicer( eye_name )
                self.sim.set_eyemap_face_colors(num,RGBA[slicer,:])
Example #20
    def __init__(self):
        object.__init__(self)

        # Handedness ('r' or 'l')
        self._handedness = "r"
        # Normalized up direction
        self._up = vec3(0, 0, 1)
        # Unit   (1 unit = unitscale [Unit])
        self._unit = "m"
        # Unit scale
        self._unitscale = 1.0

        # Global options
        self._globals = {}

        self.items = []
        #        self.items_by_name = {}

        self._worldroot = _core.WorldObject("WorldRoot")
        self._timer = timer.Timer(auto_insert=False)

        # Key: ID  Value:Joystick object
        self._joysticks = {}

        self.clear()
Example #21
    def _setUp(self, up):
        """Set the up vector.

        This method is used for setting the \a up property.

        \param up (\c vec3) Up vector
        """
        self._up = vec3(up).normalize()
Example #22
def pos_ori2fu(pos, ori):
    """convert position (xyz) vector and orientation (wxyz) quat into focal_point and view_up"""
    pos = cgtypes.vec3(pos)
    ori = cgtypes.quat(ori)
    if np.isnan(ori.w):
        raise ValueError('ori is nan')
    forward = cgtypes.vec3((0, 0, -1))
    up = cgtypes.vec3((0, 1, 0))
    mori = ori.toMat4().inverse()
    #view_forward_dir = rotate(ori,forward)
    view_forward_dir = mori * forward
    focal_point = pos + view_forward_dir
    if np.isnan(focal_point[0]):
        raise ValueError('focal point is nan')
    #view_up_dir = rotate(ori,up)
    view_up_dir = mori * up
    return focal_point, view_up_dir
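A minimal usage sketch for pos_ori2fu, not taken from the original project; it assumes the cgkit-1.x style "import cgtypes" used elsewhere in these examples:
# Hypothetical call: identity orientation at the origin.
pos = (0.0, 0.0, 0.0)
ori = cgtypes.quat(1.0, 0.0, 0.0, 0.0)  # wxyz identity quaternion
focal_point, view_up = pos_ori2fu(pos, ori)
# With the identity orientation the camera looks down -z, so focal_point is
# (0, 0, -1) and view_up is (0, 1, 0).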
Example #23
    def _setUp(self, up):
        """Set the up vector.

        This method is used for setting the \a up property.

        \param up (\c vec3) Up vector
        """
        self._up = vec3(up).normalize()
Example #24
 def readSURF_TFAL(self, length):
     """TFAL chunk."""
     if length!=12:
         raise LWOBError("Invalid TFAL size (%d instead of 12)"%length)
     if self._current_tex==None:
         raise LWOBError("Invalid position of the TFAL chunk")
     data = self.readNBytes(12)
     v = vec3(struct.unpack(">fff", data))
     self._current_tex.falloff = v
Example #25
def vspnoise(*args):
    """vspnoise(point, period) / vspnoise(point, t, pperiod, tperiod) -> noiseval

    Periodic noise function. Basically this is the same than vsnoise
    but with a periodic return value: vspnoise(point) = vspnoise(point+period).
    The time value can be either part of the point or it can be
    specified separately. The point and period must always have the
    same dimension. The components of the return value are in the range
    from -1 to 1.
    """

    n = len(args)
    if n == 2:
        v, pv = args
        try:
            m = len(v)
        except:
            return _vspnoise(v, pv)
        if m == 1:
            return _vspnoise(v[0], pv[0])
        elif m == 2:
            x, y = v
            px, py = pv
            return _vspnoise(x, y, px, py)
        elif m == 3:
            x, y, z = v
            px, py, pz = pv
            return _vspnoise(x, y, z, px, py, pz)
        elif m == 4:
            x, y, z, t = v
            px, py, pz, pt = pv
            return _vspnoise(x, y, z, t, px, py, pz, pt)
        else:
            raise ValueError("Invalid arguments")

    elif n == 4:
        v, t, pv, pt = args
        try:
            m = len(v)
        except:
            return _vspnoise(v, t, pv, pt)
        if m == 1:
            return _vspnoise(v[0], t, pv[0], pt)
        elif m == 2:
            x, y = v
            px, py = pv
            return _vspnoise(x, y, t, px, py, pt)
        elif m == 3:
            x, y, z = v
            px, py, pz = pv
            res = _vspnoise(x, y, z, t, px, py, pz, pt)
            return vec3(res.x, res.y, res.z)
        else:
            raise ValueError("Invalid arguments")

    else:
        raise TypeError("only 2 or 4 arguments allowed")
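A small, hypothetical call illustrating the two-argument form described in the docstring; it assumes cgkit's noise module (which provides the _vspnoise backend) is importable:
# 3D point with a matching 3D period; each component of the result lies in [-1, 1].
val = vspnoise((0.25, 0.5, 0.75), (1, 1, 1))
# Per the docstring, shifting the point by one full period returns the same value.
same = vspnoise((1.25, 1.5, 1.75), (1, 1, 1))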
Example #26
 def readSURF_TVEL(self, length):
     """TVEL chunk."""
     if length!=12:
         raise LWOBError("Invalid TVEL size (%d instead of 12)"%length)
     if self._current_tex==None:
         raise LWOBError("Invalid position of the TVEL chunk")
     data = self.readNBytes(12)
     v = vec3(struct.unpack(">fff", data))
     self._current_tex.velocity = v
Example #27
def vspnoise(*args):
    """vspnoise(point, period) / vspnoise(point, t, pperiod, tperiod) -> noiseval

    Periodic noise function. Basically this is the same than vsnoise
    but with a periodic return value: vspnoise(point) = vspnoise(point+period).
    The time value can be either part of the point or it can be
    specified separately. The point and period must always have the
    same dimension. The components of the return value are in the range
    from -1 to 1.
    """
    
    n = len(args)
    if n==2:
        v, pv = args
        try:
            m = len(v)
        except:
            return _vspnoise(v, pv)
        if m==1:
            return _vspnoise(v[0], pv[0])
        elif m==2:
            x,y = v
            px,py = pv
            return _vspnoise(x,y,px,py)
        elif m==3:
            x,y,z = v
            px,py,pz = pv
            return _vspnoise(x,y,z,px,py,pz)
        elif m==4:
            x,y,z,t = v
            px,py,pz,pt = pv
            return _vspnoise(x,y,z,t,px,py,pz,pt)
        else:
            raise ValueError("Invalid arguments")
        
    elif n==4:
        v,t,pv,pt = args
        try:
            m = len(v)
        except:
            return _vspnoise(v, t, pv, pt)
        if m==1:
            return _vspnoise(v[0], t, pv[0], pt)
        elif m==2:
            x,y = v
            px,py = pv
            return _vspnoise(x,y,t,px,py,pt)
        elif m==3:
            x,y,z = v
            px,py,pz = pv
            res = _vspnoise(x,y,z,t,px,py,pz,pt)
            return vec3(res.x, res.y, res.z)
        else:
            raise ValueError("Invalid arguments")
    
    else:
        raise TypeError("only 2 or 4 arguments allowed")
Example #28
    def __init__(self,
                 name="GLDistantLight",
                 parent=None,
                 enabled=True,
                 intensity=1.0,
                 ambient=None,
                 diffuse=None,
                 specular=None,
                 target=vec3(0,0,0),
                 cast_shadow = False,
                 auto_insert=True,
                 **params
                 ):
        exec(_preInitWorldObject)
        _core.GLDistantLight.__init__(self, name)

        _initWorldObject(self, name=name, parent=parent,
                         auto_insert=auto_insert,
                         **params)

        target = vec3(target)

        # Target
        self.target_slot = Vec3Slot(target)
        self.addSlot("target", self.target_slot)

        self.enabled = enabled
        self.intensity = intensity
        if ambient!=None:
            self.ambient = vec3(ambient)
        if diffuse!=None:
            self.diffuse = vec3(diffuse)
        if specular!=None:
            self.specular = vec3(specular)

        self.cast_shadow = cast_shadow

        # Create the internal LookAt component
        self._lookat = lookat.LookAt()
        self._lookat.name = "GLTargetDistant_LookAt"
        self._lookat.output_slot.connect(self.rot_slot)
        self.pos_slot.connect(self._lookat.pos_slot)
        self.target_slot.connect(self._lookat.target_slot)
Example #29
    def __init__(self,
                 name="GLDistantLight",
                 parent=None,
                 enabled=True,
                 intensity=1.0,
                 ambient=None,
                 diffuse=None,
                 specular=None,
                 target=vec3(0,0,0),
                 cast_shadow = False,
                 auto_insert=True,
                 **params
                 ):

        _initWorldObject(self, baseClass=_core.GLDistantLight,
                         name=name, parent=parent,
                         auto_insert=auto_insert,
                         **params)

        target = vec3(target)

        # Target
        self.target_slot = Vec3Slot(target)
        self.addSlot("target", self.target_slot)

        self.enabled = enabled
        self.intensity = intensity
        if ambient!=None:
            self.ambient = vec3(ambient)
        if diffuse!=None:
            self.diffuse = vec3(diffuse)
        if specular!=None:
            self.specular = vec3(specular)

        self.cast_shadow = cast_shadow

        # Create the internal LookAt component
        self._lookat = lookat.LookAt()
        self._lookat.name = "GLTargetDistant_LookAt"
        self._lookat.output_slot.connect(self.rot_slot)
        self.pos_slot.connect(self._lookat.pos_slot)
        self.target_slot.connect(self._lookat.target_slot)
Example #30
def precompute_ring_configuration(conf_name, fov_degrees, num_receptors):
    fov = deg2rad(fov_degrees)
    thetas = numpy.linspace( -fov/2, +fov/2, num_receptors)
    
    receptor_dirs = [ cgtypes.vec3(cos(theta), 0.01*sin(100*theta), sin(theta) ) 
        for theta in thetas]

    tris = [(n,n+1,n+2) for n in range(0, num_receptors-2)]
    # BUG: otherwise pseudo_voronoi does not terminate (?)
    tris.reverse()
    precompute_generic(conf_name, receptor_dirs, tris)
Example #31
 def __init__(self, position=[0, 0, 0], direction=[1, 0, 0], rotation=[0, -1, 0]):
     """
     `position': The [x, y, z] coordinate position of the turtle in Euclidean space.
     `direction': The [i, j, k] unit vector for the turtle's direction.
     `rotation': The [i, j, k] unit vector for the turtle's rotation (direction the shell faces)
     """
     self._pos = self.setPos(*position)  # Floating point representation of turtle's position
     self._tilepos = self.getTilePos()  # Integer representation of turtle's position
     self._dir = self.setDir(*direction)
     self._rot = vec3(rotation)
     self._blocktype = 'GRASS'
     self._pendown = True
Example #32
    def __init__(self,
                 name="GLSpotLight",
                 parent=None,
                 enabled=True,
                 intensity=1.0,
                 ambient=None,
                 diffuse=None,
                 specular=None,
                 constant_attenuation=1.0,
                 linear_attenuation=0.0,
                 quadratic_attenuation=0.0,
                 exponent=0.0,
                 cutoff=45.0,
                 cast_shadow=False,
                 auto_insert=True,
                 **params):
        exec(_preInitWorldObject)
        _core.GLSpotLight.__init__(self, name)

        _initWorldObject(self,
                         name=name,
                         parent=parent,
                         auto_insert=auto_insert,
                         **params)

        self.enabled = enabled
        self.intensity = intensity
        if ambient != None:
            self.ambient = vec3(ambient)
        if diffuse != None:
            self.diffuse = vec3(diffuse)
        if specular != None:
            self.specular = vec3(specular)
        self.constant_attenuation = constant_attenuation
        self.linear_attenuation = linear_attenuation
        self.quadratic_attenuation = quadratic_attenuation
        self.exponent = exponent
        self.cutoff = cutoff
        self.cast_shadow = cast_shadow
Example #33
    def __init__(self,
                 name="GLSpotLight",
                 parent=None,
                 enabled=True,
                 intensity=1.0,
                 ambient=None,
                 diffuse=None,
                 specular=None,
                 constant_attenuation=1.0,
                 linear_attenuation=0.0,
                 quadratic_attenuation=0.0,
                 exponent=0.0,
                 cutoff=45.0,
                 cast_shadow = False,
                 auto_insert=True,
                 **params
                 ):
        _initWorldObject(self, baseClass=_core.GLSpotLight,
                         name=name, parent=parent,
                         auto_insert=auto_insert,
                         **params)

        self.enabled = enabled
        self.intensity = intensity
        if ambient!=None:
            self.ambient = vec3(ambient)
        if diffuse!=None:
            self.diffuse = vec3(diffuse)
        if specular!=None:
            self.specular = vec3(specular)
        self.constant_attenuation = constant_attenuation
        self.linear_attenuation = linear_attenuation
        self.quadratic_attenuation = quadratic_attenuation
        self.exponent = exponent
        self.cutoff = cutoff
        self.cast_shadow = cast_shadow
Example #34
def make_look_at(eye, center, up):
    # similar to gluLookAt.
    # this was made from osg/Matrix_implementation.cpp
    eye = cgtypes.vec3(*eye)
    center = cgtypes.vec3(*center)
    up = cgtypes.vec3(*up)

    f = center-eye
    f=f.normalize()

    s=f.cross(up)
    s=s.normalize()

    u=s.cross(f)
    u=u.normalize()

    arr = np.array( [[s[0], u[0], -f[0], 0],
                     [s[1], u[1], -f[1], 0],
                     [s[2], u[2], -f[2], 0],
                     [0,0,0,1]], dtype=float).T
    trans = np.eye(4)
    trans[:3,3] = [-eye[0], -eye[1], -eye[2]]
    result = np.dot( arr, trans)
    return result
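A short usage sketch for make_look_at, not from the original source; it assumes "import numpy as np" and "import cgtypes" as in the function above:
# Camera at (0, 0, 10) looking at the origin with +y up, as with gluLookAt.
view = make_look_at(eye=(0.0, 0.0, 10.0), center=(0.0, 0.0, 0.0), up=(0.0, 1.0, 0.0))
# view is a 4x4 numpy array mapping world coordinates into eye coordinates.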
Example #35
def test_cgmat2np():
    point1 = (1, 0, 0)
    point1_out = (0, 1, 0)

    cg_quat = cgtypes.quat().fromAngleAxis(90.0 * D2R, (0, 0, 1))
    cg_in = cgtypes.vec3(point1)

    m_cg = cg_quat.toMat3()
    cg_out = m_cg * cg_in
    cg_out_tup = (cg_out[0], cg_out[1], cg_out[2])
    assert np.allclose(cg_out_tup, point1_out)

    m_np = cgmat2np(m_cg)
    np_out = np.dot(m_np, point1)
    assert np.allclose(np_out, point1_out)
Example #36
 def readPNTS(self, length):
     """Read a PNTS chunk and call the PNTS handler.
     """
     if length%12!=0:
         raise LWOBError("Invalid PNTS chunk size (%d)"%length)
     
     numpnts = int(length/12)
     # Read the chunk data...
     data = self.readNBytes(length)
     # Unpack the coordinates...
     coords = struct.unpack(">%s"%(numpnts*"fff"), data)
     # Initialize vec3s...
     pnts = []
     for i in range(numpnts):
         pnts.append(vec3(coords[i*3:(i+1)*3]))
         
     self.handlePNTS(pnts)
Example #37
 def __init__(self,
              position=[0, 0, 0],
              direction=[1, 0, 0],
              rotation=[0, -1, 0]):
     """
     `position': The [x, y, z] coordinate position of the turtle in Euclidean space.
     `direction': The [i, j, k] unit vector for the turtle's direction.
     `rotation': The [i, j, k] unit vector for the turtle's rotation (direction the shell faces)
     """
     self._pos = self.setPos(
         *position)  # Floating point representation of turtle's position
     self._tilepos = self.getTilePos(
     )  # Integer representation of turtle's position
     self._dir = self.setDir(*direction)
     self._rot = vec3(rotation)
     self._blocktype = 'GRASS'
     self._pendown = True
Example #38
def RiuHeightfield(image):
    if not _PIL_installed:
        raise ImportError("the Python Imaging Library (PIL) is not installed")
    
    if type(image)==types.StringType or type(image)==types.UnicodeType:
        image = Image.open(image)

    points = []
    width, height = image.size
    for j in range(height):
        y = float(j)/(height-1)
        for i in range(width):
            x = float(i)/(height-1)
            col=image.getpixel((i,j))
            z=col[0]/255.0
            p=vec3(x,y,z)
            points.append(p)

    RiPatchMesh(RI_BILINEAR, width, RI_NONPERIODIC, height, RI_NONPERIODIC, P=points)
Example #39
 def forward(self, steps):
     """
     Generate a list of integer coordinates that represent a forward movement along
     the current direction.
     """
     # coords is a list of all the steps that the turtle needs to take
     coords = []
     # Split the number of steps into increasingly larger distances, starting with 1 and ending
     # with the final destination
     for i in range(steps):
         i += 1
         # Multiply the current step distance by the direction unit vector
         # and round down to an integer.
         coords.append((int(round(self.x + i * self._dir[0])),
                        int(round(self.y + i * self._dir[1])),
                        int(round(self.z + i * self._dir[2]))))
     self._coords = coords
     self._pos += vec3(*[steps * coord for coord in self._dir])
     self.x, self.y, self.z = self._pos
     self._tilepos = self.getTilePos()
Example #40
 def forward(self, steps):
     """
     Generate a list of integer coordinates that represent a forward movement along
     the current direction.
     """
     # coords is a list of all the steps that the turtle needs to take
     coords = []
     # Split the number of steps into increasingly larger distances, starting with 1 and ending
     # with the final destination
     for i in range(steps):
         i += 1
         # Multiply the current step distance by the direction unit vector
         # and round down to an integer.
         coords.append((
             int(round(self.x + i*self._dir[0])),
             int(round(self.y + i*self._dir[1])),
             int(round(self.z + i*self._dir[2]))
         ))
     self._coords = coords
     self._pos += vec3(*[steps*coord for coord in self._dir])
     self.x, self.y, self.z = self._pos
     self._tilepos = self.getTilePos()
Example #41
def test_repr():
    x = repr_vec3(1,2,3.0000001)
    ra = repr(x)
    x2 = eval(ra)
    assert x2.z == x.z

    y = [cgtypes.vec3(1,2,3.0000001)]
    y2 = list(map(make_repr_able,y))
    assert y[0].z == y2[0].z

    x = repr_quat(0.1,1,2,3.0000001)
    ra = repr(x)
    x2 = eval(ra)
    assert x2.z == x.z

    y = [cgtypes.quat(0.1,1,2,3.0000001)]
    y2 = list(map(make_repr_able,y))
    assert y[0].z == y2[0].z

    y3 = [y]
    y4 = list(map(make_repr_able,y3))
    assert y3[0][0].z == y4[0][0].z
Example #42
File: util.py Project: K8H/fsee
def project_angular_velocity_to_emds(angular_vels, receptor_dirs, edges):
    # angular_vels comes from same data as edges
    assert len(angular_vels) == len( edges )

    emd_outputs = []
    zero_vec = cgtypes.vec3(0,0,0)
    for (vi0,vi1),angular_vel in zip(edges,angular_vels):
        if angular_vel == zero_vec:
            emd_outputs.append( 0.0 )
            continue
        # find center of EMD correlation pair and its preferred direction
        v0 = receptor_dirs[vi0]
        v1 = receptor_dirs[vi1]
        emd_lpd_dir = (v0-v1).normalize() # local preferred direction
        emd_pos = ((v0+v1)*0.5).normalize() # EMD center

        # project angular velocity onto EMD baseline
        tangential_vel = angular_vel.cross( emd_pos )
        theta = emd_lpd_dir.angle( tangential_vel )
        projected_mag = abs(tangential_vel)*math.cos( theta )
        emd_outputs.append( projected_mag )
    return emd_outputs
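A toy, hypothetical call for project_angular_velocity_to_emds, assuming the cgkit-1.x cgtypes module used elsewhere in this project:
# Two receptor directions 90 degrees apart, one EMD edge between them,
# and a pure rotation about the +z axis.
dirs = [cgtypes.vec3(1, 0, 0), cgtypes.vec3(0, 1, 0)]
edges = [(0, 1)]
omegas = [cgtypes.vec3(0, 0, 1)]
print(project_angular_velocity_to_emds(omegas, dirs, edges))  # one projected magnitude per edge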
Example #43
def test_repr():
    x = repr_vec3(1, 2, 3.0000001)
    ra = repr(x)
    x2 = eval(ra)
    assert x2.z == x.z

    y = [cgtypes.vec3(1, 2, 3.0000001)]
    y2 = list(map(make_repr_able, y))
    assert y[0].z == y2[0].z

    x = repr_quat(0.1, 1, 2, 3.0000001)
    ra = repr(x)
    x2 = eval(ra)
    assert x2.z == x.z

    y = [cgtypes.quat(0.1, 1, 2, 3.0000001)]
    y2 = list(map(make_repr_able, y))
    assert y[0].z == y2[0].z

    y3 = [y]
    y4 = list(map(make_repr_able, y3))
    assert y3[0][0].z == y4[0][0].z
Example #44
def RiuHeightfield(image):
    if not _PIL_installed:
        raise ImportError("the Python Imaging Library (PIL) is not installed")

    if type(image) == types.StringType or type(image) == types.UnicodeType:
        image = Image.open(image)

    points = []
    width, height = image.size
    for j in range(height):
        y = float(j) / (height - 1)
        for i in range(width):
            x = float(i) / (height - 1)
            col = image.getpixel((i, j))
            z = col[0] / 255.0
            p = vec3(x, y, z)
            points.append(p)

    RiPatchMesh(RI_BILINEAR,
                width,
                RI_NONPERIODIC,
                height,
                RI_NONPERIODIC,
                P=points)
Example #45
# Copyright (C) 2005-2008 California Institute of Technology, All rights reserved
import os

import cgtypes

import fsee
import fsee.Observer

pos_vec3 = cgtypes.vec3( 0, 0, 0)
ori_quat = cgtypes.quat( 1, 0, 0, 0)

model_path = os.path.join(fsee.data_dir,"models/tunnel_leftturn/tunnel.osg")
vision = fsee.Observer.Observer(model_path=model_path,
                                hz=200.0,
                                optics='synthetic',
                                do_luminance_adaptation=False,
                                )
vision.step(pos_vec3,ori_quat)
Example #46
 def setDir(self, i, j, k):
     self._dir = vec3([self.correctNearZero(coord) for coord in [i, j, k]])
     return self._dir
Example #47
 def getTilePos(self):
     return vec3(int(round(self.x)), int(round(self.y)), int(round(self.z)))
Example #48
 def getPos(self):
     return vec3(self.x, self.y, self.z)
Example #49
def make_receptor_sensitivities(all_d_q, delta_rho_q=None, res=64):
    """

    all_d_q are visual element directions as a 3-vector
    delta_rho_q (angular sensitivity) is in radians

    """
    if delta_rho_q is None:
        raise ValueError('must specify delta_rho_q (in radians)')

    if isinstance(delta_rho_q, float):
        all_delta_rho_qs = delta_rho_q * numpy.ones(
            (len(all_d_q), ), dtype=numpy.float64)
    else:
        all_delta_rho_qs = numpy.asarray(delta_rho_q)
        if len(all_delta_rho_qs.shape) != 1:
            raise ValueError("delta_rho_q must be scalar or vector")
        if all_delta_rho_qs.shape[0] != len(all_d_q):
            raise ValueError("if delta_rho_q is a vector, "
                             "it must have the same number of "
                             "elements as receptors")

    def G_q(zeta, delta_rho_q):
        # gaussian
        # From Snyder (1979) as cited in Burton & Laughlin (2003)
        return numpy.exp(-4 * math.log(2) * abs(zeta)**2 / delta_rho_q**2)

    half_res = res // 2
    vals = (numpy.arange(res) - half_res) / half_res

    weight_maps = []

    # setup vectors for initial face (posx)
    face_vecs = {}
    face_vecs['posx'] = []
    x = 1
    for z in vals:
        this_row_vecs = []
        for y in vals:
            on_cube_3d = (x, y, z)
            #print('on_cube_3d   %5.2f %5.2f %5.2f'%on_cube_3d)
            v3norm = normalize(on_cube_3d)  # get direction of each pixel
            p_p = cgtypes.quat(0.0, v3norm[0], v3norm[1], v3norm[2])
            this_row_vecs.append(p_p)
        this_row_vecs.reverse()
        face_vecs['posx'].append(this_row_vecs)

    def rot_face(facedict, facename, rotq):
        facedict[facename] = []
        for row in facedict['posx']:
            this_row_vecs = []
            for col in row:
                this_row_vecs.append(rotq * col * rotq.inverse())
            facedict[facename].append(this_row_vecs)

    rotq = cgtypes.quat()
    rotq = rotq.fromAngleAxis(math.pi / 2.0, cgtypes.vec3(0, 0, 1))
    rot_face(face_vecs, 'posy', rotq)

    rotq = cgtypes.quat()
    rotq = rotq.fromAngleAxis(math.pi, cgtypes.vec3(0, 0, 1))
    rot_face(face_vecs, 'negx', rotq)

    rotq = cgtypes.quat()
    rotq = rotq.fromAngleAxis(-math.pi / 2.0, cgtypes.vec3(0, 0, 1))
    rot_face(face_vecs, 'negy', rotq)

    rotq = cgtypes.quat()
    rotq = rotq.fromAngleAxis(math.pi / 2.0, cgtypes.vec3(0, -1, 0))
    rot_face(face_vecs, 'posz', rotq)

    rotq = cgtypes.quat()
    rotq = rotq.fromAngleAxis(math.pi / 2.0, cgtypes.vec3(0, 1, 0))
    rot_face(face_vecs, 'negz', rotq)

    # convert from quat to vec3
    rfv = {}
    for key in face_vecs:
        rows = face_vecs[key]
        rfv[key] = []
        for row in rows:
            this_row = [cgtypes.vec3(col.x, col.y, col.z)
                        for col in row]  # convert to vec3
            rfv[key].append(this_row)

    def get_weight_map(fn, rfv, d_q, delta_rho_q):
        angles = numpy.zeros((vals.shape[0], vals.shape[0]),
                             dtype=numpy.float64)
        for i, row_vecs in enumerate(rfv[fn]):
            for j, ovec in enumerate(row_vecs):
                angles[i, j] = d_q.angle(ovec)
        wm = G_q(angles, delta_rho_q)
        return wm

    for dqi, (d_q,
              this_delta_rho_q) in enumerate(zip(all_d_q, all_delta_rho_qs)):
        weight_maps_d_q = {}
        ssf = 0.0

        for fn in cube_order:
            wm = get_weight_map(fn, rfv, d_q, this_delta_rho_q)
            weight_maps_d_q[fn] = wm
            ssf += numpy.sum(wm.flat)

        # normalize
        for mapname in weight_maps_d_q:
            wm = weight_maps_d_q[mapname]
            weight_maps_d_q[mapname] = wm / ssf

        # save maps by receptor direction
        weight_maps.append(weight_maps_d_q)
    return weight_maps
Example #50
body_size = 5
lumpyness = 1
hair_length = 2
hair_count = 10000
hair_wavyness = 1

control_point_counts = []
control_points = []
widths = []

seed(12345)

for i in range(hair_count):
	control_point_counts.append(4)

	v = vec3(random() - 0.5, random() - 0.5, random() - 0.5).normalize()

	p1 = v * body_size
	p1 += vsnoise(p1) * lumpyness

	p4 = p1 + v * hair_length
	p4 += vsnoise(p4) * hair_wavyness

	p2 = mix(p1, p4, 0.2)
	p2 += vsnoise(p2)

	p3 = mix(p1, p4, 0.8)
	p3 += vsnoise(p3)

	control_points.append(p1)
	control_points.append(p2)
Example #51
def main():
    Mforward = get_rot_mat(-numpy.pi/2,1,0,0)
    scale = numpy.eye(3)
    scale[2,2]=-1
    Mforward = numpy.dot(Mforward,scale)
    xform_my_long_lat_2_heisenberg = LongLatRotator(Mforward)
    Mreverse = numpy.linalg.inv(Mforward)
    xform_heisenberg_long_lat_2_my = LongLatRotator(Mreverse)

    ## triangulate data ###############################

    left_tri = delaunay.Triangulation(x, y)

    ## transform data to long & lat ###################
    hlong,hlat,hR = xform_stereographic_2_long_lat(x,y)
    long,lat,R = xform_heisenberg_long_lat_2_my(hlong,hlat,hR)

    ## put in form similar to output of make_receptor_info #
    left_receptor_dirs = numpy.asarray(long_lat2xyz(long,lat,R))
    left_receptor_dirs = numpy.transpose( left_receptor_dirs )
    left_receptor_dirs = [cgtypes.vec3(v) for v in left_receptor_dirs]
    left_triangles = left_tri.triangle_nodes
    left_ordered_tri_idxs = my_voronoi(left_tri,x,y)
    left_hex_faces = []
    for center_vert_idx in range(len(left_receptor_dirs)):
        center_vert = left_receptor_dirs[center_vert_idx]
        this_ordered_tri_idxs = left_ordered_tri_idxs[center_vert_idx]
        this_face = []
        for tri_idx in this_ordered_tri_idxs:
            if tri_idx == -1:
                this_vert = center_vert
            else:
                nodes = left_triangles[tri_idx]
                this_vert = (left_receptor_dirs[int(nodes[0])]+
                             left_receptor_dirs[int(nodes[1])]+
                             left_receptor_dirs[int(nodes[2])])*(1.0/3.0)
            this_face.append(this_vert)
        left_hex_faces.append(this_face)

    ###############################
    # duplicate for right eye

    right_receptor_dirs = [cgtypes.vec3((v.x,-v.y,v.z)) for v in left_receptor_dirs]
    receptor_dirs = left_receptor_dirs + right_receptor_dirs

    right_idx_offset = len(left_receptor_dirs)

    right_triangles = []
    for tri in left_triangles:
        newtri = []
        for idx in tri:
            newtri.append( idx+right_idx_offset )
        right_triangles.append(newtri)
    triangles = list(left_triangles) + right_triangles

    right_hex_faces = []
    for face in left_hex_faces:
        newface = []
        for v in face:
            newface.append( cgtypes.vec3((v.x,-v.y,v.z)) )
        right_hex_faces.append(newface)
    hex_faces = list(left_hex_faces) + right_hex_faces

    ###############################

    receptor_dir_slicer = {None:slice(0,len(receptor_dirs),1),
                           'left':slice(0,right_idx_offset,1),
                           'right':slice(right_idx_offset,len(receptor_dirs),1)}

    ###############################

    print('calculating interommatidial distances')
    delta_phi = get_mean_interommatidial_distance(receptor_dirs,triangles)
    delta_rho_q = numpy.asarray(delta_phi) * 1.1 # rough approximation. follows from caption of Fig. 18, Buchner, 1984 (in Ali)

    # make optical lowpass filters

    print('calculating weight_maps...')
    weight_maps_64 = make_receptor_sensitivities( receptor_dirs,
                                                  delta_rho_q=delta_rho_q,
                                                  res=64 )
    print('done')

    clip_thresh=1e-5
    floattype=numpy.float32
    tmp_weights = flatten_cubemap( weight_maps_64[0] ) # get first one to take size

    n_receptors = len(receptor_dirs)
    len_wm = len(tmp_weights)

    print('allocating memory...')
    bigmat_64 = numpy.zeros( (n_receptors, len_wm), dtype=floattype )
    print('done')

    print('flattening, clipping, casting...')
    for i, weight_cubemap in enumerate(weight_maps_64):
        weights = flatten_cubemap( weight_cubemap )
        if clip_thresh is not None:
            weights = numpy.choose(weights<clip_thresh,(weights,0))
        bigmat_64[i,:] = weights.astype( bigmat_64.dtype )
    print('done')

    print('worst gain (should be unity)',min(numpy.sum( bigmat_64, axis=1)))
    print('filling spmat_64...')
    sys.stdout.flush()
    spmat_64 = scipy.sparse.csc_matrix(bigmat_64)
    print('done')

    M,N = bigmat_64.shape
    print('Compressed to %d of %d'%(len(spmat_64.data),M*N))

    ######################

    fd = open('receptor_directions_buchner71.csv','w')
    writer = csv.writer( fd )
    for row in receptor_dirs:
        writer.writerow( row )
    fd.close()

    fd = open('precomputed_buchner71.py','w')
    fd.write( '# -*- coding: utf-8 -*-\n')
    fd.write( '# Automatically generated. Do not modify this file.\n')
    fd.write( 'from __future__ import print_function\n')
    fd.write( 'import numpy\n')
    fd.write( 'import scipy.sparse\n')
    fd.write( 'import scipy.io\n')
    fd.write( 'import os\n')
    fd.write( 'datadir = os.path.split(__file__)[0]\n')
    fd.write( 'cube_order = %s\n'%repr(cube_order) )
    fd.write( 'from cgtypes import vec3, quat #cgkit 1.x\n')
    save_as_python(fd, receptor_dir_slicer, 'receptor_dir_slicer',
                   fname_extra='_buchner71' )
    save_as_python(fd, spmat_64, 'receptor_weight_matrix_64', fname_extra='_buchner71' )
    save_as_python(fd, list(map(make_repr_able,receptor_dirs)), 'receptor_dirs', fname_extra='_buchner71' )
    st = [ tuple(t) for t in triangles]
    save_as_python(fd, st,  'triangles', fname_extra='_buchner_1971' )
    save_as_python(fd, list(map(make_repr_able,hex_faces)), 'hex_faces', fname_extra='_buchner_1971' )
    fd.write( '\n')
    fd.write( '\n')
    fd.write( '\n')

    extra = open('plot_receptors_vtk.py','r').read()
    fd.write( extra )
    fd.close()
Example #52
def main():

    if 1:
        # load some data from Alice's arena
        data=scipy.io.loadmat('alice_data')
        d2=data['DATA']

        x=d2[0,:]

        y=d2[1,:]

        D2R = math.pi/180.0
        theta=d2[2,:]*D2R

        xoffset = 753
        yoffset = 597

        radius_pix = 753-188 # pixels

        # 10 inch = 25.4 cm = 254 mm = diameter = 2*radius
        pix_per_mm = 2.0*radius_pix/254.0

        mm_per_pixel = 1.0/pix_per_mm

        xgain = mm_per_pixel
        ygain = mm_per_pixel

        x_mm=(x-xoffset)*xgain
        y_mm=(y-yoffset)*ygain

    hz = 200.0
    tau_emd = 0.1

    # get IIR filter coefficients for tau and sample freq
    emd_lp_ba = fsee.EMDSim.FilterMaker(hz).iir_lowpass1(tau_emd)

    model_path = os.path.join(fsee.data_dir,"models/alice_cylinder/alice_cylinder.osg")
    vision = fsee.Observer.Observer(model_path=model_path,
                                    scale=1000.0, # make cylinder model 1000x bigger (put in mm instead of m)
                                    hz=hz,
                                    skybox_basename=None, # no skybox
                                    emd_lp_ba = emd_lp_ba,
                                    full_spectrum=True,
                                    optics='buchner71',
                                    do_luminance_adaptation=False,
                                    )

    dt = 1/hz
    z = 2 # 2 mm
    if 1:
        count = 0
        tstart = time.time()
        try:
            while count < len(x_mm):
                ori_quat = cgtypes.quat().fromAngleAxis(theta[count],(0,0,1))
                posnow = cgtypes.vec3(( x_mm[count], y_mm[count], z))
                vision.step(posnow,ori_quat)
                count += 1
        finally:
            tstop = time.time()
            dur = tstop-tstart
            fps = count/dur
            print('%d frames rendered in %.1f seconds (%.1f fps)'%(count,dur,fps))
Example #53
DRAWPHASE = 2
UPDATEPHASE = 1
OVERHEADPHASE = 0


##### global vars (module level)

mouseX = 0
mouseY = 0
mouseInWindow = False

# some camera-related default constants
camera2dElevation = 8.0
cameraTargetDistance = 13.0
cameraTargetOffset = cgtypes.vec3(0.0,cameraTargetDistance,0.0)


##########  SteerTest phase

phaseStack = [0,0,0,0,0]
phaseStackSize = 5
phaseStackIndex = 0

# list of floats
phaseTimers = [0.0,0.0,0.0,0.0]
# float
phaseTimerBase = 0.0


Example #54
def make_receptor_sensitivities(all_d_q,delta_rho_q=None,res=64):
    """

    all_d_q are visual element directions as a 3-vector
    delta_rho_q (angular sensitivity) is in radians

    """
    if delta_rho_q is None:
        raise ValueError('must specify delta_rho_q (in radians)')

    if isinstance( delta_rho_q, float):
        all_delta_rho_qs = delta_rho_q*numpy.ones( (len(all_d_q),), dtype=numpy.float64)
    else:
        all_delta_rho_qs = numpy.asarray(delta_rho_q)
        if len(all_delta_rho_qs.shape) != 1:
            raise ValueError("delta_rho_q must be scalar or vector")
        if all_delta_rho_qs.shape[0] != len(all_d_q):
            raise ValueError("if delta_rho_q is a vector, "
                             "it must have the same number of "
                             "elements as receptors")

    def G_q(zeta,delta_rho_q):
        # gaussian
        # From Snyder (1979) as cited in Burton & Laughlin (2003)
        return numpy.exp( -4*math.log(2)*abs(zeta)**2 / delta_rho_q**2 )

    half_res = res//2
    vals = (numpy.arange(res)-half_res)/half_res

    weight_maps = []

    # setup vectors for initial face (posx)
    face_vecs = {}
    face_vecs['posx'] = []
    x = 1
    for z in vals:
        this_row_vecs = []
        for y in vals:
            on_cube_3d = (x,y,z)
            #print('on_cube_3d   %5.2f %5.2f %5.2f'%on_cube_3d)
            v3norm = normalize(on_cube_3d) # get direction of each pixel
            p_p = cgtypes.quat(0.0, v3norm[0], v3norm[1], v3norm[2])
            this_row_vecs.append(p_p)
        this_row_vecs.reverse()
        face_vecs['posx'].append( this_row_vecs )

    def rot_face( facedict, facename, rotq):
        facedict[facename] = []
        for row in facedict['posx']:
            this_row_vecs = []
            for col in row:
                this_row_vecs.append( rotq*col*rotq.inverse() )
            facedict[facename].append( this_row_vecs )

    rotq = cgtypes.quat()
    rotq = rotq.fromAngleAxis(math.pi/2.0,cgtypes.vec3(0,0,1))
    rot_face( face_vecs, 'posy', rotq)

    rotq = cgtypes.quat()
    rotq = rotq.fromAngleAxis(math.pi,cgtypes.vec3(0,0,1))
    rot_face( face_vecs, 'negx', rotq)

    rotq = cgtypes.quat()
    rotq = rotq.fromAngleAxis(-math.pi/2.0,cgtypes.vec3(0,0,1))
    rot_face( face_vecs, 'negy', rotq)

    rotq = cgtypes.quat()
    rotq = rotq.fromAngleAxis(math.pi/2.0,cgtypes.vec3(0,-1,0))
    rot_face( face_vecs, 'posz', rotq)

    rotq = cgtypes.quat()
    rotq = rotq.fromAngleAxis(math.pi/2.0,cgtypes.vec3(0,1,0))
    rot_face( face_vecs, 'negz', rotq)

    # convert from quat to vec3
    rfv = {}
    for key in face_vecs:
        rows = face_vecs[key]
        rfv[key] = []
        for row in rows:
            this_row = [ cgtypes.vec3(col.x, col.y, col.z) for col in row ] # convert to vec3
            rfv[key].append( this_row )

    def get_weight_map(fn, rfv, d_q, delta_rho_q):
        angles = numpy.zeros( (vals.shape[0], vals.shape[0]), dtype=numpy.float64 )
        for i, row_vecs in enumerate(rfv[fn]):
            for j, ovec in enumerate(row_vecs):
                angles[i,j] = d_q.angle(ovec)
        wm = G_q(angles,delta_rho_q)
        return wm

    for dqi,(d_q,this_delta_rho_q) in enumerate(zip(all_d_q,all_delta_rho_qs)):
        weight_maps_d_q = {}
        ssf = 0.0

        for fn in cube_order:
            wm = get_weight_map(fn, rfv, d_q, this_delta_rho_q)
            weight_maps_d_q[fn] = wm
            ssf += numpy.sum( wm.flat )

        # normalize
        for mapname in weight_maps_d_q:
            wm = weight_maps_d_q[mapname]
            weight_maps_d_q[mapname] = wm/ssf

        # save maps by receptor direction
        weight_maps.append( weight_maps_d_q )
    return weight_maps