Example #1
    def calculate_bit_depth_dependencies(self):
        """Calculate a number of parameters dependent on bit depth."""
        bit_depth_warning = False
        p = self.parameters # shorthand

        red_bits = gl.glGetIntegerv( gl.GL_RED_BITS )
        green_bits = gl.glGetIntegerv( gl.GL_GREEN_BITS )
        blue_bits = gl.glGetIntegerv( gl.GL_BLUE_BITS )
        min_bits = min( (red_bits,green_bits,blue_bits) )
        if min_bits < p.bit_depth:
            logger = logging.getLogger('VisionEgg.Gratings')
            logger.warning("Requested bit depth of %d in "
                           "LuminanceGratingCommon, which is "
                           "greater than your current OpenGL context "
                           "supports (%d)."% (p.bit_depth,min_bits))
        self.gl_internal_format = gl.GL_LUMINANCE
        self.format = gl.GL_LUMINANCE
        self.gl_type, self.numpy_dtype, self.max_int_val = _get_type_info( p.bit_depth )
        self.cached_bit_depth = p.bit_depth
Example #2
    def calculate_bit_depth_dependencies(self):
        """Calculate a number of parameters dependent on bit depth."""
        bit_depth_warning = False
        p = self.parameters  # shorthand

        red_bits = gl.glGetIntegerv(gl.GL_RED_BITS)
        green_bits = gl.glGetIntegerv(gl.GL_GREEN_BITS)
        blue_bits = gl.glGetIntegerv(gl.GL_BLUE_BITS)
        min_bits = min((red_bits, green_bits, blue_bits))
        if min_bits < p.bit_depth:
            logger = logging.getLogger("VisionEgg.Gratings")
            logger.warning(
                "Requested bit depth of %d in "
                "LuminanceGratingCommon, which is "
                "greater than your current OpenGL context "
                "supports (%d)." % (p.bit_depth, min_bits)
            )
        self.gl_internal_format = gl.GL_LUMINANCE
        self.format = gl.GL_LUMINANCE
        self.gl_type, self.numpy_dtype, self.max_int_val = _get_type_info(p.bit_depth)
        self.cached_bit_depth = p.bit_depth
Example #3
 def calculate_bit_depth_dependencies(self):
     """Calculate a number of parameters dependent on bit depth."""
     p = self.parameters # shorthand
     alpha_bit_depth = gl.glGetIntegerv( gl.GL_ALPHA_BITS )
     if alpha_bit_depth < p.bit_depth:
         logger = logging.getLogger('VisionEgg.Gratings')
         logger.warning("Requested bit depth of %d, which is "
                        "greater than your current OpenGL context "
                        "supports (%d)."% (p.bit_depth,min_bits))
     self.gl_internal_format = gl.GL_ALPHA
     self.format = gl.GL_ALPHA
     self.gl_type, self.numpy_dtype, self.max_int_val = _get_type_info( p.bit_depth )
     self.cached_bit_depth = p.bit_depth
Example #4
 def calculate_bit_depth_dependencies(self):
     """Calculate a number of parameters dependent on bit depth."""
     p = self.parameters  # shorthand
     alpha_bit_depth = gl.glGetIntegerv(gl.GL_ALPHA_BITS)
     if alpha_bit_depth < p.bit_depth:
         logger = logging.getLogger("VisionEgg.Gratings")
         logger.warning(
             "Requested bit depth of %d, which is "
             "greater than your current OpenGL context "
             "supports (%d)." % (p.bit_depth, min_bits)
         )
     self.gl_internal_format = gl.GL_ALPHA
     self.format = gl.GL_ALPHA
     self.gl_type, self.numpy_dtype, self.max_int_val = _get_type_info(p.bit_depth)
     self.cached_bit_depth = p.bit_depth
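
Examples #1 through #4 all unpack the result of _get_type_info(p.bit_depth) into an OpenGL texel type, a numpy dtype, and the maximum integer value for that bit depth, but the helper itself is not shown on this page. The following is only a minimal stand-in sketching the kind of mapping those call sites assume; the supported bit depths and the specific GL constants are assumptions, not taken from the library (PyOpenGL is assumed to be imported as gl):

import numpy
import OpenGL.GL as gl

def _get_type_info(bit_depth):
    # Illustrative mapping only: requested bit depth ->
    # (OpenGL texel type, numpy dtype, maximum integer value).
    if bit_depth == 8:
        return gl.GL_UNSIGNED_BYTE, numpy.uint8, 255.0
    elif bit_depth == 16:
        return gl.GL_UNSIGNED_SHORT, numpy.uint16, 65535.0
    else:
        raise ValueError("unsupported bit depth: %d" % bit_depth)
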
Example #5
    def __init__(self,**kw):
        LuminanceGratingCommon.__init__(self,**kw)

        p = self.parameters # shorthand

        self._texture_object_id = gl.glGenTextures(1)
        if p.mask:
            gl.glActiveTextureARB(gl.GL_TEXTURE0_ARB)
        gl.glBindTexture(gl.GL_TEXTURE_1D,self._texture_object_id)

        # Do error-checking on texture to make sure it will load
        max_dim = gl.glGetIntegerv(gl.GL_MAX_TEXTURE_SIZE)
        if p.num_samples > max_dim:
            raise NumSamplesTooLargeError("Grating num_samples too large for video system.\nOpenGL reports maximum size of %d"%(max_dim,))

        self.calculate_bit_depth_dependencies()

        w = p.size[0]
        inc = w/float(p.num_samples)
        phase = 0.0 # this data won't get used - don't care about phase
        self._last_phase = phase
        floating_point_sin = numpy.sin(2.0*math.pi*p.spatial_freq*numpy.arange(0.0,w,inc,dtype=float)+(phase/180.0*math.pi))*0.5*p.contrast+p.pedestal
        floating_point_sin = numpy.clip(floating_point_sin,0.0,1.0) # allow square wave generation if contrast > 1
        texel_data = (floating_point_sin*self.max_int_val).astype(self.numpy_dtype).tobytes()

        # Because the MAX_TEXTURE_SIZE method is insensitive to the current
        # state of the video system, another check must be done using
        # "proxy textures".
        gl.glTexImage1D(gl.GL_PROXY_TEXTURE_1D,            # target
                        0,                                 # level
                        self.gl_internal_format,           # video RAM internal format
                        p.num_samples,                     # width
                        0,                                 # border
                        self.format,                       # format of texel data
                        self.gl_type,                      # type of texel data
                        texel_data)                        # texel data (irrelevant for proxy)
        if gl.glGetTexLevelParameteriv(gl.GL_PROXY_TEXTURE_1D, # Need PyOpenGL >= 2.0
                                       0,
                                       gl.GL_TEXTURE_WIDTH) == 0:
            raise NumSamplesTooLargeError("Grating num_samples is too wide for your video system!")

        # If we got here, it worked and we can load the texture for real.
        gl.glTexImage1D(gl.GL_TEXTURE_1D,                  # target
                        0,                                 # level
                        self.gl_internal_format,           # video RAM internal format
                        p.num_samples,                     # width
                        0,                                 # border
                        self.format,                       # format of texel data
                        self.gl_type,                      # type of texel data
                        texel_data)                        # texel data

        # Set texture object defaults
        gl.glTexParameteri(gl.GL_TEXTURE_1D,gl.GL_TEXTURE_WRAP_S,gl.GL_CLAMP_TO_EDGE)
        gl.glTexParameteri(gl.GL_TEXTURE_1D,gl.GL_TEXTURE_WRAP_T,gl.GL_CLAMP_TO_EDGE)
        gl.glTexParameteri(gl.GL_TEXTURE_1D,gl.GL_TEXTURE_MAG_FILTER,gl.GL_LINEAR)
        gl.glTexParameteri(gl.GL_TEXTURE_1D,gl.GL_TEXTURE_MIN_FILTER,gl.GL_LINEAR)

        if p.color2 is not None:
            if VisionEgg.Core.gl_renderer == 'ATi Rage 128 Pro OpenGL Engine' and VisionEgg.Core.gl_version == '1.1 ATI-1.2.22':
                logger = logging.getLogger('VisionEgg.Gratings')
                logger.warning("Your video card and driver have known "
                               "bugs which prevent them from rendering "
                               "color gratings properly.")
Example #6
    def __init__(self, **kw):
        LuminanceGratingCommon.__init__(self, **kw)

        p = self.parameters  # shorthand

        self._texture_object_id = gl.glGenTextures(1)
        if p.mask:
            gl.glActiveTextureARB(gl.GL_TEXTURE0_ARB)
        gl.glBindTexture(gl.GL_TEXTURE_1D, self._texture_object_id)

        # Do error-checking on texture to make sure it will load
        max_dim = gl.glGetIntegerv(gl.GL_MAX_TEXTURE_SIZE)
        if p.num_samples > max_dim:
            raise NumSamplesTooLargeError(
                "Grating num_samples too large for video system.\nOpenGL reports maximum size of %d" % (max_dim,)
            )

        self.calculate_bit_depth_dependencies()

        w = p.size[0]
        inc = w / float(p.num_samples)
        phase = 0.0  # this data won't get used - don't care about phase
        self._last_phase = phase
        floating_point_sin = (
            numpy.sin(
                2.0 * math.pi * p.spatial_freq * numpy.arange(0.0, w, inc, dtype=float)
                + (phase / 180.0 * math.pi)
            )
            * 0.5
            * p.contrast
            + p.pedestal
        )
        floating_point_sin = numpy.clip(floating_point_sin, 0.0, 1.0)  # allow square wave generation if contrast > 1
        texel_data = (floating_point_sin * self.max_int_val).astype(self.numpy_dtype).tobytes()

        # Because the MAX_TEXTURE_SIZE method is insensitive to the current
        # state of the video system, another check must be done using
        # "proxy textures".
        gl.glTexImage1D(
            gl.GL_PROXY_TEXTURE_1D,  # target
            0,  # level
            self.gl_internal_format,  # video RAM internal format
            p.num_samples,  # width
            0,  # border
            self.format,  # format of texel data
            self.gl_type,  # type of texel data
            texel_data,
        )  # texel data (irrelevant for proxy)
        if gl.glGetTexLevelParameteriv(gl.GL_PROXY_TEXTURE_1D, 0, gl.GL_TEXTURE_WIDTH) == 0:  # Need PyOpenGL >= 2.0
            raise NumSamplesTooLargeError("Grating num_samples is too wide for your video system!")

        # If we got here, it worked and we can load the texture for real.
        gl.glTexImage1D(
            gl.GL_TEXTURE_1D,  # target
            0,  # level
            self.gl_internal_format,  # video RAM internal format
            p.num_samples,  # width
            0,  # border
            self.format,  # format of texel data
            self.gl_type,  # type of texel data
            texel_data,
        )  # texel data

        # Set texture object defaults
        gl.glTexParameteri(gl.GL_TEXTURE_1D, gl.GL_TEXTURE_WRAP_S, gl.GL_CLAMP_TO_EDGE)
        gl.glTexParameteri(gl.GL_TEXTURE_1D, gl.GL_TEXTURE_WRAP_T, gl.GL_CLAMP_TO_EDGE)
        gl.glTexParameteri(gl.GL_TEXTURE_1D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_LINEAR)
        gl.glTexParameteri(gl.GL_TEXTURE_1D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_LINEAR)

        if p.color2 is not None:
            if (
                VisionEgg.Core.gl_renderer == "ATi Rage 128 Pro OpenGL Engine"
                and VisionEgg.Core.gl_version == "1.1 ATI-1.2.22"
            ):
                logger = logging.getLogger("VisionEgg.Gratings")
                logger.warning(
                    "Your video card and driver have known "
                    "bugs which prevent them from rendering "
                    "color gratings properly."
                )
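
The texture-building code in Examples #5 and #6 can be exercised independently of OpenGL. The sketch below repeats the same texel computation with hypothetical parameter values (size, num_samples, spatial_freq, contrast, pedestal, phase, and the 8-bit max_int_val are placeholders chosen for illustration, not values from the examples):

import math
import numpy

# Hypothetical stimulus parameters, for illustration only.
size = (640.0, 480.0)        # stimulus size in pixels; only the width is used
num_samples = 512            # number of texels in the 1-D texture
spatial_freq = 1.0 / 128.0   # cycles per pixel
contrast = 1.0
pedestal = 0.5
phase = 0.0                  # degrees
max_int_val = 255            # maximum integer value for an 8-bit depth

w = size[0]
inc = w / float(num_samples)
floating_point_sin = numpy.sin(
    2.0 * math.pi * spatial_freq * numpy.arange(0.0, w, inc, dtype=float)
    + (phase / 180.0 * math.pi)
) * 0.5 * contrast + pedestal
# Clipping keeps values in [0, 1]; it also allows square-wave
# generation when contrast > 1, as noted in the examples.
floating_point_sin = numpy.clip(floating_point_sin, 0.0, 1.0)
texel_data = (floating_point_sin * max_int_val).astype(numpy.uint8).tobytes()

The resulting texel_data is the raw byte string that glTexImage1D would upload; with an 8-bit dtype its length equals num_samples.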