def __init__( self, buffer, gl_type = GL.GL_UNSIGNED_INT, stride = None, offset = None, **kwargs ):
    super( IndexAttribute, self ).__init__( buffer )

    # indices hold a single scalar value per vertex;
    # without this, the stride calculation below would fail
    self.values_per_vertex = 1
    self.gl_type = gl_type

    # we need to get the size of the actual gl_type, not the GL enum
    gl_type_size = ctypes.sizeof( enum_to_type( self.gl_type ) )
    self.stride = stride if stride else self.values_per_vertex * gl_type_size
    self.offset = offset
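# A minimal usage sketch (assumption: a "vertex_buffer" object compatible
# with this module's buffer class; the name is illustrative). With 16-bit
# indices the stride defaults to 1 * sizeof(c_ushort) == 2 bytes, i.e.
# tightly packed index data:
#
#   indices = IndexAttribute(
#       vertex_buffer,
#       gl_type = GL.GL_UNSIGNED_SHORT
#       )
#   assert indices.stride == 2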
def __init__( self, buffer, values_per_vertex = 2, gl_type = GL.GL_FLOAT, stride = None, offset = None, **kwargs ):
    super( TextureCoordAttribute, self ).__init__( buffer )

    self.values_per_vertex = values_per_vertex
    self.gl_type = gl_type

    # we need to get the size of the actual gl_type, not the GL enum
    gl_type_size = ctypes.sizeof( enum_to_type( self.gl_type ) )
    self.stride = stride if stride else self.values_per_vertex * gl_type_size
    self.offset = offset
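# A minimal usage sketch (assumption: a "vertex_buffer" holding interleaved
# position (3 floats) + UV (2 floats) records; names are illustrative).
# An explicit stride and offset cover the interleaved case, where each
# vertex record is 5 floats == 20 bytes and the UVs sit 12 bytes in:
#
#   uvs = TextureCoordAttribute(
#       vertex_buffer,
#       values_per_vertex = 2,
#       gl_type = GL.GL_FLOAT,
#       stride = 5 * ctypes.sizeof( ctypes.c_float ),
#       offset = 3 * ctypes.sizeof( ctypes.c_float ),
#       )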
def __init__( self, buffer, location, values_per_vertex = 3, gl_type = GL.GL_FLOAT, stride = None, offset = None, normalise = False, **kwargs ):
    super( GenericAttribute, self ).__init__( buffer )

    self.values_per_vertex = values_per_vertex
    self.gl_type = gl_type

    # we need to get the size of the actual gl_type, not the GL enum
    gl_type_size = ctypes.sizeof( enum_to_type( self.gl_type ) )
    self.stride = stride if stride else self.values_per_vertex * gl_type_size
    self.offset = offset

    self.location = location
    self.normalise = normalise
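# A minimal usage sketch (assumption: a linked shader program handle
# "shader" queried via PyOpenGL; names are illustrative). "location" is
# the generic attribute index the data binds to, and "normalise"
# corresponds to the 'normalized' flag of glVertexAttribPointer:
#
#   loc = GL.glGetAttribLocation( shader, 'in_colour' )
#   colours = GenericAttribute(
#       vertex_buffer,
#       location = loc,
#       values_per_vertex = 4,
#       gl_type = GL.GL_UNSIGNED_BYTE,
#       normalise = True,   # map 0-255 bytes to 0.0-1.0 in the shader
#       )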