def __init__(self, dtype: str):
    """
    Initialize the CPU-side cache backing this index buffer.

    :param dtype: str, describe IBO dtype, one of (uint, ushort, ubyte)
    """
    # Single structured field 'idx' bound to attribute location 0.
    # PRV is used both as the cache's default value and to pre-fill the
    # whole array (presumably a primitive-restart / sentinel index value
    # — TODO confirm at PRV's definition).
    self.__cache = BffrCache(np.dtype([('idx', dtype)]), (0,), def_val=PRV)
    self.__cache.fill_array(PRV)
class MetaIndxBffr(_MetaBffr):
    """
    Buffer of GL_ELEMENT_ARRAY_BUFFER

    Holds a CPU-side index cache (BffrCache) and creates the GL
    element-array buffer entity bound to it.
    """

    def __init__(self, dtype: str):
        """
        :param dtype: str, describe IBO dtype, one of (uint, ushort, ubyte)
        """
        # Single structured field 'idx' at attribute location 0; PRV is
        # used as default value and to pre-fill the array (presumably a
        # primitive-restart / sentinel index — TODO confirm).
        self.__cache = BffrCache(np.dtype([('idx', dtype)]), (0,), def_val=PRV)
        self.__cache.fill_array(PRV)

    @property
    def target(self):
        # GL binding target for index (element) buffers
        return gl.GL_ELEMENT_ARRAY_BUFFER

    @property
    def cache(self):
        # CPU-side BffrCache backing this buffer
        return self.__cache

    def _create_entity(self):
        """
        Generate the GL buffer object and wire it to this buffer's
        target and cache.

        :return: generated buffer entity
        """
        # NOTE(review): the glGenBuffers result here exposes
        # set_target/set_cache — looks like a project-level wrapper
        # around raw OpenGL, not plain PyOpenGL; verify against the
        # gl module this package imports.
        bffr = gl.glGenBuffers(1)
        bffr.set_target(self.target)
        bffr.set_cache(self.__cache)
        return bffr
def create_bffr_cache(self, size) -> BffrCache:
    """
    create buffer cache for vertex attributes

    :param size: number of elements the cache holds — presumably; the
                 original doc was truncated ("size o"), TODO confirm
                 against BffrCache's signature
    :return: BffrCache built from this object's dtype and attribute locations
    """
    cache = BffrCache(self._dtype, self._attr_locs, size)
    return cache
def create_bffr_cache(self, size) -> BffrCache:
    """
    create buffer cache for uniform values

    :param size: number of elements the cache holds
    :return: BffrCache with per-field default uniform values written in
    """
    cache = BffrCache(self._dtype, self._locs, size)
    # Write each field's default across the whole array. Relies on
    # self._def_val being ordered to match iteration order of
    # self._dtype.fields (the dtype's field insertion order) — TODO
    # confirm at the definition of _def_val.
    for name, val in zip(self._dtype.fields, self._def_val):
        if val is not None:
            cache.array[name][...] = val
    return cache
def __init__(self, attr_desc: np.dtype, attr_locs: (list, tuple)):
    """
    :param attr_desc: attribute description for ogl prgrm in ndarray dtype format
                      ! use same dtype of CPUBffr cooperating
    :param attr_locs: (Int, ...), describe each field's attribute location of glsl prgrm

    :raises StructuredDtypeError: if attr_desc is not a structured numpy dtype
    :raises TypeError: if attr_locs is not a list/tuple of ints
    :raises ValueError: if locations are not unique, or their count does
                        not match the number of dtype fields
    """
    # attribute description must be a structured numpy dtype (named fields)
    if not (isinstance(attr_desc, np.dtype) and attr_desc.fields is not None):
        raise StructuredDtypeError
    self.__attr_desc = attr_desc

    # attribute locations: a sequence of unique ints, one per dtype field
    # (fixed: error messages previously referred to a nonexistent
    # parameter name 'attr_loc')
    if not (isinstance(attr_locs, (tuple, list)) and all(isinstance(loc, int) for loc in attr_locs)):
        raise TypeError('attr_locs should be (tuple, list) of int values')
    if len(set(attr_locs)) != len(attr_locs):
        raise ValueError('attr_locs has to have unique values')
    if len(self.__attr_desc.fields) != len(attr_locs):
        raise ValueError('every attribute has to have a location value')
    self.__attr_locs = attr_locs

    # cache shareness
    # decide whether to use single cache for all entities per context
    self.__cache = BffrCache(attr_desc, attr_locs)
Case it could be compatible in frame render time is when a (really)big block is released. But even then simply drawing all will be faster. But packing is still a method of reducing render time if it could be done asynchronously. """ glfw.init() window = glfw.create_window(1000, 1000, 'mywindow', None, None) glfw.make_context_current(window) # prepare data num_points = 100000 dtype = np.dtype([('vtx', 'f4', 4), ('clr', 'f4', 4)]) points = np.ndarray(num_points, dtype) # create index test1_idx = BffrCache(np.dtype([('idx', 'uint')]), (0, ), size=num_points) test1_blocks = [] # create points for i in range(num_points): point = [ random.uniform(-1, 1), random.uniform(-1, 1), random.uniform(-1, 1), random.uniform(-1, 1) ] color = [ random.random(), random.random(), random.random(), random.random()
conclusion: Index tight packing reduces number of indices to render but packing itself takes too much time. Therefore release_fill operation is not fast enough to be executed commonly in frame-time. """ glfw.init() window = glfw.create_window(1000, 1000, 'mywindow', None, None) glfw.make_context_current(window) # prepare data num_points = 100000 dtype = np.dtype([('vtx', 'f4', 4), ('clr', 'f4', 4)]) points = np.ndarray(num_points, dtype) # create index test1_idx = BffrCache(np.dtype([('idx', 'uint')]), (0, ), size=num_points) test2_idx = BffrCache(np.dtype([('idx', 'uint')]), (0, ), size=num_points) test3_idx = BffrCache(np.dtype([('idx', 'uint')]), (0, ), size=num_points) test1_blocks = [] test2_blocks = [] test3_blocks = [] # create points for i in range(num_points): point = [ random.uniform(-1, 1), random.uniform(-1, 1), random.uniform(-1, 1), random.uniform(-1, 1) ] color = [