示例#1
0
 def _background_worker(self):
     """Background polling loop: drain each (topic, item) buffer every
     ``_psec`` seconds and hand new data to the parser.

     ``ni`` (= ``_isec / _psec``) is the number of polls per interval:
     every ``ni``-th increment of a buffer's counter triggers
     ``_handle_null_interval``, and every ``ni``-th loop pass triggers
     ``_tune_background_worker`` to re-sync against wall-clock time.
     Always clears ``_rflag`` on exit so the owner can observe shutdown.
     """
     try:
         # polls per interval; float if _isec is not a multiple of _psec,
         # in which case the (count % ni) == 0 checks below may never hit
         # exactly — presumably _isec is a multiple of _psec (TODO confirm)
         ni = self._isec / self._psec
         itbeg = _perf_counter()  # baseline for drift tuning
         count = 0
         self._rflag = True
         while self._rflag:
             tbeg = _perf_counter()
             with self._buffers_lock:
                 self._manage_buffers()
                 for (t,i), b in self._buffers.items():
                     b.incr()
                     dat = self._ssfunc(i, t, date_time=True, throw_if_data_lost=False)
                     if dat:
                         self._parse_data(t,i,dat)
                     if b.count == 0:
                         continue
                     if (b.count % ni) == 0:
                         #print("b.count mod interval: %i" % b.count)
                         self._handle_null_interval(t, i, b)
             count += 1
             tend = _perf_counter()
             # time left in this poll period after the work just done
             trem = self._psec - (tend - tbeg)
             if trem < 0:
                 ## TODO :: this will create problems handling nulls as we wont be able
                 ##         to speed up by using WAIT_ADJ_DOWN (below)
                 ##         considering adjusting _psec
                 print("WARN: _background_worker taking longer than _psec (%i) seconds"
                       % self._psec)
             # sleep the remainder, shrunk geometrically by the tuner's exponent
             _sleep( max(trem,0) * (self.WAIT_ADJ_DOWN ** self._wait_adj_down_exp) )
             if (count % ni) == 0:
                 self._tune_background_worker(count,ni,itbeg)
     finally:
         self._rflag = False
 def inner(*a, **kw):
     """Call ``fn`` a total of ``repeats`` times, print the total elapsed
     time, and return the result of the final call."""
     t0 = _perf_counter()
     remaining = repeats - 1
     while remaining > 0:
         fn(*a, **kw)
         remaining -= 1
     result = fn(*a, **kw)
     total = _perf_counter() - t0
     print(
         f"elapsed time of '{fn.__name__}': {round(total, 3)}s (repeats={repeats})"
     )
     return result
 def inner(arg):
     """Time ``repeats`` calls of ``fn`` with the unpacked ``arg`` sequence,
     print a small report, and return the result of the final call."""
     t_start = _perf_counter()
     for _ in range(repeats - 1):
         fn(*arg)
     result = fn(*arg)
     dt = _perf_counter() - t_start
     print(f"'{fn.__qualname__}':")
     print(f"\telapsed time: {dt:.3}s", end=", ")
     print("repeats: ", repeats, end=", ")
     print("result: ", result)
     print()
示例#4
0
 def __enter__(self):
     try:
         self._block.add_items(*self._items)
         self._block.add_topics(*self._topics)           
         self._buffers = {k:[] for k in _product(self._items, self._topics)}            
         self._good_init = False
         self._stream_time_start = _perf_counter()
         self._cutoff_mktime = 0            
     except:
         self._block.close()
         raise     
     return self
示例#5
0
 def _background_worker(self):
     """Background polling loop: drain each (topic, item) buffer every
     ``_psec`` seconds and hand new data to the parser.

     ``ni`` (= ``_isec / _psec``) is the number of polls per interval:
     every ``ni``-th increment of a buffer's counter triggers
     ``_handle_null_interval``, and every ``ni``-th loop pass triggers
     ``_tune_background_worker`` to re-sync against wall-clock time.
     Always clears ``_rflag`` on exit so the owner can observe shutdown.
     """
     try:
         # polls per interval; float if _isec is not a multiple of _psec —
         # presumably it is, or the modulo checks below never hit (TODO confirm)
         ni = self._isec / self._psec
         itbeg = _perf_counter()  # baseline for drift tuning
         count = 0
         self._rflag = True
         while self._rflag:
             tbeg = _perf_counter()
             with self._buffers_lock:
                 self._manage_buffers()
                 for (t, i), b in self._buffers.items():
                     b.incr()
                     dat = self._ssfunc(i,
                                        t,
                                        date_time=True,
                                        throw_if_data_lost=False)
                     if dat:
                         self._parse_data(t, i, dat)
                     if b.count == 0:
                         continue
                     if (b.count % ni) == 0:
                         #print("b.count mod interval: %i" % b.count)
                         self._handle_null_interval(t, i, b)
             count += 1
             tend = _perf_counter()
             # time left in this poll period after the work just done
             trem = self._psec - (tend - tbeg)
             if trem < 0:
                 ## TODO :: this will create problems handling nulls as we wont be able
                 ##         to speed up by using WAIT_ADJ_DOWN (below)
                 ##         considering adjusting _psec
                 print(
                     "WARN: _background_worker taking longer than _psec (%i) seconds"
                     % self._psec)
             # sleep the remainder, shrunk geometrically by the tuner's exponent
             _sleep(
                 max(trem, 0) *
                 (self.WAIT_ADJ_DOWN**self._wait_adj_down_exp))
             if (count % ni) == 0:
                 self._tune_background_worker(count, ni, itbeg)
     finally:
         self._rflag = False
示例#6
0
 def _tune_background_worker(self, count, ni, itbeg):
     tnow = _perf_counter()
     adjbeg = itbeg + (count // ni) * self._isec
     terr = tnow - adjbeg
     #print("DEBUG RUNNING ERROR SECONDS: ", str(terr))
     if terr < 0:
         #print("DEBUG RUNNING ERROR SLEEP: ", str(terr))
         if self._wait_adj_down_exp > 0:
             self._wait_adj_down_exp -= 1
         _sleep(abs(terr))
     elif abs(terr) > (self._isec * self.WAIT_ADJ_THRESHOLD_FATAL):
         TOSDB_Error("_background worker entered fatal wait/sync states: %f, %i" \
                     % (terr, self._wait_adj_down_exp))
     elif terr > (self._isec * self.WAIT_ADJ_THRESHOLD):
         self._wait_adj_down_exp += 1
示例#7
0
 def _tune_background_worker(self, count, ni, itbeg):
     tnow = _perf_counter()
     adjbeg = itbeg + (count // ni) * self._isec
     terr = tnow - adjbeg
     #print("DEBUG RUNNING ERROR SECONDS: ", str(terr))
     if terr < 0:
         #print("DEBUG RUNNING ERROR SLEEP: ", str(terr))
         if self._wait_adj_down_exp > 0:
             self._wait_adj_down_exp -= 1
         _sleep(abs(terr))
     elif abs(terr) > (self._isec * self.WAIT_ADJ_THRESHOLD_FATAL):
         TOSDB_Error("_background worker entered fatal wait/sync states: %f, %i" \
                     % (terr, self._wait_adj_down_exp))
     elif terr > (self._isec * self.WAIT_ADJ_THRESHOLD):
         self._wait_adj_down_exp += 1
示例#8
0
 def _sleep_or_timeout(self, time_start, bad_buffers):
     time_now = _perf_counter()
     if not self._good_init:
         time_left = (self.INIT_TIMEOUT/1000) - (time_now - self._stream_time_start)
         if time_left > 0:
             _sleep( min(self._latency, time_left) )
             return                       
         raise tosdb.TOSDB_TimeoutError("StreamingSession timed out trying to INIT -" +
                                        " BAD STREAMS " + str(bad_buffers))                          
     elif self._timeout: 
         time_left = self._timeout - (time_now - time_start)
         if time_left <= 0:        
             raise tosdb.TOSDB_TimeoutError("StreamingSession timed out -" +
                                            " BAD STREAMS " + str(bad_buffers))
         _sleep( min(self._latency, time_left) )                   
     else:
         _sleep(self._latency)
示例#9
0
    def __next__(self):
        """Return the next batch of stream data across all (item, topic) pairs.

        Polls every stream from its marker, buffering new data until the
        readiness condition holds (guarantee mode: every buffer non-empty;
        otherwise: at least one buffer non-empty), then flushes all records
        up to the oldest "newest" datum so no stream gets ahead of the
        others.  Returns a list of ``(item, topic, value, datetime)`` tuples
        sorted newest-first by ``mktime_micro``.  Timeouts while waiting are
        raised by ``_sleep_or_timeout``.
        """
        good = []
        time_start = _perf_counter()
        while True:
            # look for data
            for item in self._items:
                for topic in self._topics:
                    dat = self._block.stream_snapshot_from_marker(
                        item, topic, date_time=True, throw_if_data_lost=False)
                    if dat is not None:
                        # snapshot-from-marker appears to be newest-first;
                        # reverse so buffers stay chronological (oldest first)
                        dat.reverse()
                        self._buffers[item, topic].extend(dat)

            # check for empty buffers
            bad_buffers = [k for k,v in self._buffers.items() if not v]
            need_to_sleep = bool(bad_buffers) if self._guarantee else \
                            len(bad_buffers) == len(self._buffers)
            # sleep and check for timeout if necessary
            if need_to_sleep:
                self._sleep_or_timeout(time_start, bad_buffers)
                continue
            else:
                # first successful fill switches timeout mode from the INIT
                # window to the per-call timeout (see _sleep_or_timeout)
                self._good_init = True

            # of newest datum in each stream, find the oldest
            oldest_new_mktime = INT_MAX
            for v in self._buffers.values():
                if v and v[-1][1].mktime_micro < oldest_new_mktime:
                    oldest_new_mktime = v[-1][1].mktime_micro
            self._cutoff_mktime = oldest_new_mktime if oldest_new_mktime != INT_MAX else 0

            # remove all data older or equal to the aforementioned datum
            for k, v in self._buffers.items():
                ngood = 0
                for vv in v:
                    if vv[1].mktime_micro <= self._cutoff_mktime:
                        good.append( (k[0], k[1], vv[0], vv[1]) )
                        ngood += 1
                # buffers are chronological, so the matched records form a
                # prefix and v[ngood:] keeps exactly the unflushed tail
                self._buffers[k] = v[ngood:]
            assert good, "'good' list is empty (PLEASE REPORT THIS BUG)"

            # sort all that and return it
            return sorted(good, key=lambda v: v[3].mktime_micro, reverse=True)
	def _apply_baked_materials_bmesh(self, obj: 'Object', mesh: 'Mesh', bm: 'BMesh'):
		"""Remap UVs of ``bm`` from source-material space into the baked atlas
		and swap each affected material slot to its baked target material.

		For every material with registered transforms, each face using that
		material is matched to a transform by its mean UV, and the transform
		is applied to every loop UV exactly once (loop indices are memoized
		in ``_bmesh_loops_mem``).  Raises AssertionError when a face has no
		matching transform.
		"""
		# Editing UVs through BMesh is much faster:
		# direct access to UV layers via bpy.types.Mesh is about 4x slower.
		self._bmesh_loops_mem.clear()
		self._bmesh_loops_mem_hits = 0
		for material_index in range(len(mesh.materials)):
			source_mat = mesh.materials[material_index]
			transforms = self._transforms.get(source_mat)
			if transforms is None:
				continue  # No transforms registered for this material.
			# Degenerate geometry causes problems, hence the epsilon.
			# The gap between boxes is at least the material's epsilon, so take half.
			epsilon = self._get_epsilon_safe(obj, source_mat)
			src_size_x, src_size_y = self._matsizes[source_mat]
			epsilon_x, epsilon_y = epsilon / src_size_x / 2, epsilon / src_size_y / 2
			target_mat = self._materials.get((obj, source_mat))
			uv_name = self.get_uv_name(obj, source_mat) or 0
			bm_uv_layer = bm.loops.layers.uv[uv_name]  # type: BMLayerItem
			# Here we end up visiting every face once per material,
			# but that beats a single pass that would have to call
			# dict.get(bm_face.material_index) and unpack metadata per face.
			_t3 = _perf_counter()
			bm.faces.ensure_lookup_table()
			for bm_face in bm.faces:
				if bm_face.material_index != material_index:
					continue
				# Mean UV of the face.  In principle any point would do for the
				# containment test, but this avoids trouble with border cases.
				# Needs testing; this can most likely be sped up.
				mean_uv = _Vector((0, 0))
				for bm_loop in bm_face.loops:
					mean_uv += bm_loop[bm_uv_layer].uv
				mean_uv /= len(bm_face.loops)
				transform = None
				# Find the transform covering this polygon.
				_t1 = _perf_counter()
				for t in transforms:
					# Should work without epsilons.
					if t.is_match(mean_uv, epsilon_x=epsilon_x, epsilon_y=epsilon_y):
						transform = t
						break
				self._perf_find_transform += _perf_counter() - _t1
				if transform is None:
					# This should never happen: if the material was subject to
					# baking, every region must already be covered by transforms.
					msg = 'No UV transform for Obj={0}, Mesh={1}, SMat={2}, Poly={3}, UV={4}, Transforms:' \
						.format(repr(obj.name), repr(mesh.name), repr(source_mat.name), repr(bm_face), repr(mean_uv))
					_log.error(msg)
					for transform in transforms:
						_log.error('\t- {}'.format(repr(transform)))
					raise AssertionError(msg, obj, source_mat, repr(bm_face), mean_uv, transforms)
				_t2 = _perf_counter()
				for bm_loop in bm_face.loops:
					if bm_loop.index in self._bmesh_loops_mem:
						# So the transform is not applied twice to one loop.
						# As far as I can tell bmesh does not reuse loops;
						# needs investigation — this memo may be removable.
						self._bmesh_loops_mem_hits += 1
						continue
					self._bmesh_loops_mem.add(bm_loop.index)
					vec2 = bm_loop[bm_uv_layer].uv
					vec2 = transform.apply(vec2)
					bm_loop[bm_uv_layer].uv = vec2
				self._perf_apply_transform += _perf_counter() - _t2
			self._perf_iter_polys += _perf_counter() - _t3
			# Note: we modify the mesh here even though it will be rewritten
			# from bmesh later.  That is OK — bmesh ignores materials and
			# only stores slot indices.
			mesh.materials[material_index] = target_mat
			obj.material_slots[material_index].material = target_mat
		if _log.is_debug():
			_log.info("BMesh loops hits for {} = {}".format(repr(obj.name), repr(self._bmesh_loops_mem_hits)))
		self._bmesh_loops_mem.clear()
	def _bake_image(self, bake_type: 'str', target_image: 'Image'):
		"""Bake one atlas pass of ``bake_type`` into ``target_image``.

		Duplicates the prepared ``_bake_obj`` (so the original object and its
		materials stay untouched), points every material's bake node at the
		target image, configures Cycles, runs the bake between the
		before/after hooks, then deletes the duplicate object, its mesh and
		its copied materials.
		"""
		_log.info("Preparing for bake atlas Image={0} type={1} size={2}...".format(
			repr(target_image.name), bake_type, tuple(target_image.size)))
		
		# Since cycles is a pain, it is easier to: make a copy of ._bake_obj,
		# make copies of the materials on that copy,
		# customize those materials and route everything out through EMIT.
		
		_commons.ensure_deselect_all_objects()
		_commons.activate_object(self._bake_obj)
		_commons.ensure_op_finished(_bpy.ops.object.duplicate(linked=False), name='bpy.ops.object.duplicate')
		local_bake_obj = _bpy.context.view_layer.objects.active
		self._bake_obj.hide_set(True)
		_commons.ensure_deselect_all_objects()
		_commons.activate_object(local_bake_obj)
		# Fully decouple the duplicate: its own mesh and material copies.
		_commons.ensure_op_finished(_bpy.ops.object.make_single_user(
			object=True, obdata=True, material=True, animation=False,
		), name='bpy.ops.object.make_single_user')
		
		if bake_type in ('ALPHA', 'DIFFUSE', 'METALLIC', 'ROUGHNESS'):
			self._try_edit_mats_for_bake(local_bake_obj, bake_type)
		
		for slot in local_bake_obj.material_slots:  # type: MaterialSlot
			n_bake = _snodes.prepare_and_get_node_for_baking(slot.material)
			n_bake.image = target_image
		
		_bpy.context.scene.render.engine = 'CYCLES'
		_bpy.context.scene.cycles.feature_set = 'SUPPORTED'
		_bpy.context.scene.cycles.device = 'GPU'  # can be overriden in before_bake
		_bpy.context.scene.cycles.use_adaptive_sampling = True
		_bpy.context.scene.cycles.adaptive_threshold = 0
		_bpy.context.scene.cycles.adaptive_min_samples = 0
		
		# These pass types are routed through emission, so Cycles bakes EMIT.
		emit_types = ('EMIT', 'ALPHA', 'DIFFUSE', 'METALLIC', 'ROUGHNESS')
		cycles_bake_type = 'EMIT' if bake_type in emit_types else bake_type
		_bpy.context.scene.cycles.bake_type = cycles_bake_type
		_bpy.context.scene.render.bake.use_pass_direct = False
		_bpy.context.scene.render.bake.use_pass_indirect = False
		_bpy.context.scene.render.bake.use_pass_color = False
		_bpy.context.scene.render.bake.use_pass_emit = bake_type in emit_types
		_bpy.context.scene.render.bake.normal_space = 'TANGENT'
		_bpy.context.scene.render.bake.margin = 64
		_bpy.context.scene.render.bake.use_clear = True
		_bpy.context.scene.render.use_lock_interface = True
		_bpy.context.scene.render.use_persistent_data = False
		
		self._call_before_bake_safe(bake_type, target_image)
		
		_log.info("Trying to bake atlas Image={0} type={1}/{2} size={3}...".format(
			repr(target_image.name), bake_type, cycles_bake_type, tuple(target_image.size)))
		_commons.ensure_deselect_all_objects()
		_commons.activate_object(local_bake_obj)
		_gc_collect()  # Collect garbage right before the bake since it eats a lot of memory.
		_bpy.ops.wm.memory_statistics()
		bake_start = _perf_counter()
		_commons.ensure_op_finished(_bpy.ops.object.bake(type=cycles_bake_type, use_clear=True), name='bpy.ops.object.bake')
		bake_time = _perf_counter() - bake_start
		_bpy.ops.wm.memory_statistics()
		_log.info("Baked atlas Image={0} type={1}, time spent: {2:.1f} sec.".format(repr(target_image.name), bake_type, bake_time))
		
		# Tear down the duplicate and everything it owned.
		garbage_materials = set(slot.material for slot in local_bake_obj.material_slots)
		mesh = local_bake_obj.data
		_bpy.context.blend_data.objects.remove(local_bake_obj, do_unlink=True)
		_bpy.context.blend_data.meshes.remove(mesh, do_unlink=True)
		for mat in garbage_materials:
			_bpy.context.blend_data.materials.remove(mat, do_unlink=True)
		if _bpy.app.version >= (2, 93, 0):
			_bpy.data.orphans_purge(do_recursive=True)
		
		self._call_after_bake_safe(bake_type, target_image)
示例#12
0
	def __init__(self, report_time=5.0):
		self.report_time = report_time
		self.time_begin = _perf_counter()
		self.time_progress = _perf_counter()
示例#13
0
	def ask_report(self, force=False):
		now = _perf_counter()
		if force is False and now - self.time_progress < self.report_time:
			return
		self.time_progress = now
		self.do_report(now - self.time_begin)