def test_filter_processor(names, expected):
    # `names` and `expected` are presumably supplied by a pytest parametrize
    # decorator (not shown in this excerpt).
    fs = mock_fs()
    with mock_fs_scope(fs):
        app = fs.getApp()
        processors = app.plugin_loader.getProcessors()
        procs = get_filtered_processors(processors, names)
        actual = [p.PROCESSOR_NAME for p in procs]
        assert sorted(actual) == sorted(expected)
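# The test above exercises `get_filtered_processors`, which narrows the full
# processor list down to the names the caller asked for. The helper below is
# a minimal illustrative sketch of such filtering logic, not the project's
# actual implementation. It assumes each processor exposes a PROCESSOR_NAME
# attribute, that 'all' acts as a wildcard, and that a leading '-' excludes a
# processor; the real function may differ in these details.
def _filter_processors_sketch(processors, authorized_names):
    # No filter given: keep everything.
    if not authorized_names or authorized_names == 'all':
        return list(processors)

    # Accept either a comma-separated string or a list of names.
    if isinstance(authorized_names, str):
        authorized_names = [n.strip() for n in authorized_names.split(',')]

    include_all = 'all' in authorized_names
    excluded = {n[1:] for n in authorized_names if n.startswith('-')}
    included = {n for n in authorized_names
                if n != 'all' and not n.startswith('-')}

    result = []
    for p in processors:
        name = p.PROCESSOR_NAME
        if name in excluded:
            continue
        if include_all or name in included:
            result.append(p)
    return result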
def run(self, src_dir_or_file=None, *,
        delete=True, previous_record=None, save_record=True):
    start_time = time.perf_counter()

    # Get the list of processors for this run.
    processors = self.app.plugin_loader.getProcessors()
    if self.enabled_processors is not None:
        logger.debug("Filtering processors to: %s" %
                     self.enabled_processors)
        processors = get_filtered_processors(processors,
                                              self.enabled_processors)
    if self.additional_processors_factories is not None:
        logger.debug("Adding %s additional processors." %
                     len(self.additional_processors_factories))
        for proc_fac in self.additional_processors_factories:
            proc = proc_fac()
            self.app.env.registerTimer(proc.__class__.__name__,
                                       raise_if_registered=False)
            proc.initialize(self.app)
            processors.append(proc)

    # Invoke pre-processors.
    pipeline_ctx = PipelineContext(-1, self.app, self.out_dir,
                                   self.tmp_dir, self.force)
    for proc in processors:
        proc.onPipelineStart(pipeline_ctx)

    # Pre-processors can define additional ignore patterns.
    self.ignore_patterns += make_re(
            pipeline_ctx._additional_ignore_patterns)

    # Create the pipeline record.
    record = TransitionalProcessorPipelineRecord()
    record_cache = self.app.cache.getCache('proc')
    record_name = (
            hashlib.md5(self.out_dir.encode('utf8')).hexdigest() +
            '.record')
    if previous_record:
        record.setPrevious(previous_record)
    elif not self.force and record_cache.has(record_name):
        with format_timed_scope(logger, 'loaded previous bake record',
                                level=logging.DEBUG, colored=False):
            record.loadPrevious(record_cache.getCachePath(record_name))
            logger.debug("Got %d entries in process record." %
                         len(record.previous.entries))
    record.current.success = True
    record.current.processed_count = 0

    # Work!
    def _handler(res):
        entry = record.getCurrentEntry(res.path)
        assert entry is not None
        entry.flags = res.flags
        entry.proc_tree = res.proc_tree
        entry.rel_outputs = res.rel_outputs
        if entry.flags & FLAG_PROCESSED:
            record.current.processed_count += 1
        if res.errors:
            entry.errors += res.errors
            record.current.success = False

            rel_path = os.path.relpath(res.path, self.app.root_dir)
            logger.error("Errors found in %s:" % rel_path)
            for e in entry.errors:
                logger.error("  " + e)

    jobs = []
    self._process(src_dir_or_file, record, jobs)
    pool = self._createWorkerPool()
    ar = pool.queueJobs(jobs, handler=_handler)
    ar.wait()

    # Shutdown the workers and get timing information from them.
    reports = pool.close()
    record.current.timers = {}
    for i in range(len(reports)):
        timers = reports[i]
        if timers is None:
            continue

        worker_name = 'PipelineWorker_%d' % i
        record.current.timers[worker_name] = {}
        for name, val in timers['data'].items():
            main_val = record.current.timers.setdefault(name, 0)
            record.current.timers[name] = main_val + val
            record.current.timers[worker_name][name] = val

    # Invoke post-processors.
    pipeline_ctx.record = record.current
    for proc in processors:
        proc.onPipelineEnd(pipeline_ctx)

    # Handle deletions.
    if delete:
        for path, reason in record.getDeletions():
            logger.debug("Removing '%s': %s" % (path, reason))
            try:
                os.remove(path)
            except FileNotFoundError:
                pass
            logger.info('[delete] %s' % path)

    # Finalize the process record.
    record.current.process_time = time.time()
    record.current.out_dir = self.out_dir
    record.collapseRecords()

    # Save the process record.
    if save_record:
        with format_timed_scope(logger, 'saved bake record',
                                level=logging.DEBUG, colored=False):
            record.saveCurrent(record_cache.getCachePath(record_name))

    logger.info(format_timed(
            start_time,
            "processed %d assets." % record.current.processed_count))

    return record.detach()
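# The timer-merging loop in `run` above folds each worker's timing report
# into both a per-worker bucket and an overall total. The snippet below is a
# small self-contained illustration of that aggregation pattern; the report
# contents and timer names are made up purely for the example.
reports = [
    {'data': {'Copy': 1.2, 'Sass': 0.4}},
    None,                                  # a worker may report nothing
    {'data': {'Copy': 0.8}},
]

timers = {}
for i, report in enumerate(reports):
    if report is None:
        continue
    worker_name = 'PipelineWorker_%d' % i
    timers[worker_name] = {}
    for name, val in report['data'].items():
        timers[name] = timers.setdefault(name, 0) + val  # overall total
        timers[worker_name][name] = val                  # per-worker value

print(timers['Copy'])              # 2.0 -> total across all workers
print(timers['PipelineWorker_2'])  # {'Copy': 0.8}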