def transform(self, ast, env):
    """Run each composed stage over *ast* in order and return the final AST.

    When ``env.debug`` is set, the tree form of the AST is logged before
    every stage so the transformation sequence can be inspected.
    """
    logger.debug('Running composed stages: %s', self.stages)
    for stage in self.stages:
        # Dump the pre-stage AST only when the environment asks for it.
        if env.debug:
            logger.debug(pprint.pformat((stage, utils.ast2tree(ast))))
        ast = stage(ast, env)
    return ast
def run_pipeline(self):
    """Apply every stage named in ``self.order`` to the AST, in sequence.

    Returns the ``(func_signature, symtab, ast)`` triple after all stages
    have run.
    """
    ast = self.ast
    for method_name in self.order:
        # Dump the AST tree only under ultra-verbose logging, i.e. an
        # effective level strictly below DEBUG.
        if __debug__ and logger.getEffectiveLevel() < logging.DEBUG:
            logger.debug(pprint.pformat((method_name, utils.ast2tree(ast))))
        stage = getattr(self, method_name)
        ast = stage(ast)
    return self.func_signature, self.symtab, ast
def run_pipeline(self):
    """Apply every stage named in ``self.order`` to the AST, in sequence.

    Records the stage currently executing in
    ``self._current_pipeline_stage`` (useful for error reporting), and
    returns the ``(func_signature, symtab, ast)`` triple.
    """
    ast = self.ast
    for method_name in self.order:
        # Dump the AST tree only under ultra-verbose logging, i.e. an
        # effective level strictly below DEBUG.
        if __debug__ and logger.getEffectiveLevel() < logging.DEBUG:
            logger.debug(pprint.pformat((method_name, utils.ast2tree(ast))))
        self._current_pipeline_stage = method_name
        stage = getattr(self, method_name)
        ast = stage(ast)
    return self.func_signature, self.symtab, ast
def run_pipeline(self):
    """Apply every stage named in ``self.order`` to the AST, with timing.

    Logs per-stage and whole-pipeline wall-clock times to the dedicated
    ``numba.pipeline.profiler`` logger, records the stage currently
    executing in ``self._current_pipeline_stage``, and returns the
    ``(func_signature, symtab, ast)`` triple.
    """
    # A dedicated logger so profiling output can be enabled independently.
    logger = logging.getLogger("numba.pipeline.profiler")
    ast = self.ast
    pipeline_start = _timer()  # whole-pipeline timing
    for method_name in self.order:
        stage_start = _timer()  # per-stage timing
        # Dump the AST tree only under ultra-verbose logging, i.e. an
        # effective level strictly below DEBUG.
        if __debug__ and logger.getEffectiveLevel() < logging.DEBUG:
            logger.debug(pprint.pformat((method_name, utils.ast2tree(ast))))
        self._current_pipeline_stage = method_name
        ast = getattr(self, method_name)(ast)
        stage_end = _timer()
        logger.info("%X pipeline stage %30s:\t%.3fms",
                    id(self), method_name,
                    (stage_end - stage_start) * 1000)
    pipeline_end = _timer()
    logger.info("%X pipeline entire:\t\t\t\t\t%.3fms",
                id(self), (pipeline_end - pipeline_start) * 1000)
    return self.func_signature, self.symtab, ast