def _open_scheduler(*args, **kwargs):
  """Yield a started scheduler/engine for the given setup arguments, closing the engine on exit.

  Presumably wrapped with `@contextmanager` (decorator not visible in this chunk) — TODO confirm.

  :param args: Positional arguments forwarded verbatim to `setup`.
  :param kwargs: Keyword arguments forwarded verbatim to `setup`.
  :yields: A `(scheduler, engine, symbol_table_cls, spec_roots)` tuple.
  """
  scheduler, storage, options, spec_roots, symbol_table_cls = setup(*args, **kwargs)
  engine = LocalSerialEngine(scheduler, storage)
  engine.start()
  try:
    yield scheduler, engine, symbol_table_cls, spec_roots
    # Only reached if the caller's block completed without raising: an exception
    # propagating out of the `yield` skips the pantsd launch but still hits `finally`.
    maybe_launch_pantsd(options, scheduler)
  finally:
    # NOTE(review): reaches into the engine's private `_cache` member for stats.
    logger.debug('Cache stats: {}'.format(engine._cache.get_stats()))
    engine.close()
def visualize_build_request(build_root, goals, subjects):
  """Render the execution graph for a build request rooted at `build_root`.

  :param build_root: Root directory passed to `setup_json_scheduler`.
  :param goals: Goals used to construct the execution request.
  :param subjects: Subjects used to construct the execution request.
  """
  scheduler, storage = setup_json_scheduler(build_root)
  execution_request = scheduler.build_request(goals, subjects)
  # NB: Calls `reduce` independently of `execute`, in order to render a graph before validating it.
  engine = LocalSerialEngine(scheduler, storage)
  engine.start()
  try:
    engine.reduce(execution_request)
    visualize_execution_graph(scheduler, storage, execution_request)
  finally:
    engine.close()
def _open_scheduler(*args, **kwargs):
  """Yield a started scheduler/engine for the given setup arguments, closing the engine on exit.

  Variant that reports cache stats on stderr rather than via a logger.
  Presumably wrapped with `@contextmanager` (decorator not visible in this chunk) — TODO confirm.

  :param args: Positional arguments forwarded verbatim to `setup`.
  :param kwargs: Keyword arguments forwarded verbatim to `setup`.
  :yields: A `(scheduler, engine, symbol_table_cls, spec_roots)` tuple.
  """
  scheduler, storage, options, spec_roots, symbol_table_cls = setup(*args, **kwargs)
  engine = LocalSerialEngine(scheduler, storage)
  engine.start()
  try:
    yield scheduler, engine, symbol_table_cls, spec_roots
    # Only reached if the caller's block completed without raising: an exception
    # propagating out of the `yield` skips the pantsd launch but still hits `finally`.
    maybe_launch_pantsd(options, scheduler)
  finally:
    # NOTE(review): reaches into the engine's private `_cache` member for stats.
    print('Cache stats: {}'.format(engine._cache.get_stats()), file=sys.stderr)
    engine.close()
def _open_graph():
  """Yield a populated `ExpGraph` and the addresses injected from the setup's spec roots.

  Presumably wrapped with `@contextmanager` (decorator not visible in this chunk) — TODO confirm.

  :yields: A `(graph, addresses)` tuple, where `addresses` is the tuple of Addresses
    produced by injecting the spec-roots closure into the graph.
  """
  scheduler, storage, spec_roots, symbol_table_cls = setup()
  # Populate the graph for the given request, and print the resulting Addresses.
  engine = LocalSerialEngine(scheduler, storage)
  engine.start()
  try:
    graph = ExpGraph(scheduler, engine, symbol_table_cls)
    addresses = tuple(graph.inject_specs_closure(spec_roots))
    yield graph, addresses
  finally:
    # NOTE(review): reaches into the engine's private `_cache` member for stats.
    print('Cache stats: {}'.format(engine._cache.get_stats()), file=sys.stderr)
    engine.close()