def run(pipelines, noprint, processes, is_fork, booster):
    """
    Run all pipelines, or only the named ones.

    The process listens for jobs until you press CTRL-C to exit.

    Use `pipelines:run` to run all pipelines.
    Use `pipelines:run pipeline_name` to run a specific pipeline.
    """
    project = get_project()
    project.set_verbose(not noprint)

    if project.verbose:
        print("Pipelines started")

    if pipelines is not None:
        pipelines_to_run = []
        for p in pipelines:
            p_to_run = project.get_pipeline_by_name(p)
            if p_to_run is None:
                raise RuntimeError("Pipeline `%s` not found" % p)
            pipelines_to_run.append(p_to_run)
    else:
        pipelines_to_run = None

    fork_context = get_context("fork")

    if project.stepist_app.use_booster:
        if not is_fork:
            # Fork `processes` workers plus one dedicated booster process,
            # then wait for all of them to finish.
            processes_objects = []
            for i in range(processes):
                p = fork_context.Process(target=exec_current_one)
                p.start()
                processes_objects.append(p)

            p = fork_context.Process(target=exec_current_one,
                                     kwargs=dict(use_booster=True))
            p.start()
            processes_objects.append(p)

            for p in processes_objects:
                p.join()
        else:
            project.run_pipelines(pipelines_to_run, use_booster=booster)
    elif processes > 1 and not is_fork:
        # No booster: fork plain worker processes and wait for them.
        processes_objects = []
        for i in range(processes):
            p = fork_context.Process(target=exec_current_one)
            p.start()
            processes_objects.append(p)

        for p in processes_objects:
            p.join()
    else:
        project.run_pipelines(pipelines_to_run, use_booster=booster)

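# Example invocations of the command above (illustrative; `stairs` as the
# console-script name is an assumption about how the CLI is installed):
#
#   $ stairs pipelines:run                  # listen for jobs on all pipelines
#   $ stairs pipelines:run my_pipeline      # listen on `my_pipeline` only
#
# Press CTRL-C to stop the workers.
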
def get_from_monitor(app, monitoring_for_sec=10):
    """
    Watch queue activity for every pipeline of `app` during
    `monitoring_for_sec` seconds and return a list of MonitorQueueStats.
    """
    worker_engine = get_project().stepist_app.worker_engine

    steps = [pipeline.step for pipeline in app.components.pipelines.values()]

    s_push, s_pop = worker_engine.monitor_steps(
        steps, monitoring_for_sec=monitoring_for_sec)

    inspects = []
    for pipeline in app.components.pipelines.values():
        step_key = pipeline.step.step_key()

        new_jobs = s_push.get(step_key, 0)
        jobs_processed = s_pop.get(step_key, 0)

        s = MonitorQueueStats(name=pipeline.get_handler_name(),
                              amount_of_jobs=get_jobs_count(pipeline),
                              jobs_processed=jobs_processed,
                              amount_of_time=monitoring_for_sec,
                              new_jobs=new_jobs)
        inspects.append(s)

    return inspects

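# A minimal sketch of consuming the stats above. `print_monitor_report` is a
# hypothetical helper, and it assumes MonitorQueueStats exposes its
# constructor kwargs (name, amount_of_jobs, jobs_processed, amount_of_time,
# new_jobs) as attributes:
def print_monitor_report(app, monitoring_for_sec=5):
    for stats in get_from_monitor(app, monitoring_for_sec=monitoring_for_sec):
        rate = stats.jobs_processed / stats.amount_of_time
        print("%s: %s jobs queued, %s new, %.1f processed/sec"
              % (stats.name, stats.amount_of_jobs, stats.new_jobs, rate))
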
def init_session(name, noprint):
    """
    Start listening to pipelines and execute the jobs which were sent
    to the specified consumer.
    """
    project = get_project()
    project.set_verbose(not noprint)

    if project.verbose:
        print("Standalone consumer started, listening for tasks ...")
        print("Press CTRL-C to stop listening")

    consumer = project.get_consumer_by_name(name)
    if consumer is None:
        raise RuntimeError("Consumer `%s` not found" % name)

    consumer.run_worker()

def flushall():
    """
    Flush all queues which were used in the project.
    """
    project = get_project()

    msg = "Do you want to flush all jobs in your project?"
    if click.confirm(msg):
        for step in project.stepist_app.get_workers_steps():
            step.flush_all()

def generator(app_name, generator_key):
    apps = get_project().apps

    app = [app for app in apps if app.app_name == app_name]
    if len(app) != 1:
        raise RuntimeError("App `%s` not found" % app_name)
    app = app[0]

    producer = app.components.producers.get(generator_key, None)
    if producer is None:
        raise RuntimeError("Producer `%s` not found" % generator_key)

    return render_template('generator.mako',
                           apps=apps,
                           app=app,
                           producer=producer)

def pipeline(app_name, worker_key):
    apps = get_project().apps

    app = [app for app in apps if app.app_name == app_name]
    if len(app) != 1:
        raise RuntimeError("App `%s` not found" % app_name)
    app = app[0]

    worker = app.components.pipelines.get(worker_key, None)
    if worker is None:
        raise RuntimeError("Worker `%s` not found" % worker_key)

    return render_template('pipeline.mako',
                           apps=apps,
                           app=app,
                           worker=worker)

def handle_sqs_event(pipeline, event):
    """
    Unpickle the body of each SQS record and run it through the pipeline.
    """
    pickler = get_project().data_pickler

    for record in event:
        handle_job(pipeline, **pickler.loads(record.body))

def handle_job(pipeline, **data):
    """
    Put one job onto the pipeline's step, then process the queue until it
    is empty, stopping on the first error.
    """
    pipeline.step.receive_job(**data)
    get_project().run_pipelines(pipeline,
                                die_when_empty=True,
                                die_on_error=True)

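# A minimal sketch of feeding handle_sqs_event from boto3. `poll_queue_once`
# is a hypothetical helper and the queue name is illustrative; boto3's
# receive_messages() returns Message objects whose `.body` attribute is
# exactly what handle_sqs_event unpickles:
import boto3

def poll_queue_once(pipeline, queue_name):
    sqs = boto3.resource("sqs")
    queue = sqs.get_queue_by_name(QueueName=queue_name)
    messages = queue.receive_messages(MaxNumberOfMessages=10,
                                      WaitTimeSeconds=5)

    # Hand the whole batch to the handler above, then acknowledge the
    # messages only after their jobs were processed.
    handle_sqs_event(pipeline, messages)
    for message in messages:
        message.delete()
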
def get_jobs_count(pipeline):
    return get_project().stepist_app.worker_engine.jobs_count(pipeline.step)

def index():
    apps = get_project().apps
    return render_template('main.mako', apps=apps)