def main():
    """Run a single busybox task through a file-persisted stateful executor.

    Prints the persisted status of the task after it completes.
    """
    mesos_address = os.getenv('MESOS', 'mesosmaster:5050')
    # Shared secret used to authenticate the framework with Mesos.
    with open('./examples/cluster/secret') as f:
        secret = f.read().strip()

    processor = TaskProcessor()
    for plugin in ('mesos', 'stateful'):
        processor.load_plugin(provider_module='task_processing.plugins.' + plugin)

    mesos_executor = processor.executor_from_config(
        provider='mesos_task',
        provider_config={
            'secret': secret,
            'mesos_address': mesos_address,
            'role': 'taskproc',
        },
    )
    # Wrap the Mesos executor so task events are persisted to a local file.
    executor = processor.executor_from_config(
        provider='stateful',
        provider_config={
            'downstream_executor': mesos_executor,
            'persister': FilePersistence(output_file='/tmp/foo'),
        },
    )

    runner = Sync(executor=executor)
    tasks = set()
    TaskConfig = mesos_executor.TASK_CONFIG_INTERFACE
    for _ in range(1, 2):
        task_config = TaskConfig(image='busybox', cmd='/bin/true')
        tasks.add(task_config.task_id)
        runner.run(task_config)
        print(executor.status(task_config.task_id))
def main():
    """Run a counting shell task through a logging-wrapped Mesos executor."""
    args = parse_args()

    processor = TaskProcessor()
    processor.load_plugin(provider_module='task_processing.plugins.mesos')

    mesos_executor = processor.executor_from_config(
        provider='mesos_task',
        provider_config={
            'secret': args.secret,
            'mesos_address': args.master,
            'pool': args.pool,
            'role': args.role,
        },
    )
    # The logging plugin simply logs events from the downstream executor.
    executor = processor.executor_from_config(
        provider='logging',
        provider_config={'downstream_executor': mesos_executor},
    )

    TaskConfig = mesos_executor.TASK_CONFIG_INTERFACE
    task_config = TaskConfig(
        image="ubuntu:14.04",
        cmd="bash -c 'for i in $(seq 1 5); do echo $i&&sleep 10; done'",
    )

    runner = Sync(executor=executor)
    result = runner.run(task_config)
    print(result)
    runner.stop()
def main():
    """Run a deliberately slow task under a 10-second timeout executor."""
    args = parse_args()

    processor = TaskProcessor()
    processor.load_plugin(provider_module='task_processing.plugins.mesos')

    mesos_executor = processor.executor_from_config(
        provider='mesos_task',
        provider_config={
            'secret': args.secret,
            'mesos_address': args.master,
            'pool': args.pool,
            'role': args.role,
        },
    )
    # The timeout plugin enforces each task's `timeout` attribute.
    executor = processor.executor_from_config(
        provider='timeout',
        provider_config={'downstream_executor': mesos_executor},
    )

    TaskConfig = mesos_executor.TASK_CONFIG_INTERFACE
    # Sleeps for 30s but the timeout is 10s, so this should be killed early.
    task_config = TaskConfig(
        image='docker-dev.yelpcorp.com/dumb-busybox',
        cmd='exec dumb-init /bin/sleep 30',
        timeout=10,
    )

    runner = Sync(executor=executor)
    result = runner.run(task_config)
    print(result)
    runner.stop()
def main():
    """Run a short sleep task with task events persisted to DynamoDB."""
    mesos_address = os.getenv('MESOS', 'mesosmaster:5050')
    # Shared secret used to authenticate the framework with Mesos.
    with open('./examples/cluster/secret') as f:
        secret = f.read().strip()

    processor = TaskProcessor()
    for plugin in ('mesos', 'stateful'):
        processor.load_plugin(provider_module='task_processing.plugins.' + plugin)

    mesos_executor = processor.executor_from_config(
        provider='mesos_task',
        provider_config={
            'secret': secret,
            'mesos_address': mesos_address,
            'role': 'taskproc',
        },
    )

    # Dummy credentials: this targets a local DynamoDB endpoint, not AWS.
    s = session.Session(
        region_name='foo',
        aws_access_key_id='foo',
        aws_secret_access_key='bar',
    )
    dynamo_address = os.getenv('DYNAMO', 'http://dynamodb:5050')
    client = s.client(
        service_name='dynamodb',
        endpoint_url=dynamo_address,
    )
    try:
        create_table(client)
    except ClientError:
        # Best-effort: the table most likely already exists.
        pass

    executor = processor.executor_from_config(
        provider='stateful',
        provider_config={
            'downstream_executor': mesos_executor,
            'persister': DynamoDBPersister(
                table_name='events',
                endpoint_url=dynamo_address,
                session=s,
            ),
        },
    )

    runner = Sync(executor=executor)
    tasks = set()
    TaskConfig = mesos_executor.TASK_CONFIG_INTERFACE
    for _ in range(1, 2):
        task_config = TaskConfig(image='ubuntu:14.04', cmd='/bin/sleep 2')
        tasks.add(task_config.task_id)
        runner.run(task_config)
        print(executor.status(task_config.task_id))
def run_task(executor, task_config):
    """Run *task_config* synchronously and return its terminal event.

    If the terminal event reports a 'lost' platform_type, the task is
    explicitly killed before the runner is stopped.
    """
    runner = Sync(executor)
    set_runner_signal_handlers(runner)
    result = runner.run(task_config)
    was_lost = getattr(result, 'platform_type', None) == 'lost'
    if was_lost:
        runner.kill(task_config.task_id)
    runner.stop()
    return result
def main():
    """Minimal taskproc example: run `echo "hello world"` on Mesos.

    Returns 0 on task success, 1 otherwise.
    """
    # Address of the Mesos master to register with.
    mesos_address = os.getenv('MESOS', 'mesosmaster:5050')

    # The secret authenticates the taskproc scheduler with Mesos.
    with open('./examples/cluster/secret') as f:
        secret = f.read().strip()

    processor = TaskProcessor()
    processor.load_plugin(provider_module='task_processing.plugins.mesos')

    # Build a taskproc executor (not to be confused with a Mesos executor).
    # The provider_config can also carry other Mesos settings, e.g. the role.
    executor = processor.executor_from_config(
        provider='mesos_task',
        provider_config={
            'secret': secret,
            'mesos_address': mesos_address,
            'role': 'taskproc',
        },
    )

    # Sync runners block until the task reaches a terminal state.
    runner = Sync(executor)

    # Describe the task to run: busybox image, echo a greeting.
    TaskConfig = executor.TASK_CONFIG_INTERFACE
    task_config = TaskConfig(image="busybox", cmd='echo "hello world"')

    result = runner.run(task_config)
    print(result)

    # Stop the framework and unregister it from Mesos.
    runner.stop()

    return 0 if result.success else 1
def main():
    """Run /bin/true in a busybox container and report the result.

    Returns 0 on task success, 1 otherwise.
    """
    args = parse_args()

    processor = TaskProcessor()
    processor.load_plugin(provider_module='task_processing.plugins.mesos')

    executor = processor.executor_from_config(
        provider='mesos_task',
        provider_config={
            'secret': args.secret,
            'mesos_address': args.master,
            'pool': args.pool,
            'role': args.role,
        },
    )

    TaskConfig = executor.TASK_CONFIG_INTERFACE
    task_config = TaskConfig(image="busybox", cmd='/bin/true')
    # This only works on agents that have added mesos as a containerizer
    # task_config = TaskConfig(containerizer='MESOS', cmd='/bin/true')

    runner = Sync(executor)
    result = runner.run(task_config)
    print(result)
    print(result.raw)
    runner.stop()

    return 0 if result.success else 1
def remote_run_start(args):
    """Schedule a remote-run task on Mesos and wait for its terminal event.

    Parses overrides from *args*, optionally daemonizes (--detach), builds
    the task config and executor stack, runs the task synchronously, and
    exits the process with 0 on success or 1 on any failure (143 on SIGTERM).
    """
    system_paasta_config, service, cluster, \
        soa_dir, instance, instance_type = extract_args(args)

    overrides_dict = {}

    constraints_json = args.constraints_json
    if constraints_json:
        # BUGFIX: `constraints` must be bound even when json.loads raises,
        # otherwise the `if constraints:` below hits a NameError.
        constraints = None
        try:
            constraints = json.loads(constraints_json)
        except Exception as e:
            # BUGFIX: interpolate the exception; paasta_print does not
            # perform %-formatting on extra arguments.
            paasta_print("Error while parsing constraints: %s" % e)
        if constraints:
            overrides_dict['constraints'] = constraints

    if args.cmd:
        overrides_dict['cmd'] = args.cmd

    if args.instances:
        overrides_dict['instances'] = args.instances

    run_id = args.run_id
    if run_id is None:
        run_id = ''.join(
            random.choice(string.ascii_uppercase + string.digits)
            for _ in range(8)
        )
        paasta_print("Assigned random run-id: %s" % run_id)

    if args.detach:
        # Classic double-fork daemonization so the scheduler survives the
        # controlling terminal; both parents return immediately.
        paasta_print("Running in background")
        if os.fork() > 0:
            return
        os.setsid()
        if os.fork() > 0:
            return
        sys.stdout = open('/dev/null', 'w')
        sys.stderr = open('/dev/null', 'w')

    paasta_print('Scheduling a task on Mesos')

    processor = TaskProcessor()
    processor.load_plugin(provider_module='task_processing.plugins.mesos')
    processor.load_plugin(provider_module='task_processing.plugins.stateful')
    MesosExecutor = processor.executor_cls(provider='mesos')

    native_job_config = load_paasta_native_job_config(
        service,
        instance,
        cluster,
        soa_dir=soa_dir,
        instance_type=instance_type,
        config_overrides=overrides_dict,
        load_deployments=not args.docker_image,
    )
    try:
        task_config = MesosExecutor.TASK_CONFIG_INTERFACE(
            **paasta_to_task_config_kwargs(
                service=service,
                instance=instance,
                system_paasta_config=system_paasta_config,
                native_job_config=native_job_config,
                config_overrides=overrides_dict,
                docker_image=args.docker_image,
                offer_timeout=args.staging_timeout,
            ),
        )
    except InvariantException as e:
        if len(e.missing_fields) > 0:
            paasta_print(
                PaastaColors.red(
                    "Mesos task config is missing following fields: {}".format(
                        ', '.join(e.missing_fields),
                    ),
                ),
            )
        elif len(e.invariant_errors) > 0:
            paasta_print(
                PaastaColors.red(
                    "Mesos task config is failing following checks: {}".format(
                        ', '.join(str(ie) for ie in e.invariant_errors),
                    ),
                ),
            )
        else:
            paasta_print(PaastaColors.red(f"Mesos task config error: {e}"))
        traceback.print_exc()
        emit_counter_metric('paasta.remote_run.start.failed', service, instance)
        sys.exit(1)
    except PTypeError as e:
        paasta_print(
            PaastaColors.red(
                f"Mesos task config is failing a type check: {e}",
            ),
        )
        traceback.print_exc()
        emit_counter_metric('paasta.remote_run.start.failed', service, instance)
        sys.exit(1)

    # BUGFIX: the signal handler closes over `runner`; initialize it before
    # the handlers are installed so an early SIGINT/SIGTERM does not raise
    # NameError instead of shutting down cleanly.
    runner = None

    def handle_interrupt(_signum, _frame):
        paasta_print(
            PaastaColors.red("Signal received, shutting down scheduler."),
        )
        if runner is not None:
            runner.stop()
        if _signum == signal.SIGTERM:
            sys.exit(143)
        else:
            sys.exit(1)

    signal.signal(signal.SIGINT, handle_interrupt)
    signal.signal(signal.SIGTERM, handle_interrupt)

    default_role = system_paasta_config.get_remote_run_config().get(
        'default_role')
    assert default_role

    try:
        executor_stack = build_executor_stack(
            processor=processor,
            service=service,
            instance=instance,
            role=native_job_config.get_role() or default_role,
            pool=native_job_config.get_pool(),
            cluster=cluster,
            run_id=run_id,
            system_paasta_config=system_paasta_config,
            framework_staging_timeout=args.staging_timeout,
        )
        runner = Sync(executor_stack)
        terminal_event = runner.run(task_config)
        runner.stop()
    # ValueError is already an Exception subclass; the old
    # `(Exception, ValueError)` tuple was redundant.
    except Exception as e:
        # BUGFIX: interpolate the exception into the message.
        paasta_print("Exception while running executor stack: %s" % e)
        traceback.print_exc()
        emit_counter_metric('paasta.remote_run.start.failed', service, instance)
        sys.exit(1)

    if terminal_event.success:
        paasta_print("Task finished successfully")
        sys.exit(0)
    else:
        paasta_print(PaastaColors.red(f"Task failed: {terminal_event.raw}"))
        # This is not necessarily an infrastructure failure. It may just be
        # an application failure.
        emit_counter_metric('paasta.remote_run.start.failed', service, instance)
        sys.exit(1)
def remote_run_start(args):
    """Schedule a remote-run task on Mesos and wait for its terminal event.

    Parses overrides from *args*, optionally daemonizes (--detach), builds
    the executor stack, runs the task synchronously, and exits the process
    with 0 on success or 1 on failure.
    """
    system_paasta_config, service, cluster, soa_dir, instance, instance_type = \
        extract_args(args)

    overrides_dict = {}

    constraints_json = args.constraints_json
    if constraints_json:
        # BUGFIX: `constraints` must be bound even when json.loads raises,
        # otherwise the `if constraints:` below hits a NameError.
        constraints = None
        try:
            constraints = json.loads(constraints_json)
        except Exception as e:
            # BUGFIX: interpolate the exception; paasta_print does not
            # perform %-formatting on extra arguments.
            paasta_print("Error while parsing constraints: %s" % e)
        if constraints:
            overrides_dict['constraints'] = constraints

    if args.cmd:
        overrides_dict['cmd'] = args.cmd

    if args.instances:
        overrides_dict['instances'] = args.instances

    run_id = args.run_id
    if run_id is None:
        run_id = ''.join(
            random.choice(string.ascii_uppercase + string.digits)
            for _ in range(8)
        )
        paasta_print("Assigned random run-id: %s" % run_id)

    if args.detach:
        # Classic double-fork daemonization so the scheduler survives the
        # controlling terminal; both parents return immediately.
        paasta_print("Running in background")
        if os.fork() > 0:
            return
        os.setsid()
        if os.fork() > 0:
            return
        sys.stdout = open('/dev/null', 'w')
        sys.stderr = open('/dev/null', 'w')

    paasta_print('Scheduling a task on Mesos')

    executor_stack = build_executor_stack(
        service,
        instance,
        run_id,
        system_paasta_config,
        args.staging_timeout,
    )
    runner = Sync(executor_stack)

    def handle_interrupt(_signum, _frame):
        paasta_print(
            PaastaColors.red("Signal received, shutting down scheduler."))
        runner.stop()

    signal.signal(signal.SIGINT, handle_interrupt)
    signal.signal(signal.SIGTERM, handle_interrupt)

    # NOTE(review): MesosExecutor is not defined in this function; it is
    # presumably a module-level import — confirm it exists at module scope.
    task_config = MesosExecutor.TASK_CONFIG_INTERFACE(
        **paasta_to_task_config_kwargs(
            service,
            instance,
            cluster,
            system_paasta_config,
            instance_type=instance_type,
            soa_dir=soa_dir,
            config_overrides=overrides_dict,
        ))

    terminal_event = runner.run(task_config)
    runner.stop()

    if terminal_event.success:
        paasta_print("Task finished successfully")
        sys.exit(0)
    else:
        paasta_print(
            PaastaColors.red("Task failed: {}".format(terminal_event.raw)))
        sys.exit(1)