def test_disable_partition_policy():
  hwc = HELLO_WORLD(
      production=True,
      priority=200,
      service=True,
      cron_collision_policy='RUN_OVERLAP',
      partition_policy=PystachioPartitionPolicy(reschedule=False),
      constraints={
          'dedicated': 'root',
          'cpu': 'x86_64'
      },
      environment='prod')
  job = convert_pystachio_to_thrift(hwc)
  assert job.taskConfig.partitionPolicy == PartitionPolicy(False, 0)
def test_config_with_options():
  hwc = HELLO_WORLD(
      production=True,
      priority=200,
      service=True,
      cron_collision_policy='RUN_OVERLAP',
      partition_policy=PystachioPartitionPolicy(delay_secs=10),
      constraints={
          'dedicated': 'root',
          'cpu': 'x86_64'
      },
      environment='prod')
  job = convert_pystachio_to_thrift(hwc)
  assert job.instanceCount == 1
  tti = job.taskConfig

  assert tti.production
  assert tti.priority == 200
  assert tti.isService
  assert job.cronCollisionPolicy == CronCollisionPolicy.RUN_OVERLAP
  assert len(tti.constraints) == 2
  assert job.key.environment == 'prod'
  assert tti.partitionPolicy == PartitionPolicy(True, 10)
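

# A hedged illustration of the positional PartitionPolicy comparisons above: it assumes
# the generated thrift struct takes (reschedule, delaySecs) in that order, so the
# positional and keyword forms below are interchangeable. The test name is illustrative
# and not part of the original suite.
def test_partition_policy_field_order():
  assert PartitionPolicy(False, 0) == PartitionPolicy(reschedule=False, delaySecs=0)
  assert PartitionPolicy(True, 10) == PartitionPolicy(reschedule=True, delaySecs=10)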
def convert(job, metadata=frozenset(), ports=frozenset()):
  """Convert a Pystachio MesosJob to an Aurora Thrift JobConfiguration."""

  owner = Identity(user=getpass.getuser())
  key = JobKey(
      role=assert_valid_field('role', fully_interpolated(job.role())),
      environment=assert_valid_field('environment', fully_interpolated(job.environment())),
      name=assert_valid_field('name', fully_interpolated(job.name())))

  task_raw = job.task()

  MB = 1024 * 1024
  task = TaskConfig()

  def not_empty_or(item, default):
    return default if item is Empty else fully_interpolated(item)

  # job components
  task.production = fully_interpolated(job.production(), bool)
  task.isService = select_service_bit(job)
  task.maxTaskFailures = fully_interpolated(job.max_task_failures())
  task.priority = fully_interpolated(job.priority())
  task.contactEmail = not_empty_or(job.contact(), None)
  task.tier = not_empty_or(job.tier(), None)

  if job.has_partition_policy():
    task.partitionPolicy = PartitionPolicy(
        fully_interpolated(job.partition_policy().reschedule()),
        fully_interpolated(job.partition_policy().delay_secs()))

  # Add metadata to a task, to display in the scheduler UI.
  metadata_set = frozenset()
  if job.has_metadata():
    customized_metadata = job.metadata()
    metadata_set |= frozenset(
        (str(fully_interpolated(key_value_metadata.key())),
         str(fully_interpolated(key_value_metadata.value())))
        for key_value_metadata in customized_metadata)
  metadata_set |= frozenset((str(key), str(value)) for key, value in metadata)
  task.metadata = frozenset(Metadata(key=key, value=value) for key, value in metadata_set)

  # task components
  if not task_raw.has_resources():
    raise InvalidConfig('Task must specify resources!')

  if (fully_interpolated(task_raw.resources().ram()) == 0 or
      fully_interpolated(task_raw.resources().disk()) == 0):
    raise InvalidConfig(
        'Must specify ram and disk resources, got ram:%r disk:%r' % (
            fully_interpolated(task_raw.resources().ram()),
            fully_interpolated(task_raw.resources().disk())))

  numCpus = fully_interpolated(task_raw.resources().cpu())
  ramMb = fully_interpolated(task_raw.resources().ram()) / MB
  diskMb = fully_interpolated(task_raw.resources().disk()) / MB
  if numCpus <= 0 or ramMb <= 0 or diskMb <= 0:
    raise InvalidConfig(
        'Task has invalid resources. cpu/ramMb/diskMb must all be positive: '
        'cpu:%r ramMb:%r diskMb:%r' % (numCpus, ramMb, diskMb))
  numGpus = fully_interpolated(task_raw.resources().gpu())

  task.resources = frozenset(
      [Resource(numCpus=numCpus),
       Resource(ramMb=ramMb),
       Resource(diskMb=diskMb)] +
      [Resource(namedPort=p) for p in ports] +
      ([Resource(numGpus=numGpus)] if numGpus else []))

  task.job = key
  task.owner = owner
  task.taskLinks = {}  # See AURORA-739
  task.constraints = constraints_to_thrift(not_empty_or(job.constraints(), {}))
  task.container = create_container_config(job.container())

  underlying, refs = job.interpolate()

  # need to fake an instance id for the sake of schema checking
  underlying_checked = underlying.bind(mesos={'instance': 31337, 'hostname': ''})
  try:
    ThermosTaskValidator.assert_valid_task(underlying_checked.task())
  except ThermosTaskValidator.InvalidTaskError as e:
    raise InvalidConfig('Task is invalid: %s' % e)
  if not underlying_checked.check().ok():
    raise InvalidConfig('Job not fully specified: %s' % underlying.check().message())

  unbound = []
  for ref in refs:
    if ref in (THERMOS_TASK_ID_REF, MESOS_INSTANCE_REF, MESOS_HOSTNAME_REF) or (
        Ref.subscope(THERMOS_PORT_SCOPE_REF, ref)):
      continue
    unbound.append(ref)

  if unbound:
    raise InvalidConfig('Config contains unbound variables: %s' % ' '.join(map(str, unbound)))

  # set the executor that will be used by the Mesos task. Thermos is the default
  executor = job.executor_config()
  if fully_interpolated(executor.name()) == AURORA_EXECUTOR_NAME:
    task.executorConfig = ExecutorConfig(
        name=AURORA_EXECUTOR_NAME,
        data=filter_aliased_fields(underlying).json_dumps())
  else:
    task.executorConfig = ExecutorConfig(
        name=fully_interpolated(executor.name()),
        data=fully_interpolated(executor.data()))

  return JobConfiguration(
      key=key,
      owner=owner,
      cronSchedule=not_empty_or(job.cron_schedule(), None),
      cronCollisionPolicy=select_cron_policy(job.cron_collision_policy()),
      taskConfig=task,
      instanceCount=fully_interpolated(job.instances()))
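

# Minimal usage sketch for convert(). Assumptions: HELLO_WORLD is a pystachio Job
# fixture like the one used in the tests above, and 'http' is an extra named port
# that simply surfaces as a namedPort resource on the resulting TaskConfig. This
# function is illustrative only and not part of the module.
def example_convert_usage():
  job_config = convert(HELLO_WORLD(environment='prod'), ports=frozenset(['http']))
  # The JobKey is built from role/environment/name on the pystachio job.
  assert job_config.key.environment == 'prod'
  # Requested ports appear alongside cpu/ram/disk in the resource set.
  assert any(r.namedPort == 'http' for r in job_config.taskConfig.resources)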