Example #1
def test_deserialize_thermos_task_unbound_refs():
    # test unbound {{standard}} refs
    task_config = TaskConfig(executorConfig=ExecutorConfig(
        name='thermos', data=MESOS_JOB(task=HELLO_WORLD_UNBOUND).json_dumps()))
    assigned_task = AssignedTask(task=task_config, instanceId=0)
    with pytest.raises(TaskInfoError) as execinfo:
        mesos_task_instance_from_assigned_task(assigned_task)

    assert "Unexpected unbound refs: {{unbound_cmd}} {{unbound}}" in execinfo.value.message

    # test bound unscoped refs, valid case.
    task = BASE_TASK(name='task_name',
                     processes=[
                         Process(name='process_name',
                                 cmdline='echo {{thermos.ports[health]}}')
                     ])
    task_config = TaskConfig(executorConfig=ExecutorConfig(
        name='thermos', data=MESOS_JOB(task=task).json_dumps()))
    assigned_task = AssignedTask(task=task_config, instanceId=0)
    assert mesos_task_instance_from_assigned_task(assigned_task) is not None

    # test unbound unscoped refs
    for cmdline in ('echo {{hello_{{thermos.ports[health]}}}}',
                    'echo {{hello_{{thermos.user_id}}}}'):
        task = BASE_TASK(
            name='task_name',
            processes=[Process(name='process_name', cmdline=cmdline)])
        task_config = TaskConfig(executorConfig=ExecutorConfig(
            name='thermos', data=MESOS_JOB(task=task).json_dumps()))
        assigned_task = AssignedTask(task=task_config, instanceId=0)

        with pytest.raises(UnexpectedUnboundRefsError):
            mesos_task_instance_from_assigned_task(assigned_task)
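The unbound-ref behaviour exercised above comes from the Pystachio templating layer that Aurora configs are built on. A minimal sketch of the distinction using plain pystachio rather than the task-info helpers under test (the {{greeting}} ref and its value are made up for illustration):

from pystachio import String

template = String('echo {{greeting}} on port {{thermos.ports[health]}}')

# bind() supplies a value for {{greeting}}; interpolate() returns the partially
# interpolated object plus whatever refs are still unbound.
bound, unbound = template.bind(greeting='hello').interpolate()

# {{thermos.ports[health]}} remains unbound here -- Aurora binds it at runtime from the
# task's assignedPorts, which is why the "valid case" above deserializes cleanly while
# truly unbound refs such as {{unbound_cmd}} trigger UnexpectedUnboundRefsError.
print(bound)
print(unbound)  # the {{thermos.ports[health]}} ref is reported as still unbound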
Example #2
def test_deserialize_thermos_task():
    task_config = TaskConfig(executorConfig=ExecutorConfig(
        name='thermos', data=MESOS_JOB(task=HELLO_WORLD).json_dumps()))
    assigned_task = AssignedTask(task=task_config, instanceId=0)
    assert mesos_task_instance_from_assigned_task(assigned_task) == BASE_MTI(
        task=HELLO_WORLD)

    task_config = TaskConfig(executorConfig=ExecutorConfig(
        name='thermos', data=HELLO_WORLD_MTI.json_dumps()))
    assigned_task = AssignedTask(task=task_config, instanceId=0)
    assert mesos_task_instance_from_assigned_task(assigned_task) == BASE_MTI(
        task=HELLO_WORLD)
Example #3
def make_assigned_task(thermos_config, assigned_ports=None):
  from gen.apache.aurora.api.constants import AURORA_EXECUTOR_NAME
  from gen.apache.aurora.api.ttypes import (
      AssignedTask,
      ExecutorConfig,
      Identity,
      JobKey,
      TaskConfig
  )

  assigned_ports = assigned_ports or {}
  executor_config = ExecutorConfig(name=AURORA_EXECUTOR_NAME, data=thermos_config.json_dumps())
  task_config = TaskConfig(
      job=JobKey(
          role=thermos_config.role().get(),
          environment="prod",
          name=thermos_config.name().get()),
      owner=Identity(role=thermos_config.role().get(), user=thermos_config.role().get()),
      environment=thermos_config.environment().get(),
      jobName=thermos_config.name().get(),
      executorConfig=executor_config)

  return AssignedTask(
      instanceId=12345,
      task=task_config,
      assignedPorts=assigned_ports,
      slaveHost='test-host')
Example #4
 def test_from_assigned_task_shell(self):
     interval_secs = 17
     initial_interval_secs = 3
     max_consecutive_failures = 2
     timeout_secs = 5
     shell_config = ShellHealthChecker(shell_command='failed command')
     task_config = TaskConfig(executorConfig=ExecutorConfig(
         name='thermos-generic',
         data=MESOS_JOB(
             task=HELLO_WORLD,
             health_check_config=HealthCheckConfig(
                 health_checker=HealthCheckerConfig(shell=shell_config),
                 interval_secs=interval_secs,
                 initial_interval_secs=initial_interval_secs,
                 max_consecutive_failures=max_consecutive_failures,
                 timeout_secs=timeout_secs,
             )).json_dumps()))
     assigned_task = AssignedTask(task=task_config, instanceId=1)
     execconfig_data = json.loads(assigned_task.task.executorConfig.data)
     assert execconfig_data['health_check_config']['health_checker'][
         'shell']['shell_command'] == 'failed command'
     health_checker = HealthCheckerProvider().from_assigned_task(
         assigned_task, None)
     assert health_checker.threaded_health_checker.interval == interval_secs
     assert health_checker.threaded_health_checker.initial_interval == initial_interval_secs
     hct_max_fail = health_checker.threaded_health_checker.max_consecutive_failures
     assert hct_max_fail == max_consecutive_failures
Example #5
 def test_from_assigned_task_shell_no_demotion(self, mock_getpwnam):
     interval_secs = 17
     initial_interval_secs = 3
     max_consecutive_failures = 2
     timeout_secs = 5
     shell_config = ShellHealthChecker(shell_command='failed command')
     task_config = TaskConfig(
         job=JobKey(role='role', environment='env', name='name'),
         executorConfig=ExecutorConfig(
             name='thermos-generic',
             data=MESOS_JOB(
                 task=HELLO_WORLD,
                 health_check_config=HealthCheckConfig(
                     health_checker=HealthCheckerConfig(shell=shell_config),
                     interval_secs=interval_secs,
                     initial_interval_secs=initial_interval_secs,
                     max_consecutive_failures=max_consecutive_failures,
                     timeout_secs=timeout_secs,
                 )).json_dumps()))
     assigned_task = AssignedTask(task=task_config,
                                  instanceId=1,
                                  assignedPorts={'foo': 9001})
     execconfig_data = json.loads(assigned_task.task.executorConfig.data)
     assert execconfig_data['health_check_config']['health_checker'][
         'shell']['shell_command'] == 'failed command'
     health_checker = HealthCheckerProvider(
         nosetuid_health_checks=True).from_assigned_task(
             assigned_task, None)
     assert health_checker.threaded_health_checker.interval == interval_secs
     assert health_checker.threaded_health_checker.initial_interval == initial_interval_secs
     hct_max_fail = health_checker.threaded_health_checker.max_consecutive_failures
     assert hct_max_fail == max_consecutive_failures
     # Should not be trying to access role's user info.
     assert not mock_getpwnam.called
Example #6
 def test_from_assigned_task_http_endpoint_style_config(self):
     interval_secs = 17
     initial_interval_secs = 3
     max_consecutive_failures = 2
     http_config = HttpHealthChecker(endpoint='/foo',
                                     expected_response='bar',
                                     expected_response_code=201)
     task_config = TaskConfig(executorConfig=ExecutorConfig(
         name='thermos',
         data=MESOS_JOB(
             task=HELLO_WORLD,
             health_check_config=HealthCheckConfig(
                 health_checker=HealthCheckerConfig(http=http_config),
                 interval_secs=interval_secs,
                 initial_interval_secs=initial_interval_secs,
                 max_consecutive_failures=max_consecutive_failures,
                 timeout_secs=7)).json_dumps()))
     assigned_task = AssignedTask(task=task_config,
                                  instanceId=1,
                                  assignedPorts={'health': 9001})
     execconfig_data = json.loads(assigned_task.task.executorConfig.data)
     http_exec_config = execconfig_data['health_check_config'][
         'health_checker']['http']
     assert http_exec_config['endpoint'] == '/foo'
     assert http_exec_config['expected_response'] == 'bar'
     assert http_exec_config['expected_response_code'] == 201
     health_checker = HealthCheckerProvider().from_assigned_task(
         assigned_task, None)
     assert health_checker.threaded_health_checker.interval == interval_secs
     assert health_checker.threaded_health_checker.initial_interval == initial_interval_secs
Example #7
    def test_diff_unordered_configs(self):
        """Diff between two config objects with different repr but identical content works ok."""
        from_config = self.make_task_configs()[0]
        from_config.constraints = set([
            Constraint(name='value',
                       constraint=ValueConstraint(values=set(['1', '2']))),
            Constraint(name='limit',
                       constraint=TaskConstraint(limit=LimitConstraint(
                           limit=int(10))))
        ])
        from_config.taskLinks = {'task1': 'link1', 'task2': 'link2'}
        from_config.metadata = set(
            [Metadata(key='k2', value='v2'),
             Metadata(key='k1', value='v1')])
        from_config.executorConfig = ExecutorConfig(name='test',
                                                    data='test data')
        from_config.requestedPorts = set(['3424', '142', '45235'])

        # Deepcopy() almost guarantees from_config != to_config due to a different sequence of
        # dict insertions. That in turn generates unequal json objects. The ideal here would be to
        # assert to_config != from_config but that would produce a flaky test as I have observed
        # the opposite on rare occasions as the ordering is not stable between test runs.
        to_config = deepcopy(from_config)

        diff_result = self._updater._diff_configs(from_config, to_config)
        assert diff_result == "", ('diff result must be empty but was: %s' %
                                   diff_result)
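A tiny illustration of the ordering concern described in the comment above (a sketch only; the updater's _diff_configs is not reproduced here): two mappings built in different insertion orders compare differently, but serializing them with sort_keys=True yields identical JSON, which is why an empty diff is the expected outcome.

import json
from collections import OrderedDict

a = OrderedDict([('ramMb', 2), ('numCpus', 2)])   # hypothetical field subset, one insertion order
b = OrderedDict([('numCpus', 2), ('ramMb', 2)])   # same fields and values, different order

assert a != b                                                           # order-sensitive comparison
assert json.dumps(a, sort_keys=True) == json.dumps(b, sort_keys=True)   # canonical JSON matches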
Example #8
 def test_interpolate_cmd(self):
     """Making sure thermos.ports[foo] gets correctly substituted with assignedPorts info."""
     interval_secs = 17
     initial_interval_secs = 3
     max_consecutive_failures = 2
     timeout_secs = 5
     shell_cmd = 'FOO_PORT={{thermos.ports[foo]}} failed command'
     shell_config = ShellHealthChecker(shell_command=shell_cmd)
     task_config = TaskConfig(executorConfig=ExecutorConfig(
         name='thermos-generic',
         data=MESOS_JOB(
             task=HELLO_WORLD,
             health_check_config=HealthCheckConfig(
                 health_checker=HealthCheckerConfig(shell=shell_config),
                 interval_secs=interval_secs,
                 initial_interval_secs=initial_interval_secs,
                 max_consecutive_failures=max_consecutive_failures,
                 timeout_secs=timeout_secs,
             )).json_dumps()))
     assigned_task = AssignedTask(task=task_config,
                                  instanceId=1,
                                  assignedPorts={'foo': 9001})
     interpolated_cmd = HealthCheckerProvider.interpolate_cmd(assigned_task,
                                                              cmd=shell_cmd)
     assert interpolated_cmd == 'FOO_PORT=9001 failed command'
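A rough stand-in for the substitution exercised above (illustration only; HealthCheckerProvider.interpolate_cmd itself resolves the ref through the thermos/Pystachio context rather than a regex, and interpolate_ports below is a hypothetical helper):

import re

def interpolate_ports(cmd, assigned_ports):
    """Replace {{thermos.ports[NAME]}} placeholders with the task's assigned ports."""
    return re.sub(
        r'\{\{thermos\.ports\[(\w+)\]\}\}',
        lambda match: str(assigned_ports[match.group(1)]),
        cmd)

assert interpolate_ports('FOO_PORT={{thermos.ports[foo]}} failed command',
                         {'foo': 9001}) == 'FOO_PORT=9001 failed command'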
Example #9
 def pretty_print_task(task):
     # The raw configuration is not interesting - we only care about what gets parsed.
     task.configuration = None
     task.executorConfig = ExecutorConfig(name=AURORA_EXECUTOR_NAME,
                                          data=json.loads(
                                              task.executorConfig.data))
     return pp.pformat(vars(task))
Example #10
    def create_scheduled_tasks(cls):
        tasks = []
        for name in ['foo', 'bar', 'baz']:
            task = ScheduledTask(
                failureCount=0,
                assignedTask=AssignedTask(
                    taskId=1287391823,
                    slaveHost='slavehost',
                    task=TaskConfig(
                        maxTaskFailures=1,
                        executorConfig=ExecutorConfig(data='fake data'),
                        metadata=[],
                        job=JobKey(role=cls.TEST_ROLE,
                                   environment=cls.TEST_ENV,
                                   name=name),
                        owner=Identity(role=cls.TEST_ROLE),
                        environment=cls.TEST_ENV,
                        jobName=name,
                        numCpus=2,
                        ramMb=2,
                        diskMb=2),
                    instanceId=4237894,
                    assignedPorts={}),
                status=ScheduleStatus.RUNNING,
                taskEvents=[
                    TaskEvent(timestamp=28234726395,
                              status=ScheduleStatus.RUNNING,
                              message="Hi there")
                ])

            tasks.append(task)
        return tasks
Example #11
def test_deserialize_thermos_task_unbound_refs():
    task_config = TaskConfig(executorConfig=ExecutorConfig(
        name='thermos', data=MESOS_JOB(task=HELLO_WORLD_UNBOUND).json_dumps()))
    assigned_task = AssignedTask(task=task_config, instanceId=0)
    with pytest.raises(ValueError) as execinfo:
        mesos_task_instance_from_assigned_task(assigned_task)

    assert execinfo.value.message == "Unexpected unbound refs: {{unbound_cmd}} {{unbound}}"
Example #12
 def create_task_config(cls, name):
     return TaskConfig(
         maxTaskFailures=1,
         executorConfig=ExecutorConfig(data='{"fake": "data"}'),
         metadata=[],
         job=JobKey(role=cls.TEST_ROLE, environment=cls.TEST_ENV,
                    name=name),
         numCpus=2,
         ramMb=2,
         diskMb=2)
Example #13
 def create_task_config(cls, name):
     return TaskConfig(maxTaskFailures=1,
                       executorConfig=ExecutorConfig(data='fake data'),
                       metadata=[],
                       job=JobKey(role=cls.TEST_ROLE,
                                  environment=cls.TEST_ENV,
                                  name=name),
                       owner=Identity(role=cls.TEST_ROLE),
                       environment=cls.TEST_ENV,
                       jobName=name,
                       numCpus=2,
                       ramMb=2,
                       diskMb=2)
Example #14
    def test_from_assigned_task_shell_filesystem_image(self, mock_getpwnam):
        interval_secs = 17
        initial_interval_secs = 3
        max_consecutive_failures = 2
        timeout_secs = 5
        shell_config = ShellHealthChecker(shell_command='failed command')
        task_config = TaskConfig(
            job=JobKey(role='role', environment='env', name='name'),
            executorConfig=ExecutorConfig(
                name='thermos-generic',
                data=MESOS_JOB(
                    task=HELLO_WORLD,
                    health_check_config=HealthCheckConfig(
                        health_checker=HealthCheckerConfig(shell=shell_config),
                        interval_secs=interval_secs,
                        initial_interval_secs=initial_interval_secs,
                        max_consecutive_failures=max_consecutive_failures,
                        timeout_secs=timeout_secs,
                    )).json_dumps()))
        assigned_task = AssignedTask(task=task_config,
                                     instanceId=1,
                                     assignedPorts={'foo': 9001})
        execconfig_data = json.loads(assigned_task.task.executorConfig.data)
        assert execconfig_data['health_check_config']['health_checker'][
            'shell']['shell_command'] == 'failed command'

        mock_sandbox = mock.Mock(spec_set=SandboxInterface)
        type(mock_sandbox).root = mock.PropertyMock(return_value='/some/path')
        type(mock_sandbox).is_filesystem_image = mock.PropertyMock(
            return_value=True)

        with mock.patch(
                'apache.aurora.executor.common.health_checker.ShellHealthCheck'
        ) as mock_shell:
            HealthCheckerProvider(
                nosetuid_health_checks=False,
                mesos_containerizer_path='/some/path/mesos-containerizer'
            ).from_assigned_task(assigned_task, mock_sandbox)

            class NotNone(object):
                def __eq__(self, other):
                    return other is not None

            assert mock_shell.mock_calls == [
                mock.call(cmd='failed command',
                          wrapper_fn=NotNone(),
                          preexec_fn=None,
                          timeout_secs=5.0)
            ]
Example #15
def make_task(thermos_config, assigned_ports=None, **kw):
    assigned_ports = assigned_ports or {}
    role = getpass.getuser()
    task_id = thermos_config.task().name().get() + '-001'
    at = AssignedTask(taskId=task_id,
                      task=TaskConfig(executorConfig=ExecutorConfig(
                          name=AURORA_EXECUTOR_NAME,
                          data=thermos_config.json_dumps()),
                                      owner=Identity(role=role, user=role)),
                      assignedPorts=assigned_ports,
                      **kw)
    td = mesos_pb2.TaskInfo()
    td.task_id.value = task_id
    td.name = thermos_config.task().name().get()
    td.data = serialize(at)
    return td
Example #16
def make_assigned_task(thermos_config, assigned_ports=None):
    from gen.apache.aurora.api.constants import AURORA_EXECUTOR_NAME
    from gen.apache.aurora.api.ttypes import AssignedTask, ExecutorConfig, JobKey, TaskConfig

    assigned_ports = assigned_ports or {}
    executor_config = ExecutorConfig(name=AURORA_EXECUTOR_NAME,
                                     data=thermos_config.json_dumps())
    task_config = TaskConfig(job=JobKey(role=thermos_config.role().get(),
                                        environment='test',
                                        name=thermos_config.name().get()),
                             executorConfig=executor_config)

    return AssignedTask(instanceId=12345,
                        task=task_config,
                        assignedPorts=assigned_ports,
                        taskId="taskId-12345")
Example #17
 def make_task_configs(self, count=1, prod=True):
   return [TaskConfig(
       owner=Identity(role=self._job_key.role),
       environment=self._job_key.environment,
       jobName=self._job_key.name,
       numCpus=self._num_cpus,
       ramMb=self._num_ram,
       diskMb=self._num_disk,
       priority=0,
       maxTaskFailures=1,
       production=prod,
       taskLinks={'task': 'link'},
       contactEmail='*****@*****.**',
       executorConfig=ExecutorConfig(name='test', data='test data')
       # Not setting any set()-related properties as that throws off mox verification.
   )] * count
Example #18
  def pretty_print_task(self, task):
    task.configuration = None
    executor_config = json.loads(task.executorConfig.data)
    pretty_executor = json.dumps(executor_config, indent=2, sort_keys=True)
    pretty_executor = '\n'.join(["    %s" % s for s in pretty_executor.split("\n")])
    # Make start cleaner, and display multi-line commands across multiple lines.
    pretty_executor = '\n    ' + pretty_executor.replace(r'\n', '\n')
    # Avoid re-escaping as it's already pretty printed.
    class RawRepr(object):
      def __init__(self, data):
        self.data = data

      def __repr__(self):
        return self.data

    task.executorConfig = ExecutorConfig(
        name=AURORA_EXECUTOR_NAME,
        data=RawRepr(pretty_executor))
    return self.prettyprinter.pformat(vars(task))
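A small demonstration of why the RawRepr wrapper above is needed (a sketch with made-up data): pprint calls repr() on every value, so a plain string would have its newlines re-escaped, while the wrapper hands the already pretty-printed JSON through untouched.

import pprint

class RawRepr(object):
    def __init__(self, data):
        self.data = data

    def __repr__(self):
        return self.data

pretty = '{\n  "cmdline": "echo hello"\n}'
print(pprint.pformat({'executorConfig': pretty}))           # newlines show up as \n escapes
print(pprint.pformat({'executorConfig': RawRepr(pretty)}))  # newlines render literally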
Example #19
    def test_launchTask_deserialization_fail(self):  # noqa
        proxy_driver = ProxyDriver()

        role = getpass.getuser()
        task_info = mesos_pb2.TaskInfo()
        task_info.name = task_info.task_id.value = 'broken'
        task_info.data = serialize(
            AssignedTask(task=TaskConfig(
                job=JobKey(role=role, environment='env', name='name'),
                executorConfig=ExecutorConfig(name=AURORA_EXECUTOR_NAME,
                                              data='garbage'))))

        te = FastThermosExecutor(runner_provider=make_provider(safe_mkdtemp()),
                                 sandbox_provider=DefaultTestSandboxProvider())
        te.launchTask(proxy_driver, task_info)
        proxy_driver.wait_stopped()

        updates = proxy_driver.method_calls['sendStatusUpdate']
        assert len(updates) == 2
        assert updates[0][0][0].state == mesos_pb2.TASK_STARTING
        assert updates[1][0][0].state == mesos_pb2.TASK_FAILED
Example #20
 def test_from_assigned_task(self):
     interval_secs = 17
     initial_interval_secs = 3
     max_consecutive_failures = 2
     task_config = TaskConfig(executorConfig=ExecutorConfig(
         name='thermos',
         data=MESOS_JOB(
             task=HELLO_WORLD,
             health_check_config=HealthCheckConfig(
                 interval_secs=interval_secs,
                 initial_interval_secs=initial_interval_secs,
                 max_consecutive_failures=max_consecutive_failures,
                 timeout_secs=7)).json_dumps()))
     assigned_task = AssignedTask(task=task_config,
                                  instanceId=1,
                                  assignedPorts={'health': 9001})
     health_checker = HealthCheckerProvider().from_assigned_task(
         assigned_task, None)
     assert health_checker.threaded_health_checker.interval == interval_secs
     assert health_checker.threaded_health_checker.initial_interval == initial_interval_secs
     hct_max_fail = health_checker.threaded_health_checker.max_consecutive_failures
     assert hct_max_fail == max_consecutive_failures
Example #21
 def test_from_assigned_task_no_health_port(self):
     interval_secs = 17
     initial_interval_secs = 3
     max_consecutive_failures = 2
     timeout_secs = 5
     task_config = TaskConfig(executorConfig=ExecutorConfig(
         name='thermos-generic',
         data=MESOS_JOB(
             task=HELLO_WORLD,
             health_check_config=HealthCheckConfig(
                 interval_secs=interval_secs,
                 initial_interval_secs=initial_interval_secs,
                 max_consecutive_failures=max_consecutive_failures,
                 timeout_secs=timeout_secs,
             )).json_dumps()))
     # No health port and we don't have a shell_command.
     assigned_task = AssignedTask(task=task_config,
                                  instanceId=1,
                                  assignedPorts={'http': 9001})
     health_checker = HealthCheckerProvider().from_assigned_task(
         assigned_task, None)
     self.assertIsNone(health_checker)
Example #22
    def test_from_assigned_task_shell(self, mock_getpwnam):
        interval_secs = 17
        initial_interval_secs = 3
        max_consecutive_failures = 2
        timeout_secs = 5
        shell_config = ShellHealthChecker(shell_command='failed command')
        task_config = TaskConfig(
            job=JobKey(role='role', environment='env', name='name'),
            executorConfig=ExecutorConfig(
                name='thermos-generic',
                data=MESOS_JOB(
                    task=HELLO_WORLD,
                    health_check_config=HealthCheckConfig(
                        health_checker=HealthCheckerConfig(shell=shell_config),
                        interval_secs=interval_secs,
                        initial_interval_secs=initial_interval_secs,
                        max_consecutive_failures=max_consecutive_failures,
                        timeout_secs=timeout_secs,
                    )).json_dumps()))
        assigned_task = AssignedTask(task=task_config,
                                     instanceId=1,
                                     assignedPorts={'foo': 9001})
        execconfig_data = json.loads(assigned_task.task.executorConfig.data)
        assert execconfig_data['health_check_config']['health_checker'][
            'shell']['shell_command'] == 'failed command'

        mock_sandbox = mock.Mock(spec_set=SandboxInterface)
        type(mock_sandbox).root = mock.PropertyMock(return_value='/some/path')
        type(mock_sandbox).is_filesystem_image = mock.PropertyMock(
            return_value=False)

        health_checker = HealthCheckerProvider().from_assigned_task(
            assigned_task, mock_sandbox)
        assert health_checker.threaded_health_checker.interval == interval_secs
        assert health_checker.threaded_health_checker.initial_interval == initial_interval_secs
        hct_max_fail = health_checker.threaded_health_checker.max_consecutive_failures
        assert hct_max_fail == max_consecutive_failures
        mock_getpwnam.assert_called_once_with(task_config.job.role)
Example #23
    def _pretty_print_task(self, task):
        task.configuration = None
        executor_config = json.loads(task.executorConfig.data)
        pretty_executor = json.dumps(executor_config, indent=2, sort_keys=True)
        pretty_executor = '\n'.join(
            ["    %s" % s for s in pretty_executor.split("\n")])
        # Make start cleaner, and display multi-line commands across multiple lines.
        pretty_executor = '\n    ' + pretty_executor.replace(r'\n', '\n')

        # Avoid re-escaping as it's already pretty printed.
        class RawRepr(object):
            def __init__(self, data):
                self.data = data

            def __repr__(self):
                return self.data

        # Sorting sets, hence they turn into lists
        if task.constraints:
            task.constraints = sorted(task.constraints, key=str)

        if task.metadata:
            task.metadata = sorted(task.metadata, key=str)

        if task.resources:
            task.resources = sorted(task.resources, key=str)

        if task.requestedPorts:
            task.requestedPorts = sorted(task.requestedPorts, key=str)

        if task.mesosFetcherUris:
            task.mesosFetcherUris = sorted(task.mesosFetcherUris, key=str)

        task.executorConfig = ExecutorConfig(name=AURORA_EXECUTOR_NAME,
                                             data=RawRepr(pretty_executor))
        return self.prettyprinter.pformat(vars(task))
Example #24
 def pretty_print_task(self, task):
   task.configuration = None
   task.executorConfig = ExecutorConfig(
       name=AURORA_EXECUTOR_NAME,
       data=json.loads(task.executorConfig.data))
   return self.prettyprinter.pformat(vars(task))
Example #25
def convert(job, metadata=frozenset(), ports=frozenset()):
    """Convert a Pystachio MesosJob to an Aurora Thrift JobConfiguration."""

    owner = Identity(user=getpass.getuser())
    key = JobKey(
        role=assert_valid_field('role', fully_interpolated(job.role())),
        environment=assert_valid_field('environment',
                                       fully_interpolated(job.environment())),
        name=assert_valid_field('name', fully_interpolated(job.name())))

    task_raw = job.task()

    MB = 1024 * 1024
    task = TaskConfig()

    def not_empty_or(item, default):
        return default if item is Empty else fully_interpolated(item)

    # job components
    task.production = fully_interpolated(job.production(), bool)
    task.isService = select_service_bit(job)
    task.maxTaskFailures = fully_interpolated(job.max_task_failures())
    task.priority = fully_interpolated(job.priority())
    task.contactEmail = not_empty_or(job.contact(), None)
    task.tier = not_empty_or(job.tier(), None)

    if job.has_partition_policy():
        task.partitionPolicy = PartitionPolicy(
            fully_interpolated(job.partition_policy().reschedule()),
            fully_interpolated(job.partition_policy().delay_secs()))

    # Add metadata to a task, to display in the scheduler UI.
    metadata_set = frozenset()
    if job.has_metadata():
        customized_metadata = job.metadata()
        metadata_set |= frozenset(
            (str(fully_interpolated(key_value_metadata.key())),
             str(fully_interpolated(key_value_metadata.value())))
            for key_value_metadata in customized_metadata)
    metadata_set |= frozenset(
        (str(key), str(value)) for key, value in metadata)
    task.metadata = frozenset(
        Metadata(key=key, value=value) for key, value in metadata_set)

    # task components
    if not task_raw.has_resources():
        raise InvalidConfig('Task must specify resources!')

    if (fully_interpolated(task_raw.resources().ram()) == 0
            or fully_interpolated(task_raw.resources().disk()) == 0):
        raise InvalidConfig(
            'Must specify ram and disk resources, got ram:%r disk:%r' %
            (fully_interpolated(task_raw.resources().ram()),
             fully_interpolated(task_raw.resources().disk())))

    numCpus = fully_interpolated(task_raw.resources().cpu())
    ramMb = fully_interpolated(task_raw.resources().ram()) / MB
    diskMb = fully_interpolated(task_raw.resources().disk()) / MB
    if numCpus <= 0 or ramMb <= 0 or diskMb <= 0:
        raise InvalidConfig(
            'Task has invalid resources.  cpu/ramMb/diskMb must all be positive: '
            'cpu:%r ramMb:%r diskMb:%r' % (numCpus, ramMb, diskMb))
    numGpus = fully_interpolated(task_raw.resources().gpu())

    task.resources = frozenset([
        Resource(numCpus=numCpus),
        Resource(ramMb=ramMb),
        Resource(diskMb=diskMb)
    ] + [Resource(namedPort=p)
         for p in ports] + ([Resource(numGpus=numGpus)] if numGpus else []))

    task.job = key
    task.owner = owner
    task.taskLinks = {}  # See AURORA-739
    task.constraints = constraints_to_thrift(
        not_empty_or(job.constraints(), {}))
    task.container = create_container_config(job.container())

    underlying, refs = job.interpolate()

    # need to fake an instance id for the sake of schema checking
    underlying_checked = underlying.bind(mesos={
        'instance': 31337,
        'hostname': ''
    })
    try:
        ThermosTaskValidator.assert_valid_task(underlying_checked.task())
    except ThermosTaskValidator.InvalidTaskError as e:
        raise InvalidConfig('Task is invalid: %s' % e)
    if not underlying_checked.check().ok():
        raise InvalidConfig('Job not fully specified: %s' %
                            underlying.check().message())

    unbound = []
    for ref in refs:
        if ref in (THERMOS_TASK_ID_REF, MESOS_INSTANCE_REF,
                   MESOS_HOSTNAME_REF) or (Ref.subscope(
                       THERMOS_PORT_SCOPE_REF, ref)):
            continue
        unbound.append(ref)

    if unbound:
        raise InvalidConfig('Config contains unbound variables: %s' %
                            ' '.join(map(str, unbound)))

    # set the executor that will be used by the Mesos task. Thermos is the default
    executor = job.executor_config()
    if fully_interpolated(executor.name()) == AURORA_EXECUTOR_NAME:
        task.executorConfig = ExecutorConfig(
            name=AURORA_EXECUTOR_NAME,
            data=filter_aliased_fields(underlying).json_dumps())
    else:
        task.executorConfig = ExecutorConfig(
            name=fully_interpolated(executor.name()),
            data=fully_interpolated(executor.data()))

    return JobConfiguration(
        key=key,
        owner=owner,
        cronSchedule=not_empty_or(job.cron_schedule(), None),
        cronCollisionPolicy=select_cron_policy(job.cron_collision_policy()),
        taskConfig=task,
        instanceCount=fully_interpolated(job.instances()))
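A hedged usage sketch for convert(). Job, Process, Resources and Task below are the Aurora Pystachio DSL objects (assumed importable from apache.aurora.config.schema.base); the exact set of required fields varies between schema versions, so treat this as illustrative rather than a verified invocation:

from apache.aurora.config.schema.base import Job, Process, Resources, Task

MB = 1024 * 1024
hello_job = Job(
    name='hello_world',
    role='www-data',
    environment='prod',
    cluster='example',
    instances=1,
    task=Task(
        name='hello_world',
        processes=[Process(name='hello', cmdline='echo hello world')],
        resources=Resources(cpu=1.0, ram=128 * MB, disk=256 * MB)))

job_configuration = convert(hello_job)
print(job_configuration.key)                   # JobKey(role='www-data', environment='prod', name='hello_world')
print(job_configuration.taskConfig.resources)  # cpu/ram/disk Resource entries plus any named ports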