Example #1
    def assert_correct_status_calls(cls, api):
        # getTasksWithoutConfigs gets called a lot of times. The exact number isn't fixed; it loops
        # over the health checks until all of them pass for a configured period of time.
        # The minimum number of calls is 20: once before the tasks are restarted, and then
        # once for each batch of restarts (Since the batch size is set to 1, and the
        # total number of tasks is 20, that's 20 batches.)
        assert api.getTasksWithoutConfigs.call_count >= 4

        status_calls = api.getTasksWithoutConfigs.call_args_list
        for status_call in status_calls:
            assert status_call[0][0] == TaskQuery(
                taskIds=None,
                jobKeys=[JobKey(role='bozo', environment='test', name='hello')],
                statuses=set([ScheduleStatus.RUNNING]))

        # getTasksStatus is called only once to generate the update instructions
        assert api.getTasksStatus.call_count == 1

        api.getTasksStatus.assert_called_once_with(
            TaskQuery(taskIds=None,
                      jobKeys=[
                          JobKey(role='bozo', environment='test', name='hello')
                      ],
                      statuses=ACTIVE_STATES))
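
The comment above hinges on the status polling repeating until every instance has stayed healthy for a configured watch period, which is why only a lower bound on the call count can be asserted. Below is a minimal, purely illustrative sketch of such a polling loop; the health_check.health(...) interface and the parameter names are assumptions for illustration, not Aurora's actual InstanceWatcher API.

import time

def wait_until_healthy(health_check, instance_ids, watch_secs=30, interval_secs=3):
    # Illustrative only: each iteration issues another round of health/status calls,
    # so the total number of calls depends on timing rather than being fixed.
    healthy_since = {}
    while True:
        now = time.time()
        for instance in instance_ids:
            if health_check.health(instance):  # assumed health-check interface
                healthy_since.setdefault(instance, now)
            else:
                healthy_since.pop(instance, None)
        if all(now - healthy_since.get(i, now) >= watch_secs for i in instance_ids):
            return True
        time.sleep(interval_secs)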
Example #2
  def test_cron_status_multiple_jobs(self):
    _, mock_scheduler_proxy = self.create_mock_api()
    with contextlib.nested(
        patch('time.sleep'),
        patch('apache.aurora.client.api.SchedulerProxy', return_value=mock_scheduler_proxy),
        patch('apache.aurora.client.factory.CLUSTERS', new=self.TEST_CLUSTERS),
        patch('apache.aurora.client.cli.context.AuroraCommandContext.print_out')) as (
            _, _, _, mock_print):
      response = self.create_simple_success_response()
      response.result = Result(getJobsResult=GetJobsResult(configs=[
          JobConfiguration(
              key=JobKey(role='bozo', environment='test', name='hello'),
              cronSchedule='* * * * *'),
          JobConfiguration(
              key=JobKey(role='bozo', environment='test', name='hello2'),
              cronSchedule='* * * * *')
      ]))
      mock_scheduler_proxy.getJobs.return_value = response

      cmd = AuroraCommandLine()
      result = cmd.execute(['cron', 'show', 'west/bozo/test/hello'])

      assert result == EXIT_OK
      mock_scheduler_proxy.getJobs.assert_called_once_with("bozo")
      mock_print.assert_called_with("west/bozo/test/hello\t * * * * *")
Example #3
  def test_successful_diff(self):
    """Test the diff command."""
    (mock_api, mock_scheduler_proxy) = self.create_mock_api()
    with contextlib.nested(
        patch('apache.aurora.client.api.SchedulerProxy', return_value=mock_scheduler_proxy),
        patch('apache.aurora.client.factory.CLUSTERS', new=self.TEST_CLUSTERS),
        patch('subprocess.call', return_value=0),
        patch('json.loads', return_value=Mock())) as (_, _, subprocess_patch, _):

      mock_scheduler_proxy.getTasksStatus.return_value = self.create_status_response()
      self.setup_populate_job_config(mock_scheduler_proxy)
      with temporary_file() as fp:
        fp.write(self.get_valid_config())
        fp.flush()
        cmd = AuroraCommandLine()
        cmd.execute(['job', 'diff', 'west/bozo/test/hello', fp.name])

        # Diff should get the task status, populate a config, and run diff.
        mock_scheduler_proxy.getTasksStatus.assert_called_with(
            TaskQuery(jobKeys=[JobKey(role='bozo', environment='test', name='hello')],
                      statuses=ACTIVE_STATES))
        assert mock_scheduler_proxy.populateJobConfig.call_count == 1
        assert isinstance(mock_scheduler_proxy.populateJobConfig.call_args[0][0], JobConfiguration)
        assert (mock_scheduler_proxy.populateJobConfig.call_args[0][0].key ==
            JobKey(environment=u'test', role=u'bozo', name=u'hello'))
        # Subprocess should have been used to invoke diff with two parameters.
        assert subprocess_patch.call_count == 1
        assert len(subprocess_patch.call_args[0][0]) == 3
        assert subprocess_patch.call_args[0][0][0] == os.environ.get('DIFF_VIEWER', 'diff')
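
The last two assertions only pin down the shape of the subprocess call: three argv elements whose first entry falls back to plain diff when the DIFF_VIEWER environment variable is unset. A self-contained invocation of that shape would look roughly like the sketch below; the temp-file contents are made up purely for illustration.

import os
import subprocess
import tempfile

# Hypothetical files standing in for the rendered local and remote configs.
with tempfile.NamedTemporaryFile(mode='w', suffix='.json') as local, \
     tempfile.NamedTemporaryFile(mode='w', suffix='.json') as remote:
    local.write('{"local": true}')
    remote.write('{"remote": true}')
    local.flush()
    remote.flush()
    # argv[0] honors DIFF_VIEWER and defaults to 'diff'; the two file paths follow.
    argv = [os.environ.get('DIFF_VIEWER', 'diff'), local.name, remote.name]
    exit_status = subprocess.call(argv)  # returns the diff tool's exit status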
Example #4
 def create_getjobs_response(cls):
     mock_job_one = JobConfiguration(
         key=JobKey(role='RoleA', environment='test', name='hithere'))
     mock_job_two = JobConfiguration(
         key=JobKey(role='bozo', environment='test', name='hello'))
     result = cls.create_simple_success_response()
     result.result = Result(getJobsResult=GetJobsResult(
         configs=[mock_job_one, mock_job_two]))
     return result
Example #5
  def test_successful_ssh(self):
    """Test the ssh command."""
    (mock_api, mock_scheduler_proxy) = self.create_mock_api()
    mock_scheduler_proxy.getTasksStatus.return_value = self.create_status_response()
    sandbox_args = {'slave_root': '/slaveroot', 'slave_run_directory': 'slaverun'}
    with contextlib.nested(
        patch('apache.aurora.client.api.SchedulerProxy', return_value=mock_scheduler_proxy),
        patch('apache.aurora.client.factory.CLUSTERS', new=self.TEST_CLUSTERS),
        patch('apache.aurora.client.api.command_runner.DistributedCommandRunner.sandbox_args',
            return_value=sandbox_args),
        patch('subprocess.call', return_value=0)) as (
            mock_scheduler_proxy_class,
            mock_clusters,
            mock_runner_args_patch,
            mock_subprocess):
      cmd = AuroraCommandLine()
      cmd.execute(['task', 'ssh', 'west/bozo/test/hello/1', '--command=ls'])

      # The status command sends a getTasksStatus query to the scheduler,
      # and then prints the result.
      mock_scheduler_proxy.getTasksStatus.assert_called_with(TaskQuery(
          jobKeys=[JobKey(role='bozo', environment='test', name='hello')],
          instanceIds=set([1]),
          statuses=set([ScheduleStatus.RUNNING, ScheduleStatus.KILLING, ScheduleStatus.RESTARTING,
              ScheduleStatus.PREEMPTING, ScheduleStatus.DRAINING
              ])))
      mock_subprocess.assert_called_with(['ssh', '-t', 'bozo@slavehost',
          'cd /slaveroot/slaves/*/frameworks/*/executors/thermos-1287391823/runs/'
          'slaverun/sandbox;ls'])
Example #6
  def test_cancel_update_api_level(self):
    """Test kill client-side API logic."""
    mock_options = self.setup_mock_options()

    mock_config = Mock()
    mock_config.hooks = []
    mock_config.raw.return_value.enable_hooks.return_value.get.return_value = False
    (mock_api, mock_scheduler_proxy) = self.create_mock_api()
    mock_scheduler_proxy.releaseLock.return_value = self.get_release_lock_response()
    with contextlib.nested(
        patch('apache.aurora.client.api.SchedulerProxy', return_value=mock_scheduler_proxy),
        patch('apache.aurora.client.factory.CLUSTERS', new=self.TEST_CLUSTERS),
        patch('twitter.common.app.get_options', return_value=mock_options),
        patch('apache.aurora.client.commands.core.get_job_config', return_value=mock_config)) as (
            mock_scheduler_proxy_class, mock_clusters, options, mock_get_job_config):
      with temporary_file() as fp:
        fp.write(self.get_valid_config())
        fp.flush()
        cancel_update(['west/mchucarroll/test/hello'], mock_options)

      # All that cancel_update really does is release the update lock.
      # So that's all we really need to check.
      assert mock_scheduler_proxy.releaseLock.call_count == 1
      assert mock_scheduler_proxy.releaseLock.call_args[0][0].key.job == JobKey(environment='test',
          role='mchucarroll', name='hello')
Example #7
 def setUp(self):
   self._scheduler = create_autospec(spec=SchedulerThriftApiSpec, instance=True)
   self._quota_checker = QuotaCheck(self._scheduler)
   self._role = 'mesos'
   self._name = 'quotajob'
   self._env = 'test'
   self._job_key = JobKey(name=self._name, environment=self._env, role=self._role)
Example #8
    def test_start_update_and_wait_success(self):
        mock_config = self.create_mock_config()
        self._fake_context.get_job_config = Mock(return_value=mock_config)
        self._mock_options.wait = True

        resp = self.create_simple_success_response()
        resp.result = Result(startJobUpdateResult=StartJobUpdateResult(
            key=JobUpdateKey(job=JobKey(
                role="role", environment="env", name="name"),
                             id="id")))
        self._mock_api.start_job_update.return_value = resp
        self._mock_api.query_job_updates.side_effect = [
            get_status_query_response(status=JobUpdateStatus.ROLLED_FORWARD)
        ]

        assert self._command.execute(self._fake_context) == EXIT_OK

        assert self._mock_api.start_job_update.mock_calls == [
            call(ANY, None, None)
        ]
        assert self._mock_api.query_job_updates.mock_calls == [
            call(update_key=resp.result.startJobUpdateResult.key)
        ]

        assert self._fake_context.get_out() == [
            StartUpdate.UPDATE_MSG_TEMPLATE %
            ('http://something_or_other/scheduler/role/env/name/update/id'),
            'Current state ROLLED_FORWARD'
        ]
        assert self._fake_context.get_err() == []
Example #9
def test_simple_config():
    job = convert_pystachio_to_thrift(HELLO_WORLD)
    expected_key = JobKey(role=HELLO_WORLD.role().get(),
                          environment=HELLO_WORLD.environment().get(),
                          name=HELLO_WORLD.name().get())
    assert job.instanceCount == 1
    tti = job.taskConfig
    assert job.key == expected_key
    assert job.owner == Identity(role=HELLO_WORLD.role().get(),
                                 user=getpass.getuser())
    assert job.cronSchedule is None
    assert tti.job == expected_key
    assert tti.jobName == 'hello_world'
    assert tti.isService is False
    assert tti.numCpus == 0.1
    assert tti.ramMb == 64
    assert tti.diskMb == 64
    assert tti.requestedPorts == set()
    assert tti.production is False
    assert tti.priority == 0
    assert tti.maxTaskFailures == 1
    assert tti.constraints == set()
    assert tti.metadata == set()
    assert tti.environment == HELLO_WORLD.environment().get()
    assert tti.tier is None
Example #10
    def test_start_update_command_line_succeeds(self):
        resp = self.create_simple_success_response()
        resp.result = Result(startJobUpdateResult=StartJobUpdateResult(
            key=JobUpdateKey(job=JobKey(
                role="role", environment="env", name="name"),
                             id="id")))
        self._mock_api.start_job_update.return_value = resp
        mock_config = self.create_mock_config()
        self._fake_context.get_job_config = Mock(return_value=mock_config)
        self._mock_options.instance_spec = TaskInstanceKey(self._job_key, None)
        self._mock_options.message = 'hello'

        with patch(
                'apache.aurora.client.cli.update.DiffFormatter') as formatter:
            formatter.return_value = self._formatter
            assert self._command.execute(self._fake_context) == EXIT_OK

        assert self._formatter.show_job_update_diff.mock_calls == [
            call(self._mock_options.instance_spec.instance)
        ]
        assert self._mock_api.start_job_update.mock_calls == [
            call(ANY, 'hello', None, ANY)
        ]
        assert self._fake_context.get_out() == [
            StartUpdate.UPDATE_MSG_TEMPLATE %
            ('http://something_or_other/scheduler/role/env/name/update/id'),
        ]
        assert self._fake_context.get_err() == []
Example #11
def test_simple_config():
  job = convert_pystachio_to_thrift(HELLO_WORLD, ports=frozenset(['health']))
  expected_key = JobKey(
      role=HELLO_WORLD.role().get(),
      environment=HELLO_WORLD.environment().get(),
      name=HELLO_WORLD.name().get())
  assert job.instanceCount == 1
  tti = job.taskConfig
  assert job.key == expected_key
  assert job.owner == Identity(user=getpass.getuser())
  assert job.cronSchedule is None
  assert tti.job == expected_key
  assert tti.isService is False
  assert tti.numCpus == 0.1
  assert tti.ramMb == 64
  assert tti.diskMb == 64
  assert tti.requestedPorts == frozenset(['health'])
  assert tti.production is False
  assert tti.priority == 0
  assert tti.maxTaskFailures == 1
  assert tti.constraints == set()
  assert tti.metadata == set()
  assert tti.tier is None
  assert Resource(numCpus=0.1) in list(tti.resources)
  assert Resource(ramMb=64) in list(tti.resources)
  assert Resource(diskMb=64) in list(tti.resources)
  assert Resource(namedPort='health') in list(tti.resources)
Example #12
    def test_restart_simple(self):
        # Test the client-side restart logic in its simplest case: everything succeeds
        (mock_api, mock_scheduler_proxy) = self.create_mock_api()
        mock_health_check = self.setup_health_checks(mock_api)
        self.setup_mock_scheduler_for_simple_restart(mock_api)
        with contextlib.nested(
                patch('apache.aurora.client.api.SchedulerProxy',
                      return_value=mock_scheduler_proxy),
                patch(
                    'apache.aurora.client.api.instance_watcher.StatusHealthCheck',
                    return_value=mock_health_check),
                patch('time.time',
                      side_effect=functools.partial(self.fake_time, self)),
                patch('threading._Event.wait')):
            with temporary_file() as fp:
                fp.write(self.get_valid_config())
                fp.flush()
                cmd = AuroraCommandLine()
                cmd.execute([
                    'job', 'restart', '--batch-size=5', 'west/bozo/test/hello',
                    '--config', fp.name
                ])

                # Like the update test, the exact number of calls here doesn't matter.
                # What matters is that it must have been called once before batching, plus
                # at least once per batch, and there are 4 batches.
                assert mock_scheduler_proxy.getTasksWithoutConfigs.call_count >= 4
                # called once per batch
                assert mock_scheduler_proxy.restartShards.call_count == 4
                # parameters for all calls are generated by the same code, so we just check one
                mock_scheduler_proxy.restartShards.assert_called_with(
                    JobKey(environment=self.TEST_ENV,
                           role=self.TEST_ROLE,
                           name=self.TEST_JOB), [15, 16, 17, 18, 19], None)
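
The batching arithmetic in the comment above can be checked in isolation: with --batch-size=5 and the 20 instances these fixtures appear to set up, the restart proceeds in 4 batches, and the final batch is exactly the [15, 16, 17, 18, 19] asserted in the restartShards call. A quick sanity-check of that slicing follows; it is not the client's actual batching code, just the same arithmetic.

instance_ids = list(range(20))   # 20 instances, as in the test fixtures
batch_size = 5                   # matches --batch-size=5 on the command line
batches = [instance_ids[i:i + batch_size] for i in range(0, len(instance_ids), batch_size)]
assert len(batches) == 4
assert batches[-1] == [15, 16, 17, 18, 19]  # the final restartShards parameter asserted above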
Example #13
  def test_successful_diff(self):
    """Test the diff command."""
    mock_options = self.setup_mock_options()
    (mock_api, mock_scheduler_proxy) = self.create_mock_api()
    mock_scheduler_proxy.getTasksStatus.return_value = self.create_status_response()
    self.setup_populate_job_config(mock_scheduler_proxy)
    with contextlib.nested(
        patch('apache.aurora.client.api.SchedulerProxy', return_value=mock_scheduler_proxy),
        patch('apache.aurora.client.factory.CLUSTERS', new=self.TEST_CLUSTERS),
        patch('twitter.common.app.get_options', return_value=mock_options),
        patch('subprocess.call', return_value=0),
        patch('json.loads', return_value=Mock())) as (
            mock_scheduler_proxy_class,
            mock_clusters,
            options,
            subprocess_patch,
            json_patch):
      with temporary_file() as fp:
        fp.write(self.get_valid_config())
        fp.flush()
        diff(['west/mchucarroll/test/hello', fp.name])

        # Diff should get the task status, populate a config, and run diff.
        mock_scheduler_proxy.getTasksStatus.assert_called_with(
            TaskQuery(jobName='hello', environment='test', owner=Identity(role='mchucarroll'),
                statuses=ACTIVE_STATES))
        assert mock_scheduler_proxy.populateJobConfig.call_count == 1
        assert isinstance(mock_scheduler_proxy.populateJobConfig.call_args[0][0], JobConfiguration)
        assert (mock_scheduler_proxy.populateJobConfig.call_args[0][0].key ==
            JobKey(environment=u'test', role=u'mchucarroll', name=u'hello'))
        # Subprocess should have been used to invoke diff with two parameters.
        assert subprocess_patch.call_count == 1
        assert len(subprocess_patch.call_args[0][0]) == 3
        assert subprocess_patch.call_args[0][0][0] == os.environ.get('DIFF_VIEWER', 'diff')
Example #14
def make_assigned_task(thermos_config, assigned_ports=None):
  from gen.apache.aurora.api.constants import AURORA_EXECUTOR_NAME
  from gen.apache.aurora.api.ttypes import (
      AssignedTask,
      ExecutorConfig,
      Identity,
      JobKey,
      TaskConfig
  )

  assigned_ports = assigned_ports or {}
  executor_config = ExecutorConfig(name=AURORA_EXECUTOR_NAME, data=thermos_config.json_dumps())
  task_config = TaskConfig(
      job=JobKey(
          role=thermos_config.role().get(),
          environment="prod",
          name=thermos_config.name().get()),
      owner=Identity(role=thermos_config.role().get(), user=thermos_config.role().get()),
      environment=thermos_config.environment().get(),
      jobName=thermos_config.name().get(),
      executorConfig=executor_config)

  return AssignedTask(
      instanceId=12345,
      task=task_config,
      assignedPorts=assigned_ports,
      slaveHost='test-host')
Example #15
  def test_diff_server_error(self):
    """Test the diff command if the user passes a config with an error in it."""
    mock_options = self.setup_mock_options()
    (mock_api, mock_scheduler_proxy) = self.create_mock_api()
    mock_scheduler_proxy.getTasksStatus.return_value = self.create_failed_status_response()
    self.setup_populate_job_config(mock_scheduler_proxy)
    with contextlib.nested(
        patch('apache.aurora.client.api.SchedulerProxy', return_value=mock_scheduler_proxy),
        patch('apache.aurora.client.factory.CLUSTERS', new=self.TEST_CLUSTERS),
        patch('twitter.common.app.get_options', return_value=mock_options),
        patch('subprocess.call', return_value=0),
        patch('json.loads', return_value=Mock())) as (
            mock_scheduler_proxy_class,
            mock_clusters,
            options,
            subprocess_patch,
            json_patch):

      with temporary_file() as fp:
        fp.write(self.get_valid_config())
        fp.flush()
        cmd = AuroraCommandLine()
        result = cmd.execute(['job', 'diff', 'west/bozo/test/hello', fp.name])
        assert result == EXIT_INVALID_PARAMETER
        # In this error case, we should have called getTasksStatus on the server;
        # but since it fails, we shouldn't call populateJobConfig or subprocess.
        mock_scheduler_proxy.getTasksStatus.assert_called_with(
            TaskQuery(jobKeys=[JobKey(role='bozo', environment='test', name='hello')],
                statuses=ACTIVE_STATES))
        assert mock_scheduler_proxy.populateJobConfig.call_count == 0
        assert subprocess_patch.call_count == 0
Example #16
 def get_expected_task_query(cls, instances=None):
   instance_ids = frozenset(instances) if instances is not None else None
   return TaskQuery(taskIds=None,
                    instanceIds=instance_ids,
                    jobKeys=[JobKey(role=cls.TEST_ROLE,
                                    environment=cls.TEST_ENV,
                                    name=cls.TEST_JOB)])
Example #17
    def test_status_api_failure(self):
        mock_scheduler_client = create_autospec(spec=SchedulerClient,
                                                instance=True)
        mock_thrift_client = create_autospec(spec=AuroraAdmin.Client,
                                             instance=True)
        mock_scheduler_client.get_thrift_client.return_value = mock_thrift_client

        mock_thrift_client.getTasksWithoutConfigs.side_effect = IOError(
            "Uh-Oh")
        with contextlib.nested(
                patch(
                    'apache.aurora.client.api.scheduler_client.SchedulerClient.get',
                    return_value=mock_scheduler_client),
                patch('apache.aurora.client.factory.CLUSTERS',
                      new=self.TEST_CLUSTERS)):

            cmd = AuroraCommandLine()
            # This should create a scheduler client, set everything up, and then issue a
            # getTasksWithoutConfigs call against the mock_scheduler_client. That should raise an
            # exception, which results in the command failing with an error code.
            result = cmd.execute(['job', 'status', 'west/bozo/test/hello'])
            assert result == EXIT_UNKNOWN_ERROR
            assert mock_thrift_client.getTasksWithoutConfigs.mock_calls == [
                call(
                    TaskQuery(jobKeys=[
                        JobKey(role='bozo', environment='test', name='hello')
                    ]))
            ]
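
The comment's reasoning in the example above rests on standard mock behaviour: an exception assigned to side_effect is raised the moment the mocked method is invoked, and the CLI maps that failure to an error exit code. The following self-contained snippet illustrates just the mock mechanics; the exit-code value 1 is a stand-in for illustration, not necessarily EXIT_UNKNOWN_ERROR.

from mock import Mock  # the tests above use the same mock library

thrift_client = Mock()
thrift_client.getTasksWithoutConfigs.side_effect = IOError("Uh-Oh")

exit_code = 0
try:
    thrift_client.getTasksWithoutConfigs()
except IOError:
    exit_code = 1  # stand-in for the CLI converting the failure into an error exit code
assert exit_code == 1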
Example #18
 def get_expected_task_query(cls, shards=None):
   instance_ids = frozenset(shards) if shards is not None else None
   # Helper to create the query that will be a parameter to job kill.
   return TaskQuery(
       taskIds=None,
       instanceIds=instance_ids,
       jobKeys=[JobKey(role=cls.TEST_ROLE, environment=cls.TEST_ENV, name=cls.TEST_JOB)])
Example #19
 def test_restart_failed_restart(self):
     (mock_api, mock_scheduler_proxy) = self.create_mock_api()
     mock_health_check = self.setup_health_checks(mock_api)
     self.setup_mock_scheduler_for_simple_restart(mock_api)
     mock_scheduler_proxy.restartShards.return_value = self.create_error_response(
     )
     with contextlib.nested(
             patch('apache.aurora.client.api.SchedulerProxy',
                   return_value=mock_scheduler_proxy),
             patch(
                 'apache.aurora.client.api.instance_watcher.StatusHealthCheck',
                 return_value=mock_health_check),
             patch('time.time',
                   side_effect=functools.partial(self.fake_time, self)),
             patch('threading._Event.wait')):
         with temporary_file() as fp:
             fp.write(self.get_valid_config())
             fp.flush()
             cmd = AuroraCommandLine()
             result = cmd.execute([
                 'job', 'restart', '--batch-size=5', 'west/bozo/test/hello',
                 '--config', fp.name
             ])
             assert mock_scheduler_proxy.getTasksWithoutConfigs.call_count == 1
             assert mock_scheduler_proxy.restartShards.call_count == 1
             mock_scheduler_proxy.restartShards.assert_called_with(
                 JobKey(environment=self.TEST_ENV,
                        role=self.TEST_ROLE,
                        name=self.TEST_JOB), [0, 1, 2, 3, 4], None)
             assert result == EXIT_API_ERROR
Example #20
    def test_restart_simple_no_config(self):
        # Test the client-side restart logic in its simplest case: everything succeeds
        (mock_api, mock_scheduler_proxy) = self.create_mock_api()
        mock_health_check = self.setup_health_checks(mock_api)
        self.setup_mock_scheduler_for_simple_restart(mock_api)
        with contextlib.nested(
                patch('apache.aurora.client.api.SchedulerProxy',
                      return_value=mock_scheduler_proxy),
                patch('apache.aurora.client.factory.CLUSTERS',
                      new=self.TEST_CLUSTERS),
                patch(
                    'apache.aurora.client.api.instance_watcher.StatusHealthCheck',
                    return_value=mock_health_check),
                patch('time.time',
                      side_effect=functools.partial(self.fake_time, self)),
                patch('threading._Event.wait')):
            cmd = AuroraCommandLine()
            cmd.execute(
                ['job', 'restart', '--batch-size=5', 'west/bozo/test/hello'])

            assert mock_scheduler_proxy.getTasksWithoutConfigs.call_count >= 4
            assert mock_scheduler_proxy.restartShards.call_count == 4
            mock_scheduler_proxy.restartShards.assert_called_with(
                JobKey(environment=self.TEST_ENV,
                       role=self.TEST_ROLE,
                       name=self.TEST_JOB), [15, 16, 17, 18, 19], None)
Example #21
    def create_scheduled_tasks(cls):
        tasks = []
        for name in ['foo', 'bar', 'baz']:
            task = ScheduledTask(
                failureCount=0,
                assignedTask=AssignedTask(
                    taskId=1287391823,
                    slaveHost='slavehost',
                    task=TaskConfig(
                        maxTaskFailures=1,
                        executorConfig=ExecutorConfig(data='fake data'),
                        metadata=[],
                        job=JobKey(role=cls.TEST_ROLE,
                                   environment=cls.TEST_ENV,
                                   name=name),
                        owner=Identity(role=cls.TEST_ROLE),
                        environment=cls.TEST_ENV,
                        jobName=name,
                        numCpus=2,
                        ramMb=2,
                        diskMb=2),
                    instanceId=4237894,
                    assignedPorts={}),
                status=ScheduleStatus.RUNNING,
                taskEvents=[
                    TaskEvent(timestamp=28234726395,
                              status=ScheduleStatus.RUNNING,
                              message="Hi there")
                ])

            tasks.append(task)
        return tasks
Example #22
    def setup_get_tasks_status_calls(cls, scheduler):
        status_response = cls.create_simple_success_response()
        scheduler.getTasksStatus.return_value = status_response
        scheduler.getTasksWithoutConfigs.return_value = status_response
        task_config = TaskConfig(numCpus=1.0,
                                 ramMb=10,
                                 diskMb=1,
                                 job=JobKey(role='bozo',
                                            environment='test',
                                            name='hello'))

        # This should be a list of ScheduledTask objects.
        tasks = []
        for i in range(20):
            task_status = create_autospec(spec=ScheduledTask, instance=True)
            task_status.assignedTask = create_autospec(spec=AssignedTask,
                                                       instance=True)
            task_status.assignedTask.instanceId = i
            task_status.assignedTask.taskId = "Task%s" % i
            task_status.assignedTask.slaveId = "Slave%s" % i
            task_status.slaveHost = "Slave%s" % i
            task_status.assignedTask.task = task_config
            tasks.append(task_status)
        status_response.result = Result(
            scheduleStatusResult=ScheduleStatusResult(tasks=tasks))
Example #23
 def test_from_assigned_task_shell_no_demotion(self, mock_getpwnam):
     interval_secs = 17
     initial_interval_secs = 3
     max_consecutive_failures = 2
     timeout_secs = 5
     shell_config = ShellHealthChecker(shell_command='failed command')
     task_config = TaskConfig(
         job=JobKey(role='role', environment='env', name='name'),
         executorConfig=ExecutorConfig(
             name='thermos-generic',
             data=MESOS_JOB(
                 task=HELLO_WORLD,
                 health_check_config=HealthCheckConfig(
                     health_checker=HealthCheckerConfig(shell=shell_config),
                     interval_secs=interval_secs,
                     initial_interval_secs=initial_interval_secs,
                     max_consecutive_failures=max_consecutive_failures,
                     timeout_secs=timeout_secs,
                 )).json_dumps()))
     assigned_task = AssignedTask(task=task_config,
                                  instanceId=1,
                                  assignedPorts={'foo': 9001})
     execconfig_data = json.loads(assigned_task.task.executorConfig.data)
     assert execconfig_data['health_check_config']['health_checker'][
         'shell']['shell_command'] == 'failed command'
     health_checker = HealthCheckerProvider(
         nosetuid_health_checks=True).from_assigned_task(
             assigned_task, None)
     assert health_checker.threaded_health_checker.interval == interval_secs
     assert health_checker.threaded_health_checker.initial_interval == initial_interval_secs
     hct_max_fail = health_checker.threaded_health_checker.max_consecutive_failures
     assert hct_max_fail == max_consecutive_failures
     # Should not be trying to access role's user info.
     assert not mock_getpwnam.called
Example #24
 def test_addInstances(self):
   self.mock_thrift_client.addInstances(
     IsA(JobKey),
     IgnoreArg(),
     IsA(Lock)).AndReturn(DEFAULT_RESPONSE)
   self.mox.ReplayAll()
   self.make_scheduler_proxy().addInstances(JobKey(), {}, Lock())
Example #25
 def create_scheduled_task(instance, start_time):
     task = ScheduledTask(assignedTask=AssignedTask(
         taskId="task_%s" % instance,
         slaveId="random_machine_id",
         slaveHost="junk.nothing",
         task=TaskConfig(job=JobKey(role="nobody",
                                    environment="prod",
                                    name='flibber'),
                         isService=False,
                         resources=frozenset([
                             Resource(numCpus=2),
                             Resource(ramMb=2048),
                             Resource(diskMb=4096)
                         ]),
                         priority=7,
                         maxTaskFailures=3,
                         production=False),
         assignedPorts={"http": 1001},
         instanceId=instance),
                          status=2,
                          failureCount=instance + 4,
                          taskEvents=create_task_events(start_time),
                          ancestorId="random_task_ancestor%s" %
                          instance)
     return task
Example #26
 def create_mock_scheduled_tasks(cls):
     jobs = []
     for name in ['foo', 'bar', 'baz']:
         job = Mock()
         job.key = JobKey(role=cls.TEST_ROLE,
                          environment=cls.TEST_ENV,
                          name=name)
         job.failure_count = 0
         job.assignedTask = Mock(spec=AssignedTask)
         job.assignedTask.slaveHost = 'slavehost'
         job.assignedTask.task = Mock(spec=TaskConfig)
         job.assignedTask.task.maxTaskFailures = 1
         job.assignedTask.task.executorConfig = Mock(spec=ExecutorConfig)
         job.assignedTask.task.executorConfig.data = Mock()
         job.assignedTask.task.metadata = []
         job.assignedTask.task.owner = Identity(role='bozo')
         job.assignedTask.task.environment = 'test'
         job.assignedTask.task.jobName = 'woops'
         job.assignedTask.task.numCpus = 2
         job.assignedTask.task.ramMb = 2
         job.assignedTask.task.diskMb = 2
         job.assignedTask.instanceId = 4237894
         job.assignedTask.assignedPorts = None
         job.status = ScheduleStatus.RUNNING
         mockEvent = Mock(spec=TaskEvent)
         mockEvent.timestamp = 28234726395
         mockEvent.status = ScheduleStatus.RUNNING
         mockEvent.message = "Hi there"
         job.taskEvents = [mockEvent]
         jobs.append(job)
     return jobs
Example #27
 def create_task(cls):
     return [
         ScheduledTask(assignedTask=AssignedTask(
             instanceId=0,
             task=TaskConfig(
                 job=JobKey(role='role', environment='test', name='job'))),
                       status=ScheduleStatus.RUNNING)
     ]
Example #28
 def test_killTasks(self):
   self.mock_thrift_client.killTasks(
       IgnoreArg(),
       IgnoreArg(),
       IsA(JobKey),
       IgnoreArg()).AndReturn(DEFAULT_RESPONSE)
   self.mox.ReplayAll()
   self.make_scheduler_proxy().killTasks(None, None, JobKey(), set([0]))
Example #29
 def assert_correct_addinstance_calls(cls, api):
     assert api.addInstances.call_count == 20
     last_addinst = api.addInstances.call_args
     assert isinstance(last_addinst[0][0], AddInstancesConfig)
     assert last_addinst[0][0].instanceIds == frozenset([19])
     assert last_addinst[0][0].key == JobKey(environment='test',
                                             role='bozo',
                                             name='hello')
Example #30
 def _create_getjobs_response(cls):
     response = cls.create_simple_success_response()
     response.result = Result(getJobsResult=GetJobsResult(configs=[
         JobConfiguration(
             cronSchedule='* * * * *',
             key=JobKey(role='bozo', environment='test', name='hello'))
     ]))
     return response