def test_pack_result_summary_key(self):
    request_key = task_pack.unpack_request_key('11')
    result_summary_key = task_pack.request_key_to_result_summary_key(
        request_key)
    run_result_key = task_pack.result_summary_key_to_run_result_key(
        result_summary_key, 1)

    actual = task_pack.pack_result_summary_key(result_summary_key)
    self.assertEqual('110', actual)

    with self.assertRaises(AssertionError):
      task_pack.pack_result_summary_key(run_result_key)
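The convention exercised by this test: a packed result summary id is the packed request id ('11') with a trailing '0', while a packed run result id ends with the try number instead. A minimal sketch of the full round trip, assuming the same task_pack helpers shown throughout these examples (the '111' value for try number 1 is inferred from that convention):

request_key = task_pack.unpack_request_key('11')
result_summary_key = task_pack.request_key_to_result_summary_key(request_key)
# Result summary keys always pack with a trailing '0'.
assert task_pack.pack_result_summary_key(result_summary_key) == '110'

run_result_key = task_pack.result_summary_key_to_run_result_key(
    result_summary_key, 1)
# Run result keys pack with the try number as the last digit.
assert task_pack.pack_run_result_key(run_result_key) == '111'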
Example #2
    def test_pack_result_summary_key(self):
        request_key = task_pack.unpack_request_key('11')
        result_summary_key = task_pack.request_key_to_result_summary_key(
            request_key)
        run_result_key = task_pack.result_summary_key_to_run_result_key(
            result_summary_key, 1)

        actual = task_pack.pack_result_summary_key(result_summary_key)
        self.assertEqual('110', actual)

        with self.assertRaises(AssertionError):
            task_pack.pack_result_summary_key(run_result_key)
Example #3
def cancel_task(result_summary_key):
    """Cancels a task if possible."""
    request = task_pack.result_summary_key_to_request_key(result_summary_key).get()
    to_run_key = task_to_run.request_to_task_to_run_key(request)
    now = utils.utcnow()

    def run():
        to_run, result_summary = ndb.get_multi((to_run_key, result_summary_key))
        was_running = result_summary.state == task_result.State.RUNNING
        if not result_summary.can_be_canceled:
            return False, was_running
        to_run.queue_number = None
        result_summary.state = task_result.State.CANCELED
        result_summary.abandoned_ts = now
        result_summary.modified_ts = now

        futures = ndb.put_multi_async((to_run, result_summary))
        _maybe_pubsub_notify_via_tq(result_summary, request)
        for f in futures:
            f.check_success()

        return True, was_running

    try:
        ok, was_running = datastore_utils.transaction(run)
    except datastore_utils.CommitError as e:
        packed = task_pack.pack_result_summary_key(result_summary_key)
        return "Failed killing task %s: %s" % (packed, e)
    # Add it to the negative cache.
    task_to_run.set_lookup_cache(to_run_key, False)
    # TODO(maruel): Add stats.
    return ok, was_running
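Callers usually hold only the packed task id from the HTTP layer, not the ndb key. A hedged usage sketch, assuming task_pack also exposes unpack_result_summary_key as the inverse of pack_result_summary_key (it is not shown in these examples):

def cancel_task_by_id(task_id):
    """Cancels a task given its packed id, e.g. '110' (hypothetical helper)."""
    result_summary_key = task_pack.unpack_result_summary_key(task_id)
    # Returns (ok, was_running) on success, or an error string on CommitError.
    return cancel_task(result_summary_key)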
Example #4
    def terminate(self, request):
        """Asks a bot to terminate itself gracefully.

    The bot will stay in the DB; use 'delete' to remove it from the DB
    afterward. This request returns a pseudo-taskid that can be waited on to
    know when the bot has shut down.

    This command is particularly useful when a privileged user needs to safely
    debug a machine-specific issue. The user can trigger a terminate for one of
    the bots exhibiting the issue, wait for the pseudo-task to run, then access
    the machine with the guarantee that the bot is not running anymore.
    """
        # TODO(maruel): Disallow a terminate task when there's one currently
        # pending or if the bot is considered 'dead', e.g. no contact since 10
        # minutes.
        logging.debug('%s', request)
        bot_id = unicode(request.bot_id)
        bot_key = bot_management.get_info_key(bot_id)
        get_or_raise(bot_key)  # raises 404 if there is no such bot
        try:
            # Craft a special priority 0 task to tell the bot to shut down.
            request = task_request.create_termination_task(
                bot_id, wait_for_capacity=True)
        except (datastore_errors.BadValueError, TypeError, ValueError) as e:
            raise endpoints.BadRequestException(e.message)

        result_summary = task_scheduler.schedule_request(request,
                                                         secret_bytes=None)
        return swarming_rpcs.TerminateResponse(
            task_id=task_pack.pack_result_summary_key(result_summary.key))
Example #5
def _maybe_pubsub_notify_now(result_summary, request):
    """Examines result_summary and sends task completion PubSub message.

  Does so only if result_summary indicates a task in some finished state and
  the request specifies a pubsub topic.

  Returns False to trigger a retry (on transient errors), or True if no retry
  is needed (e.g. the message was sent successfully or a fatal error happened).
  """
    assert not ndb.in_transaction()
    assert isinstance(result_summary,
                      task_result.TaskResultSummary), result_summary
    assert isinstance(request, task_request.TaskRequest), request
    if (result_summary.state in task_result.State.STATES_NOT_RUNNING
            and request.pubsub_topic):
        task_id = task_pack.pack_result_summary_key(result_summary.key)
        try:
            _pubsub_notify(task_id, request.pubsub_topic,
                           request.pubsub_auth_token, request.pubsub_userdata)
        except pubsub.TransientError:
            logging.exception(
                'Transient error when sending PubSub notification')
            return False
        except pubsub.Error:
            logging.exception('Fatal error when sending PubSub notification')
            return True  # do not retry it
    return True
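The boolean return value drives the task queue retry semantics: the queue keeps retrying the notification until the handler succeeds. A hypothetical webapp2 handler wired to the '/internal/taskqueue/pubsub/<task_id>' URL used by the _maybe_pubsub_notify_via_tq examples below; the class name, routing, and unpack_result_summary_key (assumed to be the inverse of pack_result_summary_key) are illustrative, not taken from the source:

import webapp2

class TaskSendPubSubMessage(webapp2.RequestHandler):  # hypothetical handler
    def post(self, task_id):
        result_summary = task_pack.unpack_result_summary_key(task_id).get()
        request = task_pack.result_summary_key_to_request_key(
            result_summary.key).get()
        if not _maybe_pubsub_notify_now(result_summary, request):
            # A non-2xx response makes the task queue retry this task later.
            self.abort(500)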
Example #6
def cancel_task(result_summary_key):
  """Cancels a task if possible."""
  request_key = task_pack.result_summary_key_to_request_key(result_summary_key)
  to_run_key = task_to_run.request_to_task_to_run_key(request_key.get())
  now = utils.utcnow()

  def run():
    to_run, result_summary = ndb.get_multi((to_run_key, result_summary_key))
    was_running = result_summary.state == task_result.State.RUNNING
    if not result_summary.can_be_canceled:
      return False, was_running
    to_run.queue_number = None
    result_summary.state = task_result.State.CANCELED
    result_summary.abandoned_ts = now
    result_summary.modified_ts = now
    ndb.put_multi((to_run, result_summary))
    return True, was_running

  try:
    ok, was_running = datastore_utils.transaction(run)
  except datastore_utils.CommitError as e:
    packed = task_pack.pack_result_summary_key(result_summary_key)
    return 'Failed killing task %s: %s' % (packed, e)
  # Add it to the negative cache.
  task_to_run.set_lookup_cache(to_run_key, False)
  # TODO(maruel): Add stats.
  return ok, was_running
Example #7
def _maybe_pubsub_notify_via_tq(result_summary, request):
    """Examines result_summary and enqueues a task to send PubSub message.

  Must be called within a transaction.

  Raises CommitError on errors (to abort the transaction).
  """
    assert ndb.in_transaction()
    assert isinstance(result_summary,
                      task_result.TaskResultSummary), result_summary
    assert isinstance(request, task_request.TaskRequest), request
    if (result_summary.state in task_result.State.STATES_NOT_RUNNING
            and request.pubsub_topic):
        task_id = task_pack.pack_result_summary_key(result_summary.key)
        ok = utils.enqueue_task(
            url='/internal/taskqueue/pubsub/%s' % task_id,
            queue_name='pubsub',
            transactional=True,
            payload=utils.encode_to_json({
                'task_id': task_id,
                'topic': request.pubsub_topic,
                'auth_token': request.pubsub_auth_token,
                'userdata': request.pubsub_userdata,
            }))
        if not ok:
            raise datastore_utils.CommitError(
                'Failed to enqueue task queue task')
Example #8
  def new(self, request):
    """Creates a new task.

    The task will be enqueued in the tasks list and will be executed at the
    earliest opportunity by a bot that has at least the dimensions as described
    in the task request.
    """
    logging.info('%s', request)
    try:
      request = message_conversion.new_task_request_from_rpc(
          request, utils.utcnow())
      posted_request = task_request.make_request(request, acl.is_bot_or_admin())
    except (datastore_errors.BadValueError, TypeError, ValueError) as e:
      raise endpoints.BadRequestException(e.message)

    result_summary = task_scheduler.schedule_request(posted_request)

    previous_result = None
    if result_summary.deduped_from:
      previous_result = message_conversion.task_result_to_rpc(result_summary)

    return swarming_rpcs.TaskRequestMetadata(
        request=message_conversion.task_request_to_rpc(posted_request),
        task_id=task_pack.pack_result_summary_key(result_summary.key),
        task_result=previous_result)
Example #9
def _maybe_pubsub_notify_via_tq(result_summary, request):
    """Examines result_summary and enqueues a task to send PubSub message.

  Must be called within a transaction.

  Raises CommitError on errors (to abort the transaction).
  """
    assert ndb.in_transaction()
    assert isinstance(result_summary, task_result.TaskResultSummary), result_summary
    assert isinstance(request, task_request.TaskRequest), request
    if result_summary.state in task_result.State.STATES_NOT_RUNNING and request.pubsub_topic:
        task_id = task_pack.pack_result_summary_key(result_summary.key)
        ok = utils.enqueue_task(
            url="/internal/taskqueue/pubsub/%s" % task_id,
            queue_name="pubsub",
            transactional=True,
            payload=utils.encode_to_json(
                {
                    "task_id": task_id,
                    "topic": request.pubsub_topic,
                    "auth_token": request.pubsub_auth_token,
                    "userdata": request.pubsub_userdata,
                }
            ),
        )
        if not ok:
            raise datastore_utils.CommitError("Failed to enqueue task queue task")
Example #10
    def new(self, request):
        """Creates a new task.

    The task will be enqueued in the tasks list and will be executed at the
    earliest opportunity by a bot that has at least the dimensions as described
    in the task request.
    """
        logging.info('%s', request)
        try:
            request = message_conversion.new_task_request_from_rpc(
                request, utils.utcnow())
            posted_request = task_request.make_request(request,
                                                       acl.is_bot_or_admin())
        except (datastore_errors.BadValueError, TypeError, ValueError) as e:
            raise endpoints.BadRequestException(e.message)

        result_summary = task_scheduler.schedule_request(posted_request)

        previous_result = None
        if result_summary.deduped_from:
            previous_result = message_conversion.task_result_to_rpc(
                result_summary)

        return swarming_rpcs.TaskRequestMetadata(
            request=message_conversion.task_request_to_rpc(posted_request),
            task_id=task_pack.pack_result_summary_key(result_summary.key),
            task_result=previous_result)
Example #11
def _expire_task(to_run_key, request):
    """Expires a TaskResultSummary and unschedules the TaskToRun.

  Returns:
    True on success.
  """
    # Check whether the TaskToRun is reapable once before doing the check
    # inside the transaction. This reduces the likelihood of failing this
    # check inside the transaction, which is an order of magnitude more
    # costly.
    if not to_run_key.get().is_reapable:
        logging.info('Not reapable anymore')
        return None

    result_summary_key = task_pack.request_key_to_result_summary_key(
        request.key)
    now = utils.utcnow()

    def run():
        # 2 concurrent GET, one PUT. Optionally with an additional serialized GET.
        to_run_future = to_run_key.get_async()
        result_summary_future = result_summary_key.get_async()
        to_run = to_run_future.get_result()
        if not to_run or not to_run.is_reapable:
            result_summary_future.wait()
            return False

        to_run.queue_number = None
        result_summary = result_summary_future.get_result()
        if result_summary.try_number:
            # It's a retry that is being expired. Keep the old state. That requires an
            # additional pipelined GET but that shouldn't be the common case.
            run_result = result_summary.run_result_key.get()
            result_summary.set_from_run_result(run_result, request)
        else:
            result_summary.state = task_result.State.EXPIRED
        result_summary.abandoned_ts = now
        result_summary.modified_ts = now

        futures = ndb.put_multi_async((to_run, result_summary))
        _maybe_pubsub_notify_via_tq(result_summary, request)
        for f in futures:
            f.check_success()

        return True

    # It'll be caught by next cron job execution in case of failure.
    try:
        success = datastore_utils.transaction(run)
    except datastore_utils.CommitError:
        success = False
    if success:
        task_to_run.set_lookup_cache(to_run_key, False)
        logging.info('Expired %s',
                     task_pack.pack_result_summary_key(result_summary_key))
    return success
Example #12
    def post(self):
        logging.error('Unexpected old client')
        data = self.parse_body()
        msg = log_unexpected_subset_keys(self._EXPECTED_DATA_KEYS,
                                         self._REQUIRED_DATA_KEYS, data,
                                         self.request, 'client',
                                         'request keys')
        if msg:
            self.abort_with_error(400, error=msg)
        data_properties = data['properties']
        msg = log_unexpected_subset_keys(self._EXPECTED_PROPERTIES_KEYS,
                                         self._REQUIRED_PROPERTIES_KEYS,
                                         data_properties, self.request,
                                         'client', 'request properties keys')
        if msg:
            self.abort_with_error(400, error=msg)

        # Class TaskProperties takes care of making everything deterministic.
        properties = task_request.TaskProperties(
            commands=data_properties['commands'],
            data=data_properties['data'],
            dimensions=data_properties['dimensions'],
            env=data_properties['env'],
            execution_timeout_secs=data_properties['execution_timeout_secs'],
            grace_period_secs=data_properties.get('grace_period_secs', 30),
            idempotent=data_properties.get('idempotent', False),
            io_timeout_secs=data_properties['io_timeout_secs'])

        now = utils.utcnow()
        expiration_ts = now + datetime.timedelta(
            seconds=data['scheduling_expiration_secs'])
        request = task_request.TaskRequest(
            created_ts=now,
            expiration_ts=expiration_ts,
            name=data['name'],
            parent_task_id=data.get('parent_task_id'),
            priority=data['priority'],
            properties=properties,
            tags=data['tags'],
            user=data['user'] or '')

        try:
            request = task_request.make_request(request, acl.is_bot_or_admin())
        except (AttributeError, datastore_errors.BadValueError, TypeError,
                ValueError) as e:
            self.abort_with_error(400, error=str(e))

        result_summary = task_scheduler.schedule_request(request)
        data = {
            'request': request.to_dict(),
            'task_id': task_pack.pack_result_summary_key(result_summary.key),
        }
        self.send_response(utils.to_json_encodable(data))
Example #13
def _expire_task(to_run_key, request):
    """Expires a TaskResultSummary and unschedules the TaskToRun.

  Returns:
    True on success.
  """
    # Check whether the TaskToRun is reapable once before doing the check
    # inside the transaction. This reduces the likelihood of failing this
    # check inside the transaction, which is an order of magnitude more
    # costly.
    if not to_run_key.get().is_reapable:
        logging.info("Not reapable anymore")
        return None

    result_summary_key = task_pack.request_key_to_result_summary_key(request.key)
    now = utils.utcnow()

    def run():
        # 2 concurrent GET, one PUT. Optionally with an additional serialized GET.
        to_run_future = to_run_key.get_async()
        result_summary_future = result_summary_key.get_async()
        to_run = to_run_future.get_result()
        if not to_run or not to_run.is_reapable:
            result_summary_future.wait()
            return False

        to_run.queue_number = None
        result_summary = result_summary_future.get_result()
        if result_summary.try_number:
            # It's a retry that is being expired. Keep the old state. That requires an
            # additional pipelined GET but that shouldn't be the common case.
            run_result = result_summary.run_result_key.get()
            result_summary.set_from_run_result(run_result, request)
        else:
            result_summary.state = task_result.State.EXPIRED
        result_summary.abandoned_ts = now
        result_summary.modified_ts = now

        futures = ndb.put_multi_async((to_run, result_summary))
        _maybe_pubsub_notify_via_tq(result_summary, request)
        for f in futures:
            f.check_success()

        return True

    # It'll be caught by next cron job execution in case of failure.
    try:
        success = datastore_utils.transaction(run)
    except datastore_utils.CommitError:
        success = False
    if success:
        task_to_run.set_lookup_cache(to_run_key, False)
        logging.info("Expired %s", task_pack.pack_result_summary_key(result_summary_key))
    return success
Example #14
  def post(self):
    logging.error('Unexpected old client')
    data = self.parse_body()
    msg = log_unexpected_subset_keys(
        self._EXPECTED_DATA_KEYS, self._REQUIRED_DATA_KEYS, data, self.request,
        'client', 'request keys')
    if msg:
      self.abort_with_error(400, error=msg)
    data_properties = data['properties']
    msg = log_unexpected_subset_keys(
        self._EXPECTED_PROPERTIES_KEYS, self._REQUIRED_PROPERTIES_KEYS,
        data_properties, self.request, 'client', 'request properties keys')
    if msg:
      self.abort_with_error(400, error=msg)

    # Class TaskProperties takes care of making everything deterministic.
    properties = task_request.TaskProperties(
        commands=data_properties['commands'],
        data=data_properties['data'],
        dimensions=data_properties['dimensions'],
        env=data_properties['env'],
        execution_timeout_secs=data_properties['execution_timeout_secs'],
        grace_period_secs=data_properties.get('grace_period_secs', 30),
        idempotent=data_properties.get('idempotent', False),
        io_timeout_secs=data_properties['io_timeout_secs'])

    now = utils.utcnow()
    expiration_ts = now + datetime.timedelta(
        seconds=data['scheduling_expiration_secs'])
    request = task_request.TaskRequest(
        created_ts=now,
        expiration_ts=expiration_ts,
        name=data['name'],
        parent_task_id=data.get('parent_task_id'),
        priority=data['priority'],
        properties=properties,
        tags=data['tags'],
        user=data['user'] or '')

    try:
      request = task_request.make_request(request, acl.is_bot_or_admin())
    except (
        AttributeError, datastore_errors.BadValueError, TypeError,
        ValueError) as e:
      self.abort_with_error(400, error=str(e))

    result_summary = task_scheduler.schedule_request(request)
    data = {
      'request': request.to_dict(),
      'task_id': task_pack.pack_result_summary_key(result_summary.key),
    }
    self.send_response(utils.to_json_encodable(data))
Example #15
  def post(self):
    request_data = self.parse_body()
    # If the priority is below 100, make sure the user has the right to do so.
    if request_data.get('priority', 255) < 100 and not acl.is_bot_or_admin():
      # Silently drop the priority of normal users.
      request_data['priority'] = 100

    try:
      request = task_request.make_request(request_data)
    except (datastore_errors.BadValueError, TypeError, ValueError) as e:
      self.abort_with_error(400, error=str(e))

    result_summary = task_scheduler.schedule_request(request)
    data = {
      'request': request.to_dict(),
      'task_id': task_pack.pack_result_summary_key(result_summary.key),
    }
    self.send_response(utils.to_json_encodable(data))
Example #16
  def post(self):
    request_data = self.parse_body()
    # If the priority is below 100, make sure the user has the right to do so.
    if request_data.get('priority', 255) < 100 and not acl.is_bot_or_admin():
      # Silently drop the priority of normal users.
      request_data['priority'] = 100

    try:
      request = task_request.make_request(request_data)
    except (datastore_errors.BadValueError, TypeError, ValueError) as e:
      self.abort_with_error(400, error=str(e))

    result_summary = task_scheduler.schedule_request(request)
    data = {
      'request': request.to_dict(),
      'task_id': task_pack.pack_result_summary_key(result_summary.key),
    }
    self.send_response(utils.to_json_encodable(data))
Example #17
    def terminate(self, request):
        """Asks a bot to terminate itself gracefully.

    The bot will stay in the DB; use 'delete' to remove it from the DB
    afterward. This request returns a pseudo-taskid that can be waited on to
    know when the bot has shut down.

    This command is particularly useful when a privileged user needs to safely
    debug a machine-specific issue. The user can trigger a terminate for one of
    the bots exhibiting the issue, wait for the pseudo-task to run, then access
    the machine with the guarantee that the bot is not running anymore.
    """
        # TODO(maruel): Disallow a terminate task when there's one currently
        # pending or if the bot is considered 'dead', e.g. no contact since 10
        # minutes.
        logging.info('%s', request)
        bot_key = bot_management.get_info_key(request.bot_id)
        get_or_raise(bot_key)  # raises 404 if there is no such bot
        try:
            # Craft a special priority 0 task to tell the bot to shut down.
            properties = task_request.TaskProperties(
                dimensions={u'id': request.bot_id},
                execution_timeout_secs=0,
                grace_period_secs=0,
                io_timeout_secs=0)
            now = utils.utcnow()
            request = task_request.TaskRequest(
                created_ts=now,
                expiration_ts=now + datetime.timedelta(days=1),
                name='Terminate %s' % request.bot_id,
                priority=0,
                properties=properties,
                tags=['terminate:1'],
                user=auth.get_current_identity().to_bytes())
            assert request.properties.is_terminate
            posted_request = task_request.make_request(request,
                                                       acl.is_bot_or_admin())
        except (datastore_errors.BadValueError, TypeError, ValueError) as e:
            raise endpoints.BadRequestException(e.message)

        result_summary = task_scheduler.schedule_request(posted_request)
        return swarming_rpcs.TerminateResponse(
            task_id=task_pack.pack_result_summary_key(result_summary.key))
Example #18
  def terminate(self, request):
    """Asks a bot to terminate itself gracefully.

    The bot will stay in the DB; use 'delete' to remove it from the DB
    afterward. This request returns a pseudo-taskid that can be waited on to
    know when the bot has shut down.

    This command is particularly useful when a privileged user needs to safely
    debug a machine-specific issue. The user can trigger a terminate for one of
    the bots exhibiting the issue, wait for the pseudo-task to run, then access
    the machine with the guarantee that the bot is not running anymore.
    """
    # TODO(maruel): Disallow a terminate task when there's one currently
    # pending or if the bot is considered 'dead', e.g. no contact since 10
    # minutes.
    logging.info('%s', request)
    bot_key = bot_management.get_info_key(request.bot_id)
    get_or_raise(bot_key)  # raises 404 if there is no such bot
    try:
      # Craft a special priority 0 task to tell the bot to shut down.
      properties = task_request.TaskProperties(
          dimensions={u'id': request.bot_id},
          execution_timeout_secs=0,
          grace_period_secs=0,
          io_timeout_secs=0)
      now = utils.utcnow()
      request = task_request.TaskRequest(
          created_ts=now,
          expiration_ts=now + datetime.timedelta(days=1),
          name='Terminate %s' % request.bot_id,
          priority=0,
          properties=properties,
          tags=['terminate:1'],
          user=auth.get_current_identity().to_bytes())
      assert request.properties.is_terminate
      posted_request = task_request.make_request(request, acl.is_bot_or_admin())
    except (datastore_errors.BadValueError, TypeError, ValueError) as e:
      raise endpoints.BadRequestException(e.message)

    result_summary = task_scheduler.schedule_request(posted_request)
    return swarming_rpcs.TerminateResponse(
        task_id=task_pack.pack_result_summary_key(result_summary.key))
Example #19
  def new(self, request):
    """Provides a TaskRequest and receives its metadata."""
    request_dict = json.loads(remote.protojson.encode_message(request))
    _transform_request(request_dict)

    # If the priority is below 100, make sure the user has the right to do so.
    if request_dict.get('priority', 255) < 100 and not acl.is_bot_or_admin():
      # Silently drop the priority of normal users.
      request_dict['priority'] = 100

    try:
      posted_request = task_request.make_request(request_dict)
    except (datastore_errors.BadValueError, TypeError, ValueError) as e:
      raise endpoints.BadRequestException(e.message)

    result_summary = task_scheduler.schedule_request(posted_request)
    posted_dict = utils.to_json_encodable(posted_request)
    return swarming_rpcs.TaskRequestMetadata(
        request=message_conversion.task_request_from_dict(posted_dict),
        task_id=task_pack.pack_result_summary_key(result_summary.key))
Example #20
    def new(self, request):
        """Provides a TaskRequest and receive its metadata."""
        request_dict = json.loads(remote.protojson.encode_message(request))
        _transform_request(request_dict)

        # If the priority is below 100, make sure the user has the right to do so.
        if request_dict.get('priority', 255) < 100 and not acl.is_bot_or_admin():
            # Silently drop the priority of normal users.
            request_dict['priority'] = 100

        try:
            posted_request = task_request.make_request(request_dict)
        except (datastore_errors.BadValueError, TypeError, ValueError) as e:
            raise endpoints.BadRequestException(e.message)

        result_summary = task_scheduler.schedule_request(posted_request)
        posted_dict = utils.to_json_encodable(posted_request)
        return swarming_rpcs.TaskRequestMetadata(
            request=message_conversion.task_request_from_dict(posted_dict),
            task_id=task_pack.pack_result_summary_key(result_summary.key))
Example #21
def cancel_task(request, result_key):
    """Cancels a task if possible.

  Ensures that the associated TaskToRun is canceled and updates the
  TaskResultSummary/TaskRunResult accordingly.

  Warning: ACL check must have been done before.
  """
    to_run_key = task_to_run.request_to_task_to_run_key(request)
    if result_key.kind() == 'TaskRunResult':
        result_key = task_pack.run_result_key_to_result_summary_key(result_key)
    now = utils.utcnow()

    def run():
        to_run, result_summary = ndb.get_multi((to_run_key, result_key))
        was_running = result_summary.state == task_result.State.RUNNING
        if not result_summary.can_be_canceled:
            return False, was_running
        to_run.queue_number = None
        result_summary.state = task_result.State.CANCELED
        result_summary.abandoned_ts = now
        result_summary.modified_ts = now

        futures = ndb.put_multi_async((to_run, result_summary))
        _maybe_pubsub_notify_via_tq(result_summary, request)
        for f in futures:
            f.check_success()

        return True, was_running

    try:
        ok, was_running = datastore_utils.transaction(run)
    except datastore_utils.CommitError as e:
        packed = task_pack.pack_result_summary_key(result_key)
        return 'Failed killing task %s: %s' % (packed, e)
    # Add it to the negative cache.
    task_to_run.set_lookup_cache(to_run_key, False)
    # TODO(maruel): Add stats.
    return ok, was_running
Example #22
def _maybe_pubsub_notify_now(result_summary, request):
    """Examines result_summary and sends task completion PubSub message.

  Does so only if result_summary indicates a task in some finished state and
  the request specifies a pubsub topic.

  Returns False to trigger a retry (on transient errors), or True if no retry
  is needed (e.g. the message was sent successfully or a fatal error happened).
  """
    assert not ndb.in_transaction()
    assert isinstance(result_summary, task_result.TaskResultSummary), result_summary
    assert isinstance(request, task_request.TaskRequest), request
    if result_summary.state in task_result.State.STATES_NOT_RUNNING and request.pubsub_topic:
        task_id = task_pack.pack_result_summary_key(result_summary.key)
        try:
            _pubsub_notify(task_id, request.pubsub_topic, request.pubsub_auth_token, request.pubsub_userdata)
        except pubsub.TransientError:
            logging.exception("Transient error when sending PubSub notification")
            return False
        except pubsub.Error:
            logging.exception("Fatal error when sending PubSub notification")
            return True  # do not retry it
    return True
Example #23
def add_task_entry(action, result_summary_key, **kwargs):
    """Action about a TaskRequest/TaskResultSummary."""
    assert action.startswith('task_'), action
    task_id = task_pack.pack_result_summary_key(result_summary_key)
    return add_entry(action=action, task_id=task_id, **kwargs)
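This is exactly how schedule_request (Example #25 below) records its enqueue event; any action name starting with 'task_' passes the assert:

stats.add_task_entry(
    'task_enqueued', result_summary.key,
    dimensions=request.properties.dimensions,
    user=request.user)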
Example #24
    def test_integration(self):
        # Creates a TaskRequest, along with its TaskResultSummary and TaskToRun.
        # Has a bot reap the task and complete it. Ensures the resulting
        # TaskResultSummary and TaskRunResult are properly updated.
        request = _gen_request()
        result_summary = task_result.new_result_summary(request)
        to_run = task_to_run.new_task_to_run(request, 1, 0)
        result_summary.modified_ts = utils.utcnow()
        ndb.transaction(lambda: ndb.put_multi([result_summary, to_run]))
        expected = self._gen_summary(modified_ts=self.now)
        self.assertEqual(expected, result_summary.to_dict())

        # Nothing changed 2 secs later except latency.
        self.mock_now(self.now, 2)
        self.assertEqual(expected, result_summary.to_dict())

        # Task is reaped after 2 seconds (4 secs total).
        reap_ts = self.now + datetime.timedelta(seconds=4)
        self.mock_now(reap_ts)
        to_run.queue_number = None
        to_run.put()
        run_result = task_result.new_run_result(request, to_run, u'localhost',
                                                u'abc', {})
        run_result.started_ts = utils.utcnow()
        run_result.modified_ts = run_result.started_ts
        ndb.transaction(
            lambda: result_summary.set_from_run_result(run_result, request))
        ndb.transaction(lambda: ndb.put_multi((result_summary, run_result)))
        expected = self._gen_summary(bot_dimensions={},
                                     bot_version=u'abc',
                                     bot_id=u'localhost',
                                     costs_usd=[0.],
                                     modified_ts=reap_ts,
                                     state=task_result.State.RUNNING,
                                     started_ts=reap_ts,
                                     try_number=1)
        self.assertEqual(expected, result_summary.key.get().to_dict())

        # Task completed after 2 seconds (6 secs total), the task has been running
        # for 2 seconds.
        complete_ts = self.now + datetime.timedelta(seconds=6)
        self.mock_now(complete_ts)
        run_result.completed_ts = complete_ts
        run_result.duration = 0.1
        run_result.exit_code = 0
        run_result.state = task_result.State.COMPLETED
        run_result.modified_ts = utils.utcnow()
        task_result.PerformanceStats(
            key=task_pack.run_result_key_to_performance_stats_key(
                run_result.key),
            bot_overhead=0.1,
            isolated_download=task_result.OperationStats(
                duration=0.05,
                initial_number_items=10,
                initial_size=10000,
                items_cold=large.pack([1, 2]),
                items_hot=large.pack([3, 4, 5])),
            isolated_upload=task_result.OperationStats(duration=0.01,
                                                       items_cold=large.pack(
                                                           [10]))).put()
        ndb.transaction(
            lambda: ndb.put_multi(run_result.append_output('foo', 0)))
        ndb.transaction(
            lambda: result_summary.set_from_run_result(run_result, request))
        ndb.transaction(lambda: ndb.put_multi((result_summary, run_result)))
        expected = self._gen_summary(bot_dimensions={},
                                     bot_version=u'abc',
                                     bot_id=u'localhost',
                                     completed_ts=complete_ts,
                                     costs_usd=[0.],
                                     duration=0.1,
                                     exit_code=0,
                                     modified_ts=complete_ts,
                                     state=task_result.State.COMPLETED,
                                     started_ts=reap_ts,
                                     try_number=1)
        self.assertEqual(expected, result_summary.key.get().to_dict())
        expected = {
            'bot_overhead': 0.1,
            'isolated_download': {
                'duration': 0.05,
                'initial_number_items': 10,
                'initial_size': 10000,
                'items_cold': large.pack([1, 2]),
                'items_hot': large.pack([3, 4, 5]),
                'num_items_cold': 2,
                'total_bytes_items_cold': 3,
                'num_items_hot': 3,
                'total_bytes_items_hot': 12,
            },
            'isolated_upload': {
                'duration': 0.01,
                'initial_number_items': None,
                'initial_size': None,
                'items_cold': large.pack([10]),
                'items_hot': None,
                'num_items_cold': 1,
                'total_bytes_items_cold': 10,
                'num_items_hot': None,
                'total_bytes_items_hot': None,
            },
            'package_installation': {
                'duration': None,
                'initial_number_items': None,
                'initial_size': None,
                'items_cold': None,
                'items_hot': None,
                'num_items_cold': None,
                'total_bytes_items_cold': None,
                'num_items_hot': None,
                'total_bytes_items_hot': None,
            },
        }
        self.assertEqual(expected, result_summary.performance_stats.to_dict())
        self.assertEqual('foo', result_summary.get_output())
        self.assertEqual(datetime.timedelta(seconds=2),
                         result_summary.duration_as_seen_by_server)
        self.assertEqual(datetime.timedelta(seconds=0.1),
                         result_summary.duration_now(utils.utcnow()))
        self.assertEqual(datetime.timedelta(seconds=4), result_summary.pending)
        self.assertEqual(datetime.timedelta(seconds=4),
                         result_summary.pending_now(utils.utcnow()))

        self.assertEqual(task_pack.pack_result_summary_key(result_summary.key),
                         result_summary.task_id)
        self.assertEqual(complete_ts, result_summary.ended_ts)
        self.assertEqual(task_pack.pack_run_result_key(run_result.key),
                         run_result.task_id)
        self.assertEqual(complete_ts, run_result.ended_ts)
Example #25
def schedule_request(request):
    """Creates and stores all the entities to schedule a new task request.

  The number of entities created is 3: TaskRequest, TaskResultSummary and
  TaskToRun.

  The TaskRequest is saved first as a DB transaction, then TaskResultSummary and
  TaskToRun are saved as a single DB RPC. The Search index is also updated
  in-between.

  Arguments:
  - request: the TaskRequest entity, already saved in the DB.

  Returns:
    TaskResultSummary. TaskToRun is not returned.
  """
    dupe_future = None
    if request.properties.idempotent:
        # Find a previously run task that is also idempotent and completed. Start a
        # query to fetch items that can be used to dedupe the task. See the comment
        # for this property for more details.
        #
        # Do not use "cls.created_ts > oldest" here because this would require a
        # composite index. It's unnecessary because TaskRequest.key is mostly
        # equivalent to decreasing TaskRequest.created_ts, ordering by key works as
        # well and doesn't require a composite index.
        cls = task_result.TaskResultSummary
        h = request.properties.properties_hash
        dupe_future = cls.query(cls.properties_hash == h).order(cls.key).get_async()

    # At this point, the request is now in the DB but not yet in a mode where it
    # can be triggered or visible. Index it right away so it is searchable. If any
    # of remaining calls in this function fail, the TaskRequest and Search
    # Document will simply point to an incomplete task, which will be ignored.
    #
    # Create the entities TaskToRun and TaskResultSummary but do not save them
    # yet. TaskRunResult will be created once a bot starts it.
    task = task_to_run.new_task_to_run(request)
    result_summary = task_result.new_result_summary(request)

    # Do not specify a doc_id, as they are guaranteed to be monotonically
    # increasing and searches are done in reverse order, which fits exactly the
    # created_ts ordering. This is useful because DateField is precise to the date
    # (!) and NumberField is signed 32 bits so the best it could do with EPOCH is
    # second resolution up to year 2038.
    index = search.Index(name="requests")
    packed = task_pack.pack_result_summary_key(result_summary.key)
    doc = search.Document(
        fields=[search.TextField(name="name", value=request.name), search.AtomField(name="id", value=packed)]
    )
    # Even if it fails here, we're still fine, as the task is not "alive" yet.
    search_future = index.put_async([doc])

    now = utils.utcnow()

    if dupe_future:
        # Reuse the results!
        dupe_summary = dupe_future.get_result()
        # Refuse tasks older than X days. This is due to the isolate server dropping
        # files. https://code.google.com/p/swarming/issues/detail?id=197
        oldest = now - datetime.timedelta(seconds=config.settings().reusable_task_age_secs)
        if dupe_summary and dupe_summary.created_ts > oldest:
            # If there's a bug, commenting out this block is sufficient to disable the
            # functionality.
            # Setting task.queue_number to None removes it from the scheduling.
            task.queue_number = None
            _copy_entity(dupe_summary, result_summary, ("created_ts", "name", "user", "tags"))
            result_summary.properties_hash = None
            result_summary.try_number = 0
            result_summary.cost_saved_usd = result_summary.cost_usd
            # Only zap after.
            result_summary.costs_usd = []
            result_summary.deduped_from = task_pack.pack_run_result_key(dupe_summary.run_result_key)

    # Get parent task details if applicable.
    parent_task_keys = None
    if request.parent_task_id:
        parent_run_key = task_pack.unpack_run_result_key(request.parent_task_id)
        parent_task_keys = [parent_run_key, task_pack.run_result_key_to_result_summary_key(parent_run_key)]

    result_summary.modified_ts = now

    # Storing these entities makes this task live. It is important at this point
    # that the HTTP handler returns as fast as possible, otherwise the task will
    # be run but the client will not know about it.
    def run():
        ndb.put_multi([result_summary, task])

    def run_parent():
        # This one is slower.
        items = ndb.get_multi(parent_task_keys)
        k = result_summary.task_id
        for item in items:
            item.children_task_ids.append(k)
            item.modified_ts = now
        ndb.put_multi(items)

    # Raising will abort to the caller.
    futures = [datastore_utils.transaction_async(run)]
    if parent_task_keys:
        futures.append(datastore_utils.transaction_async(run_parent))

    try:
        search_future.get_result()
    except search.Error:
        # Do not abort the task, for now search is best effort.
        logging.exception("Put failed")

    for future in futures:
        # Check for failures, it would raise in this case, aborting the call.
        future.get_result()

    stats.add_task_entry(
        "task_enqueued", result_summary.key, dimensions=request.properties.dimensions, user=request.user
    )
    return result_summary
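From the caller's side, the deduplication path is observable through deduped_from, as the endpoint handlers above show. A short hedged sketch:

result_summary = schedule_request(request)
task_id = task_pack.pack_result_summary_key(result_summary.key)
if result_summary.deduped_from:
    # The task was deduped against an idempotent previous run: its TaskToRun
    # was dequeued (queue_number zapped) and the old results are reused as-is.
    logging.info('%s deduped from %s', task_id, result_summary.deduped_from)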
Example #26
def cancel_task(request, result_key, kill_running):
  """Cancels a task if possible, setting it to either CANCELED or KILLED.

  Ensures that the associated TaskToRun is canceled (when pending) and updates
  the TaskResultSummary/TaskRunResult accordingly. The TaskRunResult.state is
  immediately set to KILLED for running tasks.

  Warning: ACL check must have been done before.

  Returns:
    tuple(bool, bool)
    - True if the cancelation succeeded. Either the task atomically changed
      from PENDING to CANCELED or it was RUNNING and the killing bit was set.
    - True if the task was running while it was canceled.
  """
  if result_key.kind() == 'TaskRunResult':
    # Ignore the try number. A user may ask to cancel run result 1, but if it
    # BOT_DIED, it is accepted to cancel try number #2 since the task is still
    # "pending".
    result_key = task_pack.run_result_key_to_result_summary_key(result_key)
  now = utils.utcnow()

  def run():
    """1 DB GET, 1 memcache write, 2x DB PUTs, 1x task queue."""
    # Need to get the current try number to know which TaskToRun to fetch.
    result_summary = result_key.get()
    was_running = result_summary.state == task_result.State.RUNNING
    if not result_summary.can_be_canceled:
      return False, was_running

    entities = [result_summary]
    if not was_running:
      # PENDING.
      result_summary.state = task_result.State.CANCELED
      to_run_key = task_to_run.request_to_task_to_run_key(
          request,
          result_summary.try_number or 1,
          result_summary.current_task_slice or 0)
      to_run_future = to_run_key.get_async()

      # Add it to the negative cache.
      task_to_run.set_lookup_cache(to_run_key, False)

      to_run = to_run_future.get_result()
      entities.append(to_run)
      to_run.queue_number = None
    else:
      if not kill_running:
        # Deny canceling a task that started.
        return False, was_running
      # RUNNING.
      run_result = result_summary.run_result_key.get()
      entities.append(run_result)
      # Do not change state to KILLED yet. Instead, use a 2 phase commit:
      # - set killing to True
      # - on next bot report, tell it to kill the task
      # - once the bot reports the task as terminated, set state to KILLED
      run_result.killing = True
      run_result.abandoned_ts = now
      run_result.modified_ts = now
    result_summary.abandoned_ts = now
    result_summary.modified_ts = now

    futures = ndb.put_multi_async(entities)
    _maybe_pubsub_notify_via_tq(result_summary, request)
    for f in futures:
      f.check_success()
    return True, was_running

  try:
    ok, was_running = datastore_utils.transaction(run)
  except datastore_utils.CommitError as e:
    packed = task_pack.pack_result_summary_key(result_key)
    return 'Failed killing task %s: %s' % (packed, e)

  return ok, was_running
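A hedged usage sketch contrasting the two paths, with request and result_summary_key obtained as in the earlier examples:

# Pending task: atomically flips PENDING -> CANCELED and dequeues the
# TaskToRun.
ok, was_running = cancel_task(request, result_summary_key, False)

# Running task: only sets run_result.killing; the bot is told to stop on its
# next poll, and the state becomes KILLED once it reports termination.
ok, was_running = cancel_task(request, result_summary_key, True)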
Example #27
  def test_integration(self):
    # Creates a TaskRequest, along with its TaskResultSummary and TaskToRun.
    # Has a bot reap the task and complete it. Ensures the resulting
    # TaskResultSummary and TaskRunResult are properly updated.
    request = mkreq(_gen_request())
    result_summary = task_result.new_result_summary(request)
    to_run = task_to_run.new_task_to_run(request)
    result_summary.modified_ts = utils.utcnow()
    ndb.transaction(lambda: ndb.put_multi([result_summary, to_run]))
    expected = {
      'abandoned_ts': None,
      'bot_dimensions': None,
      'bot_id': None,
      'bot_version': None,
      'cipd_pins': None,
      'children_task_ids': [],
      'completed_ts': None,
      'costs_usd': [],
      'cost_saved_usd': None,
      'created_ts': self.now,
      'deduped_from': None,
      'duration': None,
      'exit_code': None,
      'failure': False,
      'id': '1d69b9f088008810',
      'internal_failure': False,
      'modified_ts': self.now,
      'name': u'Request name',
      'outputs_ref': None,
      'properties_hash': None,
      'server_versions': [],
      'started_ts': None,
      'state': task_result.State.PENDING,
      'try_number': None,
      'tags': [
        u'pool:default',
        u'priority:50',
        u'service_account:none',
        u'tag:1',
        u'user:Jesus',
      ],
      'user': u'Jesus',
    }
    self.assertEqual(expected, result_summary.to_dict())

    # Nothing changed 2 secs later except latency.
    self.mock_now(self.now, 2)
    self.assertEqual(expected, result_summary.to_dict())

    # Task is reaped after 2 seconds (4 secs total).
    reap_ts = self.now + datetime.timedelta(seconds=4)
    self.mock_now(reap_ts)
    to_run.queue_number = None
    to_run.put()
    run_result = task_result.new_run_result(request, 1, 'localhost', 'abc', {})
    run_result.modified_ts = utils.utcnow()
    result_summary.set_from_run_result(run_result, request)
    ndb.transaction(lambda: ndb.put_multi((result_summary, run_result)))
    expected = {
      'abandoned_ts': None,
      'bot_dimensions': {},
      'bot_id': u'localhost',
      'bot_version': u'abc',
      'cipd_pins': None,
      'children_task_ids': [],
      'completed_ts': None,
      'costs_usd': [0.],
      'cost_saved_usd': None,
      'created_ts': self.now,
      'deduped_from': None,
      'duration': None,
      'exit_code': None,
      'failure': False,
      'id': '1d69b9f088008810',
      'internal_failure': False,
      'modified_ts': reap_ts,
      'name': u'Request name',
      'outputs_ref': None,
      'properties_hash': None,
      'server_versions': [u'v1a'],
      'started_ts': reap_ts,
      'state': task_result.State.RUNNING,
      'tags': [
        u'pool:default',
        u'priority:50',
        u'service_account:none',
        u'tag:1',
        u'user:Jesus',
      ],
      'try_number': 1,
      'user': u'Jesus',
    }
    self.assertEqual(expected, result_summary.key.get().to_dict())

    # Task completed after 2 seconds (6 secs total), the task has been running
    # for 2 seconds.
    complete_ts = self.now + datetime.timedelta(seconds=6)
    self.mock_now(complete_ts)
    run_result.completed_ts = complete_ts
    run_result.duration = 0.1
    run_result.exit_code = 0
    run_result.state = task_result.State.COMPLETED
    run_result.modified_ts = utils.utcnow()
    task_result.PerformanceStats(
        key=task_pack.run_result_key_to_performance_stats_key(run_result.key),
        bot_overhead=0.1,
        isolated_download=task_result.OperationStats(
            duration=0.05, initial_number_items=10, initial_size=10000,
            items_cold='foo', items_hot='bar'),
        isolated_upload=task_result.OperationStats(
            duration=0.01, items_cold='foo')).put()
    ndb.transaction(lambda: ndb.put_multi(run_result.append_output('foo', 0)))
    result_summary.set_from_run_result(run_result, request)
    ndb.transaction(lambda: ndb.put_multi((result_summary, run_result)))
    expected = {
      'abandoned_ts': None,
      'bot_dimensions': {},
      'bot_id': u'localhost',
      'bot_version': u'abc',
      'cipd_pins': None,
      'children_task_ids': [],
      'completed_ts': complete_ts,
      'costs_usd': [0.],
      'cost_saved_usd': None,
      'created_ts': self.now,
      'deduped_from': None,
      'duration': 0.1,
      'exit_code': 0,
      'failure': False,
      'id': '1d69b9f088008810',
      'internal_failure': False,
      'modified_ts': complete_ts,
      'name': u'Request name',
      'outputs_ref': None,
      'properties_hash': None,
      'server_versions': [u'v1a'],
      'started_ts': reap_ts,
      'state': task_result.State.COMPLETED,
      'tags': [
        u'pool:default',
        u'priority:50',
        u'service_account:none',
        u'tag:1',
        u'user:Jesus',
      ],
      'try_number': 1,
      'user': u'Jesus',
    }
    self.assertEqual(expected, result_summary.key.get().to_dict())
    expected = {
      'bot_overhead': 0.1,
      'isolated_download': {
        'duration': 0.05,
        'initial_number_items': 10,
        'initial_size': 10000,
        'items_cold': 'foo',
        'items_hot': 'bar',
      },
      'isolated_upload': {
        'duration': 0.01,
        'initial_number_items': None,
        'initial_size': None,
        'items_cold': 'foo',
        'items_hot': None,
      },
      'package_installation': {
        'duration': None,
        'initial_number_items': None,
        'initial_size': None,
        'items_cold': None,
        'items_hot': None,
      },
    }
    self.assertEqual(expected, result_summary.performance_stats.to_dict())
    self.assertEqual('foo', result_summary.get_output())
    self.assertEqual(
        datetime.timedelta(seconds=2),
        result_summary.duration_as_seen_by_server)
    self.assertEqual(
        datetime.timedelta(seconds=0.1),
        result_summary.duration_now(utils.utcnow()))
    self.assertEqual(
        datetime.timedelta(seconds=4), result_summary.pending)
    self.assertEqual(
        datetime.timedelta(seconds=4),
        result_summary.pending_now(utils.utcnow()))

    self.assertEqual(
        task_pack.pack_result_summary_key(result_summary.key),
        result_summary.task_id)
    self.assertEqual(complete_ts, result_summary.ended_ts)
    self.assertEqual(
        task_pack.pack_run_result_key(run_result.key),
        run_result.task_id)
    self.assertEqual(complete_ts, run_result.ended_ts)
Example #28
def _expire_task(to_run_key, request):
  """Expires a TaskResultSummary and unschedules the TaskToRun.

  This function is only meant to process PENDING tasks.

  If a follow up TaskSlice is available, reenqueue a new TaskToRun instead of
  expiring the TaskResultSummary.

  Returns:
    Tuple of (TaskResultSummary, TaskToRun): the expired summary (None on
    failure) and the new TaskToRun if a following TaskSlice was reenqueued.
  """
  # Check whether the TaskToRun is reapable once before doing the check
  # inside the transaction. This reduces the likelihood of failing this
  # check inside the transaction, which is an order of magnitude more costly.
  if not to_run_key.get().is_reapable:
    logging.info('Not reapable anymore')
    return None, None

  result_summary_key = task_pack.request_key_to_result_summary_key(request.key)
  now = utils.utcnow()

  def run():
    # 2 concurrent GET, one PUT. Optionally with an additional serialized GET.
    to_run_future = to_run_key.get_async()
    result_summary_future = result_summary_key.get_async()
    to_run = to_run_future.get_result()
    if not to_run or not to_run.is_reapable:
      result_summary_future.get_result()
      return None, None

    # In any case, dequeue the TaskToRun.
    to_run.queue_number = None
    result_summary = result_summary_future.get_result()
    to_put = [to_run, result_summary]
    # Check if there's a TaskSlice fallback that could be reenqueued.
    new_to_run = None
    index = result_summary.current_task_slice + 1
    while index < request.num_task_slices:
      dimensions = request.task_slice(index).properties.dimensions
      if _has_capacity(dimensions):
        # Enqueue a new TaskToRun for this next TaskSlice, it has capacity!
        new_to_run = task_to_run.new_task_to_run(request, 1, index)
        result_summary.current_task_slice = index
        to_put.append(new_to_run)
        break
      index += 1

    if not new_to_run:
      # There's no fallback, giving up.
      if result_summary.try_number:
        # It's a retry that is being expired, i.e. the first try had BOT_DIED.
        # Keep the old state. That requires an additional pipelined GET but that
        # shouldn't be the common case.
        run_result = result_summary.run_result_key.get()
        result_summary.set_from_run_result(run_result, request)
      else:
        result_summary.state = task_result.State.EXPIRED
      result_summary.abandoned_ts = now
    result_summary.modified_ts = now

    futures = ndb.put_multi_async(to_put)
    _maybe_pubsub_notify_via_tq(result_summary, request)
    for f in futures:
      f.check_success()

    return result_summary, new_to_run

  # Add it to the negative cache *before* running the transaction. Either way
  # the task was already reaped or the task is correctly expired and not
  # reapable.
  task_to_run.set_lookup_cache(to_run_key, False)

  # It'll be caught by next cron job execution in case of failure.
  try:
    res, r = datastore_utils.transaction(run)
  except datastore_utils.CommitError:
    res = None
    r = None
  if res:
    logging.info(
        'Expired %s', task_pack.pack_result_summary_key(result_summary_key))
    ts_mon_metrics.on_task_completed(res)
  return res, r
Example #29
    def test_integration(self):
        # Creates a TaskRequest, along with its TaskResultSummary and TaskToRun.
        # Has a bot reap the task and complete it. Ensures the resulting
        # TaskResultSummary and TaskRunResult are properly updated.
        request = task_request.make_request(_gen_request(), True)
        result_summary = task_result.new_result_summary(request)
        to_run = task_to_run.new_task_to_run(request)
        result_summary.modified_ts = utils.utcnow()
        ndb.transaction(lambda: ndb.put_multi([result_summary, to_run]))
        expected = {
            "abandoned_ts": None,
            "bot_dimensions": None,
            "bot_id": None,
            "bot_version": None,
            "children_task_ids": [],
            "completed_ts": None,
            "costs_usd": [],
            "cost_saved_usd": None,
            "created_ts": self.now,
            "deduped_from": None,
            "durations": [],
            "exit_codes": [],
            "failure": False,
            "id": "1d69b9f088008810",
            "internal_failure": False,
            "modified_ts": self.now,
            "name": u"Request name",
            "outputs_ref": None,
            "properties_hash": None,
            "server_versions": [],
            "started_ts": None,
            "state": task_result.State.PENDING,
            "try_number": None,
            "tags": [u"priority:50", u"tag:1", u"user:Jesus"],
            "user": u"Jesus",
        }
        self.assertEqual(expected, result_summary.to_dict())

        # Nothing changed 2 secs later except latency.
        self.mock_now(self.now, 2)
        self.assertEqual(expected, result_summary.to_dict())

        # Task is reaped after 2 seconds (4 secs total).
        reap_ts = self.now + datetime.timedelta(seconds=4)
        self.mock_now(reap_ts)
        to_run.queue_number = None
        to_run.put()
        run_result = task_result.new_run_result(request, 1, "localhost", "abc", {})
        run_result.modified_ts = utils.utcnow()
        result_summary.set_from_run_result(run_result, request)
        ndb.transaction(lambda: ndb.put_multi((result_summary, run_result)))
        expected = {
            "abandoned_ts": None,
            "bot_dimensions": {},
            "bot_id": u"localhost",
            "bot_version": u"abc",
            "children_task_ids": [],
            "completed_ts": None,
            "costs_usd": [0.0],
            "cost_saved_usd": None,
            "created_ts": self.now,
            "deduped_from": None,
            "durations": [],
            "exit_codes": [],
            "failure": False,
            "id": "1d69b9f088008810",
            "internal_failure": False,
            "modified_ts": reap_ts,
            "name": u"Request name",
            "outputs_ref": None,
            "properties_hash": None,
            "server_versions": [u"v1a"],
            "started_ts": reap_ts,
            "state": task_result.State.RUNNING,
            "tags": [u"priority:50", u"tag:1", u"user:Jesus"],
            "try_number": 1,
            "user": u"Jesus",
        }
        self.assertEqual(expected, result_summary.key.get().to_dict())

        # Task completed after 2 seconds (6 secs total); the task has been
        # running for 2 seconds.
        complete_ts = self.now + datetime.timedelta(seconds=6)
        self.mock_now(complete_ts)
        run_result.completed_ts = complete_ts
        run_result.exit_codes.append(0)
        run_result.state = task_result.State.COMPLETED
        run_result.modified_ts = utils.utcnow()
        ndb.transaction(lambda: ndb.put_multi(run_result.append_output(0, "foo", 0)))
        result_summary.set_from_run_result(run_result, request)
        ndb.transaction(lambda: ndb.put_multi((result_summary, run_result)))
        expected = {
            "abandoned_ts": None,
            "bot_dimensions": {},
            "bot_id": u"localhost",
            "bot_version": u"abc",
            "children_task_ids": [],
            "completed_ts": complete_ts,
            "costs_usd": [0.0],
            "cost_saved_usd": None,
            "created_ts": self.now,
            "deduped_from": None,
            "durations": [],
            "exit_codes": [0],
            "failure": False,
            "id": "1d69b9f088008810",
            "internal_failure": False,
            "modified_ts": complete_ts,
            "name": u"Request name",
            "outputs_ref": None,
            "properties_hash": None,
            "server_versions": [u"v1a"],
            "started_ts": reap_ts,
            "state": task_result.State.COMPLETED,
            "tags": [u"priority:50", u"tag:1", u"user:Jesus"],
            "try_number": 1,
            "user": u"Jesus",
        }
        self.assertEqual(expected, result_summary.key.get().to_dict())
        self.assertEqual(["foo"], list(result_summary.get_outputs()))
        self.assertEqual(datetime.timedelta(seconds=2), result_summary.duration)
        self.assertEqual(datetime.timedelta(seconds=2), result_summary.duration_now(utils.utcnow()))
        self.assertEqual(datetime.timedelta(seconds=4), result_summary.pending)
        self.assertEqual(datetime.timedelta(seconds=4), result_summary.pending_now(utils.utcnow()))

        self.assertEqual(task_pack.pack_result_summary_key(result_summary.key), result_summary.task_id)
        self.assertEqual(complete_ts, result_summary.ended_ts)
        self.assertEqual(task_pack.pack_run_result_key(run_result.key), run_result.task_id)
        self.assertEqual(complete_ts, run_result.ended_ts)
Example #31
def schedule_request(request):
  """Creates and stores all the entities to schedule a new task request.

  The number of entities created is 3: TaskRequest, TaskResultSummary and
  TaskToRun.

  The TaskRequest is saved first in a DB transaction, then TaskResultSummary
  and TaskToRun are saved as a single DB RPC. The Search index is also updated
  in between.

  Arguments:
  - request: the TaskRequest entity saved in the DB.

  Returns:
    TaskResultSummary. TaskToRun is not returned.
  """
  dupe_future = None
  if request.properties.idempotent:
    # Find a previously run task that is also idempotent and completed. Start a
    # query to fetch items that can be used to dedupe the task. See the comment
    # for this property for more details.
    #
    # Do not use "cls.created_ts > oldest" here because this would require a
    # composite index. It's unnecessary because TaskRequest.key is mostly
    # equivalent to decreasing TaskRequest.created_ts, ordering by key works as
    # well and doesn't require a composite index.
    cls = task_result.TaskResultSummary
    h = request.properties.properties_hash
    dupe_future = cls.query(cls.properties_hash==h).order(cls.key).get_async()

  # At this point, the request is in the DB but not yet in a mode where it can
  # be triggered or be visible. Index it right away so it is searchable. If any
  # of the remaining calls in this function fails, the TaskRequest and Search
  # Document will simply point to an incomplete task, which will be ignored.
  #
  # Create the TaskToRun and TaskResultSummary entities but do not save them
  # yet. TaskRunResult will be created once a bot starts the task.
  task = task_to_run.new_task_to_run(request)
  result_summary = task_result.new_result_summary(request)

  # Do not specify a doc_id, as they are guaranteed to be monotonically
  # increasing and searches are done in reverse order, which fits exactly the
  # created_ts ordering. This is useful because DateField is precise to the date
  # (!) and NumberField is signed 32 bits so the best it could do with EPOCH is
  # second resolution up to year 2038.
  index = search.Index(name='requests')
  packed = task_pack.pack_result_summary_key(result_summary.key)
  doc = search.Document(
      fields=[
        search.TextField(name='name', value=request.name),
        search.AtomField(name='id', value=packed),
      ])
  # Even if it fails here, we're still fine, as the task is not "alive" yet.
  search_future = index.put_async([doc])

  now = utils.utcnow()

  if dupe_future:
    # Reuse the results!
    dupe_summary = dupe_future.get_result()
    # Refuse tasks older than X days. This is due to the isolate server dropping
    # files. https://code.google.com/p/swarming/issues/detail?id=197
    oldest = now - datetime.timedelta(
        seconds=config.settings().reusable_task_age_secs)
    if dupe_summary and dupe_summary.created_ts > oldest:
      # If there's a bug, commenting out this block is sufficient to disable the
      # functionality.
      # Setting task.queue_number to None removes it from the scheduling.
      task.queue_number = None
      _copy_entity(dupe_summary, result_summary, ('created_ts', 'name', 'user'))
      result_summary.properties_hash = None
      result_summary.try_number = 0
      result_summary.cost_saved_usd = result_summary.cost_usd
      # Zap costs_usd only after cost_saved_usd was derived from it; this run
      # itself cost nothing.
      result_summary.costs_usd = []
      result_summary.deduped_from = task_pack.pack_run_result_key(
          dupe_summary.run_result_key)

  # Get parent task details if applicable.
  parent_task_keys = None
  if request.parent_task_id:
    parent_run_key = task_pack.unpack_run_result_key(request.parent_task_id)
    parent_task_keys = [
      parent_run_key,
      task_pack.run_result_key_to_result_summary_key(parent_run_key),
    ]

  result_summary.modified_ts = now

  # Storing these entities makes this task live. It is important at this point
  # that the HTTP handler returns as fast as possible, otherwise the task will
  # be run but the client will not know about it.
  def run():
    ndb.put_multi([result_summary, task])

  def run_parent():
    # This one is slower.
    items = ndb.get_multi(parent_task_keys)
    k = result_summary.task_id
    for item in items:
      item.children_task_ids.append(k)
      item.modified_ts = now
    ndb.put_multi(items)

  # Raising here propagates to the caller and aborts the call.
  futures = [datastore_utils.transaction_async(run)]
  if parent_task_keys:
    futures.append(datastore_utils.transaction_async(run_parent))

  try:
    search_future.get_result()
  except search.Error:
    # Do not abort the task, for now search is best effort.
    logging.exception('Put failed')

  for future in futures:
    # Check for failures; a failed future raises here, aborting the call.
    future.get_result()

  stats.add_task_entry(
      'task_enqueued', result_summary.key,
      dimensions=request.properties.dimensions,
      user=request.user)
  return result_summary
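
A hedged usage sketch built from the test helpers seen elsewhere in this document (make_request and _gen_request are test utilities, not part of this module):

request = task_request.make_request(_gen_request(), True)
result_summary = schedule_request(request)
# The packed id is what clients use to refer to the task afterward.
task_id = task_pack.pack_result_summary_key(result_summary.key)
logging.info('Enqueued task %s', task_id)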
Example #32
def task_id(self):
    """Returns the packed TaskResultSummary id for this entity."""
    return task_pack.pack_result_summary_key(self.key)
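
A hedged round-trip sketch; unpack_result_summary_key is assumed to be the inverse helper in task_pack, mirroring the unpack_run_result_key call used in schedule_request above:

packed = task_pack.pack_result_summary_key(result_summary.key)
key = task_pack.unpack_result_summary_key(packed)  # assumed inverse helper
assert key == result_summary.key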
Example #33
    def new(self, request):
        """Creates a new task.

    The task will be enqueued in the tasks list and will be executed at the
    earliest opportunity by a bot that has at least the dimensions described
    in the task request.
    """
        # Hide the secret bytes while logging the request, then restore them.
        sb = (request.properties.secret_bytes
              if request.properties is not None else None)
        if sb is not None:
            request.properties.secret_bytes = "HIDDEN"
        logging.debug('%s', request)
        if sb is not None:
            request.properties.secret_bytes = sb

        try:
            request_obj, secret_bytes = message_conversion.new_task_request_from_rpc(
                request, utils.utcnow())
            for index in xrange(request_obj.num_task_slices):
                apply_server_property_defaults(
                    request_obj.task_slice(index).properties)
            task_request.init_new_request(
                request_obj, acl.can_schedule_high_priority_tasks())
            # We need to call the ndb.Model pre-put check earlier because the
            # following checks assume that the request itself is valid and could crash
            # otherwise.
            request_obj._pre_put_hook()
        except (datastore_errors.BadValueError, TypeError, ValueError) as e:
            logging.exception(
                'Here\'s what was wrong with the user\'s new task request:')
            raise endpoints.BadRequestException(e.message)

        # Make sure the caller is actually allowed to schedule the task before
        # asking the token server for a service account token.
        task_scheduler.check_schedule_request_acl(request_obj)

        # If request_obj.service_account is an email, contact the token server to
        # generate "OAuth token grant" (or grab a cached one). By doing this we
        # check that the given service account usage is allowed by the token server
        # rules at the time the task is posted. This check is also performed later
        # (when running the task), when we get the actual OAuth access token.
        if service_accounts.is_service_account(request_obj.service_account):
            if not service_accounts.has_token_server():
                raise endpoints.BadRequestException(
                    'This Swarming server doesn\'t support task service accounts '
                    'because Token Server URL is not configured')
            max_lifetime_secs = request_obj.max_lifetime_secs
            try:
                # Note: this raises AuthorizationError if the user is not allowed to use
                # the requested account or service_accounts.InternalError if something
                # unexpected happens.
                duration = datetime.timedelta(seconds=max_lifetime_secs)
                request_obj.service_account_token = (
                    service_accounts.get_oauth_token_grant(
                        service_account=request_obj.service_account,
                        validity_duration=duration))
            except service_accounts.InternalError as exc:
                raise endpoints.InternalServerErrorException(exc.message)

        # If the user only wanted to evaluate scheduling the task, but not actually
        # schedule it, return early without a task_id.
        if request.evaluate_only:
            request_obj._pre_put_hook()
            return swarming_rpcs.TaskRequestMetadata(
                request=message_conversion.task_request_to_rpc(request_obj))

        try:
            result_summary = task_scheduler.schedule_request(
                request_obj, secret_bytes)
        except (datastore_errors.BadValueError, TypeError, ValueError) as e:
            raise endpoints.BadRequestException(e.message)

        previous_result = None
        if result_summary.deduped_from:
            previous_result = message_conversion.task_result_to_rpc(
                result_summary, False)

        return swarming_rpcs.TaskRequestMetadata(
            request=message_conversion.task_request_to_rpc(request_obj),
            task_id=task_pack.pack_result_summary_key(result_summary.key),
            task_result=previous_result)
Example #34
def _reap_task(bot_dimensions, bot_version, to_run_key, request):
  """Reaps a task and insert the results entity.

  Returns:
    (TaskRunResult, SecretBytes) if successful, (None, None) otherwise.
  """
  assert request.key == task_to_run.task_to_run_key_to_request_key(to_run_key)
  result_summary_key = task_pack.request_key_to_result_summary_key(request.key)
  bot_id = bot_dimensions[u'id'][0]

  now = utils.utcnow()
  # Log the task id before the transaction, in case the function fails in a bad
  # state where the DB TX ran but the reply never reaches the bot. This is the
  # worst case, as it leads to a task that results in BOT_DIED without ever
  # starting. This case is specifically handled in cron_handle_bot_died().
  logging.info(
      '_reap_task(%s)', task_pack.pack_result_summary_key(result_summary_key))

  def run():
    # 3 GETs, 1 PUT at the end.
    to_run_future = to_run_key.get_async()
    result_summary_future = result_summary_key.get_async()
    to_run = to_run_future.get_result()
    t = request.task_slice(to_run.task_slice_index)
    if t.properties.has_secret_bytes:
      secret_bytes_future = request.secret_bytes_key.get_async()
    result_summary = result_summary_future.get_result()
    orig_summary_state = result_summary.state
    secret_bytes = None
    if t.properties.has_secret_bytes:
      secret_bytes = secret_bytes_future.get_result()
    if not to_run:
      logging.error('Missing TaskToRun?\n%s', result_summary.task_id)
      return None, None
    if not to_run.is_reapable:
      logging.info('%s is not reapable', result_summary.task_id)
      return None, None
    if result_summary.bot_id == bot_id:
      # This means two things: first, it's a retry; second, the first try
      # failed and the retry is being reaped by the same bot. Deny that, as the
      # bot may be deeply broken and could be in a killing spree.
      # TODO(maruel): Allow retry for bot locked task using 'id' dimension.
      logging.warning(
          '%s can\'t retry its own internal failure task',
          result_summary.task_id)
      return None, None
    to_run.queue_number = None
    run_result = task_result.new_run_result(
        request, to_run, bot_id, bot_version, bot_dimensions)
    # Upon bot reap, .started_ts and .modified_ts match. They start to differ
    # on the first ping.
    run_result.started_ts = now
    run_result.modified_ts = now
    result_summary.set_from_run_result(run_result, request)
    ndb.put_multi([to_run, run_result, result_summary])
    if result_summary.state != orig_summary_state:
      _maybe_pubsub_notify_via_tq(result_summary, request)
    return run_result, secret_bytes

  # Add it to the negative cache *before* running the transaction. This
  # inhibits concurrent readers from trying to reap this task. The downside is
  # that if this request fails in the middle of the transaction, the task may
  # stay unreapable for up to 15 seconds.
  if not task_to_run.set_lookup_cache(to_run_key, False):
    logging.debug('hit negative cache')
    return None, None

  try:
    run_result, secret_bytes = datastore_utils.transaction(run, retries=0)
  except datastore_utils.CommitError:
    # The challenge here is that the transaction may have failed because:
    # - The DB had a hiccup and the TaskToRun, TaskRunResult and
    #   TaskResultSummary haven't been updated.
    # - The entities had been updated by a concurrent transaction on another
    #   handler, so the task was not reapable anyway. This does cause
    #   exceptions, as both GETs return TaskToRun.queue_number != None but only
    #   one succeeds at the PUT.
    #
    # In the first case we may want to reset the negative cache, while in the
    # latter case we don't. The trade-off is one of:
    # - the negative cache is incorrectly set, so the task is not reapable for
    #   15s
    # - resetting the negative cache would cause even more contention
    #
    # We chose the first one here for now: when the DB starts misbehaving and
    # the index becomes stale, the DB is *already* not in good shape, so it is
    # preferable not to put more stress on it; skipping a few tasks for 15s may
    # even actively help the DB stabilize.
    logging.info('CommitError; reaping failed')
    # The bot will reap the next available task in case of failure, no big deal.
    run_result = None
    secret_bytes = None
  return run_result, secret_bytes
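
For orientation, a hedged sketch of the caller's side of this contract; the name task_to_run.yield_next_available_task_to_dispatch is an assumption, while the (run_result, secret_bytes) return shape matches _reap_task above:

def bot_reap_task(bot_dimensions, bot_version):
  """Picks up the next runnable task, if any. A sketch, not the real handler."""
  for request, to_run in task_to_run.yield_next_available_task_to_dispatch(
      bot_dimensions):  # assumed iterator over (TaskRequest, TaskToRun) pairs
    run_result, secret_bytes = _reap_task(
        bot_dimensions, bot_version, to_run.key, request)
    if run_result:
      return request, secret_bytes, run_result
    # Lost the race for this TaskToRun; try the next candidate.
  return None, None, None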
Example #35
def task_id(self):
  """Returns the TaskResultSummary packed id, not the task request key."""
  return task_pack.pack_result_summary_key(
      task_pack.request_key_to_result_summary_key(self.key))
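
A hedged sketch of how the key flavors relate; the pack helpers appear in these examples, while result_summary_key_to_run_result_key is assumed to come from the same task_pack module, and the run-result step assumes try number 1:

result_summary_key = task_pack.request_key_to_result_summary_key(request.key)
run_result_key = task_pack.result_summary_key_to_run_result_key(
    result_summary_key, 1)
summary_id = task_pack.pack_result_summary_key(result_summary_key)
run_id = task_pack.pack_run_result_key(run_result_key)
# By convention the two packed ids differ only in the last digit: 0 for the
# summary, the try number for a specific run.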
Example #37
def add_task_entry(action, result_summary_key, **kwargs):
  """Action about a TaskRequest/TaskResultSummary."""
  assert action.startswith('task_'), action
  task_id = task_pack.pack_result_summary_key(result_summary_key)
  return add_entry(action=action, task_id=task_id, **kwargs)
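
Usage mirroring the call made in schedule_request() above when a task is enqueued:

add_task_entry(
    'task_enqueued', result_summary.key,
    dimensions=request.properties.dimensions,
    user=request.user)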