def _mint_jwt_based_token(scopes, service_account_key):
    """Creates new access token given service account private key."""
    # For more info see:
    # * https://developers.google.com/accounts/docs/OAuth2ServiceAccount.

    # JWT header.
    header_b64 = _b64_encode(
        utils.encode_to_json({
            'alg': 'RS256',
            'kid': service_account_key.private_key_id,
            'typ': 'JWT',
        }))

    # JWT claimset.
    now = int(utils.time_time())
    claimset_b64 = _b64_encode(
        utils.encode_to_json({
            'aud': 'https://www.googleapis.com/oauth2/v3/token',
            'exp': now + 3600,
            'iat': now,
            'iss': service_account_key.client_email,
            'scope': ' '.join(scopes),
        }))

    # Sign <header>.<claimset> with account's private key.
    signature_b64 = _b64_encode(
        _rsa_sign('%s.%s' % (header_b64, claimset_b64),
                  service_account_key.private_key))

    # URL encoded body of a token request.
    request_body = urllib.urlencode({
        'grant_type': 'urn:ietf:params:oauth:grant-type:jwt-bearer',
        'assertion': '%s.%s.%s' % (header_b64, claimset_b64, signature_b64),
    })

    # Grab the token (with retries).
    for _ in xrange(0, 5):
        response = urlfetch.fetch(
            url='https://www.googleapis.com/oauth2/v3/token',
            payload=request_body,
            method='POST',
            headers={'Content-Type': 'application/x-www-form-urlencoded'},
            follow_redirects=False,
            deadline=10,
            validate_certificate=True)
        if response.status_code == 200:
            token = json.loads(response.content)
            return {
                'access_token': str(token['access_token']),
                'exp_ts': utils.time_time() + token['expires_in'],
            }
        logging.error('Failed to fetch access token (HTTP %d)\n%s',
                      response.status_code, response.content)

    # All retries have failed, give up.
    raise AccessTokenError(
        'Failed to fetch access token from /oauth2/v3/token')
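
The snippet relies on two helpers that are not shown. A minimal sketch of what they plausibly look like, assuming the private key is a PEM string and PyCrypto is available (the names _b64_encode and _rsa_sign come from the snippet; the bodies here are assumptions):

import base64

from Crypto.Hash import SHA256
from Crypto.PublicKey import RSA
from Crypto.Signature import PKCS1_v1_5


def _b64_encode(data):
    # JWTs use the URL-safe base64 alphabet with the trailing '=' padding
    # stripped (RFC 7515).
    return base64.urlsafe_b64encode(data).rstrip('=')


def _rsa_sign(blob, private_key_pem):
    # 'RS256' means RSASSA-PKCS1-v1_5 over a SHA-256 digest.
    signer = PKCS1_v1_5.new(RSA.importKey(private_key_pem))
    return signer.sign(SHA256.new(blob))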
Example #2
def scrape_logs_for_errors(start_time, end_time, module_versions):
    """Returns a list of _ErrorCategory to generate a report.

  Arguments:
    start_time: time to start looking for errors; defaults to the last email sent.
    end_time: time to stop searching for errors; defaults to now.
    module_versions: list of module-version tuples to gather info about.

  Returns:
    tuple of 3 items:
      - list of _ErrorCategory that should be reported
      - list of _ErrorCategory that should be ignored
      - end_time of the last item processed if not all items were processed,
        otherwise |end_time|
  """
    # Scan for up to 9 minutes. This function is assumed to be run by a backend
    # (cron job or task queue) which has a 10 minute deadline. This leaves ~1
    # minute for the caller to send an email and update the DB entity.
    start = utils.time_time()

    # In practice, we don't expect more than ~100 entities.
    filters = {
        e.key.string_id(): e
        for e in models.ErrorReportingMonitoring.query()
    }

    # Gather all the error categories.
    buckets = {}
    for error_record in _extract_exceptions_from_logs(start_time, end_time,
                                                      module_versions):
        bucket = buckets.setdefault(error_record.signature,
                                    _ErrorCategory(error_record.signature))
        bucket.append_error(error_record)
        # Abort, there are too many logs.
        if (utils.time_time() - start) >= 9 * 60:
            end_time = error_record.start_time
            break

    # Filter them.
    categories = []
    ignored = []
    for category in buckets.values():
        # Ignore either the exception or the signature. Signature takes precedence.
        f = filters.get(
            models.ErrorReportingMonitoring.error_to_key_id(
                category.signature))
        if not f and category.exception_type:
            f = filters.get(
                models.ErrorReportingMonitoring.error_to_key_id(
                    category.exception_type))
        if _should_ignore_error_category(f, category):
            ignored.append(category)
        else:
            categories.append(category)

    return categories, ignored, end_time
Example #3
def _mint_jwt_based_token(scopes, service_account_key):
  """Creates new access token given service account private key."""
  # For more info see:
  # * https://developers.google.com/accounts/docs/OAuth2ServiceAccount.

  # JWT header.
  header_b64 = _b64_encode(utils.encode_to_json({
    'alg': 'RS256',
    'kid': service_account_key.private_key_id,
    'typ': 'JWT',
  }))

  # JWT claimset.
  now = int(utils.time_time())
  claimset_b64 = _b64_encode(utils.encode_to_json({
    'aud': 'https://www.googleapis.com/oauth2/v3/token',
    'exp': now + 3600,
    'iat': now,
    'iss': service_account_key.client_email,
    'scope': ' '.join(scopes),
  }))

  # Sign <header>.<claimset> with account's private key.
  signature_b64 = _b64_encode(_rsa_sign(
      '%s.%s' % (header_b64, claimset_b64), service_account_key.private_key))

  # URL encoded body of a token request.
  request_body = urllib.urlencode({
    'grant_type': 'urn:ietf:params:oauth:grant-type:jwt-bearer',
    'assertion': '%s.%s.%s' % (header_b64, claimset_b64, signature_b64),
  })

  # Grab the token (with retries).
  for _ in xrange(0, 5):
    response = urlfetch.fetch(
        url='https://www.googleapis.com/oauth2/v3/token',
        payload=request_body,
        method='POST',
        headers={'Content-Type': 'application/x-www-form-urlencoded'},
        follow_redirects=False,
        deadline=10,
        validate_certificate=True)
    if response.status_code == 200:
      token = json.loads(response.content)
      return {
        'access_token': str(token['access_token']),
        'exp_ts': utils.time_time() + token['expires_in'],
      }
    logging.error(
        'Failed to fetch access token (HTTP %d)\n%s',
        response.status_code, response.content)

  # All retries have failed, give up.
  raise AccessTokenError('Failed to fetch access token from /oauth2/v3/token')
Example #4
 def make_good_jwt(self, iat=None, exp=None, nbf=OMIT):
     hdr = {'alg': 'RS256', 'kid': self.KEY}
     payload = {'aud': 'audience blah-blah'}
     if iat is not self.OMIT:
         payload['iat'] = iat or int(utils.time_time())
     if exp is not self.OMIT:
         payload['exp'] = exp or int(utils.time_time() + 3600)
     if nbf is not self.OMIT:
         payload['nbf'] = nbf or int(utils.time_time())
     jwt = self.make_jwt(hdr, payload)
     return hdr, payload, jwt
Example #5
def scrape_logs_for_errors(start_time, end_time, module_versions):
  """Returns a list of _ErrorCategory to generate a report.

  Arguments:
    start_time: time to start looking for errors; defaults to the last email sent.
    end_time: time to stop searching for errors; defaults to now.
    module_versions: list of module-version tuples to gather info about.

  Returns:
    tuple of 3 items:
      - list of _ErrorCategory that should be reported
      - list of _ErrorCategory that should be ignored
      - end_time of the last item processed if not all items were processed,
        otherwise |end_time|
  """
  # Scan for up to 9 minutes. This function is assumed to be run by a backend
  # (cron job or task queue) which has a 10 minute deadline. This leaves ~1
  # minute for the caller to send an email and update the DB entity.
  start = utils.time_time()

  # In practice, we don't expect more than ~100 entities.
  filters = {
    e.key.string_id(): e for e in models.ErrorReportingMonitoring.query()
  }

  # Gather all the error categories.
  buckets = {}
  for error_record in _extract_exceptions_from_logs(
      start_time, end_time, module_versions):
    bucket = buckets.setdefault(
        error_record.signature, _ErrorCategory(error_record.signature))
    bucket.append_error(error_record)
    # Abort, there are too many logs.
    if (utils.time_time() - start) >= 9*60:
      end_time = error_record.start_time
      break

  # Filter them.
  categories = []
  ignored = []
  for category in buckets.itervalues():
    # Ignore either the exception or the signature. Signature takes precedence.
    f = filters.get(models.ErrorReportingMonitoring.error_to_key_id(
        category.signature))
    if not f and category.exception_type:
      f = filters.get(models.ErrorReportingMonitoring.error_to_key_id(
          category.exception_type))
    if _should_ignore_error_category(f, category):
      ignored.append(category)
    else:
      categories.append(category)

  return categories, ignored, end_time
Example #6
  def test_delegation_token(self):
    # Grab a fake-signed delegation token.
    subtoken = delegation_pb2.Subtoken(
        delegated_identity='user:[email protected]',
        kind=delegation_pb2.Subtoken.BEARER_DELEGATION_TOKEN,
        audience=['*'],
        services=['*'],
        creation_time=int(utils.time_time()),
        validity_duration=3600)
    tok_pb = delegation_pb2.DelegationToken(
      serialized_subtoken=subtoken.SerializeToString(),
      signer_id='user:[email protected]',
      signing_key_id='signing-key',
      pkcs1_sha256_sig='fake-signature')
    tok = tokens.base64_encode(tok_pb.SerializeToString())

    # Valid delegation token.
    state, ctx = self.call(
        'ipv4:127.0.0.1', '*****@*****.**', {'X-Delegation-Token-V1': tok})
    self.assertEqual(state, CapturedState(
        current_identity='user:[email protected]',
        is_superuser=False,
        peer_identity='user:[email protected]',
        peer_ip=ipaddr.ip_from_string('127.0.0.1'),
        delegation_token=subtoken,
    ))

    # Invalid delegation token.
    state, ctx = self.call(
        'ipv4:127.0.0.1', '*****@*****.**', {'X-Delegation-Token-V1': tok + 'blah'})
    self.assertIsNone(state)
    self.assertEqual(ctx.code, prpclib.StatusCode.PERMISSION_DENIED)
    self.assertEqual(
        ctx.details, 'Bad delegation token: Bad proto: Truncated message.')
Example #7
def fake_subtoken_proto(delegated_identity='user:[email protected]', **kwargs):
    kwargs['delegated_identity'] = delegated_identity
    kwargs.setdefault('audience', ['*'])
    kwargs.setdefault('services', ['*'])
    kwargs.setdefault('creation_time', int(utils.time_time()))
    kwargs.setdefault('validity_duration', 3600)
    return delegation_pb2.Subtoken(**kwargs)
Example #8
    def test_updated_lease_expiration_ts(self):
        """Ensures an instance can be updated with a lease_expiration_ts."""
        now = int(utils.time_time())

        def retrieve_machine(*args, **kwargs):
            return {
                'lease_expiration_ts': str(now),
            }

        self.mock(catalog.machine_provider, 'retrieve_machine',
                  retrieve_machine)

        key = instances.get_instance_key(
            'base-name',
            'revision',
            'zone',
            'instance-name',
        )
        key = models.Instance(
            key=key,
            cataloged=True,
            instance_group_manager=instances.get_instance_group_manager_key(
                key),
        ).put()

        self.failIf(key.get().leased)
        catalog.update_cataloged_instance(key)
        self.failUnless(key.get().cataloged)
        self.assertEqual(key.get().lease_expiration_ts,
                         datetime.datetime.utcfromtimestamp(now))
        self.failUnless(key.get().leased)
        self.failIf(key.get().pending_deletion)
Example #9
  def generate_fetch_url(self, hash_algo, hash_digest):
    """Returns a signed URL that can be used to fetch an object.

    See https://developers.google.com/storage/docs/accesscontrol#Signed-URLs
    for more info about signed URLs.
    """
    assert is_valid_hash_digest(hash_algo, hash_digest)
    assert self.is_fetch_configured()

    # Generate the signature.
    gs_path = self._verified_gs_path(hash_algo, hash_digest)
    expires = str(int(utils.time_time() + FETCH_URL_EXPIRATION_SEC))
    to_sign = '\n'.join([
      'GET',
      '', # Content-MD5, not provided
      '', # Content-Type, not provided
      expires,
      gs_path,
    ])
    signature = self._rsa_sign(self._service_account_key.private_key, to_sign)

    # Generate the final URL.
    query_params = urllib.urlencode([
      ('GoogleAccessId', self._service_account_key.client_email),
      ('Expires', expires),
      ('Signature', signature),
    ])
    assert gs_path.startswith('/'), gs_path
    return 'https://storage.googleapis.com%s?%s' % (gs_path, query_params)
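
A hypothetical caller, for illustration: the signed URL carries its credentials in the query string, so any plain HTTP client can fetch the object until 'Expires' passes (the storage instance and digest value here are made up):

import urllib2

url = storage.generate_fetch_url('sha1', 'a' * 40)  # hypothetical arguments
blob = urllib2.urlopen(url).read()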
Example #10
    def generate_fetch_url(self, hash_algo, hash_digest):
        """Returns a signed URL that can be used to fetch an object.

    See https://developers.google.com/storage/docs/accesscontrol#Signed-URLs
    for more info about signed URLs.
    """
        assert is_valid_hash_digest(hash_algo, hash_digest)
        assert self.is_fetch_configured()

        # Generate the signature.
        gs_path = self._verified_gs_path(hash_algo, hash_digest)
        expires = str(int(utils.time_time() + FETCH_URL_EXPIRATION_SEC))
        to_sign = '\n'.join([
            'GET',
            '',  # Content-MD5, not provided
            '',  # Content-Type, not provided
            expires,
            gs_path,
        ])
        signature = self._rsa_sign(self._service_account_key.private_key,
                                   to_sign)

        # Generate the final URL.
        query_params = urllib.urlencode([
            ('GoogleAccessId', self._service_account_key.client_email),
            ('Expires', expires),
            ('Signature', signature),
        ])
        assert gs_path.startswith('/'), gs_path
        return 'https://storage.googleapis.com%s?%s' % (gs_path, query_params)
Example #11
def check_subtoken(subtoken, peer_identity):
    """Validates the delegation subtoken, extracts delegated_identity.

  Args:
    subtoken: instance of delegation_pb2.Subtoken.
    peer_identity: identity of whoever tries to use this token.

  Returns:
    Delegated Identity extracted from the token (if it is valid).

  Raises:
    BadTokenError if the token is invalid or not usable by peer_identity.
  """
    assert isinstance(subtoken, delegation_pb2.Subtoken)

    # Do fast failing checks before heavy ones.
    service_id = model.get_service_self_identity()
    check_subtoken_expiration(subtoken, int(utils.time_time()))
    check_subtoken_services(subtoken, service_id.to_bytes())

    # Verify caller can use the token, figure out a delegated identity.
    check_subtoken_audience(subtoken, peer_identity)
    try:
        return model.Identity.from_bytes(subtoken.delegated_identity)
    except ValueError as exc:
        raise BadTokenError('Invalid delegated_identity: %s' % exc)
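
check_subtoken_expiration is not part of the snippet. A minimal sketch of the check it presumably performs, consistent with the test_expired and test_not_active_yet examples below (MAX_CLOCK_DRIFT_SEC is an assumed constant):

MAX_CLOCK_DRIFT_SEC = 30  # assumed, not taken from the snippet


def check_subtoken_expiration(subtoken, now):
    if not subtoken.validity_duration:
        raise BadTokenError('Subtoken validity duration is not set')
    if subtoken.creation_time > now + MAX_CLOCK_DRIFT_SEC:
        raise BadTokenError('Subtoken is not active yet')
    if now > subtoken.creation_time + subtoken.validity_duration:
        raise BadTokenError('Subtoken has expired')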
Example #12
 def test_cant_be_used_after_exp(self):
     past = utils.time_time() - tokens.ALLOWED_CLOCK_DRIFT_SEC - 1
     _, _, jwt = self.make_good_jwt(iat=past - 3600, exp=past)
     with self.assertRaises(tokens.InvalidTokenError) as err:
         tokens.verify_jwt(jwt, self.mock_certs_bundle())
     self.assertIn('Bad JWT: expired (now 1514768461 > exp 1514768430)',
                   err.exception.message)
Example #13
    def test_delegation_token(self):
        # No delegation.
        self.assertEqual(
            {
                'cur_id': 'user:[email protected]',
                'peer_id': 'user:[email protected]'
            }, self.call_with_tokens())

        # Grab a fake-signed delegation token.
        subtoken = delegation_pb2.Subtoken(
            delegated_identity='user:[email protected]',
            kind=delegation_pb2.Subtoken.BEARER_DELEGATION_TOKEN,
            audience=['*'],
            services=['*'],
            creation_time=int(utils.time_time()),
            validity_duration=3600)
        tok_pb = delegation_pb2.DelegationToken(
            serialized_subtoken=subtoken.SerializeToString(),
            signer_id='user:[email protected]',
            signing_key_id='signing-key',
            pkcs1_sha256_sig='fake-signature')
        tok = tokens.base64_encode(tok_pb.SerializeToString())

        # Valid delegation token.
        self.assertEqual(
            {
                'cur_id': 'user:[email protected]',
                'peer_id': 'user:[email protected]'
            }, self.call_with_tokens(delegation_tok=tok))

        # Invalid delegation token.
        with self.assertRaises(api.AuthorizationError):
            self.call_with_tokens(delegation_tok=tok + 'blah')
Example #14
 def test_expired(self):
     now = int(utils.time_time())
     tok = fake_subtoken_proto('user:[email protected]',
                               creation_time=now - 120,
                               validity_duration=60)
     with self.assertRaises(delegation.BadTokenError):
         delegation.check_subtoken(tok, FAKE_IDENT, api.AuthDB())
Example #15
 def test_cant_be_used_before_iat(self):
     future = utils.time_time() + tokens.ALLOWED_CLOCK_DRIFT_SEC + 1
     _, _, jwt = self.make_good_jwt(iat=future, exp=future + 3600)
     with self.assertRaises(tokens.InvalidTokenError) as err:
         tokens.verify_jwt(jwt, self.mock_certs_bundle())
     self.assertIn('Bad JWT: too early (now 1514768461 < nbf 1514768492)',
                   err.exception.message)
Example #16
def _fetch_bot_groups():
    """Loads bots.cfg and parses it into _BotGroups struct.

  If bots.cfg doesn't exist, returns a default config that allows any caller
  from the 'bots' IP whitelist to act as a bot.

  Caches the loaded bot config internally.

  Returns:
    _BotGroups with pre-processed bots.cfg ready for serving.

  Raises:
    BadConfigError if there's no cached config and the current config at HEAD is
    not passing validation.
  """
    cfg = _cache.get_cfg_if_fresh()
    if cfg:
        logging.info('Using cached bots.cfg at rev %s', cfg.rev)
        return cfg

    with _cache.lock:
        # Maybe someone refreshed it already?
        cfg = _cache.get_cfg_if_fresh()
        if cfg:
            logging.info('Using cached bots.cfg at rev %s', cfg.rev)
            return cfg

        # Nothing is known yet? Block everyone (by holding the lock) until we get
        # a result, there's no other choice.
        known_cfg, exp = _cache.cfg_and_exp or (None, None)
        if not known_cfg:
            cfg = _do_fetch_bot_groups(None)
            _cache.set_cfg(cfg)
            return cfg

        # Someone is already refreshing the cache? Let them finish.
        if _cache.fetcher_thread is not None:
            logging.warning(
                'Using stale cached bots.cfg at rev %s while another thread is '
                'refreshing it. Cache expired %.1f sec ago.', known_cfg.rev,
                utils.time_time() - exp)
            return known_cfg

        # Ok, we'll do it, outside the lock.
        tid = threading.current_thread()
        _cache.fetcher_thread = tid

    cfg = None
    try:
        cfg = _do_fetch_bot_groups(known_cfg)
        return cfg
    finally:
        with _cache.lock:
            # 'fetcher_thread' may be different if _cache.reset() was used while we
            # were fetching. Ignore the result in this case.
            if _cache.fetcher_thread is tid:
                _cache.fetcher_thread = None
                if cfg:  # may be None on exceptions
                    _cache.set_cfg(cfg)
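
The snippet above manipulates a module-global _cache whose shape is implied but never shown. A sketch consistent with this code and with get_cfg_if_fresh in Example #24 below (the field names come from the snippets; everything else is an assumption):

import threading

class _BotGroupsCache(object):
    def __init__(self):
        self.lock = threading.Lock()
        self.cfg_and_exp = None     # (cfg, expiry timestamp) tuple, or None
        self.fetcher_thread = None  # thread currently refreshing the config

_cache = _BotGroupsCache()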
Example #17
    def test_delegation_token(self):
        peer_ident = model.Identity.from_bytes('user:[email protected]')

        class Handler(handler.AuthenticatingHandler):
            @classmethod
            def get_auth_methods(cls, conf):
                return [lambda _request: peer_ident]

            @api.public
            def get(self):
                self.response.write(
                    json.dumps({
                        'peer_id': api.get_peer_identity().to_bytes(),
                        'cur_id': api.get_current_identity().to_bytes(),
                    }))

        app = self.make_test_app('/request', Handler)

        def call(headers=None):
            return json.loads(app.get('/request', headers=headers).body)

        # No delegation.
        self.assertEqual(
            {
                u'cur_id': u'user:[email protected]',
                u'peer_id': u'user:[email protected]'
            }, call())

        # TODO(vadimsh): Mint token via some high-level function call.
        subtoken = delegation_pb2.Subtoken(
            delegated_identity='user:[email protected]',
            audience=['*'],
            services=['*'],
            creation_time=int(utils.time_time()),
            validity_duration=3600)
        tok = delegation.serialize_token(delegation.seal_token(subtoken))

        # With valid delegation token.
        self.assertEqual(
            {
                u'cur_id': u'user:[email protected]',
                u'peer_id': u'user:[email protected]'
            }, call({'X-Delegation-Token-V1': tok}))

        # With invalid delegation token.
        r = app.get('/request',
                    headers={'X-Delegation-Token-V1': tok + 'blah'},
                    expect_errors=True)
        self.assertEqual(403, r.status_int)

        # Transient error.
        def mocked_check(*_args):
            raise delegation.TransientError('Blah')

        self.mock(delegation, 'check_bearer_delegation_token', mocked_check)
        r = app.get('/request',
                    headers={'X-Delegation-Token-V1': tok},
                    expect_errors=True)
        self.assertEqual(500, r.status_int)
Example #18
 def test_not_active_yet(self):
     now = int(utils.time_time())
     toks = delegation_pb2.SubtokenList(subtokens=[
         fake_subtoken_proto('user:[email protected]',
                             creation_time=now + 120),
     ])
     with self.assertRaises(delegation.BadTokenError):
         delegation.check_subtoken_list(toks, FAKE_IDENT)
Example #19
 def test_expired(self):
   now = int(utils.time_time())
   toks = delegation_pb2.SubtokenList(subtokens=[
     fake_subtoken_proto(
         'user:[email protected]', creation_time=now-120, validity_duration=60),
   ])
   with self.assertRaises(delegation.BadTokenError):
     delegation.check_subtoken_list(toks, FAKE_IDENT)
Example #20
def _mint_jwt_based_token_async(scopes, signer):
  """Creates new access token given a JWT signer."""
  # For more info see:
  # * https://developers.google.com/accounts/docs/OAuth2ServiceAccount.

  # Prepare a claim set to be signed by the service account key. Note that
  # Google backends seem to ignore the 'exp' field and always give one-hour
  # long tokens, so we just always request a 1h long token too.
  #
  # Also revert time back a tiny bit, for the sake of machines whose time is
  # not perfectly in sync with global time. If the client machine's time is in
  # the future according to the Google server clock, the access token request
  # will be denied. It doesn't complain about a slightly late clock though.
  logging.info(
    'Refreshing the access token for %s with scopes %s',
    signer.email, scopes)

  now = int(utils.time_time()) - 5
  jwt = yield signer.sign_claimset_async({
    'aud': 'https://www.googleapis.com/oauth2/v4/token',
    'exp': now + 3600,
    'iat': now,
    'iss': signer.email,
    'jti': _b64_encode(os.urandom(16)),
    'scope': ' '.join(scopes),
  })

  # URL encoded body of a token request.
  request_body = urllib.urlencode({
    'grant_type': 'urn:ietf:params:oauth:grant-type:jwt-bearer',
    'assertion': jwt,
  })

  # Exchange signed claimset for an access token.
  token = yield _call_async(
      url='https://www.googleapis.com/oauth2/v4/token',
      payload=request_body,
      method='POST',
      headers={
        'Accept': 'application/json',
        'Content-Type': 'application/x-www-form-urlencoded',
      })
  raise ndb.Return({
    'access_token': str(token['access_token']),
    'exp_ts': int(utils.time_time() + token['expires_in'])
  })
Example #21
 def test_not_active_yet(self):
   now = int(utils.time_time())
   toks = delegation_pb2.SubtokenList(subtokens=[
     fake_subtoken_proto(
         'user:[email protected]', creation_time=now+120),
   ])
   with self.assertRaises(delegation.BadTokenError):
     delegation.check_subtoken_list(toks, FAKE_IDENT)
Example #22
 def good_body(self):
   return machine_token_pb2.MachineTokenBody(
       machine_fqdn='some-machine.host',
       issued_by='*****@*****.**',
       issued_at=int(utils.time_time()),
       lifetime=3600,
       ca_id=1,
       cert_sn=3456)
Example #23
 def test_expired(self):
     now = int(utils.time_time())
     toks = delegation_pb2.SubtokenList(subtokens=[
         fake_subtoken_proto('user:[email protected]',
                             creation_time=now - 120,
                             validity_duration=60),
     ])
     with self.assertRaises(delegation.BadTokenError):
         delegation.check_subtoken_list(toks, FAKE_IDENT)
Example #24
 def get_cfg_if_fresh(self):
     """Returns cached _BotGroups if it is still fresh or None if not."""
     # We allow this to be executed outside the lock. We assume here that when a
     # change to self.cfg_and_exp field is visible to other threads, all changes
     # to the tuple itself are also already visible. This is safe in Python,
     # there's no memory write reordering there.
     tp = self.cfg_and_exp
     if tp and tp[1] > utils.time_time():
         return tp[0]
     return None
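
The writer side of this cache is not shown; a minimal sketch of what set_cfg presumably does, under the same visibility assumption described in the comment above (_EXPIRATION_SEC is an assumed constant):

_EXPIRATION_SEC = 60  # assumed; the real refresh period is not in the snippet

def set_cfg(self, cfg):
    with self.lock:
        # Publish (cfg, expiry) as one tuple so a reader racing with this
        # write sees either the old pair or the new pair, never a mix.
        self.cfg_and_exp = (cfg, utils.time_time() + _EXPIRATION_SEC)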
Example #25
 def _do_request(req, *args, **kwargs):
   entry = RequestLog()
   _request_logs.append(entry)
   response = None
   try:
     response = _old_request(req, *args, **kwargs)
     return response
   finally:
     entry.status = response.status_code if response else 503
     entry.response_size = response.content_length if response else 0
     entry.end_time = utils.time_time()
Example #26
 def _do_request(req, *args, **kwargs):
     entry = RequestLog()
     _request_logs.append(entry)
     response = None
     try:
         response = _old_request(req, *args, **kwargs)
         return response
     finally:
         entry.status = response.status_code if response else 503
         entry.response_size = response.content_length if response else 0
         entry.end_time = utils.time_time()
Example #27
  def test_delegation_token(self):
    peer_ident = model.Identity.from_bytes('user:[email protected]')

    class Handler(handler.AuthenticatingHandler):
      @classmethod
      def get_auth_methods(cls, conf):
        return [lambda _request: peer_ident]

      @api.public
      def get(self):
        self.response.write(json.dumps({
          'peer_id': api.get_peer_identity().to_bytes(),
          'cur_id': api.get_current_identity().to_bytes(),
        }))

    app = self.make_test_app('/request', Handler)
    def call(headers=None):
      return json.loads(app.get('/request', headers=headers).body)

    # No delegation.
    self.assertEqual(
        {u'cur_id': u'user:[email protected]', u'peer_id': u'user:[email protected]'}, call())

    # TODO(vadimsh): Mint token via some high-level function call.
    subtokens = delegation_pb2.SubtokenList(subtokens=[
        delegation_pb2.Subtoken(
            issuer_id='user:[email protected]',
            creation_time=int(utils.time_time()),
            validity_duration=3600),
    ])
    tok = delegation.serialize_token(delegation.seal_token(subtokens))

    # With valid delegation token.
    self.assertEqual(
        {u'cur_id': u'user:[email protected]', u'peer_id': u'user:[email protected]'},
        call({'X-Delegation-Token-V1': tok}))

    # With invalid delegation token.
    r = app.get(
        '/request',
        headers={'X-Delegation-Token-V1': tok + 'blah'},
        expect_errors=True)
    self.assertEqual(403, r.status_int)

    # Transient error.
    def mocked_check(*_args):
      raise delegation.TransientError('Blah')
    self.mock(delegation, 'check_delegation_token', mocked_check)
    r = app.get(
        '/request',
        headers={'X-Delegation-Token-V1': tok},
        expect_errors=True)
    self.assertEqual(500, r.status_int)
Example #28
 def test_poll_enough_time(self):
     # Successfully poll a task.
     self.mock(random, 'getrandbits', lambda _: 0x88)
     now = datetime.datetime(2010, 1, 2, 3, 4, 5)
     self.mock_now(now)
     _, task_id = self.client_create_task_isolated()
     self.assertEqual('0', task_id[-1])
     token, params = self.get_bot_token()
     params['state']['lease_expiration_ts'] = (int(utils.time_time()) +
                                               3600 + 1200 + 3 * 30 + 10 +
                                               1)
     response = self.post_with_token('/swarming/api/v1/bot/poll', params,
                                     token)
     # Convert TaskResultSummary reference to TaskRunResult.
     task_id = task_id[:-1] + '1'
     expected = {
         u'cmd': u'run',
         u'manifest': {
             u'bot_id': u'bot1',
             u'command': None,
             u'dimensions': {
                 u'os': u'Amiga',
                 u'pool': u'default',
             },
             u'env': {},
             u'extra_args': [],
             u'hard_timeout': 3600,
             u'grace_period': 30,
             u'host': u'http://localhost:8080',
             u'inputs_ref': {
                 u'isolated': u'0123456789012345678901234567890123456789',
                 u'isolatedserver': u'http://localhost:1',
                 u'namespace': u'default-gzip',
             },
             u'io_timeout': 1200,
             u'packages': [{
                 u'package_name': u'rm',
                 u'version': test_env_handlers.PINNED_PACKAGE_VERSION,
             }],
             u'task_id': task_id,
         },
     }
     self.assertEqual(expected, response)
Example #29
 def test_poll_enough_time(self):
   # Successfully poll a task.
   self.mock(random, 'getrandbits', lambda _: 0x88)
   now = datetime.datetime(2010, 1, 2, 3, 4, 5)
   self.mock_now(now)
   _, task_id = self.client_create_task_isolated()
   self.assertEqual('0', task_id[-1])
   params = self.do_handshake()
   params['state']['lease_expiration_ts'] = (
       int(utils.time_time()) + 3600 + 1200 + 3 * 30 + 10 + 1)
   response = self.post_json('/swarming/api/v1/bot/poll', params)
   # Convert TaskResultSummary reference to TaskRunResult.
   task_id = task_id[:-1] + '1'
   expected = {
     u'cmd': u'run',
     u'manifest': {
       u'bot_id': u'bot1',
       u'caches': [],
       u'cipd_input': {
         u'client_package': {
           u'package_name': u'infra/tools/cipd/${platform}',
           u'path': None,
           u'version': u'git_revision:deadbeef',
         },
         u'packages': [{
           u'package_name': u'rm',
           u'path': u'bin',
           u'version': u'git_revision:deadbeef',
         }],
         u'server': u'https://chrome-infra-packages.appspot.com',
       },
       u'command': None,
       u'dimensions': {
         u'os': u'Amiga',
         u'pool': u'default',
       },
       u'env': {},
       u'extra_args': [],
       u'hard_timeout': 3600,
       u'grace_period': 30,
       u'host': u'http://localhost:8080',
       u'isolated': {
         u'input': u'0123456789012345678901234567890123456789',
         u'server': u'http://localhost:1',
         u'namespace': u'default-gzip',
       },
       u'io_timeout': 1200,
       u'service_account': u'none',
       u'task_id': task_id,
     },
   }
   self.assertEqual(expected, response)
Example #30
def _check_and_log_token(flavor, account_email, token):
    """Checks the lifetime and logs details about the generated access token."""
    expires_in = token.expiry - utils.time_time()
    logging.info(
        'Got %s access token: email=%s, fingerprint=%s, expiry=%d, expiry_in=%d',
        flavor, account_email, utils.get_token_fingerprint(token.access_token),
        token.expiry, expires_in)
    # Give 2 min of wiggle room to account for various effects related to
    # relativity of clocks (us vs Google backends that produce the token) and
    # indeterminism of network propagation delays. 2 min should be more than
    # enough to account for them. These asserts should never be hit.
    assert expires_in < MAX_TOKEN_LIFETIME_SEC + 60
    assert expires_in > MIN_TOKEN_LIFETIME_SEC - 60
Example #31
    def post(self):
        # Forbid usage of delegation tokens for this particular call. Using
        # delegation when creating delegation tokens is too deep. Redelegation
        # will be done as a separate explicit API call that accepts an existing
        # delegation token via the request body, not via headers.
        if auth.get_current_identity() != auth.get_peer_identity():
            raise auth.AuthorizationError(
                'This API call must not be used with active delegation token')

        # Convert request body to proto (with validation). Verify IP format.
        try:
            body = self.parse_body()
            subtoken = subtoken_from_jsonish(body)
            intent = body.get('intent') or ''
            if not isinstance(intent, basestring):
                raise TypeError('"intent" must be string')
        except (TypeError, ValueError) as exc:
            self.abort_with_error(400, text=str(exc))

        # Fill in defaults.
        assert not subtoken.requestor_identity
        user_id = auth.get_current_identity().to_bytes()
        subtoken.requestor_identity = user_id
        if not subtoken.delegated_identity:
            subtoken.delegated_identity = user_id
        subtoken.creation_time = int(utils.time_time())
        if not subtoken.validity_duration:
            subtoken.validity_duration = DEF_VALIDITY_DURATION_SEC
        if '*' in subtoken.services:
            subtoken.services[:] = get_default_allowed_services(user_id)

        # Check ACL (raises auth.AuthorizationError on errors).
        rule = check_can_create_token(user_id, subtoken)

        # Register the token in the datastore, generate its ID.
        subtoken.subtoken_id = register_subtoken(subtoken, rule, intent,
                                                 auth.get_peer_ip())

        # Create and sign the token.
        try:
            token = delegation.serialize_token(delegation.seal_token(subtoken))
        except delegation.BadTokenError as exc:
            # This happens if resulting token is too large.
            self.abort_with_error(400, text=str(exc))

        self.send_response(
            response={
                'delegation_token': token,
                'subtoken_id': str(subtoken.subtoken_id),
                'validity_duration': subtoken.validity_duration,
            },
            http_code=201)
Example #32
    def test_delegation_token(self):
        call = self.make_test_app_with_peer('user:[email protected]')

        # No delegation.
        self.assertEqual(
            {
                'status': 200,
                'body': {
                    u'cur_id': u'user:[email protected]',
                    u'peer_id': u'user:[email protected]',
                },
            }, call())

        # Grab a fake-signed delegation token.
        subtoken = delegation_pb2.Subtoken(
            delegated_identity='user:[email protected]',
            kind=delegation_pb2.Subtoken.BEARER_DELEGATION_TOKEN,
            audience=['*'],
            services=['*'],
            creation_time=int(utils.time_time()),
            validity_duration=3600)
        tok_pb = delegation_pb2.DelegationToken(
            serialized_subtoken=subtoken.SerializeToString(),
            signer_id='user:[email protected]',
            signing_key_id='signing-key',
            pkcs1_sha256_sig='fake-signature')
        tok = b64.encode(tok_pb.SerializeToString())

        # With valid delegation token.
        self.assertEqual(
            {
                'status': 200,
                'body': {
                    u'cur_id': u'user:[email protected]',
                    u'peer_id': u'user:[email protected]',
                },
            }, call({'X-Delegation-Token-V1': tok}))

        # With invalid delegation token.
        resp = call({'X-Delegation-Token-V1': tok + 'blah'})
        self.assertEqual(403, resp['status'])
        self.assertIn('Bad delegation token', resp['body'])

        # Transient error.
        def mocked_check(*_args):
            raise delegation.TransientError('Blah')

        self.mock(delegation, 'check_bearer_delegation_token', mocked_check)
        resp = call({'X-Delegation-Token-V1': tok})
        self.assertEqual(500, resp['status'])
        self.assertIn('Blah', resp['body'])
Example #33
    def test_yield_entries(self):
        stats_framework_logs_mock.reset_timestamp(self.h, self.now)

        self.assertEqual(0, len(list(stats_logs.yield_entries(None, None))))
        self.assertEqual(0, len(list(stats_logs.yield_entries(1,
                                                              time.time()))))

        self.assertEqual('Yay', self.app.get('/generate').body)

        self.assertEqual(1, len(list(stats_logs.yield_entries(None, None))))
        self.assertEqual(1, len(list(stats_logs.yield_entries(1,
                                                              time.time()))))
        self.assertEqual(
            0, len(list(stats_logs.yield_entries(None, utils.time_time()))))
Example #34
    def setUp(self):
        """Initializes the commonly used stubs.

    Using init_all_stubs() costs ~10ms more to run all the tests so only enable
    the ones known to be required. Test cases requiring more stubs can enable
    them in their setUp() function.
    """
        super(TestCase, self).setUp()
        self.testbed = testbed.Testbed()
        self.testbed.activate()

        # If you have a NeedIndexError, here is the switch you need to flip to
        # have the new required indexes added automatically. Change
        # train_index_yaml to True to have index.yaml automatically updated,
        # then run your test case. Do not forget to set it back to False.
        train_index_yaml = False

        if self.SKIP_INDEX_YAML_CHECK:
            # See comment for skip_index_yaml_check above.
            self.assertIsNone(self.APP_DIR)

        self.testbed.init_app_identity_stub()
        self.testbed.init_datastore_v3_stub(
            require_indexes=not train_index_yaml
            and not self.SKIP_INDEX_YAML_CHECK,
            root_path=self.APP_DIR,
            consistency_policy=datastore_stub_util.
            PseudoRandomHRConsistencyPolicy(probability=1))
        self.testbed.init_logservice_stub()
        self.testbed.init_memcache_stub()
        self.testbed.init_modules_stub()

        # Use mocked time in memcache.
        memcache = self.testbed.get_stub(testbed.MEMCACHE_SERVICE_NAME)
        memcache._gettime = lambda: int(utils.time_time())

        # Email support.
        self.testbed.init_mail_stub()
        self.mail_stub = self.testbed.get_stub(testbed.MAIL_SERVICE_NAME)
        self.old_send_to_admins = self.mock(self.mail_stub,
                                            '_Dynamic_SendToAdmins',
                                            self._SendToAdmins)

        self.testbed.init_taskqueue_stub()
        self._taskqueue_stub = self.testbed.get_stub(
            testbed.TASKQUEUE_SERVICE_NAME)
        self._taskqueue_stub._root_path = self.APP_DIR

        self.testbed.init_user_stub()
Example #35
def _get_dev_server_token():
  """Reads token from DevServerAccessToken entity."""
  assert utils.is_local_dev_server()
  token = DevServerAccessToken.get_or_insert('access_token')

  # Dump the token URL to the log, so that it is easy to find and change.
  edit_url = 'http://localhost:8000/datastore/edit/%s' % token.key.urlsafe()
  logging.info('Using token from %s', edit_url)

  if not token.access_token:
    raise AccessTokenError(
        'Dev server access token is not initialized: %s' % edit_url)

  # Fake expiration time as 5 min from now.
  return token.access_token, utils.time_time() + 300
Example #36
def _get_dev_server_token():
    """Reads token from DevServerAccessToken entity."""
    assert utils.is_local_dev_server()
    token = DevServerAccessToken.get_or_insert('access_token')

    # Dump the token URL to the log, so that it is easy to find and change.
    edit_url = 'http://localhost:8000/datastore/edit/%s' % token.key.urlsafe()
    logging.info('Using token from %s', edit_url)

    if not token.access_token:
        raise AccessTokenError(
            'Dev server access token is not initialized: %s' % edit_url)

    # Fake expiration time as 5 min from now.
    return token.access_token, utils.time_time() + 300
Example #37
  def post(self):
    # Forbid usage of delegation tokens for this particular call. Using
    # delegation when creating delegation tokens is too deep. Redelegation will
    # be done as a separate explicit API call that accepts an existing
    # delegation token via the request body, not via headers.
    if auth.get_current_identity() != auth.get_peer_identity():
      raise auth.AuthorizationError(
          'This API call must not be used with active delegation token')

    # Convert request body to proto (with validation).
    try:
      subtoken = subtoken_from_jsonish(self.parse_body())
    except (TypeError, ValueError) as exc:
      self.abort_with_error(400, text=str(exc))

    # Fill in defaults.
    assert not subtoken.impersonator_id
    user_id = auth.get_current_identity().to_bytes()
    if not subtoken.issuer_id:
      subtoken.issuer_id = user_id
    if subtoken.issuer_id != user_id:
      subtoken.impersonator_id = user_id
    subtoken.creation_time = int(utils.time_time())
    if not subtoken.validity_duration:
      subtoken.validity_duration = DEF_VALIDITY_DURATION_SEC
    if not subtoken.services or '*' in subtoken.services:
      subtoken.services[:] = get_default_allowed_services(user_id)

    # Check ACL (raises auth.AuthorizationError on errors).
    check_can_create_token(user_id, subtoken)

    # Create and sign the token.
    try:
      token = delegation.serialize_token(
          delegation.seal_token(
              delegation_pb2.SubtokenList(subtokens=[subtoken])))
    except delegation.BadTokenError as exc:
      # This happens if resulting token is too large.
      self.abort_with_error(400, text=str(exc))

    self.send_response(
        response={
          'delegation_token': token,
          'validity_duration': subtoken.validity_duration,
        },
        http_code=201)
Example #38
  def test_yield_entries(self):
    stats_framework_mock.reset_timestamp(self.h, self.now)

    self.assertEqual(
        0, len(list(stats_framework.yield_entries(None, None))))
    self.assertEqual(
        0, len(list(stats_framework.yield_entries(1, time.time()))))

    self.assertEqual('Yay', self.app.get('/generate').body)

    self.assertEqual(
        1, len(list(stats_framework.yield_entries(None, None))))
    self.assertEqual(
        1, len(list(stats_framework.yield_entries(1, time.time()))))
    self.assertEqual(
        0, len(list(stats_framework.yield_entries(
          None, utils.time_time()))))
Example #39
  def test_token(self):
    calls = []
    def mocked(**kwargs):
      calls.append(kwargs)
      return 'fake-token', utils.time_time() + 3600
    self.mock(auth, 'get_access_token', mocked)

    tok = service_accounts.AccessToken('fake-token', utils.time_time() + 3600)
    self.assertEqual(
        ('*****@*****.**', tok),
        service_accounts.get_system_account_token('*****@*****.**', ['scope']))

    self.assertEqual([{
        'act_as': '*****@*****.**',
        'min_lifetime_sec': service_accounts.MIN_TOKEN_LIFETIME_SEC,
        'scopes': ['scope'],
    }], calls)
Example #40
    def post(self):
        # Forbid usage of delegation tokens for this particular call. Using
        # delegation when creating delegation tokens is too deep. Redelegation
        # will be done as a separate explicit API call that accepts an existing
        # delegation token via the request body, not via headers.
        if auth.get_current_identity() != auth.get_peer_identity():
            raise auth.AuthorizationError(
                'This API call must not be used with active delegation token')

        # Convert request body to proto (with validation).
        try:
            subtoken = subtoken_from_jsonish(self.parse_body())
        except (TypeError, ValueError) as exc:
            self.abort_with_error(400, text=str(exc))

        # Fill in defaults.
        assert not subtoken.impersonator_id
        user_id = auth.get_current_identity().to_bytes()
        if not subtoken.issuer_id:
            subtoken.issuer_id = user_id
        if subtoken.issuer_id != user_id:
            subtoken.impersonator_id = user_id
        subtoken.creation_time = int(utils.time_time())
        if not subtoken.validity_duration:
            subtoken.validity_duration = DEF_VALIDITY_DURATION_SEC
        if not subtoken.services or '*' in subtoken.services:
            subtoken.services[:] = get_default_allowed_services(user_id)

        # Check ACL (raises auth.AuthorizationError on errors).
        check_can_create_token(user_id, subtoken)

        # Create and sign the token.
        try:
            token = delegation.serialize_token(
                delegation.seal_token(
                    delegation_pb2.SubtokenList(subtokens=[subtoken])))
        except delegation.BadTokenError as exc:
            # This happens if resulting token is too large.
            self.abort_with_error(400, text=str(exc))

        self.send_response(
            response={
                'delegation_token': token,
                'validity_duration': subtoken.validity_duration,
            },
            http_code=201)
Example #41
def _get_jwt_based_token(scopes, service_account_key):
  """Returns token for @developer.gserviceaccount.com service account."""
  # Derive memcache key from scopes and private_key_id.
  if isinstance(scopes, basestring):
    scopes = [scopes]
  assert all('@' not in scope for scope in scopes), scopes
  assert '@' not in service_account_key.private_key_id, service_account_key
  cache_key = 'access_token@%s@%s' % (
      ' '.join(scopes), service_account_key.private_key_id)

  # Randomize refresh time to avoid thundering herd effect when token expires.
  token_info = memcache.get(cache_key)
  should_refresh = (
      token_info is None or
      token_info['exp_ts'] - utils.time_time() < random.randint(300, 600))
  if should_refresh:
    token_info = _mint_jwt_based_token(scopes, service_account_key)
    memcache.set(cache_key, token_info, token_info['exp_ts'])
  return token_info['access_token'], token_info['exp_ts']
Example #42
 def test_poll_enough_time(self):
   # Successfully poll a task.
   self.mock(random, 'getrandbits', lambda _: 0x88)
   now = datetime.datetime(2010, 1, 2, 3, 4, 5)
   self.mock_now(now)
   _, task_id = self.client_create_task_isolated()
   self.assertEqual('0', task_id[-1])
   token, params = self.get_bot_token()
   params['state']['lease_expiration_ts'] = (
       int(utils.time_time()) + 3600 + 1200 + 3 * 30 + 10 + 1)
   response = self.post_with_token('/swarming/api/v1/bot/poll', params, token)
   # Convert TaskResultSummary reference to TaskRunResult.
   task_id = task_id[:-1] + '1'
   expected = {
     u'cmd': u'run',
     u'manifest': {
       u'bot_id': u'bot1',
       u'command': None,
       u'dimensions': {
         u'os': u'Amiga',
         u'pool': u'default',
       },
       u'env': {},
       u'extra_args': [],
       u'hard_timeout': 3600,
       u'grace_period': 30,
       u'host': u'http://localhost:8080',
       u'inputs_ref': {
         u'isolated': u'0123456789012345678901234567890123456789',
         u'isolatedserver': u'http://localhost:1',
         u'namespace': u'default-gzip',
       },
       u'io_timeout': 1200,
       u'packages': [{
         u'package_name': u'rm',
         u'version': test_env_handlers.PINNED_PACKAGE_VERSION,
       }],
       u'task_id': task_id,
     },
   }
   self.assertEqual(expected, response)
Example #43
 def test_yield_next_available_task_to_run_task_meets_deadline(self):
   request_dimensions = {
     u'OS': u'Windows-3.1.1',
     u'hostname': u'localhost',
     u'foo': u'bar',
     u'pool': u'default',
   }
   _gen_new_task_to_run(
       properties=dict(dimensions=request_dimensions))
   # Bot declares exactly same dimensions so it matches.
   bot_dimensions = request_dimensions
   actual = _yield_next_available_task_to_dispatch(
       bot_dimensions, utils.time_time() + 86400 + 600 + 3 * 30 + 10 + 1)
   expected = [
     {
       'dimensions_hash': _hash_dimensions(request_dimensions),
       'expiration_ts': self.expiration_ts,
       'queue_number': '0x000a890b67ba1346',
     },
   ]
   self.assertEqual(expected, actual)
Example #44
def check_subtoken_list(subtokens, peer_identity):
  """Validates the chain of delegation subtokens, extracts original issuer_id.

  Args:
    subtokens: instance of delegation_pb2.SubtokenList with at least one token.
    peer_identity: identity of whoever tries to use this token chain.

  Returns:
    Delegated Identity extracted from the token chain (if it is valid).

  Raises:
    BadTokenError if token chain is invalid or not usable by peer_identity.
  """
  assert isinstance(subtokens, delegation_pb2.SubtokenList)
  toks = subtokens.subtokens
  if not toks:
    raise BadTokenError('Subtoken list is empty')
  if len(toks) > MAX_SUBTOKEN_LIST_LEN:
    raise BadTokenError(
        'Subtoken list is too long (%d tokens, max is %d)' %
        (len(toks), MAX_SUBTOKEN_LIST_LEN))

  # Do fast failing checks before heavy ones.
  now = int(utils.time_time())
  service_id = model.get_service_self_identity().to_bytes()
  for tok in toks:
    check_subtoken_expiration(tok, now)
    check_subtoken_services(tok, service_id)

  # Figure out delegated identity by following delegation chain.
  current_identity = peer_identity
  for tok in reversed(toks):
    check_subtoken_audience(tok, current_identity)
    try:
      current_identity = model.Identity.from_bytes(tok.issuer_id)
    except ValueError as exc:
      raise BadTokenError('Invalid issuer_id: %s' % exc)
  return current_identity
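
check_subtoken_services is also not shown. A sketch of what it presumably checks, matching the '*' wildcard convention used throughout these examples (sketch only; the exact semantics are an assumption):

def check_subtoken_services(subtoken, service_id):
    # A token restricted to specific services must list this one; '*' is
    # assumed to mean any service.
    if subtoken.services and '*' not in subtoken.services:
        if service_id not in subtoken.services:
            raise BadTokenError('The token is not intended for %s' % service_id)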
Example #45
  def generate(cls, message=None, embedded=None, expiration_sec=None):
    """Generates a token that contains MAC tag for |message|.

    Args:
      message: single string or list of strings to tag with MAC. It should be
          the same as one used to validate the token. It's not embedded into the
          token. See also 'validate' below.
      embedded: dict with additional data to add to token. It is embedded
          directly into the token and can be easily extracted from it by anyone
          who has the token. Should be used only for publicly visible data.
          It is tagged by token's MAC, so 'validate' function can detect
          any modifications (and reject tokens tampered with).
      expiration_sec: how long token lives before considered expired, overrides
          default TokenKind.expiration_sec if present.

    Returns:
      URL safe base64 encoded token.
    """
    if not cls.is_configured():
      raise ValueError('Token parameters are invalid')

    # Convert all 'unicode' strings to 'str' in appropriate encoding.
    message = normalize_message(message) if message is not None else []
    embedded = normalize_embedded(embedded) if embedded else {}

    # Fetch an array of last values of secret key.
    secret = api.get_secret(cls.secret_key)
    assert secret

    # Append 'issued' timestamp (in milliseconds) and expiration time.
    embedded['_i'] = str(int(utils.time_time() * 1000))
    if expiration_sec is not None:
      assert expiration_sec > 0, expiration_sec
      embedded['_x'] = str(int(expiration_sec * 1000))

    # Encode token using most recent secret key value.
    return encode_token(cls.algo, cls.version, secret[0], message, embedded)
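
A hypothetical round trip showing how generate() pairs with validate() (validate is shown in Example #50). The XSRFToken subclass, its parameter values, and the api.SecretKey constructor are assumptions, not taken from the snippet:

class XSRFToken(TokenKind):
    algo = 'hmac-sha256'                      # assumed algorithm name
    expiration_sec = 3600
    secret_key = api.SecretKey('xsrf_token')  # assumed constructor
    version = 1

tok = XSRFToken.generate(message=['user:[email protected]'], embedded={'k': 'v'})
assert XSRFToken.validate(tok, message=['user:[email protected]']) == {'k': 'v'}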
Example #46
  def test_delegation_token(self):
    def call(tok=None):
      headers = {'X-Delegation-Token-V1': tok} if tok else None
      self.call('127.0.0.1', '*****@*****.**', headers)
      return {
        'cur_id': api.get_current_identity().to_bytes(),
        'peer_id': api.get_current_identity().to_bytes(),
      }

    # No delegation.
    self.assertEqual(
        {'cur_id': 'user:[email protected]', 'peer_id': 'user:[email protected]'}, call())

    # TODO(vadimsh): Mint token via some high-level function call.
    subtokens = delegation_pb2.SubtokenList(subtokens=[
        delegation_pb2.Subtoken(
            issuer_id='user:[email protected]',
            creation_time=int(utils.time_time()),
            validity_duration=3600),
    ])
    tok = delegation.serialize_token(delegation.seal_token(subtokens))

    # Valid delegation token.
    self.assertEqual(
        {'cur_id': 'user:[email protected]', 'peer_id': 'user:[email protected]'},
        call(tok))

    # Invalid delegation token.
    with self.assertRaises(api.AuthorizationError):
      call(tok + 'blah')

    # Transient error.
    def mocked_check(*_args):
      raise delegation.TransientError('Blah')
    self.mock(delegation, 'check_delegation_token', mocked_check)
    with self.assertRaises(endpoints.InternalServerErrorException):
      call(tok)
Example #47
def yield_next_available_task_to_dispatch(bot_dimensions, deadline):
  """Yields next available (TaskRequest, TaskToRun) in decreasing order of
  priority.

  Once the caller determines the task is suitable to execute, it must use
  reap_task_to_run(task.key) to mark that it is not to be scheduled anymore.

  Performance is the top priority here.

  Arguments:
  - bot_dimensions: dimensions (as a dict) defined by the bot that can be
      matched.
  - deadline: UTC timestamp (as an int) that the bot must be able to
      complete the task by. None if there is no such deadline.
  """
  # List of all the valid dimensions hashed.
  accepted_dimensions_hash = frozenset(
      _hash_dimensions(utils.encode_to_json(i))
      for i in _powerset(bot_dimensions))
  now = utils.utcnow()
  broken = 0
  cache_lookup = 0
  expired = 0
  hash_mismatch = 0
  ignored = 0
  no_queue = 0
  real_mismatch = 0
  too_long = 0
  total = 0
  # Be very aggressive in fetching as many items as possible. Note that we use
  # the default ndb.EVENTUAL_CONSISTENCY, so stale items may be returned. This
  # is handled specifically.
  # - 100/200 gives 2s~40s of query time for 1275 items.
  # - 250/500 gives 2s~50s of query time for 1275 items.
  # - 50/500 gives 3s~20s of query time for 1275 items. (Slower but less
  #   variance). Spikes in 20s~40s are rarer.
  # The problems here are:
  # - Outliers, some shards are simply slower at executing the query.
  # - Median time, which we should optimize.
  # - Abusing batching will slow down this query.
  #
  # TODO(maruel): Measure query performance with stats_framework!!
  # TODO(maruel): Use fetch_page_async() + ndb.get_multi_async() +
  # memcache.get_multi_async() to do pipelined processing. Should greatly reduce
  # the effect of latency on the total duration of this function. I also suspect
  # using ndb.get_multi() will return fresher objects than what is returned by
  # the query.
  opts = ndb.QueryOptions(batch_size=50, prefetch_size=500, keys_only=True)
  try:
    # Interestingly, the filter on .queue_number>0 is required otherwise all the
    # None items are returned first.
    q = TaskToRun.query(default_options=opts).order(
        TaskToRun.queue_number).filter(TaskToRun.queue_number > 0)
    for task_key in q:
      duration = (utils.utcnow() - now).total_seconds()
      if duration > 40.:
        # Stop searching after too long, since the odds of the request blowing
        # up right after succeeding in reaping a task are not worth the
        # dangling task request that would stay in limbo until the cron job
        # reaps and retries it. The current handlers are given 60s to
        # complete. Using 40s leaves 20s to complete the reaping and the HTTP
        # request.
        return

      total += 1
      # Verify TaskToRun is what is expected. Play defensive here.
      try:
        validate_to_run_key(task_key)
      except ValueError as e:
        logging.error(str(e))
        broken += 1
        continue

      # integer_id() == dimensions_hash.
      if task_key.integer_id() not in accepted_dimensions_hash:
        hash_mismatch += 1
        continue

      # Do this after the basic weeding out but before fetching TaskRequest.
      if _lookup_cache_is_taken(task_key):
        cache_lookup += 1
        continue

      # Ok, it's now worth taking a real look at the entity.
      task = task_key.get(use_cache=False)

      # DB operations are slow, double check memcache again.
      if _lookup_cache_is_taken(task_key):
        cache_lookup += 1
        continue

      # It is possible for the index to be inconsistent since it is not executed
      # in a transaction, no problem.
      if not task.queue_number:
        no_queue += 1
        continue

      # It expired. A cron job will cancel it eventually. Since 'now' is saved
      # before the query, a task may still be reaped even if technically
      # expired when the query is very slow. This is on purpose so slow
      # queries do not cause exaggerated expirations.
      if task.expiration_ts < now:
        expired += 1
        continue

      # The hash may have conflicts. Ensure the dimensions actually match by
      # verifying the TaskRequest. There's a probability of 2**-31 of
      # conflicts, which is low enough for our purpose. use_cache=False is
      # used because otherwise it'd create buffer bloat.
      request = task.request_key.get(use_cache=False)
      if not match_dimensions(request.properties.dimensions, bot_dimensions):
        real_mismatch += 1
        continue

      # If the bot has a deadline, don't allow it to reap the task unless it can
      # be completed before the deadline. We have to assume the task takes the
      # theoretical maximum amount of time possible, which is governed by
      # execution_timeout_secs. An isolated task's download phase is not subject
      # to this limit, so we need to add io_timeout_secs. When a task is
      # signalled that it's about to be killed, it receives a grace period as
      # well. grace_period_secs is given by run_isolated to the task execution
      # process, by task_runner to run_isolated, and by bot_main to the
      # task_runner. Lastly, add a few seconds to account for any overhead.
      if deadline is not None:
        if not request.properties.execution_timeout_secs:
          # Task never times out, so it cannot be accepted.
          too_long += 1
          continue
        max_task_time = (utils.time_time() +
                         request.properties.execution_timeout_secs +
                         (request.properties.io_timeout_secs or 600) +
                         3 * (request.properties.grace_period_secs or 30) +
                         10)
        if deadline <= max_task_time:
          too_long += 1
          continue

      # It's a valid task! Note that in the meantime, another bot may have
      # reaped it.
      yield request, task
      ignored += 1
  finally:
    duration = (utils.utcnow() - now).total_seconds()
    logging.info(
        '%d/%s in %5.2fs: %d total, %d exp, %d no_queue, %d hash mismatch, '
        '%d cache negative, %d dimensions mismatch, %d ignored, %d broken, '
        '%d not executable by deadline (UTC %s)',
        opts.batch_size,
        opts.prefetch_size,
        duration,
        total,
        expired,
        no_queue,
        hash_mismatch,
        cache_lookup,
        real_mismatch,
        ignored,
        broken,
        too_long,
        deadline)
Example #48
 def test_get_dev_server_token_present(self):
   service_account.DevServerAccessToken(
       id='access_token', access_token='blah').put()
   token, exp = service_account._get_dev_server_token()
   self.assertEqual('blah', token)
   self.assertGreaterEqual(exp, utils.time_time())
Example #49
def fake_subtoken_proto(issuer_id, **kwargs):
  kwargs['issuer_id'] = issuer_id
  kwargs.setdefault('creation_time', int(utils.time_time()))
  kwargs.setdefault('validity_duration', 3600)
  return delegation_pb2.Subtoken(**kwargs)
Example #50
  def validate(cls, token, message=None):
    """Checks token MAC and expiration, decodes data embedded into it.

    The following holds:
      token = TokenKind.generate(some_message, token_data)
      assert TokenKind.validate(token, some_message) == token_data

    Args:
      token: a token produced by 'generate' call.
      message: single string or list of strings that should be the same as one
          used to generate the token. If it's different, the token is considered
          invalid. It usually contains some implicitly passed state that should
          be the same when token is generated and validated. For example, it may
          be an account ID of current caller. Then if such token is used by
          another account, it is considered invalid.

    Returns:
      A dict with public data embedded into the token.

    Raises:
      InvalidTokenError if token is broken, tampered with or expired.
    """
    if not cls.is_configured():
      raise ValueError('Token parameters are invalid')

    # Convert all 'unicode' strings to 'str' in appropriate encoding.
    token = to_encoding(token, 'ascii')
    message = normalize_message(message) if message is not None else []

    # Fetch an array of last values of secret key.
    secret = api.get_secret(cls.secret_key)
    assert secret

    # Decode token, use any recent value of secret to validate MAC.
    version, embedded = decode_token(cls.algo, token, secret, message)

    # Versions should match.
    if version != cls.version:
      raise InvalidTokenError(
          'Bad token format version - expected %r, got %r' %
          (cls.version, version))

    # Grab a timestamp (in milliseconds) when token was issued.
    issued_ts = embedded.pop('_i', None)
    if issued_ts is None:
      raise InvalidTokenError('Bad token: missing issued timestamp')
    issued_ts = int(issued_ts)

    # Discard tokens from the future. Someone is messing with the clock.
    now = utils.time_time() * 1000
    if issued_ts > now + ALLOWED_CLOCK_DRIFT_SEC * 1000:
      raise InvalidTokenError('Bad token: issued timestamp is in the future')

    # Grab expiration time embedded into the token, if any.
    expiration_msec = embedded.pop('_x', None)
    if expiration_msec is None:
      expiration_msec = cls.expiration_sec * 1000
    else:
      expiration_msec = int(expiration_msec)
      assert expiration_msec > 0, expiration_msec

    # Check token expiration.
    if now > issued_ts + expiration_msec:
      raise InvalidTokenError('Bad token: expired')

    return embedded
Example #51
 def fake_mint_token(*args):
   calls.append(args)
   return {
     'access_token': 'token@%d' % utils.time_time(),
     'exp_ts': utils.time_time() + 3600,
   }
Example #52
 def fake_get(key):
   if key not in memcache or memcache[key][1] < utils.time_time():
     return None
   return memcache[key][0]