Example #1
class PackageRepositoryApi(remote.Service):
  """Package Repository API."""

  # Cached value of 'service' property.
  _service = None

  @property
  def service(self):
    """Returns configured impl.RepoService."""
    if self._service is None:
      self._service = impl.get_repo_service()
      if self._service is None or not self._service.is_fetch_configured():
        raise endpoints.InternalServerErrorException(
            'Service is not configured')
    return self._service

  def get_instance(self, package_name, instance_id):
    """Grabs PackageInstance or raises appropriate *NotFoundError."""
    instance = self.service.get_instance(package_name, instance_id)
    if instance is None:
      pkg = self.service.get_package(package_name)
      if pkg is None:
        raise PackageNotFoundError()
      raise InstanceNotFoundError()
    return instance

  def verify_instance_exists(self, package_name, instance_id):
    """Raises appropriate *NotFoundError if instance is missing."""
    self.get_instance(package_name, instance_id)

  def verify_instance_is_ready(self, package_name, instance_id):
    """Raises appropriate error if instance doesn't exist or not ready yet.

    Instance is ready when all processors successfully finished.
    """
    instance = self.get_instance(package_name, instance_id)
    if instance.processors_failure:
      raise ProcessingFailedError(
          'Failed processors: %s' % ', '.join(instance.processors_failure))
    if instance.processors_pending:
      raise ProcessingNotFinishedYetError(
          'Pending processors: %s' % ', '.join(instance.processors_pending))


  ### Package methods.


  @gae_ts_mon.instrument_endpoint()
  @endpoints_method(
      endpoints.ResourceContainer(
          message_types.VoidMessage,
          package_name=messages.StringField(1, required=True),
          with_refs=messages.BooleanField(2, required=False)),
      PackageResponse,
      http_method='GET',
      path='package',
      name='fetchPackage')
  @auth.public  # ACL check is inside
  def fetch_package(self, request):
    """Returns information about a package."""
    package_name = validate_package_name(request.package_name)

    caller = auth.get_current_identity()
    if not acl.can_fetch_package(package_name, caller):
      raise auth.AuthorizationError()

    pkg = self.service.get_package(package_name)
    if pkg is None:
      raise PackageNotFoundError()

    refs = []
    if request.with_refs:
      refs = self.service.query_package_refs(package_name)

    return PackageResponse(
        package=package_to_proto(pkg),
        refs=[package_ref_to_proto(r) for r in refs])


  @gae_ts_mon.instrument_endpoint()
  @endpoints_method(
      endpoints.ResourceContainer(
          message_types.VoidMessage,
          package_name=messages.StringField(1, required=True)),
      PackageResponse,
      http_method='POST',
      path='package/hidden',
      name='hidePackage')
  @auth.public  # ACL check is inside
  def hide_package(self, request):
    """Marks the package as hidden, it disappears from listPackages output."""
    return self.set_package_hidden(request.package_name, True)


  @gae_ts_mon.instrument_endpoint()
  @endpoints_method(
      endpoints.ResourceContainer(
          message_types.VoidMessage,
          package_name=messages.StringField(1, required=True)),
      PackageResponse,
      http_method='DELETE',
      path='package/hidden',
      name='unhidePackage')
  @auth.public  # ACL check is inside
  def unhide_package(self, request):
    """Marks the package as visible, the reverse of hidePackage."""
    return self.set_package_hidden(request.package_name, False)


  def set_package_hidden(self, package_name, hidden):
    """Common implementation for hide_package and unhide_package."""
    package_name = validate_package_name(package_name)

    caller = auth.get_current_identity()
    if not acl.can_modify_hidden(package_name, caller):
      raise auth.AuthorizationError()

    def mutation(pkg):
      if pkg.hidden == hidden:
        return False
      pkg.hidden = hidden
      return True

    pkg = self.service.modify_package(package_name, mutation)
    if pkg is None:
      raise PackageNotFoundError()

    return PackageResponse(package=package_to_proto(pkg))


  @gae_ts_mon.instrument_endpoint()
  @endpoints_method(
      endpoints.ResourceContainer(
          message_types.VoidMessage,
          path=messages.StringField(1, required=False),
          recursive=messages.BooleanField(2, required=False),
          show_hidden=messages.BooleanField(3, required=False)),
      ListPackagesResponse,
      http_method='GET',
      path='package/search',
      name='listPackages')
  @auth.public  # ACL check is inside
  def list_packages(self, request):
    """Returns packages in the given directory and possibly subdirectories."""
    path = request.path or ''
    recursive = request.recursive or False
    show_hidden = request.show_hidden or False

    pkgs, dirs = self.service.list_packages(path, recursive, show_hidden)
    caller = auth.get_current_identity()
    visible_pkgs = [p for p in pkgs if acl.can_fetch_package(p, caller)]
    visible_dirs = [d for d in dirs if acl.can_fetch_package(d, caller)]

    return ListPackagesResponse(packages=visible_pkgs, directories=visible_dirs)

  @gae_ts_mon.instrument_endpoint()
  @endpoints_method(
      endpoints.ResourceContainer(
          message_types.VoidMessage,
          package_name=messages.StringField(1, required=True)),
      DeletePackageResponse,
      http_method='DELETE',
      path='package',
      name='deletePackage')
  @auth.public  # ACL check is inside
  def delete_package(self, request):
    """Deletes a package along with all its instances."""
    package_name = validate_package_name(request.package_name)

    caller = auth.get_current_identity()
    if not acl.can_delete_package(package_name, caller):
      raise auth.AuthorizationError()

    deleted = self.service.delete_package(package_name)
    if not deleted:
      raise PackageNotFoundError()
    return DeletePackageResponse()


  ### PackageInstance methods.


  @gae_ts_mon.instrument_endpoint()
  @endpoints_method(
      endpoints.ResourceContainer(
          message_types.VoidMessage,
          package_name=messages.StringField(1, required=True),
          instance_id=messages.StringField(2, required=True)),
      FetchInstanceResponse,
      http_method='GET',
      path='instance',
      name='fetchInstance')
  @auth.public  # ACL check is inside
  def fetch_instance(self, request):
    """Returns signed URL that can be used to fetch a package instance."""
    package_name = validate_package_name(request.package_name)
    instance_id = validate_instance_id(request.instance_id)

    caller = auth.get_current_identity()
    if not acl.can_fetch_instance(package_name, caller):
      raise auth.AuthorizationError()

    instance = self.get_instance(package_name, instance_id)
    return FetchInstanceResponse(
        instance=instance_to_proto(instance),
        fetch_url=self.service.generate_fetch_url(instance),
        processors=processors_protos(instance))

  @gae_ts_mon.instrument_endpoint()
  @endpoints_method(
      endpoints.ResourceContainer(
          message_types.VoidMessage,
          package_name=messages.StringField(1, required=True),
          instance_id=messages.StringField(2, required=True)),
      RegisterInstanceResponse,
      path='instance',
      http_method='POST',
      name='registerInstance')
  @auth.public  # ACL check is inside
  def register_instance(self, request):
    """Registers a new package instance in the repository."""
    package_name = validate_package_name(request.package_name)
    instance_id = validate_instance_id(request.instance_id)

    caller = auth.get_current_identity()
    if not acl.can_register_instance(package_name, caller):
      raise auth.AuthorizationError()

    instance = self.service.get_instance(package_name, instance_id)
    if instance is not None:
      return RegisterInstanceResponse(
          status=Status.ALREADY_REGISTERED,
          instance=instance_to_proto(instance))

    # Need to upload to CAS first? Open an upload session. Caller must use
    # CASServiceApi to finish the upload and then call registerInstance again.
    if not self.service.is_instance_file_uploaded(package_name, instance_id):
      upload_url, upload_session_id = self.service.create_upload_session(
          package_name, instance_id, caller)
      return RegisterInstanceResponse(
          status=Status.UPLOAD_FIRST,
          upload_session_id=upload_session_id,
          upload_url=upload_url)

    # Package data is in the store. Make an entity.
    instance, registered = self.service.register_instance(
        package_name=package_name,
        instance_id=instance_id,
        caller=caller,
        now=utils.utcnow())
    return RegisterInstanceResponse(
        status=Status.REGISTERED if registered else Status.ALREADY_REGISTERED,
        instance=instance_to_proto(instance))


  ### Refs methods.


  @gae_ts_mon.instrument_endpoint()
  @endpoints_method(
      endpoints.ResourceContainer(
          SetRefRequest,
          package_name=messages.StringField(1, required=True),
          ref=messages.StringField(2, required=True)),
      SetRefResponse,
      path='ref',
      http_method='POST',
      name='setRef')
  @auth.public  # ACL check is inside
  def set_ref(self, request):
    """Creates a ref or moves an existing one."""
    package_name = validate_package_name(request.package_name)
    ref = validate_package_ref(request.ref)
    instance_id = validate_instance_id(request.instance_id)

    caller = auth.get_current_identity()
    if not acl.can_move_ref(package_name, ref, caller):
      raise auth.AuthorizationError('Not authorized to move "%s"' % ref)
    self.verify_instance_is_ready(package_name, instance_id)

    ref_entity = self.service.set_package_ref(
        package_name=package_name,
        ref=ref,
        instance_id=instance_id,
        caller=caller,
        now=utils.utcnow())
    return SetRefResponse(ref=package_ref_to_proto(ref_entity))

  @gae_ts_mon.instrument_endpoint()
  @endpoints_method(
      endpoints.ResourceContainer(
          message_types.VoidMessage,
          package_name=messages.StringField(1, required=True),
          instance_id=messages.StringField(2, required=True),
          ref=messages.StringField(3, repeated=True)),
      FetchRefsResponse,
      path='ref',
      http_method='GET',
      name='fetchRefs')
  @auth.public  # ACL check is inside
  def fetch_refs(self, request):
    """Lists package instance refs (newest first)."""
    package_name = validate_package_name(request.package_name)
    instance_id = validate_instance_id(request.instance_id)
    refs = validate_package_ref_list(request.ref) if request.ref else None

    caller = auth.get_current_identity()
    if not acl.can_fetch_instance(package_name, caller):
      raise auth.AuthorizationError()
    self.verify_instance_exists(package_name, instance_id)

    if not refs:
      # Fetch all.
      output = self.service.query_instance_refs(package_name, instance_id)
    else:
      # Fetch selected refs, pick ones pointing to the instance.
      output = [
        r
        for r in self.service.get_package_refs(package_name, refs).itervalues()
        if r and r.instance_id == instance_id
      ]
      output.sort(key=lambda r: r.modified_ts, reverse=True)

    return FetchRefsResponse(refs=[package_ref_to_proto(ref) for ref in output])


  ### Tags methods.


  @gae_ts_mon.instrument_endpoint()
  @endpoints_method(
      endpoints.ResourceContainer(
          message_types.VoidMessage,
          package_name=messages.StringField(1, required=True),
          instance_id=messages.StringField(2, required=True),
          tag=messages.StringField(3, repeated=True)),
      FetchTagsResponse,
      path='tags',
      http_method='GET',
      name='fetchTags')
  @auth.public  # ACL check is inside
  def fetch_tags(self, request):
    """Lists package instance tags (newest first)."""
    package_name = validate_package_name(request.package_name)
    instance_id = validate_instance_id(request.instance_id)
    tags = validate_instance_tag_list(request.tag) if request.tag else None

    caller = auth.get_current_identity()
    if not acl.can_fetch_instance(package_name, caller):
      raise auth.AuthorizationError()
    self.verify_instance_exists(package_name, instance_id)

    if not tags:
      # Fetch all.
      attached = self.service.query_tags(package_name, instance_id)
    else:
      # Fetch only the selected tags. Essentially an "is tagged by?" check.
      found = self.service.get_tags(package_name, instance_id, tags)
      attached = [found[tag] for tag in tags if found[tag]]
      attached.sort(key=lambda t: t.registered_ts, reverse=True)

    return FetchTagsResponse(tags=[tag_to_proto(tag) for tag in attached])

  @gae_ts_mon.instrument_endpoint()
  @endpoints_method(
      endpoints.ResourceContainer(
          AttachTagsRequest,
          package_name=messages.StringField(1, required=True),
          instance_id=messages.StringField(2, required=True)),
      AttachTagsResponse,
      path='tags',
      http_method='POST',
      name='attachTags')
  @auth.public  # ACL check is inside
  def attach_tags(self, request):
    """Attaches a set of tags to a package instance."""
    package_name = validate_package_name(request.package_name)
    instance_id = validate_instance_id(request.instance_id)
    tags = validate_instance_tag_list(request.tags)

    caller = auth.get_current_identity()
    for tag in tags:
      if not acl.can_attach_tag(package_name, tag, caller):
        raise auth.AuthorizationError('Not authorized to attach "%s"' % tag)
    self.verify_instance_is_ready(package_name, instance_id)

    attached = self.service.attach_tags(
        package_name=package_name,
        instance_id=instance_id,
        tags=tags,
        caller=caller,
        now=utils.utcnow())
    return AttachTagsResponse(tags=[tag_to_proto(attached[t]) for t in tags])

  @gae_ts_mon.instrument_endpoint()
  @endpoints_method(
      endpoints.ResourceContainer(
          message_types.VoidMessage,
          package_name=messages.StringField(1, required=True),
          instance_id=messages.StringField(2, required=True),
          tag=messages.StringField(3, repeated=True)),
      DetachTagsResponse,
      path='tags',
      http_method='DELETE',
      name='detachTags')
  @auth.public  # ACL check is inside
  def detach_tags(self, request):
    """Removes given tags from a package instance."""
    package_name = validate_package_name(request.package_name)
    instance_id = validate_instance_id(request.instance_id)
    tags = validate_instance_tag_list(request.tag)

    caller = auth.get_current_identity()
    for tag in tags:
      if not acl.can_detach_tag(package_name, tag, caller):
        raise auth.AuthorizationError('Not authorized to detach "%s"' % tag)
    self.verify_instance_exists(package_name, instance_id)

    self.service.detach_tags(
        package_name=package_name,
        instance_id=instance_id,
        tags=tags)
    return DetachTagsResponse()


  ### Search methods.


  @gae_ts_mon.instrument_endpoint()
  @endpoints_method(
      endpoints.ResourceContainer(
          message_types.VoidMessage,
          tag=messages.StringField(1, required=True),
          package_name=messages.StringField(2, required=False)),
      SearchResponse,
      path='instance/search',
      http_method='GET',
      name='searchInstances')
  @auth.public  # ACL check is inside
  def search_instances(self, request):
    """Returns package instances with given tag (in no particular order)."""
    tag = validate_instance_tag(request.tag)
    if request.package_name:
      package_name = validate_package_name(request.package_name)
    else:
      package_name = None

    caller = auth.get_current_identity()
    callback = None
    if package_name:
      # If search is limited to one package, check its ACL only once.
      if not acl.can_fetch_instance(package_name, caller):
        raise auth.AuthorizationError()
    else:
      # Filter out packages not allowed by ACL.
      acl_cache = {}
      def check_readable(package_name, _instance_id):
        if package_name not in acl_cache:
          acl_cache[package_name] = acl.can_fetch_instance(package_name, caller)
        return acl_cache[package_name]
      callback = check_readable

    found = self.service.search_by_tag(tag, package_name, callback)
    return SearchResponse(instances=[instance_to_proto(i) for i in found])


  @gae_ts_mon.instrument_endpoint()
  @endpoints_method(
      endpoints.ResourceContainer(
          message_types.VoidMessage,
          package_name=messages.StringField(1, required=True),
          version=messages.StringField(2, required=True)),
      ResolveVersionResponse,
      path='instance/resolve',
      http_method='GET',
      name='resolveVersion')
  @auth.public  # ACL check is inside
  def resolve_version(self, request):
    """Returns instance ID of an existing instance given a ref or a tag."""
    package_name = validate_package_name(request.package_name)
    version = validate_instance_version(request.version)

    caller = auth.get_current_identity()
    if not acl.can_fetch_instance(package_name, caller):
      raise auth.AuthorizationError()

    pkg = self.service.get_package(package_name)
    if pkg is None:
      raise PackageNotFoundError()

    ids = self.service.resolve_version(package_name, version, limit=2)
    if not ids:
      raise InstanceNotFoundError()
    if len(ids) > 1:
      return ResolveVersionResponse(
          status=Status.AMBIGUOUS_VERSION,
          error_message='More than one instance has tag "%s" set' % version)
    return ResolveVersionResponse(instance_id=ids[0])


  ### ACL methods.


  @gae_ts_mon.instrument_endpoint()
  @endpoints_method(
      endpoints.ResourceContainer(
          message_types.VoidMessage,
          package_path=messages.StringField(1, required=True)),
      FetchACLResponse,
      http_method='GET',
      path='acl',
      name='fetchACL')
  @auth.public  # ACL check is inside
  def fetch_acl(self, request):
    """Returns access control list for a given package path."""
    package_path = validate_package_path(request.package_path)

    caller = auth.get_current_identity()
    if not acl.can_fetch_acl(package_path, caller):
      raise auth.AuthorizationError()

    return FetchACLResponse(
        acls=package_acls_to_proto({
          role: acl.get_package_acls(package_path, role)
          for role in acl.ROLES
        }))

  @gae_ts_mon.instrument_endpoint()
  @endpoints_method(
      endpoints.ResourceContainer(
          ModifyACLRequest,
          package_path=messages.StringField(1, required=True)),
      ModifyACLResponse,
      http_method='POST',
      path='acl',
      name='modifyACL')
  @auth.public  # ACL check is inside
  def modify_acl(self, request):
    """Changes access control list for a given package path."""
    package_path = validate_package_path(request.package_path)

    try:
      changes = [
        role_change_from_proto(msg, package_path)
        for msg in request.changes
      ]
    except ValueError as exc:
      raise ValidationError('Invalid role change request: %s' % exc)

    caller = auth.get_current_identity()
    if not acl.can_modify_acl(package_path, caller):
      raise auth.AuthorizationError()

    # Apply changes. Do not catch ValueError. Validation above should be
    # sufficient. If it is not, HTTP 500 and an uncaught exception in logs is
    # exactly what is needed.
    acl.modify_roles(changes, caller, utils.utcnow())
    return ModifyACLResponse()


  ### ClientBinary methods.


  @gae_ts_mon.instrument_endpoint()
  @endpoints_method(
      endpoints.ResourceContainer(
          message_types.VoidMessage,
          package_name=messages.StringField(1, required=True),
          instance_id=messages.StringField(2, required=True)),
      FetchClientBinaryResponse,
      http_method='GET',
      path='client',
      name='fetchClientBinary')
  @auth.public  # ACL check is inside
  def fetch_client_binary(self, request):
    """Returns signed URL that can be used to fetch CIPD client binary."""
    package_name = validate_package_name(request.package_name)
    if not client.is_cipd_client_package(package_name):
      raise ValidationError('Not a CIPD client package')
    instance_id = validate_instance_id(request.instance_id)

    caller = auth.get_current_identity()
    if not acl.can_fetch_instance(package_name, caller):
      raise auth.AuthorizationError()

    # Grab the location of the extracted binary.
    instance = self.get_instance(package_name, instance_id)
    client_info, error_message = self.service.get_client_binary_info(instance)
    if error_message:
      raise Error(error_message)
    if client_info is None:
      return FetchClientBinaryResponse(
        status=Status.NOT_EXTRACTED_YET,
        instance=instance_to_proto(instance))

    return FetchClientBinaryResponse(
        instance=instance_to_proto(instance),
        client_binary=FetchClientBinaryResponse.ClientBinary(
            sha1=client_info.sha1,
            size=client_info.size,
            fetch_url=client_info.fetch_url,
            file_name=client.get_cipd_client_filename(package_name)))


  # Counter methods.


  @gae_ts_mon.instrument_endpoint()
  @endpoints_method(
      endpoints.ResourceContainer(
          IncrementCounterRequest,
          package_name=messages.StringField(1, required=True),
          instance_id=messages.StringField(2, required=True),
          counter_name=messages.StringField(3, required=True)),
      IncrementCounterResponse,
      path='counter',
      http_method='POST',
      name='incrementCounter')
  @auth.public  # ACL check is inside
  def increment_counter(self, request):
    """Increments a counter on a package instance."""
    package_name = validate_package_name(request.package_name)
    instance_id = validate_instance_id(request.instance_id)
    counter_name = validate_counter_name(request.counter_name)
    delta = request.delta

    if delta not in (0, 1):
      raise ValidationError('Delta must be either 0 or 1')

    caller = auth.get_current_identity()
    if not acl.can_modify_counter(package_name, caller):
      raise auth.AuthorizationError()
    self.verify_instance_exists(package_name, instance_id)

    self.service.increment_counter(
        package_name=package_name,
        instance_id=instance_id,
        counter_name=counter_name,
        delta=delta)
    return IncrementCounterResponse()


  @gae_ts_mon.instrument_endpoint()
  @endpoints_method(
      endpoints.ResourceContainer(
          message_types.VoidMessage,
          package_name=messages.StringField(1, required=True),
          instance_id=messages.StringField(2, required=True),
          counter_name=messages.StringField(3, required=True)),
      ReadCounterResponse,
      path='counter',
      http_method='GET',
      name='readCounter')
  @auth.public  # ACL check is inside
  def read_counter(self, request):
    """Increments a counter on a package instance."""
    package_name = validate_package_name(request.package_name)
    instance_id = validate_instance_id(request.instance_id)
    counter_name = validate_counter_name(request.counter_name)

    caller = auth.get_current_identity()
    if not acl.can_read_counter(package_name, caller):
      raise auth.AuthorizationError()
    self.verify_instance_exists(package_name, instance_id)

    counter = self.service.read_counter(
        package_name=package_name,
        instance_id=instance_id,
        counter_name=counter_name)

    response = ReadCounterResponse(value=counter.value)
    if counter.created_ts is not None:
      response.created_ts = utils.datetime_to_timestamp(counter.created_ts)
    if counter.updated_ts is not None:
      response.updated_ts = utils.datetime_to_timestamp(counter.updated_ts)
    return response
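
The registerInstance flow above is two-phase: if the package file is not in CAS yet, the server answers UPLOAD_FIRST with an upload URL and session ID, and the client must upload the file, finalize it via CASServiceApi, and then call registerInstance again. A minimal client-side sketch of that loop, assuming hypothetical call_register/upload_file/finalize_upload helpers that are not part of the API above:

def register_with_upload(package_name, instance_id,
                         call_register, upload_file, finalize_upload):
  """Registers an instance, uploading its file first if the server asks.

  call_register(package_name, instance_id) -> dict with 'status' and, for
      UPLOAD_FIRST, 'upload_url' and 'upload_session_id'.
  upload_file(upload_url) -> None, streams the package file to storage.
  finalize_upload(upload_session_id) -> None, asks CASServiceApi to verify.
  """
  resp = call_register(package_name, instance_id)
  if resp['status'] == 'UPLOAD_FIRST':
    # The server has no file for this instance yet: upload, finalize, retry.
    upload_file(resp['upload_url'])
    finalize_upload(resp['upload_session_id'])
    resp = call_register(package_name, instance_id)
  assert resp['status'] in ('REGISTERED', 'ALREADY_REGISTERED'), resp
  return resp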
Example #2
class ScoreForm(messages.Message):
    """ScoreForm for outbound Score information"""
    user_name = messages.StringField(1, required=True)
    date = messages.StringField(2, required=True)
    won = messages.BooleanField(3, required=True)
    tied = messages.BooleanField(4, required=True)
Example #3
class Config(messages.Message):
    classes = messages.BooleanField(1, default=False)
    class_name = messages.StringField(2, default='code')
    highlighter = messages.StringField(3, default='pygments')
Example #4
class ProjectListResponse(DjangoProtoRPCMessage):
    """ProtoRPC message definition to represent a list of stored projects."""
    items = messages.MessageField(ProjectResponseMessageSlim, 1, repeated=True)
    is_list = messages.BooleanField(2)
Example #5
class Config(messages.Message):
    path = messages.StringField(1)
    id = messages.StringField(2)
    convert = messages.BooleanField(3)
Example #6
class PushPing(messages.Message):
  """Indicates whether data storage executed successfully."""
  ok = messages.BooleanField(1)
Example #7
class StandardQueryParameters(_messages.Message):
    """Query parameters accepted by all methods.

  Enums:
    FXgafvValueValuesEnum: V1 error format.
    AltValueValuesEnum: Data format for response.

  Fields:
    f__xgafv: V1 error format.
    access_token: OAuth access token.
    alt: Data format for response.
    bearer_token: OAuth bearer token.
    callback: JSONP
    fields: Selector specifying which fields to include in a partial response.
    key: API key. Your API key identifies your project and provides you with
      API access, quota, and reports. Required unless you provide an OAuth 2.0
      token.
    oauth_token: OAuth 2.0 token for the current user.
    pp: Pretty-print response.
    prettyPrint: Returns response with indentations and line breaks.
    quotaUser: Available to use for quota purposes for server-side
      applications. Can be any arbitrary string assigned to a user, but should
      not exceed 40 characters.
    trace: A tracing token of the form "token:<tokenid>" or "email:<ldap>" to
      include in api requests.
    uploadType: Legacy upload protocol for media (e.g. "media", "multipart").
    upload_protocol: Upload protocol for media (e.g. "raw", "multipart").
  """
    class AltValueValuesEnum(_messages.Enum):
        """Data format for response.

    Values:
      json: Responses with Content-Type of application/json
      media: Media download with context-dependent Content-Type
      proto: Responses with Content-Type of application/x-protobuf
    """
        json = 0
        media = 1
        proto = 2

    class FXgafvValueValuesEnum(_messages.Enum):
        """V1 error format.

    Values:
      _1: v1 error format
      _2: v2 error format
    """
        _1 = 0
        _2 = 1

    f__xgafv = _messages.EnumField('FXgafvValueValuesEnum', 1)
    access_token = _messages.StringField(2)
    alt = _messages.EnumField('AltValueValuesEnum', 3, default=u'json')
    bearer_token = _messages.StringField(4)
    callback = _messages.StringField(5)
    fields = _messages.StringField(6)
    key = _messages.StringField(7)
    oauth_token = _messages.StringField(8)
    pp = _messages.BooleanField(9, default=True)
    prettyPrint = _messages.BooleanField(10, default=True)
    quotaUser = _messages.StringField(11)
    trace = _messages.StringField(12)
    uploadType = _messages.StringField(13)
    upload_protocol = _messages.StringField(14)
Example #8
class Response(messages.Message):
    message = messages.StringField(1)
    success = messages.BooleanField(2)
    errors = messages.StringField(3)
    token = messages.StringField(4)
Example #9
class Cluster(_messages.Message):
  """A Cluster object.

  Enums:
    StatusValueValuesEnum: [Output only] The current status of this cluster.

  Fields:
    clusterApiVersion: The API version of the Kubernetes master and kubelets
      running in this cluster. Leave blank to pick up the latest stable
      release, or specify a version of the form "x.y.z". The Google Container
      Engine release notes list the currently supported versions. If an
      incorrect version is specified, the server returns an error listing the
      currently supported versions.
    containerIpv4Cidr: The IP address range of the container pods in this
      cluster, in  CIDR notation (e.g. 10.96.0.0/14). Leave blank to have one
      automatically chosen or specify a /14 block in 10.0.0.0/8 or
      172.16.0.0/12.
    creationTimestamp: [Output only] The time the cluster was created, in
      RFC3339 text format.
    description: An optional description of this cluster.
    enableCloudLogging: Whether logs from the cluster should be made available
      via the Google Cloud Logging service. This includes both logs from your
      applications running in the cluster as well as logs from the Kubernetes
      components themselves.
    enableCloudMonitoring: Whether metrics from the cluster should be made
      available via the Google Cloud Monitoring service.
    endpoint: [Output only] The IP address of this cluster's Kubernetes
      master. The endpoint can be accessed from the internet at
      https://username:password@endpoint/.  See the masterAuth property of
      this resource for username and password information.
    instanceGroupUrls: [Output only] The resource URLs of [instance
      groups](/compute/docs/instance-groups/) associated with this cluster.
    masterAuth: The authentication information for accessing the master.
    name: The name of this cluster. The name must be unique within this
      project and zone, and can be up to 40 characters with the following
      restrictions:   - Lowercase letters, numbers, and hyphens only. - Must
      start with a letter. - Must end with a number or a letter.
    network: The name of the Google Compute Engine network to which the
      cluster is connected.
    nodeConfig: The machine type and image to use for all nodes in this
      cluster. See the descriptions of the child properties of nodeConfig.
    nodeRoutingPrefixSize: [Output only] The size of the address space on each
      node for hosting containers.
    numNodes: The number of nodes to create in this cluster. You must ensure
      that your Compute Engine resource quota is sufficient for this number of
      instances plus one (to include the master). You must also have available
      firewall and routes quota.
    selfLink: [Output only] Server-defined URL for the resource.
    servicesIpv4Cidr: [Output only] The IP address range of the Kubernetes
      services in this cluster, in  CIDR notation (e.g. 1.2.3.4/29). Service
      addresses are typically put in the last /16 from the container CIDR.
    status: [Output only] The current status of this cluster.
    statusMessage: [Output only] Additional information about the current
      status of this cluster, if available.
    zone: [Output only] The name of the Google Compute Engine zone in which
      the cluster resides.
  """

  class StatusValueValuesEnum(_messages.Enum):
    """[Output only] The current status of this cluster.

    Values:
      error: <no description>
      provisioning: <no description>
      running: <no description>
      stopping: <no description>
    """
    error = 0
    provisioning = 1
    running = 2
    stopping = 3

  clusterApiVersion = _messages.StringField(1)
  containerIpv4Cidr = _messages.StringField(2)
  creationTimestamp = _messages.StringField(3)
  description = _messages.StringField(4)
  enableCloudLogging = _messages.BooleanField(5)
  enableCloudMonitoring = _messages.BooleanField(6)
  endpoint = _messages.StringField(7)
  instanceGroupUrls = _messages.StringField(8, repeated=True)
  masterAuth = _messages.MessageField('MasterAuth', 9)
  name = _messages.StringField(10)
  network = _messages.StringField(11)
  nodeConfig = _messages.MessageField('NodeConfig', 12)
  nodeRoutingPrefixSize = _messages.IntegerField(13, variant=_messages.Variant.INT32)
  numNodes = _messages.IntegerField(14, variant=_messages.Variant.INT32)
  selfLink = _messages.StringField(15)
  servicesIpv4Cidr = _messages.StringField(16)
  status = _messages.EnumField('StatusValueValuesEnum', 17)
  statusMessage = _messages.StringField(18)
  zone = _messages.StringField(19)
Example #10
class ScoreForm(messages.Message):
    """Outbound, score information"""
    user_name = messages.StringField(1, required=True)
    date = messages.StringField(2, required=True)
    won = messages.BooleanField(3, required=True)
    score = messages.IntegerField(4, required=True)
Example #11
                   AllGameHistory, PlayerHandForm, AllUserScores, MoveForm)
from models.game import (Game, faces)
from models.player import Player
from models.move import Move
from models.user import User
from utils import (get_by_urlsafe, check_user_exists, get_player_by_game)

NEW_GAME_REQUEST = endpoints.ResourceContainer(
    player1=messages.StringField(1, required=True),
    player2=messages.StringField(2, required=True),
    cards_dealt=messages.IntegerField(3, required=False),
    matches_to_win=messages.IntegerField(4, required=False))

USER_GAMES_REQUEST = endpoints.ResourceContainer(
    username=messages.StringField(1, required=True),
    active_only=messages.BooleanField(2, required=True))

GET_GAME_REQUEST = endpoints.ResourceContainer(
    urlsafe_game_key=messages.StringField(1))

GET_ALL_GAMES_REQUEST = endpoints.ResourceContainer(
    active_only=messages.BooleanField(1, required=False))

CANCEL_GAME_REQUEST = endpoints.ResourceContainer(
    urlsafe_game_key=messages.StringField(1))

HAND_REQUEST = endpoints.ResourceContainer(
    urlsafe_game_key=messages.StringField(1, required=True),
    username=messages.StringField(2, required=True))

USER_REQUEST = endpoints.ResourceContainer(username=messages.StringField(
Example #12
class PerformanceStatsRequest(messages.Message):
    # Only applicable when fetching results. This incurs more DB operations and
    # more data is returned so this is a bit slower.
    include_performance_stats = messages.BooleanField(1, default=False)
class ConfigSetApi(remote.Service):
    """A handler for Config Set API."""
    def _ConvertFromMessage(self, msg):
        """Convert a message to a config set info."""
        return mtt_messages.Convert(msg,
                                    ndb_models.ConfigSetInfo,
                                    from_cls=mtt_messages.ConfigSetInfo)

    @base.ApiMethod(endpoints.ResourceContainer(message_types.VoidMessage, ),
                    mtt_messages.BuildChannelList,
                    path='build_channels',
                    http_method='GET',
                    name='build_channels')
    def ListBuildChannels(self, request):
        """Fetches a list of build channels used for importing config sets."""
        channels = []
        for channel_id in config_set_helper.CONFIG_SET_BUILD_CHANNEL_IDS:
            channels.append(build.GetBuildChannel(channel_id))
        return mtt_messages.BuildChannelList(
            build_channels=mtt_messages.ConvertList(channels,
                                                    mtt_messages.BuildChannel))

    @base.ApiMethod(endpoints.ResourceContainer(
        message_types.VoidMessage,
        include_remote=messages.BooleanField(1),
        statuses=messages.EnumField(ndb_models.ConfigSetStatus,
                                    2,
                                    repeated=True),
    ),
                    mtt_messages.ConfigSetInfoList,
                    path='/config_sets',
                    http_method='GET',
                    name='list')
    def List(self, request):
        """Fetches a list of config sets.

    Parameters:
      include_remote: True to check remote config sets and determine whether
        the imported config sets are updatable, False to only return imported
        config sets
      statuses: config set statuses to include
    """
        imported_infos = config_set_helper.GetLocalConfigSetInfos()

        remote_infos = []
        if request.include_remote:
            remote_infos = config_set_helper.GetRemoteConfigSetInfos()

        info_message_list = config_set_helper.UpdateConfigSetInfos(
            imported_infos, remote_infos)
        if request.statuses:
            info_message_list = [
                msg for msg in info_message_list
                if msg.status in request.statuses
            ]

        return mtt_messages.ConfigSetInfoList(
            config_set_infos=info_message_list)

    @base.ApiMethod(endpoints.ResourceContainer(mtt_messages.ConfigSetInfo),
                    mtt_messages.ConfigSetInfo,
                    path='latest_version',
                    http_method='POST',
                    name='latest_version')
    def GetLatestVersion(self, request):
        imported_info = self._ConvertFromMessage(request)
        return config_set_helper.GetLatestVersion(imported_info)

    @base.ApiMethod(endpoints.ResourceContainer(
        message_types.VoidMessage,
        url=messages.StringField(1),
        content=messages.StringField(2)),
                    mtt_messages.ConfigSetInfo,
                    path='import/{url}',
                    http_method='POST',
                    name='import')
    def Import(self, request):
        """Downloads and imports a config set.

    Parameters:
      url: URL from which to download a config file
      content: contents of a config file, only used if url is not provided
    """
        content = (request.content if request.content else
                   config_set_helper.ReadRemoteFile(request.url))
        return config_set_helper.Import(content)

    @base.ApiMethod(endpoints.ResourceContainer(message_types.VoidMessage,
                                                url=messages.StringField(1)),
                    message_types.VoidMessage,
                    path='{url}',
                    http_method='DELETE',
                    name='delete')
    def Delete(self, request):
        """Removes a config set and all associated objects (tests, etc).

    Parameters:
      url: the url of the config set to remove
    """
        config_set_helper.Delete(request.url)
        return message_types.VoidMessage()
Example #14
class Package(messages.Message):
  """Information about some registered package."""
  package_name = messages.StringField(1, required=True)
  registered_by = messages.StringField(2, required=True)
  registered_ts = messages.IntegerField(3, required=True)
  hidden = messages.BooleanField(4, required=True)
Example #15
class GameForm(messages.Message):
    """GameForm for outbound game state information"""
    urlsafe_key = messages.StringField(1, required=True)
    attempts_remaining = messages.IntegerField(2, required=True)
    game_over = messages.BooleanField(3, required=True)
    user_name = messages.StringField(4, required=True)
Example #16
class TaskProperties(messages.Message):
    """Important metadata about a particular task."""
    # Specifies named caches to map into the working directory. These caches
    # outlive the task and can be reused by later tasks on this bot that
    # request the same named cache.
    caches = messages.MessageField(CacheEntry, 11, repeated=True)
    # CIPD packages to install. These packages are meant to be software that is
    # needed (a dependency) by the task being run. Unlike isolated files, the
    # CIPD packages do not expire from the server.
    cipd_input = messages.MessageField(CipdInput, 10)
    # Command to run. This has priority over a command specified in the isolated
    # files. Only one of 'command' or 'extra_args' can be specified.
    command = messages.StringField(1, repeated=True)
    # Relative working directory to start the 'command' in, defaults to the root
    # mapped directory or what is provided in the isolated file, if any.
    relative_cwd = messages.StringField(15)
    # Dimensions are what is used to determine which bot can run the task. The
    # bot must have all the matching dimensions, even for repeated keys with
    # multiple different values. It is a logical AND, all values must match.
    #
    # It should have been a StringListPair but this would be a breaking change.
    dimensions = messages.MessageField(StringPair, 2, repeated=True)
    # Environment variables to set when running the task.
    env = messages.MessageField(StringPair, 3, repeated=True)
    # Swarming-root relative paths to prepend to a given environment variable.
    #
    # These allow you to put certain subdirectories of the task into PATH,
    # PYTHONPATH, or other PATH-like environment variables. The order of
    # operations is:
    #   * Turn slashes into native-platform slashes.
    #   * Make the path absolute
    #   * Prepend it to the current value of the envvar using the os-native list
    #     separator (i.e. `;` on windows, `:` on POSIX).
    #
    # Each envvar can have multiple paths to prepend. They will be prepended in
    # the order seen here.
    #
    # For example, if env_prefixes was:
    #   [("PATH", ["foo", "bar"]),
    #    ("CUSTOMPATH", ["custom"])]
    #
    # The task would see:
    #   PATH=/path/to/swarming/rundir/foo:/path/to/swarming/rundir/bar:$PATH
    #   CUSTOMPATH=/path/to/swarming/rundir/custom
    #
    # The path should always be specified here with forward-slashes, and it must
    # not attempt to escape the swarming root (i.e. must not contain `..`).
    #
    # These are applied AFTER evaluating `env` entries.
    env_prefixes = messages.MessageField(StringListPair, 14, repeated=True)
    # Maximum number of seconds the task can run before its process is forcibly
    # terminated and the task results in TIMED_OUT.
    execution_timeout_secs = messages.IntegerField(4)
    # Extraneous arguments to append to the command specified in the isolated
    # file. Can only be used when an isolated file specifies a command. Only one
    # of 'command' or 'extra_args' can be specified.
    extra_args = messages.StringField(5, repeated=True)
    # Number of seconds to give the child process after a SIGTERM before sending a
    # SIGKILL. See doc/Bot.md#timeout-handling
    grace_period_secs = messages.IntegerField(6)
    # True if the task does not access any service through the network and is
    # believed to be 100% reproducible with the same outcome. In the case of a
    # successful task, previous results will be reused if possible.
    idempotent = messages.BooleanField(7)
    # Isolated inputs to map in the working directory. The isolated file may
    # optionally specify a command to run. Otherwise, 'command' must be specified.
    inputs_ref = messages.MessageField(FilesRef, 8)
    # Maximum number of seconds the task may be silent (no output to stdout or
    # stderr) before it is considered hung and forcibly terminated early, with
    # the task resulting in TIMED_OUT.
    io_timeout_secs = messages.IntegerField(9)
    # Paths in the working directory to archive back.
    outputs = messages.StringField(12, repeated=True)
    # Secret bytes to provide to the task. Cannot be retrieved back.
    secret_bytes = messages.BytesField(13)
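
# Illustrative only -- a rough sketch (not Swarming's actual bot code) of how
# the env_prefixes rules described above could be applied: normalize slashes,
# make each path absolute under the task's run directory, then prepend using
# the OS path-list separator. The env/env_prefixes/run_dir inputs are assumed.
import os

def apply_env_prefixes(env, env_prefixes, run_dir):
    """Returns a copy of `env` with prefix paths prepended per variable.

    env: dict of current environment variables.
    env_prefixes: list of (var_name, [relative_path, ...]) pairs.
    run_dir: absolute path of the task's root mapped directory.
    """
    out = dict(env)
    for var, rel_paths in env_prefixes:
        # Turn forward slashes into native separators and anchor each path at
        # the run directory, in the order the prefixes are listed.
        abs_paths = [
            os.path.abspath(os.path.join(run_dir, p.replace('/', os.sep)))
            for p in rel_paths
        ]
        prefix = os.pathsep.join(abs_paths)
        # Prepend with the os-native list separator (';' on Windows, ':' on POSIX).
        if var in out:
            out[var] = prefix + os.pathsep + out[var]
        else:
            out[var] = prefix
    return out
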
class Step(_messages.Message):
    """A Step represents a single operation performed as part of Execution. A
  step can be used to represent the execution of a tool ( for example a test
  runner execution or an execution of a compiler).  Steps can overlap (for
  instance two steps might have the same start time if some operations are
  done in parallel).  Here is an example, let's consider that we have a
  continuous build is executing a test runner for each iteration. The workflow
  would look like: - user creates a Execution with id 1 - user creates an
  TestExecutionStep with id 100 for Execution 1 - user update
  TestExecutionStep with id 100 to add a raw xml log + the service parses the
  xml logs and returns a TestExecutionStep with updated TestResult(s). - user
  update the status of TestExecutionStep with id 100 to COMPLETE  A Step can
  be updated until its state is set to COMPLETE at which points it becomes
  immutable.  Next tag: 20

  Enums:
    StateValueValuesEnum: The initial state is IN_PROGRESS. The only legal
      state transitions are * IN_PROGRESS -> COMPLETE  A PRECONDITION_FAILED
      will be returned if an invalid transition is requested.  It is valid to
      create Step with a state set to COMPLETE. The state can only be set to
      COMPLETE once. A PRECONDITION_FAILED will be returned if the state is
      set to COMPLETE multiple times.  - In response: always set - In
      create/update request: optional

  Fields:
    completionTime: The time when the step status was set to complete.  This
      value will be set automatically when state transitions to COMPLETE.  -
      In response: set if the execution state is COMPLETE. - In create/update
      request: never set
    creationTime: The time when the step was created.  - In response: always
      set - In create/update request: never set
    description: A description of this tool For example: mvn clean package -D
      skipTests=true  - In response: present if set by create/update request -
      In create/update request: optional
    dimensionValue: If the execution containing this step has any
      dimension_definition set, then this field allows the child to specify
      the values of the dimensions.  The keys must exactly match the
      dimension_definition of the execution.  For example, if the execution
      has `dimension_definition = ['attempt', 'device']` then a step must
      define values for those dimensions, eg. `dimension_value = ['attempt':
      '1', 'device': 'Nexus 6']`  If a step does not participate in one
      dimension of the matrix, the value for that dimension should be empty
      string. For example, if one of the tests is executed by a runner which
      does not support retries, the step could have `dimension_value =
      ['attempt': '', 'device': 'Nexus 6']`  If the step does not participate
      in any dimensions of the matrix, it may leave dimension_value unset.  A
      PRECONDITION_FAILED will be returned if any of the keys do not exist in
      the dimension_definition of the execution.  A PRECONDITION_FAILED will
      be returned if another step in this execution already has the same name
      and dimension_value, but differs on other data fields, for example, step
      field is different.  A PRECONDITION_FAILED will be returned if
      dimension_value is set, and there is a dimension_definition in the
      execution which is not specified as one of the keys.  - In response:
      present if set by create - In create request: optional - In update
      request: never set
    hasImages: Whether any of the outputs of this step are images whose
      thumbnails can be fetched with ListThumbnails.  - In response: always
      set - In create/update request: never set
    labels: Arbitrary user-supplied key/value pairs that are associated with
      the step.  Users are responsible for managing the key namespace such
      that keys don't accidentally collide.  An INVALID_ARGUMENT will be
      returned if the number of labels exceeds 100 or if the length of any of
      the keys or values exceeds 100 characters.  - In response: always set -
      In create request: optional - In update request: optional; any new
      key/value pair will be added to the map, and any new value for an
      existing key will update that key's value
    name: A short human-readable name to display in the UI. Maximum of 100
      characters. For example: Clean build  A PRECONDITION_FAILED will be
      returned upon creating a new step if it shares its name and
      dimension_value with an existing step. If two steps represent a similar
      action, but have different dimension values, they should share the same
      name. For instance, if the same set of tests is run on two different
      platforms, the two steps should have the same name.  - In response:
      always set - In create request: always set - In update request: never
      set
    outcome: Classification of the result, for example into SUCCESS or FAILURE
      - In response: present if set by create/update request - In
      create/update request: optional
    runDuration: How long it took for this step to run.  If unset, this is set
      to the difference between creation_time and completion_time when the
      step is set to the COMPLETE state. In some cases, it is appropriate to
      set this value separately: For instance, if a step is created, but the
      operation it represents is queued for a few minutes before it executes,
      it would be appropriate not to include the time spent queued in its
      run_duration.  PRECONDITION_FAILED will be returned if one attempts to
      set a run_duration on a step which already has this field set.  - In
      response: present if previously set; always present on COMPLETE step -
      In create request: optional - In update request: optional
    state: The initial state is IN_PROGRESS. The only legal state transitions
      are * IN_PROGRESS -> COMPLETE  A PRECONDITION_FAILED will be returned if
      an invalid transition is requested.  It is valid to create Step with a
      state set to COMPLETE. The state can only be set to COMPLETE once. A
      PRECONDITION_FAILED will be returned if the state is set to COMPLETE
      multiple times.  - In response: always set - In create/update request:
      optional
    stepId: A unique identifier within a Execution for this Step.  Returns
      INVALID_ARGUMENT if this field is set or overwritten by the caller.  -
      In response: always set - In create/update request: never set
    testExecutionStep: An execution of a test runner.
    toolExecutionStep: An execution of a tool (used for steps we don't
      explicitly support).
  """
    class StateValueValuesEnum(_messages.Enum):
        """The initial state is IN_PROGRESS. The only legal state transitions are
    * IN_PROGRESS -> COMPLETE  A PRECONDITION_FAILED will be returned if an
    invalid transition is requested.  It is valid to create Step with a state
    set to COMPLETE. The state can only be set to COMPLETE once. A
    PRECONDITION_FAILED will be returned if the state is set to COMPLETE
    multiple times.  - In response: always set - In create/update request:
    optional

    Values:
      complete: <no description>
      inProgress: <no description>
      pending: <no description>
      unknownState: <no description>
    """
        complete = 0
        inProgress = 1
        pending = 2
        unknownState = 3

    completionTime = _messages.MessageField('Timestamp', 1)
    creationTime = _messages.MessageField('Timestamp', 2)
    description = _messages.StringField(3)
    dimensionValue = _messages.MessageField('StepDimensionValueEntry',
                                            4,
                                            repeated=True)
    hasImages = _messages.BooleanField(5)
    labels = _messages.MessageField('StepLabelsEntry', 6, repeated=True)
    name = _messages.StringField(7)
    outcome = _messages.MessageField('Outcome', 8)
    runDuration = _messages.MessageField('Duration', 9)
    state = _messages.EnumField('StateValueValuesEnum', 10)
    stepId = _messages.StringField(11)
    testExecutionStep = _messages.MessageField('TestExecutionStep', 12)
    toolExecutionStep = _messages.MessageField('ToolExecutionStep', 13)
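
The Step docstring above spells out how a step's dimension_value must line up with its execution's dimension_definition. A small, purely illustrative check of that rule (the real service responds with PRECONDITION_FAILED) might look like the following; the function and its signature are assumptions for this sketch:

def validate_dimension_value(dimension_definition, dimension_value):
    """Returns None if valid, otherwise an error string standing in for
    PRECONDITION_FAILED.

    dimension_definition: list of dimension names on the Execution,
        e.g. ['attempt', 'device'].
    dimension_value: dict mapping dimension name -> value on the Step, or
        None if the step does not participate in any dimension.
    """
    if dimension_value is None:
        return None  # The step may leave dimension_value unset entirely.
    defined = set(dimension_definition)
    provided = set(dimension_value)
    if defined - provided:
        return 'PRECONDITION_FAILED: missing dimensions %s' % sorted(defined - provided)
    if provided - defined:
        return 'PRECONDITION_FAILED: unknown dimensions %s' % sorted(provided - defined)
    return None

# Per the docstring, a step that skips one dimension still lists it, but with
# an empty value:
#   validate_dimension_value(['attempt', 'device'],
#                            {'attempt': '', 'device': 'Nexus 6'})  # -> None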
Example #18
class NewTaskRequest(messages.Message):
    """Description of a new task request as described by the client.

  This message is used to create a new task.
  """
    # Maximum number of seconds the task may stay PENDING. Must be specified with
    # properties. Cannot be used at the same time as task_slices.
    expiration_secs = messages.IntegerField(1)
    # Task name for display purpose.
    name = messages.StringField(2)
    # Parent Swarming task ID of the process requesting this task. This tells
    # the server about reentrancy: when a task creates child Swarming tasks, the
    # tree of tasks can be presented in the UI; the parent task will list all
    # the child tasks that were triggered.
    parent_task_id = messages.StringField(3)
    # Task priority, the lower the more important.
    priority = messages.IntegerField(4)
    # Task properties, which defines what to run.
    properties = messages.MessageField(TaskProperties, 5)
    # List of TaskSlice messages, along with their scheduling parameters. Cannot
    # be used at the same time as properties and expiration_secs.
    #
    # This defines all the various possible task execution for a task request to
    # be run on the Swarming infrastructure. They are processed in order, and it
    # is guaranteed that at most one of these will be processed.
    task_slices = messages.MessageField(TaskSlice, 12, repeated=True)
    # Tags are 'key:value' strings that describe what the task is about. This can
    # later be leveraged to search for kinds of tasks per tag.
    tags = messages.StringField(6, repeated=True)
    # User on which behalf this task is run, if relevant. Not validated.
    user = messages.StringField(7)

    # Defines what OAuth2 credentials the task uses when calling other services.
    #
    # Possible values are:
    #   - 'none': do not use task service accounts at all, this is default.
    #   - 'bot': use bot's own account, works only if bots authenticate with
    #       OAuth2.
    #   - 'email': use this account (if token server's service_accounts.cfg rules
    #       allow it). Not implemented yet.
    #
    # Note that the service account name is specified outside of task properties,
    # and thus it is possible to have two tasks with different service accounts,
    # but identical properties hash (so one can be deduped). If this is unsuitable
    # use 'idempotent=False' or include a service account name in properties
    # separately.
    #
    # TODO(vadimsh): Link to a doc that describes Swarming Service Accounts, when
    # it exists.
    service_account = messages.StringField(8)

    # Full topic name to post task state updates to, e.g.
    # "projects/<id>/topics/<id>".
    pubsub_topic = messages.StringField(9)
    # Secret string to put into "auth_token" attribute of PubSub message.
    pubsub_auth_token = messages.StringField(10)
    # Will be put into the "userdata" field of the PubSub message.
    pubsub_userdata = messages.StringField(11)

    # Only evaluate the task, as if we were going to schedule it, but don't
    # actually schedule it. This will return the TaskRequest, but without
    # a task_id.
    evaluate_only = messages.BooleanField(13)
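
The comments above state an either/or rule: a request carries properties plus expiration_secs, or task_slices, but not both. A hedged sketch of client-side validation for that rule (illustrative only, not Swarming's actual server check; `request` is assumed to be a NewTaskRequest instance):

def validate_scheduling_fields(request):
    """Raises ValueError if the scheduling fields are combined incorrectly."""
    uses_single = bool(request.properties or request.expiration_secs)
    uses_slices = bool(request.task_slices)
    if uses_single and uses_slices:
        raise ValueError(
            'properties/expiration_secs cannot be combined with task_slices')
    if not (uses_single or uses_slices):
        raise ValueError('either properties or task_slices is required')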
Example #19
class Digest(messages.Message):
  """ProtoRPC message containing digest information."""
  digest = messages.StringField(1)
  is_isolated = messages.BooleanField(2, default=False)
  size = messages.IntegerField(3)
Example #20
class TaskCancelRequest(messages.Message):
    """Request to cancel one task."""
    kill_running = messages.BooleanField(1)
Example #21
class ClusterHostApi(remote.Service):
  """A class for cluster host API service."""

  HOST_LIST_RESOURCE = endpoints.ResourceContainer(
      message_types.VoidMessage,
      lab_name=messages.StringField(1),
      include_hidden=messages.BooleanField(2, default=False),
      include_devices=messages.BooleanField(3, default=False),
      assignee=messages.StringField(4),
      is_bad=messages.BooleanField(5),
      hostnames=messages.StringField(6, repeated=True),
      host_groups=messages.StringField(7, repeated=True),
      test_harnesses=messages.StringField(8, repeated=True),
      test_harness_versions=messages.StringField(9, repeated=True),
      pools=messages.StringField(10, repeated=True),
      host_states=messages.EnumField(api_messages.HostState, 11, repeated=True),
      flated_extra_info=messages.StringField(12),
      cursor=messages.StringField(13),
      count=messages.IntegerField(
          14, variant=messages.Variant.INT32, default=_DEFAULT_LIST_HOST_COUNT),
      timestamp_operator=messages.EnumField(common.Operator, 15),
      timestamp=message_types.DateTimeField(16),
      recovery_states=messages.StringField(17, repeated=True),
      # TODO: Please use test_harnesses, this field is deprecated.
      test_harness=messages.StringField(18, repeated=True),
      host_update_states=messages.EnumField(
          api_messages.HostUpdateState, 19, repeated=True))

  @endpoints.method(
      HOST_LIST_RESOURCE,
      api_messages.HostInfoCollection,
      path="/hosts",
      http_method="GET",
      name="list")
  @api_common.with_ndb_context
  def ListHosts(self, request):
    """Fetches a list of hosts.

    Args:
      request: an API request.

    Returns:
      a HostInfoCollection object.
    """
    if ((request.timestamp and not request.timestamp_operator) or
        (not request.timestamp and request.timestamp_operator)):
      raise endpoints.BadRequestException(
          '"timestamp" and "timestamp_operator" must be set at the same time.')
    query = datastore_entities.HostInfo.query()
    if request.lab_name:
      query = query.filter(
          datastore_entities.HostInfo.lab_name == request.lab_name)

    if request.assignee:
      query = query.filter(
          datastore_entities.HostInfo.assignee == request.assignee)

    if request.is_bad is not None:
      query = query.filter(datastore_entities.HostInfo.is_bad == request.is_bad)

    if not request.include_hidden:
      query = query.filter(datastore_entities.HostInfo.hidden == False)

    if request.flated_extra_info:
      query = query.filter(datastore_entities.HostInfo.flated_extra_info ==
                           request.flated_extra_info)

    if len(request.host_groups) == 1:
      query = query.filter(
          datastore_entities.HostInfo.host_group == request.host_groups[0])
    if len(request.hostnames) == 1:
      query = query.filter(
          datastore_entities.HostInfo.hostname == request.hostnames[0])
    test_harnesses = request.test_harness + request.test_harnesses
    if len(test_harnesses) == 1:
      query = query.filter(
          datastore_entities.HostInfo.test_harness == test_harnesses[0])
    if len(request.test_harness_versions) == 1:
      query = query.filter(
          datastore_entities.HostInfo.test_harness_version ==
          request.test_harness_versions[0])
    if len(request.pools) == 1:
      query = query.filter(
          datastore_entities.HostInfo.pools == request.pools[0])
    if len(request.host_states) == 1:
      query = query.filter(
          datastore_entities.HostInfo.host_state == request.host_states[0])
    if len(request.recovery_states) == 1:
      query = query.filter(
          datastore_entities.HostInfo.recovery_state
          == request.recovery_states[0])

    hostnames_with_requested_update_states = set()
    if request.host_update_states:
      update_state_query = datastore_entities.HostUpdateState.query().filter(
          datastore_entities.HostUpdateState.state.IN(
              request.host_update_states))
      hostnames_with_requested_update_states = set(
          update_state.hostname for update_state in update_state_query.fetch(
              projection=[datastore_entities.HostUpdateState.hostname]))

    def _PostFilter(host):
      if request.host_groups and host.host_group not in request.host_groups:
        return
      if request.hostnames and host.hostname not in request.hostnames:
        return
      if (test_harnesses and
          host.test_harness not in test_harnesses):
        return
      if (request.test_harness_versions and
          host.test_harness_version not in request.test_harness_versions):
        return
      if request.pools and not set(host.pools).intersection(set(request.pools)):
        return
      if request.host_states and host.host_state not in request.host_states:
        return
      if (request.recovery_states and
          host.recovery_state not in request.recovery_states):
        return
      if request.timestamp:
        if not host.timestamp:
          return
        return _CheckTimestamp(
            host.timestamp, request.timestamp_operator, request.timestamp)
      if request.host_update_states:
        if host.hostname not in hostnames_with_requested_update_states:
          return
      return True

    if request.timestamp:
      query = query.order(
          datastore_entities.HostInfo.timestamp,
          datastore_entities.HostInfo.key)
    else:
      query = query.order(datastore_entities.HostInfo.key)

    hosts, prev_cursor, next_cursor = datastore_util.FetchPage(
        query, request.count, request.cursor, result_filter=_PostFilter)

    host_update_state_keys = [
        ndb.Key(datastore_entities.HostUpdateState, host.hostname)
        for host in hosts]
    host_update_states = ndb.get_multi(host_update_state_keys)
    host_infos = []
    for host, host_update_state in zip(hosts, host_update_states):
      devices = []
      if request.include_devices:
        device_query = datastore_entities.DeviceInfo.query(ancestor=host.key)
        if not request.include_hidden:
          device_query = device_query.filter(
              datastore_entities.DeviceInfo.hidden == False)
        devices = device_query.fetch()
      host_infos.append(datastore_entities.ToMessage(
          host, devices=devices,
          host_update_state_entity=host_update_state))
    return api_messages.HostInfoCollection(
        host_infos=host_infos,
        more=bool(next_cursor),
        next_cursor=next_cursor,
        prev_cursor=prev_cursor)

  HOST_GET_RESOURCE = endpoints.ResourceContainer(
      message_types.VoidMessage,
      hostname=messages.StringField(1, required=True),
      include_notes=messages.BooleanField(2, default=False),
      include_hidden=messages.BooleanField(3, default=False),
      include_host_state_history=messages.BooleanField(4, default=False),
      host_state_history_limit=messages.IntegerField(
          5, default=device_manager.DEFAULT_HOST_HISTORY_SIZE),
  )

  @endpoints.method(
      HOST_GET_RESOURCE,
      api_messages.HostInfo,
      path="{hostname}",
      http_method="GET",
      name="get")
  @api_common.with_ndb_context
  def GetHost(self, request):
    """Fetches the information and notes of a given hostname.

    Args:
      request: an API request.

    Returns:
      a HostInfo object.
    Raises:
      endpoints.NotFoundException: If the given host does not exist.
      endpoints.BadRequestException: If the request asks for host state
        history with a negative limit.
    """
    hostname = request.hostname
    host = device_manager.GetHost(hostname)
    if not host:
      raise endpoints.NotFoundException("Host %s does not exist." % hostname)

    device_query = datastore_entities.DeviceInfo.query(ancestor=host.key)
    if not request.include_hidden:
      device_query = device_query.filter(
          datastore_entities.DeviceInfo.hidden == False)
    devices = device_query.fetch()

    host_update_state = ndb.Key(
        datastore_entities.HostUpdateState, hostname).get()

    host_info = datastore_entities.ToMessage(
        host, devices=devices, host_update_state_entity=host_update_state)
    # TODO: deprecate "include_notes".
    if request.include_notes:
      host_notes = (
          datastore_entities.Note.query().filter(
              datastore_entities.Note.type == common.NoteType.HOST_NOTE).filter(
                  datastore_entities.Note.hostname == hostname).order(
                      -datastore_entities.Note.timestamp))
      host_info.notes = [
          datastore_entities.ToMessage(note) for note in host_notes
      ]
    if request.include_host_state_history:
      history_states = None
      limit = request.host_state_history_limit
      try:
        history_states = device_manager.GetHostStateHistory(
            hostname, limit=limit)
      except ValueError as err:
        raise endpoints.BadRequestException(err)

      host_state_history = [
          datastore_entities.ToMessage(state) for state in history_states
      ]
      host_info.state_history = host_state_history
    return host_info

  # TODO: deprecate "NewNote" endpoint.
  NEW_NOTE_RESOURCE = endpoints.ResourceContainer(
      hostname=messages.StringField(1, required=True),
      user=messages.StringField(2, required=True),
      message=messages.StringField(3),
      offline_reason=messages.StringField(4),
      recovery_action=messages.StringField(5),
      offline_reason_id=messages.IntegerField(6),
      recovery_action_id=messages.IntegerField(7),
      lab_name=messages.StringField(8),
      timestamp=message_types.DateTimeField(9, required=True),
  )

  @endpoints.method(
      NEW_NOTE_RESOURCE,
      api_messages.Note,
      path="{hostname}/note",
      http_method="POST",
      name="newNote")
  @api_common.with_ndb_context
  def NewNote(self, request):
    """Submits a note for this host.

    Args:
      request: an API request.

    Returns:
      an api_messages.Note.
    """
    timestamp = request.timestamp
    # Datastore only accepts UTC times. Doing a conversion if necessary.
    if timestamp.utcoffset() is not None:
      timestamp = timestamp.replace(tzinfo=None) - timestamp.utcoffset()
    note = datastore_entities.Note(
        type=common.NoteType.HOST_NOTE,
        hostname=request.hostname,
        user=request.user,
        timestamp=timestamp,
        message=request.message,
        offline_reason=request.offline_reason,
        recovery_action=request.recovery_action)
    note.put()
    return datastore_entities.ToMessage(note)

  NOTE_ADD_OR_UPDATE_RESOURCE = endpoints.ResourceContainer(
      hostname=messages.StringField(1, required=True),
      id=messages.IntegerField(2),
      user=messages.StringField(3, required=True),
      message=messages.StringField(4),
      offline_reason=messages.StringField(5),
      recovery_action=messages.StringField(6),
      offline_reason_id=messages.IntegerField(7),
      recovery_action_id=messages.IntegerField(8),
      lab_name=messages.StringField(9),
      event_time=message_types.DateTimeField(10),
  )

  @endpoints.method(
      NOTE_ADD_OR_UPDATE_RESOURCE,
      api_messages.Note,
      path="{hostname}/notes",
      http_method="POST",
      name="addOrUpdateNote")
  @api_common.with_ndb_context
  def AddOrUpdateNote(self, request):
    """Add or update a host note.

    Args:
      request: an API request.

    Returns:
      an api_messages.Note.
    """
    time_now = datetime.datetime.utcnow()

    host_note_entity = datastore_util.GetOrCreateEntity(
        datastore_entities.Note,
        entity_id=request.id,
        hostname=request.hostname,
        type=common.NoteType.HOST_NOTE)
    host_note_entity.populate(
        user=request.user,
        message=request.message,
        timestamp=time_now,
        event_time=request.event_time)
    entities_to_update = [host_note_entity]

    try:
      offline_reason_entity = note_manager.PreparePredefinedMessageForNote(
          common.PredefinedMessageType.HOST_OFFLINE_REASON,
          message_id=request.offline_reason_id,
          lab_name=request.lab_name,
          content=request.offline_reason)
    except note_manager.InvalidParameterError as err:
      raise endpoints.BadRequestException("Invalid offline reason: [%s]" % err)
    if offline_reason_entity:
      host_note_entity.offline_reason = offline_reason_entity.content
      entities_to_update.append(offline_reason_entity)

    try:
      recovery_action_entity = note_manager.PreparePredefinedMessageForNote(
          common.PredefinedMessageType.HOST_RECOVERY_ACTION,
          message_id=request.recovery_action_id,
          lab_name=request.lab_name,
          content=request.recovery_action)
    except note_manager.InvalidParameterError as err:
      raise endpoints.BadRequestException("Invalid recovery action: [%s]" % err)
    if recovery_action_entity:
      host_note_entity.recovery_action = recovery_action_entity.content
      entities_to_update.append(recovery_action_entity)

    keys = ndb.put_multi(entities_to_update)
    host_note_msg = datastore_entities.ToMessage(host_note_entity)

    host_note_event_msg = api_messages.NoteEvent(
        note=host_note_msg, lab_name=request.lab_name)
    note_manager.PublishMessage(host_note_event_msg,
                                common.PublishEventType.HOST_NOTE_EVENT)

    note_key = keys[0]
    if request.id != note_key.id():
      # If the ids differ, a new note was created, so we should create
      # a history snapshot.
      device_manager.CreateAndSaveHostInfoHistoryFromHostNote(
          request.hostname, note_key.id())

    return host_note_msg

  @endpoints.method(
      api_messages.BatchUpdateNotesWithPredefinedMessageRequest,
      api_messages.NoteCollection,
      path="notes:batchUpdateNotesWithPredefinedMessage",
      http_method="POST",
      name="batchUpdateNotesWithPredefinedMessage")
  @api_common.with_ndb_context
  def BatchUpdateNotesWithPredefinedMessage(self, request):
    """Batch update notes with the same predefined message.

    Args:
      request: an API request.

    Returns:
      an api_messages.NoteCollection object.
    """
    time_now = datetime.datetime.utcnow()

    host_note_entities = []
    for note in request.notes:
      note_id = int(note.id) if note.id is not None else None
      host_note_entity = datastore_util.GetOrCreateEntity(
          datastore_entities.Note,
          entity_id=note_id,
          hostname=note.hostname,
          type=common.NoteType.HOST_NOTE)
      host_note_entity.populate(
          user=request.user,
          message=request.message,
          timestamp=time_now,
          event_time=request.event_time)
      host_note_entities.append(host_note_entity)

    try:
      offline_reason_entity = note_manager.PreparePredefinedMessageForNote(
          common.PredefinedMessageType.HOST_OFFLINE_REASON,
          message_id=request.offline_reason_id,
          lab_name=request.lab_name,
          content=request.offline_reason,
          delta_count=len(host_note_entities))
    except note_manager.InvalidParameterError as err:
      raise endpoints.BadRequestException("Invalid offline reason: [%s]" % err)
    if offline_reason_entity:
      for host_note_entity in host_note_entities:
        host_note_entity.offline_reason = offline_reason_entity.content
      offline_reason_entity.put()

    try:
      recovery_action_entity = note_manager.PreparePredefinedMessageForNote(
          common.PredefinedMessageType.HOST_RECOVERY_ACTION,
          message_id=request.recovery_action_id,
          lab_name=request.lab_name,
          content=request.recovery_action,
          delta_count=len(host_note_entities))
    except note_manager.InvalidParameterError as err:
      raise endpoints.BadRequestException("Invalid recovery action: [%s]" % err)
    if recovery_action_entity:
      for host_note_entity in host_note_entities:
        host_note_entity.recovery_action = recovery_action_entity.content
      recovery_action_entity.put()

    note_keys = ndb.put_multi(host_note_entities)
    host_note_entities = ndb.get_multi(note_keys)
    note_msgs = []
    for host_note_entity in host_note_entities:
      host_note_msg = datastore_entities.ToMessage(host_note_entity)
      note_msgs.append(host_note_msg)

      host_note_event_msg = api_messages.NoteEvent(
          note=host_note_msg,
          lab_name=request.lab_name)
      note_manager.PublishMessage(
          host_note_event_msg, common.PublishEventType.HOST_NOTE_EVENT)

    for request_note, updated_note_key in zip(request.notes, note_keys):
      if not request_note.id:
        # If the id is not provided, a new note was created, so we should
        # create a history snapshot.
        device_manager.CreateAndSaveHostInfoHistoryFromHostNote(
            request_note.hostname, updated_note_key.id())

    return api_messages.NoteCollection(
        notes=note_msgs, more=False, next_cursor=None, prev_cursor=None)

  NOTES_BATCH_GET_RESOURCE = endpoints.ResourceContainer(
      hostname=messages.StringField(1, required=True),
      ids=messages.IntegerField(2, repeated=True),
  )

  @endpoints.method(
      NOTES_BATCH_GET_RESOURCE,
      api_messages.NoteCollection,
      path="{hostname}/notes:batchGet",
      http_method="GET",
      name="batchGetNotes")
  @api_common.with_ndb_context
  def BatchGetNotes(self, request):
    """Batch get notes of a host.

    Args:
      request: an API request.
    Request Params:
      hostname: string, the name of a lab host.
      ids: a list of integers, the ids of notes to batch get.

    Returns:
      an api_messages.NoteCollection object.
    """
    keys = [
        ndb.Key(datastore_entities.Note, entity_id)
        for entity_id in request.ids
    ]
    note_entities = ndb.get_multi(keys)
    note_msgs = [
        datastore_entities.ToMessage(entity)
        for entity in note_entities
        if entity and entity.hostname == request.hostname
    ]
    return api_messages.NoteCollection(
        notes=note_msgs, more=False, next_cursor=None, prev_cursor=None)

  NOTES_LIST_RESOURCE = endpoints.ResourceContainer(
      hostname=messages.StringField(1, required=True),
      count=messages.IntegerField(2, default=_DEFAULT_LIST_NOTES_COUNT),
      cursor=messages.StringField(3),
      backwards=messages.BooleanField(4, default=False),
      include_device_notes=messages.BooleanField(5, default=False),
  )

  NOTES_DELETE_RESOURCE = endpoints.ResourceContainer(
      hostname=messages.StringField(1, required=True),
      ids=messages.IntegerField(2, repeated=True),
  )

  @endpoints.method(
      NOTES_DELETE_RESOURCE,
      message_types.VoidMessage,
      path="{hostname}/notes",
      http_method="DELETE",
      name="batchDeleteNotes")
  @api_common.with_ndb_context
  def BatchDeleteNotes(self, request):
    """Delete notes of a host.

    Args:
      request: an API request.
    Request Params:
      hostname: string, the name of a lab host.
      ids: a list of integers, the ids of notes to delete.

    Returns:
      a message_types.VoidMessage object.

    Raises:
      endpoints.BadRequestException, when request does not match existing notes.
    """
    keys = [
        ndb.Key(datastore_entities.Note, entity_id)
        for entity_id in request.ids
    ]
    note_entities = ndb.get_multi(keys)
    for key, note_entity in zip(keys, note_entities):
      if not note_entity or note_entity.hostname != request.hostname:
        raise endpoints.BadRequestException(
            "Note<id:{0}> does not exist under host<{1}>.".format(
                key.id(), request.hostname))
    for key in keys:
      key.delete()
    return message_types.VoidMessage()

  @endpoints.method(
      NOTES_LIST_RESOURCE,
      api_messages.NoteCollection,
      path="{hostname}/notes",
      http_method="GET",
      name="listNotes")
  @api_common.with_ndb_context
  def ListNotes(self, request):
    """List notes of a host.

    Args:
      request: an API request.

    Returns:
      an api_messages.NoteCollection object.
    """
    query = (
        datastore_entities.Note.query()
        .filter(datastore_entities.Note.hostname == request.hostname)
        .order(-datastore_entities.Note.timestamp))
    if not request.include_device_notes:
      query = query.filter(
          datastore_entities.Note.type == common.NoteType.HOST_NOTE)

    note_entities, prev_cursor, next_cursor = datastore_util.FetchPage(
        query, request.count, request.cursor, backwards=request.backwards)
    note_msgs = [
        datastore_entities.ToMessage(entity) for entity in note_entities
    ]
    return api_messages.NoteCollection(
        notes=note_msgs,
        more=bool(next_cursor),
        next_cursor=next_cursor,
        prev_cursor=prev_cursor)

  ASSIGN_HOSTS_RESOURCE = endpoints.ResourceContainer(
      hostnames=messages.StringField(1, repeated=True),
      assignee=messages.StringField(2, required=True))

  @endpoints.method(
      ASSIGN_HOSTS_RESOURCE,
      message_types.VoidMessage,
      path="assign",
      http_method="POST",
      name="assign")
  @api_common.with_ndb_context
  def Assign(self, request):
    """Mark the hosts as recover.

    TODO: deprecated, use set_recovery_state

    Args:
      request: request with a list of hostnames and an assignee.

    Returns:
      message_types.VoidMessage
    """
    device_manager.AssignHosts(request.hostnames, request.assignee)
    return message_types.VoidMessage()

  UNASSIGN_HOSTS_RESOURCE = endpoints.ResourceContainer(
      hostnames=messages.StringField(1, repeated=True))

  @endpoints.method(
      UNASSIGN_HOSTS_RESOURCE,
      message_types.VoidMessage,
      path="unassign",
      http_method="POST",
      name="unassign")
  @api_common.with_ndb_context
  def Unassign(self, request):
    """Mark the hosts as recover.

    TODO: deprecated, use set_recovery_state

    Args:
      request: request with a list of hostnames.

    Returns:
      message_types.VoidMessage
    """
    device_manager.AssignHosts(request.hostnames, None)
    return message_types.VoidMessage()

  @endpoints.method(
      api_messages.HostRecoveryStateRequests,
      message_types.VoidMessage,
      path="batchSetRecoveryState",
      http_method="POST",
      name="batchSetRecoveryState")
  @api_common.with_ndb_context
  def BatchSetRecoveryState(self, request):
    """Batch set recovery state for hosts.

    Args:
      request: a HostRecoveryStateRequests.
    Returns:
      message_types.VoidMessage
    """
    device_manager.SetHostsRecoveryState(request.host_recovery_state_requests)
    return message_types.VoidMessage()

  HOSTNAME_RESOURCE = endpoints.ResourceContainer(
      hostname=messages.StringField(1, required=True),)

  @endpoints.method(
      HOSTNAME_RESOURCE,
      api_messages.HostInfo,
      path="{hostname}/remove",
      http_method="POST",
      name="remove")
  @api_common.with_ndb_context
  def Remove(self, request):
    """Remove this host.

    Args:
      request: an API request.

    Returns:
      an updated HostInfo
    Raises:
      endpoints.NotFoundException: If the given host does not exist.
    """
    host = device_manager.HideHost(request.hostname)
    if not host:
      raise endpoints.NotFoundException("Host %s does not exist." %
                                        request.hostname)
    return datastore_entities.ToMessage(host)

  @endpoints.method(
      HOSTNAME_RESOURCE,
      api_messages.HostInfo,
      path="{hostname}/restore",
      http_method="POST",
      name="restore")
  @api_common.with_ndb_context
  def Restore(self, request):
    """Restore this host.

    Args:
      request: an API request.

    Returns:
      an updated HostInfo
    Raises:
      endpoints.NotFoundException: If the given host does not exist.
    """
    host = device_manager.RestoreHost(request.hostname)
    if not host:
      raise endpoints.NotFoundException("Host %s does not exist." %
                                        request.hostname)
    return datastore_entities.ToMessage(host)

  HISTORIES_LIST_RESOURCE = endpoints.ResourceContainer(
      hostname=messages.StringField(1, required=True),
      count=messages.IntegerField(2, default=_DEFAULT_LIST_HISTORIES_COUNT),
      cursor=messages.StringField(3),
      backwards=messages.BooleanField(4, default=False),
  )

  @endpoints.method(
      HISTORIES_LIST_RESOURCE,
      api_messages.HostInfoHistoryCollection,
      path="{hostname}/histories",
      http_method="GET",
      name="listHistories")
  @api_common.with_ndb_context
  def ListHistories(self, request):
    """List histories of a host.

    Args:
      request: an API request.

    Returns:
      an api_messages.HostInfoHistoryCollection object.
    """
    query = (
        datastore_entities.HostInfoHistory.query(
            ancestor=ndb.Key(datastore_entities.HostInfo, request.hostname))
        .order(-datastore_entities.HostInfoHistory.timestamp))
    histories, prev_cursor, next_cursor = datastore_util.FetchPage(
        query, request.count, request.cursor, backwards=request.backwards)
    history_msgs = [
        datastore_entities.ToMessage(entity) for entity in histories
    ]
    return api_messages.HostInfoHistoryCollection(
        histories=history_msgs,
        next_cursor=next_cursor,
        prev_cursor=prev_cursor)

  CONFIGS_LIST_RESOURCE = endpoints.ResourceContainer(
      lab_name=messages.StringField(1),
      count=messages.IntegerField(2, default=_DEFAULT_LIST_CONFIGS_COUNT),
      cursor=messages.StringField(3),
  )

  @endpoints.method(
      CONFIGS_LIST_RESOURCE,
      api_messages.HostConfigCollection,
      path="configs",
      http_method="GET",
      name="listHostConfigs")
  @api_common.with_ndb_context
  def ListHostConfigs(self, request):
    """List host configs.

    Args:
      request: an API request.

    Returns:
      an api_messages.HostConfigCollection object.
    """
    query = datastore_entities.HostConfig.query()
    if request.lab_name:
      query = query.filter(
          datastore_entities.HostConfig.lab_name == request.lab_name)
    host_configs, _, next_cursor = datastore_util.FetchPage(
        query, request.count, request.cursor)

    host_config_msgs = [datastore_entities.ToMessage(host_config)
                        for host_config in host_configs]

    return api_messages.HostConfigCollection(
        host_configs=host_config_msgs, next_cursor=next_cursor)

  METADATA_GET_RESOURCE = endpoints.ResourceContainer(
      hostname=messages.StringField(1, required=True),
  )

  @endpoints.method(
      METADATA_GET_RESOURCE,
      api_messages.HostMetadata,
      path="{hostname}/metadata",
      http_method="GET",
      name="getMetadata")
  @api_common.with_ndb_context
  def GetMetadata(self, request):
    """Get a host metadata.

    Args:
      request: an API request.

    Returns:
      an api_messages.HostMetadata object.
    """
    metadata = datastore_entities.HostMetadata.get_by_id(request.hostname)
    if not metadata:
      metadata = datastore_entities.HostMetadata(hostname=request.hostname)
    metadata_msg = datastore_entities.ToMessage(metadata)

    return metadata_msg

  METADATA_PATCH_RESOURCE = endpoints.ResourceContainer(
      hostname=messages.StringField(1, required=True),
      test_harness_image=messages.StringField(2),
  )

  @endpoints.method(
      METADATA_PATCH_RESOURCE,
      api_messages.HostMetadata,
      path="{hostname}/metadata",
      http_method="PATCH",
      name="patchMetadata")
  @api_common.with_ndb_context
  def PatchMetadata(self, request):
    """Patch a host metadata.

    Args:
      request: an API request.

    Returns:
      an api_messages.HostMetadata object.
    """
    metadata = datastore_entities.HostMetadata.get_by_id(request.hostname)
    if not metadata:
      metadata = datastore_entities.HostMetadata(
          id=request.hostname,
          hostname=request.hostname)
    if request.test_harness_image:
      metadata.populate(test_harness_image=request.test_harness_image)
    metadata.put()
    metadata_msg = datastore_entities.ToMessage(metadata)

    return metadata_msg

  BATCH_SET_TEST_HARNESS_IMAGES_RESOURCE = endpoints.ResourceContainer(
      hostnames=messages.StringField(1, repeated=True),
      test_harness_image=messages.StringField(2),
      user=messages.StringField(3),
  )

  @endpoints.method(
      BATCH_SET_TEST_HARNESS_IMAGES_RESOURCE,
      message_types.VoidMessage,
      path="hostMetadata:batchUpdate",
      http_method="POST",
      name="batchUpdateHostMetadata")
  @api_common.with_ndb_context
  def BatchUpdateHostMetadata(self, request):
    """Update HostMetadata on multiple hosts.

    Args:
      request: an API request.
    Request Params:
      hostnames: a list of strings, the names of the hosts.
      test_harness_image: string, the URL of the test harness image.
      user: string, the user sending the request.

    Returns:
      a message_types.VoidMessage object.

    Raises:
      endpoints.BadRequestException, when request does not match existing hosts.
    """
    host_configs = ndb.get_multi(
        ndb.Key(datastore_entities.HostConfig, hostname)
        for hostname in request.hostnames)
    host_metadatas = ndb.get_multi(
        ndb.Key(datastore_entities.HostMetadata, hostname)
        for hostname in request.hostnames)
    hosts_no_permission = []
    hosts_not_enabled = []
    metadatas_to_update = []
    for hostname, config, metadata in zip(
        request.hostnames, host_configs, host_metadatas):
      if not config or not config.enable_ui_update:
        hosts_not_enabled.append(hostname)
        continue
      if request.user not in config.owners:
        hosts_no_permission.append(hostname)
        continue
      if not metadata:
        metadata = datastore_entities.HostMetadata(
            id=hostname, hostname=hostname)
      if not harness_image_metadata_syncer.AreHarnessImagesEqual(
          metadata.test_harness_image, request.test_harness_image):
        event = host_event.HostEvent(
            time=datetime.datetime.utcnow(),
            type=_HOST_UPDATE_STATE_CHANGED_EVENT_NAME,
            hostname=hostname,
            host_update_state=_HOST_UPDATE_STATE_PENDING,
            data={"host_update_target_image": request.test_harness_image})
        device_manager.HandleDeviceSnapshotWithNDB(event)
      metadata.populate(test_harness_image=request.test_harness_image)
      metadatas_to_update.append(metadata)
    ndb.put_multi(metadatas_to_update)

    if not hosts_no_permission and not hosts_not_enabled:
      return message_types.VoidMessage()

    error_message = ""
    if hosts_no_permission:
      error_message += (
          "Request user %s is not in the owner list of hosts [%s]. "
          % (request.user, ", ".join(hosts_no_permission)))
    if hosts_not_enabled:
      error_message += ("Hosts [%s] are not enabled to be updated from UI. "
                        % ", ".join(hosts_not_enabled))
    raise endpoints.BadRequestException(error_message)
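The timestamp handling in NewNote above follows a general pattern: Datastore only accepts naive UTC datetimes, so a timezone-aware value has its offset subtracted and its tzinfo dropped. A standalone sketch of that conversion:

import datetime

def to_naive_utc(ts):
  """Converts a possibly timezone-aware datetime to a naive UTC datetime.

  Mirrors the conversion in NewNote: subtract the UTC offset, drop tzinfo.
  """
  if ts.utcoffset() is not None:
    ts = ts.replace(tzinfo=None) - ts.utcoffset()
  return ts

# 12:00 at UTC+02:00 becomes 10:00 naive UTC.
aware = datetime.datetime(
    2023, 1, 1, 12, 0,
    tzinfo=datetime.timezone(datetime.timedelta(hours=2)))
assert to_naive_utc(aware) == datetime.datetime(2023, 1, 1, 10, 0)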
Example #22
0
class TasksCancelRequest(messages.Message):
    """Request to cancel some subset of pending/running tasks."""
    tags = messages.StringField(1, repeated=True)
    cursor = messages.StringField(2)
    limit = messages.IntegerField(3, default=100)
    kill_running = messages.BooleanField(4)
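The cursor and limit fields imply cursor-based paging: a caller repeats the cancel request, feeding back the cursor returned by the service until it runs out. A hedged client-side sketch, where cancel_tasks stands in for whatever RPC stub actually sends the request and returns a response carrying a cursor field:

def cancel_matching_tasks(cancel_tasks, tags, kill_running=False):
  """Cancels matching tasks page by page via a hypothetical RPC stub."""
  cursor = None
  while True:
    response = cancel_tasks(TasksCancelRequest(
        tags=tags, cursor=cursor, limit=100, kill_running=kill_running))
    cursor = response.cursor  # assumed field on the (unshown) response message
    if not cursor:            # no more matching tasks to cancel
      break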
Example #23
0
class ProjectUserListResponse(DjangoProtoRPCMessage):
    """ProtoRPC message definition to represent a list of stored users."""
    items = messages.MessageField(
        ProjectCollaboratorsResponseMessage, 1, repeated=True)
    is_list = messages.BooleanField(2)
Example #24
0
class CancelResponse(messages.Message):
    """Result of a request to cancel a task."""
    ok = messages.BooleanField(1)
    was_running = messages.BooleanField(2)
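CancelResponse pairs naturally with the cancel requests above: ok reports whether the cancellation was accepted and was_running whether the task had already started. A small interpretation helper, offered as a sketch only:

def describe_cancellation(resp):
  """Summarizes a CancelResponse instance in a human-readable string."""
  if not resp.ok:
    return 'cancellation was not accepted'
  if resp.was_running:
    return 'task was already running when it was cancelled'
  return 'task was still pending and has been cancelled'

print(describe_cancellation(CancelResponse(ok=True, was_running=False)))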
Example #25
0
class ClusterApi(remote.Service):
  """A class for cluster API service."""

  CLUSTER_LIST_RESOURCE = endpoints.ResourceContainer(
      message_types.VoidMessage,
      include_hosts=messages.BooleanField(1, default=False))

  @endpoints.method(CLUSTER_LIST_RESOURCE, ClusterInfoCollection,
                    path="/clusters", http_method="GET", name="list")
  @api_common.with_ndb_context
  def ListClusters(self, request):
    """Fetches a list of clusters that are available.

    Args:
      request: an API request.
    Returns:
      a ClusterInfoCollection object.
    """
    cluster_infos = []
    clusters = datastore_entities.ClusterInfo.query().fetch()
    for cluster in clusters:
      host_msgs = []
      # TODO: deprecate option include_hosts.
      if request.include_hosts:
        host_msgs = self._GetHostsForCluster(cluster.cluster)
      cluster_infos.append(self._BuildClusterInfo(cluster, host_msgs))
    return ClusterInfoCollection(cluster_infos=cluster_infos)

  CLUSTER_GET_RESOURCE = endpoints.ResourceContainer(
      message_types.VoidMessage,
      cluster_id=messages.StringField(1, variant=messages.Variant.STRING,
                                      required=True),
      include_hosts=messages.BooleanField(2, default=False),
      include_notes=messages.BooleanField(3, default=False),
  )

  @endpoints.method(
      CLUSTER_GET_RESOURCE,
      api_messages.ClusterInfo,
      path="{cluster_id}",
      http_method="GET", name="get")
  @api_common.with_ndb_context
  def GetCluster(self, request):
    """Fetches the information/status for a given cluster id.

    Args:
      request: an API request.
    Returns:
      a ClusterInfo message.
    Raises:
      endpoints.NotFoundException: If the given cluster does not exist.
    """
    cluster_id = request.cluster_id
    cluster = device_manager.GetCluster(cluster_id)
    if not cluster:
      raise endpoints.NotFoundException(
          "Cluster [%s] does not exist." % cluster_id)
    host_msgs = []
    if request.include_hosts:
      host_msgs = self._GetHostsForCluster(cluster_id)
    cluster_info = self._BuildClusterInfo(cluster, host_msgs)

    if request.include_notes:
      cluster_notes = datastore_entities.ClusterNote.query()
      cluster_notes = cluster_notes.filter(
          datastore_entities.ClusterNote.cluster == cluster_id)
      notes = [datastore_entities.ToMessage(n.note)
               for n in cluster_notes.iter()]
      cluster_info.notes = sorted(
          notes, key=lambda x: x.timestamp, reverse=True)
    return cluster_info

  def _GetHostsForCluster(self, cluster_id):
    """Get hosts and their devices for a cluster.

    Args:
      cluster_id: cluster id
    Returns:
      a list of HostInfoMessages include devices.
    """
    hosts = (datastore_entities.HostInfo.query()
             .filter(datastore_entities.HostInfo.clusters == cluster_id)
             .filter(datastore_entities.HostInfo.hidden == False)
             .fetch())
    host_msgs = []
    for host in hosts:
      devices = (datastore_entities.DeviceInfo.query(ancestor=host.key)
                 .filter(datastore_entities.DeviceInfo.hidden == False)
                 .fetch())
      host_msgs.append(datastore_entities.ToMessage(host, devices))
    return host_msgs

  CLUSTER_NOTE_RESOURCE = endpoints.ResourceContainer(
      cluster_id=messages.StringField(1, required=True),
      user=messages.StringField(2, required=True),
      message=messages.StringField(3),
      offline_reason=messages.StringField(4),
      recovery_action=messages.StringField(5),
      offline_reason_id=messages.IntegerField(6),
      recovery_action_id=messages.IntegerField(7),
      lab_name=messages.StringField(8),
      timestamp=message_types.DateTimeField(9, required=True),
  )

  @endpoints.method(CLUSTER_NOTE_RESOURCE, api_messages.Note,
                    path="{cluster_id}/note", http_method="POST",
                    name="newNote")
  @api_common.with_ndb_context
  def NewNote(self, request):
    """Submits a note for this host.

    Args:
      request: an API request.
    Returns:
      an api_messages.Note.
    """
    cluster = request.cluster_id
    timestamp = request.timestamp
    # Datastore only accepts UTC times. Doing a conversion if necessary.
    if timestamp.utcoffset() is not None:
      timestamp = timestamp.replace(tzinfo=None) - timestamp.utcoffset()
    note = datastore_entities.Note(user=request.user, timestamp=timestamp,
                                   message=request.message)
    cluster_note = datastore_entities.ClusterNote(cluster=cluster)
    cluster_note.note = note
    cluster_note.put()
    return datastore_entities.ToMessage(note)

  def _BuildClusterInfo(self, cluster, host_infos):
    """Helper to build a ClusterInfo object from host messages.

    Args:
      cluster: a cluster entity
      host_infos: a list of HostInfo messages.
    Returns:
      a ClusterInfo object.
    """
    run_targets = set()
    for host in host_infos:
      run_targets.update([d.run_target
                          for d in host.device_infos if d.run_target])
    run_target_messages = [api_messages.RunTarget(name=r) for r in run_targets]
    return api_messages.ClusterInfo(
        cluster_id=cluster.cluster,
        total_devices=cluster.total_devices,
        offline_devices=cluster.offline_devices,
        available_devices=cluster.available_devices,
        allocated_devices=cluster.allocated_devices,
        device_count_timestamp=cluster.device_count_timestamp,
        host_infos=host_infos,
        run_targets=run_target_messages,
        host_update_state_summary=datastore_entities.ToMessage(
            cluster.host_update_state_summary),
        host_count_by_harness_version=api_messages.MapToKeyValuePairMessages(
            cluster.host_count_by_harness_version),
        host_update_state_summaries_by_version=[
            datastore_entities.ToMessage(summary) for summary
            in cluster.host_update_state_summaries_by_version
        ])
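Both services above rely on endpoints.ResourceContainer to bind URL path parameters (such as {cluster_id} and {hostname}) and query parameters to message fields. A minimal, self-contained sketch of that pattern, unrelated to the services above:

import endpoints
from protorpc import message_types, messages, remote

ECHO_RESOURCE = endpoints.ResourceContainer(
    message_types.VoidMessage,
    # Bound from the "{name}" segment of the path; extra fields become
    # query-string parameters.
    name=messages.StringField(1, required=True),
    upper=messages.BooleanField(2, default=False))


class EchoResponse(messages.Message):
  text = messages.StringField(1)


@endpoints.api(name='echo', version='v1')
class EchoApi(remote.Service):

  @endpoints.method(ECHO_RESOURCE, EchoResponse,
                    path='{name}', http_method='GET', name='get')
  def Get(self, request):
    text = request.name.upper() if request.upper else request.name
    return EchoResponse(text=text)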
Example #26
0
class TaskResult(messages.Message):
    """Representation of the TaskResultSummary or TaskRunResult ndb model."""
    # Time when the task was abandoned instead of normal completion (e.g.
    # EXPIRED, BOT_DIED, KILLED).
    #
    # In the case of KILLED, this records the time the user requested the task to
    # stop.
    abandoned_ts = message_types.DateTimeField(1)
    # The same key cannot be repeated.
    bot_dimensions = messages.MessageField(StringListPair, 2, repeated=True)
    # Unique ID of the bot.
    bot_id = messages.StringField(3)
    # Hash of the bot code which ran the task.
    bot_version = messages.StringField(4)
    # List of task IDs that this task triggered, if any.
    children_task_ids = messages.StringField(5, repeated=True)
    # Time the task completed normally. Only one of abandoned_ts or completed_ts
    # can be set except for state == KILLED.
    #
    # In case of KILLED, completed_ts is the time the task completed.
    completed_ts = message_types.DateTimeField(6)
    # Cost in USD saved for a task with state DEDUPED.
    cost_saved_usd = messages.FloatField(7)
    # Time the task was requested.
    created_ts = message_types.DateTimeField(8)
    # Task ID whose results were reused, for state DEDUPED.
    deduped_from = messages.StringField(9)
    # Duration of the task in seconds. This excludes overheads.
    duration = messages.FloatField(10)
    # Process exit code if relevant. May be forcibly set to -1 in exceptional
    # cases.
    exit_code = messages.IntegerField(11)
    # True if exit_code != 0.
    failure = messages.BooleanField(12)
    # True if state is BOT_DIED.
    internal_failure = messages.BooleanField(13)
    # Time the result was last updated in the DB.
    modified_ts = message_types.DateTimeField(14)
    # Isolated outputs, if any.
    outputs_ref = messages.MessageField(FilesRef, 15)
    # Server versions that touched this task.
    server_versions = messages.StringField(17, repeated=True)
    # Time the task started being run by a bot.
    started_ts = message_types.DateTimeField(18)
    # Current state of the task (e.g. PENDING, RUNNING, COMPLETED, EXPIRED, etc).
    state = messages.EnumField(StateField, 19)
    # Summary task ID (ending with '0') when creating a new task.
    task_id = messages.StringField(20)
    # Can be 0, 1 or 2. It is 0 for a deduped task, since nothing ran. It is
    # normally 1. It is 2 if the first try had an internal failure.
    try_number = messages.IntegerField(21)

    # Can be multiple values only in TaskResultSummary.
    costs_usd = messages.FloatField(22, repeated=True)
    # Name of the task. Only set when requesting task ID summary, ending with '0'.
    name = messages.StringField(23)
    # Tags associated with the task when it was requested. Only set when
    # requesting task ID summary, ending with '0'.
    tags = messages.StringField(24, repeated=True)
    # User on whose behalf this task was requested. Only set when requesting task ID
    # summary, ending with '0'.
    user = messages.StringField(25)
    # Statistics about overhead for an isolated task. Only sent when requested.
    performance_stats = messages.MessageField(PerformanceStats, 26)

    # Listing of the ACTUAL pinned CipdPackages that the task used. These can vary
    # from the input packages if the inputs included non-identity versions (e.g. a
    # ref like "latest").
    cipd_pins = messages.MessageField(CipdPins, 27)
    # Actual executed task id that this task represents. For deduped tasks, it is
    # the same value as deduped_from. This value can be empty if there is no
    # execution, for example the task was cancelled.
    run_id = messages.StringField(28)

    # Index in the TaskRequest.task_slices (TaskSlice instance) that this result
    # represents. This is updated when a TaskSlice is enqueued to run.
    #
    # The TaskSlice contains a TaskProperties, which defines what is run.
    current_task_slice = messages.IntegerField(29)
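Following the comments above, a deduplicated result reuses another task's output: deduped_from carries the source task ID, try_number is 0 because nothing ran, and cost_saved_usd records the saving. A sketch with purely illustrative values:

# Illustrative values only; field semantics follow the comments above.
deduped = TaskResult(
    task_id='5ecf1example0',       # summary ID, ends with '0'
    deduped_from='4abc2example1',  # task whose results were reused
    run_id='4abc2example1',        # same as deduped_from for deduped tasks
    try_number=0,                  # 0 because nothing actually ran
    cost_saved_usd=0.03,
    exit_code=0,
    failure=False)                 # failure is True only when exit_code != 0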
Example #27
0
class Device(messages.Message):
    """Device ProtoRPC message.

  Attributes:
    serial_number: str, The serial number of the Chrome device.
    asset_tag: str, The asset tag of the Chrome device.
    identifier: str, the computed identifier for a device. Serial number if
        asset tag is not provided.
    enrolled: bool, Indicates the enrollment status of the device.
    device_model: int, Identifies the model name of the device.
    due_date: datetime, The date that device is due for return.
    last_known_healthy: datetime, The date to indicate the last known healthy
        status.
    shelf: shelf_messages.Shelf, The message for a shelf.
    assigned_user: str, The email of the user who is assigned to the device.
    assignment_date: datetime, The date the device was assigned to a user.
    current_ou: str, The current organizational unit the device belongs to.
    ou_changed_date: datetime, The date the organizational unit was changed.
    locked: bool, Indicates whether or not the device is locked.
    lost: bool, Indicates whether or not the device is lost.
    mark_pending_return_date: datetime, The date a user marked device returned.
    chrome_device_id: str, A unique device ID.
    last_heartbeat: datetime, The date of the last time the device checked in.
    damaged: bool, Indicates if the device is damaged.
    damaged_reason: str, A string denoting the reason for being reported as
        damaged.
    last_reminder: Reminder, Level, time, and count of the last reminder
        the device had.
    next_reminder: Reminder, Level, time, and count of the next reminder.
    page_size: int, The number of results to query for and display.
    page_number: int, the page index to offset the results.
    max_extend_date: datetime, Indicates maximum extend date a device can have.
    guest_enabled: bool, Indicates if guest mode has been already enabled.
    guest_permitted: bool, Indicates if guest mode has been allowed.
    given_name: str, The given name for the user.
    query: shared_message.SearchRequest, a message containing query options to
        conduct a search on an index.
    overdue: bool, Indicates that the due date has passed.
  """
    serial_number = messages.StringField(1)
    asset_tag = messages.StringField(2)
    identifier = messages.StringField(3)
    urlkey = messages.StringField(4)
    enrolled = messages.BooleanField(5, default=True)
    device_model = messages.StringField(6)
    due_date = message_types.DateTimeField(7)
    last_known_healthy = message_types.DateTimeField(8)
    shelf = messages.MessageField(shelf_messages.Shelf, 9)
    assigned_user = messages.StringField(10)
    assignment_date = message_types.DateTimeField(11)
    current_ou = messages.StringField(12)
    ou_changed_date = message_types.DateTimeField(13)
    locked = messages.BooleanField(14)
    lost = messages.BooleanField(15)
    mark_pending_return_date = message_types.DateTimeField(16)
    chrome_device_id = messages.StringField(17)
    last_heartbeat = message_types.DateTimeField(18)
    damaged = messages.BooleanField(19)
    damaged_reason = messages.StringField(20)
    last_reminder = messages.MessageField(Reminder, 21)
    next_reminder = messages.MessageField(Reminder, 22)
    page_size = messages.IntegerField(23, default=10)
    page_number = messages.IntegerField(24, default=1)
    max_extend_date = message_types.DateTimeField(25)
    guest_enabled = messages.BooleanField(26)
    guest_permitted = messages.BooleanField(27)
    given_name = messages.StringField(28)
    query = messages.MessageField(shared_messages.SearchRequest, 29)
    overdue = messages.BooleanField(30)
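Per the docstring, identifier falls back to the serial number when no asset tag is provided. A sketch constructing a Device message and applying that fallback by hand (the real computation lives in the service, not here):

serial_number = 'SN-12345'
asset_tag = None  # not provided for this device

device = Device(
    serial_number=serial_number,
    asset_tag=asset_tag,
    # Fallback described in the docstring above.
    identifier=asset_tag or serial_number,
    enrolled=True,
    locked=False,
    lost=False)
assert device.identifier == 'SN-12345'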
Example #28
0
class DeletedResponse(messages.Message):
    """Indicates whether a bot was deleted."""
    deleted = messages.BooleanField(1)
Example #29
0
class BooleanMessage(messages.Message):
    """BooleanMessage-- outbound Boolean value message"""
    data = messages.BooleanField(1)
Example #30
0
class BooleanMsg(messages.Message):
    data = messages.BooleanField(1)
    accessToken = messages.StringField(2)
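Messages like these are typically used as bare outbound responses from an endpoints method. A hedged sketch, assuming the BooleanMessage class from Example #29 above and a hypothetical service:

import endpoints
from protorpc import message_types, remote


@endpoints.api(name='flags', version='v1')
class FlagsApi(remote.Service):
  """Hypothetical service that returns a bare boolean result."""

  @endpoints.method(message_types.VoidMessage, BooleanMessage,
                    path='featureEnabled', http_method='GET',
                    name='featureEnabled')
  def FeatureEnabled(self, _request):
    # Outbound-only message: wrap the boolean in the response type.
    return BooleanMessage(data=True)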