Example #1
    def to_proto(self) -> FeatureViewProto:
        """
        Converts a feature view object to its protobuf representation.

        Returns:
            A FeatureViewProto protobuf.
        """

        meta = FeatureViewMetaProto(
            created_timestamp=self.created_timestamp,
            last_updated_timestamp=self.last_updated_timestamp,
            materialization_intervals=[],
        )
        for interval in self.materialization_intervals:
            interval_proto = MaterializationIntervalProto()
            interval_proto.start_time.FromDatetime(interval[0])
            interval_proto.end_time.FromDatetime(interval[1])
            meta.materialization_intervals.append(interval_proto)

        ttl_duration = None
        if self.ttl is not None:
            ttl_duration = Duration()
            ttl_duration.FromTimedelta(self.ttl)

        spec = FeatureViewSpecProto(
            name=self.name,
            entities=self.entities,
            features=[feature.to_proto() for feature in self.features],
            tags=self.tags,
            ttl=(ttl_duration if ttl_duration is not None else None),
            online=self.online,
            input=self.input.to_proto(),
        )

        return FeatureViewProto(spec=spec, meta=meta)
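
The ttl logic above leans on the protobuf Duration well-known type. A minimal standalone sketch of that conversion, separate from the Feast code (variable names here are illustrative):

from datetime import timedelta
from google.protobuf.duration_pb2 import Duration

# Round-trip a timedelta through Duration, as to_proto does for ttl.
ttl_duration = Duration()
ttl_duration.FromTimedelta(timedelta(hours=2))
assert ttl_duration.seconds == 7200
assert ttl_duration.ToTimedelta() == timedelta(hours=2)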
Example #2
def test_list_point_in_time_changes():
    """Demonstrate listing assets along with their state changes."""
    from google.cloud import securitycenter_v1beta1 as securitycenter
    from google.protobuf.duration_pb2 import Duration
    from datetime import timedelta

    # [START demo_list_assets_changes]
    client = securitycenter.SecurityCenterClient()

    # ORGANIZATION_ID is the numeric ID of the organization (e.g. 123213123121)
    org_name = "organizations/{org_id}".format(org_id=ORGANIZATION_ID)
    project_filter = ("security_center_properties.resource_type=" +
                      '"google.cloud.resourcemanager.Project"')

    # List assets and their state changes over the last 30 days.
    compare_delta = timedelta(days=30)
    # Convert the timedelta to a Duration
    duration_proto = Duration()
    duration_proto.FromTimedelta(compare_delta)
    # Call the API and print results.
    asset_iterator = client.list_assets(org_name,
                                        filter_=project_filter,
                                        compare_duration=duration_proto)
    i = -1
    for i, asset in enumerate(asset_iterator):
        print(i, asset)

    # [END demo_list_assets_changes]
    assert i > 0
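
The compare_duration argument is just a Duration built from a timedelta. A quick standalone sketch of how the 30-day window maps onto the message fields:

from datetime import timedelta
from google.protobuf.duration_pb2 import Duration

# FromTimedelta splits the delta into whole seconds plus nanoseconds.
window = Duration()
window.FromTimedelta(timedelta(days=30))
assert window.seconds == 30 * 24 * 3600  # 2592000
assert window.nanos == 0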
Example #3
    def to_proto(self) -> FeatureViewProto:
        """
        Converts a feature view object to its protobuf representation.

        Returns:
            A FeatureViewProto protobuf.
        """

        meta = FeatureViewMetaProto(
            created_timestamp=self.created_timestamp,
            last_updated_timestamp=self.last_updated_timestamp,
        )

        ttl_duration = None
        if self.ttl is not None:
            ttl_duration = Duration()
            ttl_duration.FromTimedelta(self.ttl)

        spec = FeatureViewSpecProto(
            name=self.name,
            entities=self.entities,
            features=[feature.to_proto() for feature in self.features],
            tags=self.tags,
            ttl=(ttl_duration if ttl_duration is not None else None),
            online=self.online,
            input=self.input.to_proto(),
        )

        return FeatureViewProto(spec=spec, meta=meta)
Example #4
    def __init__(
        self,
        name: str,
        entities: List[str],
        features: List[Feature],
        tags: Dict[str, str],
        ttl: Optional[Union[Duration, timedelta]],
        online: bool,
        input: BigQuerySource,
    ):
        cols = list(entities) + [feat.name for feat in features]
        for col in cols:
            if input.field_mapping is not None and col in input.field_mapping:
                raise ValueError(
                    f"The field {col} is mapped to {input.field_mapping[col]} for this data source. Please either remove this field mapping or use {input.field_mapping[col]} as the Entity or Feature name."
                )

        self.name = name
        self.entities = entities
        self.features = features
        self.tags = tags
        if isinstance(ttl, timedelta):
            proto_ttl = Duration()
            proto_ttl.FromTimedelta(ttl)
            self.ttl = proto_ttl
        else:
            self.ttl = ttl

        self.online = online
        self.input = input
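
A hedged usage sketch for this constructor; Feature, ValueType, and BigQuerySource details are assumptions about the surrounding Feast API rather than anything shown above:

from datetime import timedelta

# Hypothetical imports; exact module paths depend on the Feast version.
from feast import Feature, FeatureView, ValueType
from feast.data_source import BigQuerySource

view = FeatureView(
    name="driver_stats",
    entities=["driver_id"],
    features=[Feature(name="trips_today", dtype=ValueType.INT64)],
    tags={},
    ttl=timedelta(days=1),  # converted to a protobuf Duration by __init__
    online=True,
    input=BigQuerySource(table_ref="project.dataset.driver_stats"),
)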
Example #5
def group_findings_and_changes(source_name):
    """Demonstrates grouping all findings across an organization and
    associated changes."""
    i = 0
    # [START group_filtered_findings_with_changes]
    from datetime import timedelta

    from google.cloud import securitycenter
    from google.protobuf.duration_pb2 import Duration

    # Create a client.
    client = securitycenter.SecurityCenterClient()

    # source_name is the resource path for a source that has been
    # created previously (you can use list_sources to find a specific one).
    # Its format is:
    # source_name = "organizations/{organization_id}/sources/{source_id}"
    # e.g.:
    # source_name = "organizations/111122222444/sources/1234"

    # Group findings and their state changes over the last 30 days.
    compare_delta = timedelta(days=30)
    # Convert the timedelta to a Duration
    duration_proto = Duration()
    duration_proto.FromTimedelta(compare_delta)

    group_result_iterator = client.group_findings(
        source_name, group_by="state_change", compare_duration=duration_proto)
    for i, group_result in enumerate(group_result_iterator):
        print((i + 1), group_result)
    # [END group_filtered_findings_with_changes]
    return i
Example #6
def test_create_upload_location(mock_raw_create_upload_location):
    client = _SynchronousFlyteClient(
        PlatformConfig.for_endpoint("a.b.com", True))
    client.get_upload_signed_url("foo", "bar", bytes(), "baz.qux",
                                 timedelta(minutes=42))
    duration_pb = Duration()
    duration_pb.FromTimedelta(timedelta(minutes=42))
    create_upload_location_request = _data_proxy_pb2.CreateUploadLocationRequest(
        project="foo",
        domain="bar",
        filename="baz.qux",
        expires_in=duration_pb)
    mock_raw_create_upload_location.assert_called_with(
        create_upload_location_request)
Example #7
    def _create_csr(self):
        """Create CSR protobuf

        Returns:
             CSR protobuf object
        """
        csr_request = cert_utils.create_csr(self._gateway_key, self._hw_id)
        duration = Duration()
        duration.FromTimedelta(datetime.timedelta(days=4))
        csr = CSR(
            id=Identity(gateway=Identity.Gateway(hardware_id=self._hw_id)),
            valid_time=duration,
            csr_der=csr_request.public_bytes(serialization.Encoding.DER),
        )
        return csr
Example #8
    def to_proto(self) -> FeatureViewProto:
        """
        Converts a feature view object to its protobuf representation.

        Returns:
            A FeatureViewProto protobuf.
        """
        meta = FeatureViewMetaProto(materialization_intervals=[])
        if self.created_timestamp:
            meta.created_timestamp.FromDatetime(self.created_timestamp)
        if self.last_updated_timestamp:
            meta.last_updated_timestamp.FromDatetime(
                self.last_updated_timestamp)
        for interval in self.materialization_intervals:
            interval_proto = MaterializationIntervalProto()
            interval_proto.start_time.FromDatetime(interval[0])
            interval_proto.end_time.FromDatetime(interval[1])
            meta.materialization_intervals.append(interval_proto)

        ttl_duration = None
        if self.ttl is not None:
            ttl_duration = Duration()
            ttl_duration.FromTimedelta(self.ttl)

        batch_source_proto = self.batch_source.to_proto()
        batch_source_proto.data_source_class_type = f"{self.batch_source.__class__.__module__}.{self.batch_source.__class__.__name__}"

        stream_source_proto = None
        if self.stream_source:
            stream_source_proto = self.stream_source.to_proto()
            stream_source_proto.data_source_class_type = f"{self.stream_source.__class__.__module__}.{self.stream_source.__class__.__name__}"

        spec = FeatureViewSpecProto(
            name=self.name,
            entities=self.entities,
            features=[field.to_proto() for field in self.schema],
            description=self.description,
            tags=self.tags,
            owner=self.owner,
            ttl=(ttl_duration if ttl_duration is not None else None),
            online=self.online,
            batch_source=batch_source_proto,
            stream_source=stream_source_proto,
        )

        return FeatureViewProto(spec=spec, meta=meta)
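
Since ttl is a Duration message field, the inverse conversion can test for presence with HasField; a hedged sketch for that one field (the helper name is hypothetical, not part of Feast):

from datetime import timedelta
from typing import Optional


def _ttl_from_spec(spec) -> Optional[timedelta]:
    # Message fields support HasField even under proto3.
    if spec.HasField("ttl"):
        return spec.ttl.ToTimedelta()
    return None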
Example #9
def test_group_assets_by_changes(organization_id):
    """Demonstrates grouping assets by their changes over a period of time."""
    # [START group_all_assets_by_change]
    from datetime import timedelta

    from google.cloud import securitycenter
    from google.protobuf.duration_pb2 import Duration

    client = securitycenter.SecurityCenterClient()

    duration_proto = Duration()
    duration_proto.FromTimedelta(timedelta(days=5))

    # organization_id is the numeric ID of the organization.
    # organization_id = "1234567777"
    org_name = "organizations/{org_id}".format(org_id=organization_id)
    result_iterator = client.group_assets(org_name,
                                          group_by="state_change",
                                          compare_duration=duration_proto)
    for i, result in enumerate(result_iterator):
        print((i + 1), result)
    # [END group_all_assets_by_change]
    # only one asset type is a project
    assert i >= 0
Example #10
class Job:
    def __init__(self,
                 do_not_cache,
                 action_digest,
                 platform_requirements=None,
                 priority=0,
                 name=None,
                 operations=(),
                 cancelled_operations=None,
                 lease=None,
                 stage=OperationStage.UNKNOWN.value,
                 cancelled=False,
                 queued_timestamp=None,
                 queued_time_duration=None,
                 worker_start_timestamp=None,
                 worker_completed_timestamp=None,
                 done=False,
                 result=None,
                 worker_name=None,
                 n_tries=0):
        self.__logger = logging.getLogger(__name__)

        self._name = name or str(uuid.uuid4())
        self._priority = priority
        self._lease = lease

        self.__execute_response = result
        if result is None:
            self.__execute_response = remote_execution_pb2.ExecuteResponse()
        self.__operation_metadata = \
            remote_execution_pb2.ExecuteOperationMetadata()

        self.__queued_timestamp = Timestamp()
        if queued_timestamp is not None:
            self.__queued_timestamp.CopyFrom(queued_timestamp)

        self.__queued_time_duration = Duration()
        if queued_time_duration is not None:
            self.__queued_time_duration.CopyFrom(queued_time_duration)

        self.__worker_start_timestamp = Timestamp()
        if worker_start_timestamp is not None:
            self.__worker_start_timestamp.CopyFrom(worker_start_timestamp)

        self.__worker_completed_timestamp = Timestamp()
        if worker_completed_timestamp is not None:
            self.__worker_completed_timestamp.CopyFrom(
                worker_completed_timestamp)

        # Name to Operation 1:1 mapping
        self.__operations_by_name = {op.name: op for op in operations}
        self.__operations_cancelled = (cancelled_operations
                                       if cancelled_operations is not None
                                       else set())
        self.__lease_cancelled = cancelled
        self.__job_cancelled = cancelled

        self.__operation_metadata.action_digest.CopyFrom(action_digest)
        self.__operation_metadata.stage = stage

        self._do_not_cache = do_not_cache
        self._n_tries = n_tries

        self._platform_requirements = platform_requirements \
            if platform_requirements else dict()

        self._done = done
        self.worker_name = worker_name

    def __lt__(self, other):
        try:
            return self.priority < other.priority
        except AttributeError:
            return NotImplemented

    def __le__(self, other):
        try:
            return self.priority <= other.priority
        except AttributeError:
            return NotImplemented

    def __eq__(self, other):
        if isinstance(other, Job):
            return self.name == other.name
        return False

    def __ne__(self, other):
        return not self.__eq__(other)

    def __gt__(self, other):
        try:
            return self.priority > other.priority
        except AttributeError:
            return NotImplemented

    def __ge__(self, other):
        try:
            return self.priority >= other.priority
        except AttributeError:
            return NotImplemented

    # --- Public API ---

    @property
    def name(self):
        return self._name

    @property
    def cancelled(self):
        return self.__job_cancelled

    @property
    def priority(self):
        return self._priority

    def set_priority(self, new_priority, *, data_store):
        self._priority = new_priority
        data_store.update_job(self.name, {'priority': new_priority})

    @property
    def done(self):
        return self._done

    # --- Public API: REAPI ---

    @property
    def platform_requirements(self):
        return self._platform_requirements

    @property
    def do_not_cache(self):
        return self._do_not_cache

    @property
    def action_digest(self):
        return self.__operation_metadata.action_digest

    @property
    def operation_stage(self):
        return OperationStage(self.__operation_metadata.stage)

    @property
    def action_result(self):
        if self.__execute_response is not None:
            return self.__execute_response.result
        else:
            return None

    @property
    def execute_response(self):
        return self.__execute_response

    @execute_response.setter
    def execute_response(self, response):
        self.__execute_response = response
        for operation in self.__operations_by_name.values():
            operation.response.Pack(self.__execute_response)

    @property
    def holds_cached_result(self):
        if self.__execute_response is not None:
            return self.__execute_response.cached_result
        else:
            return False

    @property
    def queued_timestamp(self):
        return self.__queued_timestamp

    @property
    def queued_time_duration(self):
        return self.__queued_time_duration

    @property
    def worker_start_timestamp(self):
        return self.__worker_start_timestamp

    @property
    def worker_completed_timestamp(self):
        return self.__worker_completed_timestamp

    def mark_worker_started(self):
        self.__worker_start_timestamp.GetCurrentTime()

    def set_action_url(self, url):
        """Generates a CAS browser URL for the job's action."""
        if url.for_message('action', self.__operation_metadata.action_digest):
            self.__execute_response.message = url.generate()

    def set_cached_result(self, action_result):
        """Allows specifying an action result form the action cache for the job.

        Note:
            This won't trigger any :class:`Operation` stage transition.

        Args:
            action_result (ActionResult): The result from cache.
        """
        self.__execute_response.result.CopyFrom(action_result)
        self.__execute_response.cached_result = True

    def n_peers(self, operations_by_peer):
        return len([
            peer for peer, names in operations_by_peer.items()
            if any(name in self.__operations_by_name for name in names)
        ])

    def n_peers_for_operation(self, operation_name, operations_by_peer):
        return len([
            peer for peer, names in operations_by_peer.items()
            if any(name == operation_name for name in names)
        ])

    def register_new_operation_peer(self, peer, message_queue,
                                    operations_by_peer, peer_message_queues, *,
                                    data_store):
        """Subscribes to a new job's :class:`Operation` stage changes.

        Args:
            peer (str): a unique string identifying the client.
            message_queue (queue.Queue): the event queue to register.

        Returns:
            str: The name of the subscribed :class:`Operation`.
        """
        new_operation = operations_pb2.Operation()
        # Copy state from the first existing, non-cancelled operation:
        for operation in self.__operations_by_name.values():
            if operation.name not in self.__operations_cancelled:
                new_operation.CopyFrom(operation)
                break

        new_operation.name = str(uuid.uuid4())

        self.__logger.debug("Operation created for job [%s]: [%s]", self._name,
                            new_operation.name)

        self.__operations_by_name[new_operation.name] = new_operation
        operations_by_peer.setdefault(peer, set()).add(new_operation.name)
        peer_message_queues.setdefault(
            peer, {})[new_operation.name] = message_queue

        data_store.create_operation(new_operation, self._name)

        self._send_operations_updates(peers=[peer],
                                      operations_by_peer=operations_by_peer,
                                      peer_message_queues=peer_message_queues,
                                      data_store=data_store)

        return new_operation.name

    def register_operation_peer(self, operation_name, peer, message_queue,
                                operations_by_peer, peer_message_queues, *,
                                data_store):
        """Subscribes to one of the job's :class:`Operation` stage changes.

        Args:
            operation_name (str): an existing operation's name to subscribe to.
            peer (str): a unique string identifying the client.
            message_queue (queue.Queue): the event queue to register.

        Returns:
            str: The name of the subscribed :class:`Operation`.

        Raises:
            NotFoundError: If no operation with `operation_name` exists.
        """
        if operation_name not in self.__operations_by_name:
            raise NotFoundError(
                "Operation name does not exist: [{}]".format(operation_name))

        operations_by_peer.setdefault(peer, set()).add(operation_name)
        peer_message_queues.setdefault(peer, {})[operation_name] = message_queue

        self._send_operations_updates(peers=[peer],
                                      operations_by_peer=operations_by_peer,
                                      peer_message_queues=peer_message_queues,
                                      data_store=data_store)

    def unregister_operation_peer(self, operation_name, peer):
        """Unsubscribes to the job's :class:`Operation` stage change.

        Args:
            operation_name (str): an existing operation's name to unsubscribe from.
            peer (str): a unique string identifying the client.

        Raises:
            NotFoundError: If no operation with `operation_name` exists.
        """
        if operation_name not in self.__operations_by_name:
            raise NotFoundError(
                "Operation name does not exist: [{}]".format(operation_name))

    def list_operations(self):
        """Lists the :class:`Operation` related to a job.

        Returns:
            list: A list of :class:`Operation` names.
        """
        return list(self.__operations_by_name.keys())

    def get_operation(self, operation_name):
        """Returns a copy of the the job's :class:`Operation`.

        Args:
            operation_name (str): the operation's name.

        Raises:
            NotFoundError: If no operation with `operation_name` exists.
        """
        try:
            operation = self.__operations_by_name[operation_name]

        except KeyError:
            raise NotFoundError(
                "Operation name does not exist: [{}]".format(operation_name))

        return self._copy_operation(operation)

    def update_operation_stage(self, stage, operations_by_peer,
                               peer_message_queues, *, data_store):
        """Operates a stage transition for the job's :class:`Operation`.

        Args:
            stage (OperationStage): the operation stage to transition to.
        """
        if stage.value == self.__operation_metadata.stage:
            return

        changes = {}

        self.__operation_metadata.stage = stage.value
        changes["stage"] = stage.value

        self.__logger.debug("Stage changed for job [%s]: [%s] (operation)",
                            self._name, stage.name)

        if self.__operation_metadata.stage == OperationStage.QUEUED.value:
            if self.__queued_timestamp.ByteSize() == 0:
                self.__queued_timestamp.GetCurrentTime()
                changes["queued_timestamp"] = \
                    self.__queued_timestamp.ToDatetime()
            self._n_tries += 1
            changes["n_tries"] = self._n_tries

        elif self.__operation_metadata.stage == OperationStage.EXECUTING.value:
            queue_in = self.__queued_timestamp.ToDatetime()
            queue_out = datetime.utcnow()
            self.__queued_time_duration.FromTimedelta(queue_out - queue_in)
            changes["queued_time_duration"] = \
                self.__queued_time_duration.seconds

        elif self.__operation_metadata.stage == OperationStage.COMPLETED.value:
            self._done = True

        data_store.update_job(self.name, changes)

        self._send_operations_updates(operations_by_peer=operations_by_peer,
                                      peer_message_queues=peer_message_queues,
                                      data_store=data_store)

    def cancel_operation(self, operation_name, operations_by_peer,
                         peer_message_queues, *, data_store):
        """Triggers a job's :class:`Operation` cancellation.

        This may cancel any job's :class:`Lease` that may have been issued.

        Args:
            operation_name (str): the operation's name.

        Raises:
            NotFoundError: If no operation with `operation_name` exists.
        """
        try:
            operation = self.__operations_by_name[operation_name]

        except KeyError:
            raise NotFoundError(
                "Operation name does not exist: [{}]".format(operation_name))

        self.__operations_cancelled.add(operation.name)

        self.__logger.debug("Operation cancelled for job [%s]: [%s]",
                            self._name, operation.name)

        ongoing_operations = set(self.__operations_by_name.keys())
        # The job is cancelled once all of its operations are cancelled:
        self.__job_cancelled = ongoing_operations.issubset(
            self.__operations_cancelled)

        if self.__job_cancelled:
            self.__operation_metadata.stage = OperationStage.COMPLETED.value
            changes = {
                "stage": OperationStage.COMPLETED.value,
                "cancelled": True
            }
            data_store.update_job(self.name, changes)
            if self._lease is not None:
                self.cancel_lease(data_store=data_store)

        peers_to_notify = set()
        # If the job is not cancelled, notify all the peers watching the given
        # operation; if the job is cancelled, only notify the peers for which
        # the operation status changed.
        for peer, names in operations_by_peer.items():
            relevant_names = [
                n for n in names if n in self.__operations_by_name
            ]
            if self.__job_cancelled:
                if not any(name in self.__operations_cancelled
                           for name in relevant_names):
                    peers_to_notify.add(peer)
                elif operation_name in relevant_names:
                    peers_to_notify.add(peer)

            else:
                if operation_name in relevant_names:
                    peers_to_notify.add(peer)

        self._send_operations_updates(peers=peers_to_notify,
                                      notify_cancelled=True,
                                      operations_by_peer=operations_by_peer,
                                      peer_message_queues=peer_message_queues,
                                      data_store=data_store)

    # --- Public API: RWAPI ---

    @property
    def lease(self):
        return self._lease

    @property
    def lease_state(self):
        if self._lease is not None:
            return LeaseState(self._lease.state)
        else:
            return None

    @property
    def lease_cancelled(self):
        return self.__lease_cancelled

    @property
    def n_tries(self):
        return self._n_tries

    def create_lease(self, worker_name, bot_id=None, *, data_store):
        """Emits a new :class:`Lease` for the job.

        Only one :class:`Lease` can be emitted for a given job. This method
        should only be used once; any further calls are ignored.

        Args:
            worker_name (string): The name of the worker this lease is for.
            bot_id (string): The name of the corresponding bot for this job's worker.
        """
        if self._lease is not None:
            return self._lease
        elif self.__job_cancelled:
            return None

        self._lease = bots_pb2.Lease()
        self._lease.id = self._name
        self._lease.payload.Pack(self.__operation_metadata.action_digest)
        self._lease.state = LeaseState.UNSPECIFIED.value

        if bot_id is None:
            bot_id = "UNKNOWN"
        self.__logger.debug(
            "Lease created for job [%s]: [%s] (assigned to bot [%s])",
            self._name, self._lease.id, bot_id)

        self.update_lease_state(LeaseState.PENDING,
                                skip_lease_persistence=True,
                                data_store=data_store)

        self.worker_name = worker_name

        return self._lease

    def update_lease_state(self,
                           state,
                           status=None,
                           result=None,
                           skip_lease_persistence=False,
                           *,
                           data_store):
        """Operates a state transition for the job's current :class:`Lease`.

        Args:
            state (LeaseState): the lease state to transition to.
            status (google.rpc.Status, optional): the lease execution status,
                only required if `state` is `COMPLETED`.
            result (google.protobuf.Any, optional): the lease execution result,
                only required if `state` is `COMPLETED`.
        """
        if state.value == self._lease.state:
            return

        job_changes = {}
        lease_changes = {}

        self._lease.state = state.value
        lease_changes["state"] = state.value

        self.__logger.debug("State changed for job [%s]: [%s] (lease)",
                            self._name, state.name)

        if self._lease.state == LeaseState.PENDING.value:
            self.__worker_start_timestamp.Clear()
            self.__worker_completed_timestamp.Clear()
            job_changes["worker_start_timestamp"] = \
                self.__worker_start_timestamp.ToDatetime()
            job_changes["worker_completed_timestamp"] = \
                self.__worker_completed_timestamp.ToDatetime()

            self._lease.status.Clear()
            self._lease.result.Clear()
            lease_changes["status"] = self._lease.status.code

        elif self._lease.state == LeaseState.COMPLETED.value:
            self.__worker_completed_timestamp.GetCurrentTime()
            job_changes["worker_completed_timestamp"] = \
                self.__worker_completed_timestamp.ToDatetime()

            action_result = remote_execution_pb2.ActionResult()

            # TODO: Make a distinction between build and bot failures!
            if status.code != code_pb2.OK:
                self._do_not_cache = True
                job_changes["do_not_cache"] = True

            lease_changes["status"] = status.code

            if result is not None and result.Is(action_result.DESCRIPTOR):
                result.Unpack(action_result)

            action_metadata = action_result.execution_metadata
            action_metadata.queued_timestamp.CopyFrom(self.__queued_timestamp)
            action_metadata.worker_start_timestamp.CopyFrom(
                self.__worker_start_timestamp)
            action_metadata.worker_completed_timestamp.CopyFrom(
                self.__worker_completed_timestamp)

            self.__execute_response.result.CopyFrom(action_result)
            self.__execute_response.cached_result = False
            self.__execute_response.status.CopyFrom(status)

        data_store.update_job(self.name, job_changes)
        if not skip_lease_persistence:
            data_store.update_lease(self.name, lease_changes)

    def cancel_lease(self, *, data_store):
        """Triggers a job's :class:`Lease` cancellation.

        Note:
            This will not cancel the job's :class:`Operation`.
        """
        self.__lease_cancelled = True

        if self._lease is not None:
            self.__logger.debug("Lease cancelled for job [%s]: [%s]",
                                self._name, self._lease.id)
            self.update_lease_state(LeaseState.CANCELLED,
                                    data_store=data_store)

    def delete_lease(self):
        """Discard the job's :class:`Lease`.

        Note:
            This will not cancel the job's :class:`Operation`.
        """
        if self._lease is not None:
            self.__worker_start_timestamp.Clear()
            self.__worker_completed_timestamp.Clear()

            self.__logger.debug("Lease deleted for job [%s]: [%s]", self._name,
                                self._lease.id)

            self._lease = None

    # --- Public API: Monitoring ---

    def query_queue_time(self):
        return self.__queued_time_duration.ToTimedelta()

    def query_n_retries(self):
        return self._n_tries - 1 if self._n_tries > 0 else 0

    # --- Private API ---

    def _copy_operation(self, operation):
        """Simply duplicates a given :class:`Lease` object."""
        new_operation = operations_pb2.Operation()
        new_operation.CopyFrom(operation)

        new_operation.metadata.Pack(self.__operation_metadata)
        new_operation.response.Pack(self.__execute_response)

        return new_operation

    def _update_operation(self,
                          operation,
                          operation_metadata,
                          execute_response=None,
                          done=False,
                          *,
                          data_store):
        """Forges a :class:`Operation` message given input data."""
        operation.metadata.Pack(operation_metadata)

        if execute_response is not None:
            operation.response.Pack(execute_response)

        operation.done = done
        changes = {"done": done}
        data_store.update_operation(operation.name, changes)

    def _update_cancelled_operation(self,
                                    operation,
                                    operation_metadata,
                                    execute_response=None,
                                    *,
                                    data_store):
        """Forges a cancelled :class:`Operation` message given input data."""
        cancelled_operation_metadata = \
            remote_execution_pb2.ExecuteOperationMetadata()
        cancelled_operation_metadata.CopyFrom(operation_metadata)
        cancelled_operation_metadata.stage = OperationStage.COMPLETED.value

        operation.metadata.Pack(cancelled_operation_metadata)

        cancelled_execute_response = remote_execution_pb2.ExecuteResponse()
        if execute_response is not None:
            cancelled_execute_response.CopyFrom(execute_response)
        cancelled_execute_response.status.code = code_pb2.CANCELLED
        cancelled_execute_response.status.message = "Operation cancelled by client."

        operation.response.Pack(cancelled_execute_response)

        operation.done = True
        changes = {"done": True, "cancelled": True}
        data_store.update_operation(operation.name, changes)

    def _send_operations_updates(self,
                                 peers=None,
                                 notify_cancelled=False,
                                 operations_by_peer=None,
                                 peer_message_queues=None,
                                 *,
                                 data_store):
        """Sends :class:`Operation` stage change messages to watchers."""
        if operations_by_peer is None:
            operations_by_peer = {}
        if peer_message_queues is None:
            peer_message_queues = {}

        for operation in self.__operations_by_name.values():
            if operation.name in self.__operations_cancelled:
                self._update_cancelled_operation(
                    operation,
                    self.__operation_metadata,
                    execute_response=self.__execute_response,
                    data_store=data_store)

            else:
                self._update_operation(
                    operation,
                    self.__operation_metadata,
                    execute_response=self.__execute_response,
                    done=self._done,
                    data_store=data_store)

        relevant_queues = {
            peer: mqs
            for peer, mqs in peer_message_queues.items()
            if any(name in self.__operations_by_name for name in mqs)
        }

        for peer, message_queues in relevant_queues.items():
            if peer not in operations_by_peer:
                continue
            elif peers and peer not in peers:
                continue

            operations = [
                self.__operations_by_name[name]
                for name in operations_by_peer[peer]
                if name in self.__operations_by_name
            ]
            for operation in operations:
                # Messages are pairs of (Exception, Operation,):
                if not notify_cancelled and operation.name in self.__operations_cancelled:
                    continue
                elif operation.name not in self.__operations_cancelled:
                    message = (
                        None,
                        self._copy_operation(operation),
                    )
                else:
                    message = (
                        CancelledError("Operation has been cancelled"),
                        self._copy_operation(operation),
                    )

                message_queue = message_queues[operation.name]
                message_queue.put(message)
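
In update_operation_stage, queue time is the gap between the queued timestamp and the moment execution starts, persisted as a Duration that query_queue_time later reads back. A standalone sketch of that arithmetic (the 90-second figure is illustrative):

from datetime import datetime, timedelta
from google.protobuf.duration_pb2 import Duration

queued_time_duration = Duration()
queue_in = datetime.utcnow() - timedelta(seconds=90)  # when the job was queued
queue_out = datetime.utcnow()                         # when execution began
queued_time_duration.FromTimedelta(queue_out - queue_in)
assert queued_time_duration.ToTimedelta() >= timedelta(seconds=90)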
Example #11
from google.protobuf.duration_pb2 import Duration


def duration_from_timedelta(delta):
    dr = Duration()
    dr.FromTimedelta(delta)
    return dr
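
For instance (a hedged usage line, not part of the original snippet):

from datetime import timedelta

five_minutes = duration_from_timedelta(timedelta(minutes=5))
assert five_minutes.seconds == 300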
Example #12
from datetime import timedelta
from google.protobuf.duration_pb2 import Duration


def Duration_from_timedelta(dt: timedelta) -> Duration:
    pb_d = Duration()
    pb_d.FromTimedelta(dt)
    return pb_d