def _update_cancelled_operation(self, operation, operation_metadata, execute_response=None, *, data_store):
    """Forges a cancelled :class:`Operation` message given input data.

    Mutates ``operation`` in place so it reflects a client-side
    cancellation, then persists the terminal flags to the data store.

    Args:
        operation (operations_pb2.Operation): operation message to mark
            as cancelled (its ``metadata``, ``response`` and ``done``
            fields are overwritten).
        operation_metadata (ExecuteOperationMetadata): current metadata;
            copied and re-staged as ``COMPLETED``.
        execute_response (ExecuteResponse, optional): existing response
            to base the cancelled response on, if any.
        data_store: backing store used to record the done/cancelled state.
    """
    cancelled_operation_metadata = remote_execution_pb2.ExecuteOperationMetadata()
    cancelled_operation_metadata.CopyFrom(operation_metadata)
    # Cancellation is terminal, so the stage is reported as COMPLETED:
    cancelled_operation_metadata.stage = OperationStage.COMPLETED.value

    operation.metadata.Pack(cancelled_operation_metadata)

    cancelled_execute_response = remote_execution_pb2.ExecuteResponse()
    if execute_response is not None:
        # Bug fix: copy from the `execute_response` argument. The previous
        # code copied `self.__execute_response`, leaving the parameter dead.
        cancelled_execute_response.CopyFrom(execute_response)
    cancelled_execute_response.status.code = code_pb2.CANCELLED
    cancelled_execute_response.status.message = "Operation cancelled by client."

    operation.response.Pack(cancelled_execute_response)

    operation.done = True

    changes = {"done": True, "cancelled": True}
    data_store.update_operation(operation.name, changes)
def test_wait_execution(instance, controller, context):
    """Waiting on an already-completed job immediately yields a done operation."""
    scheduler = controller.execution_instance._scheduler
    job_name = scheduler.queue_job_action(action, action_digest, skip_cache_lookup=True)

    peer_queue = queue.Queue()
    operation_name = controller.execution_instance.register_job_peer(
        job_name, context.peer(), peer_queue)

    # Drive the job to its terminal stage before waiting on it:
    scheduler._update_job_operation_stage(job_name, OperationStage.COMPLETED)

    wait_request = remote_execution_pb2.WaitExecutionRequest(name=operation_name)
    operation = next(instance.WaitExecution(wait_request, context))

    assert isinstance(operation, operations_pb2.Operation)

    metadata = remote_execution_pb2.ExecuteOperationMetadata()
    operation.metadata.Unpack(metadata)
    assert metadata.stage == job.OperationStage.COMPLETED.value
    assert operation.done is True

    # When backed by SQL, the completion must also be visible in the database:
    if isinstance(scheduler.data_store, SQLDataStore):
        with scheduler.data_store.session() as session:
            record = session.query(models.Job).filter_by(name=job_name).first()
            assert record is not None
            assert record.stage == job.OperationStage.COMPLETED.value
            assert record.operations
            assert all(op.done for op in record.operations)
def test_execute(skip_cache_lookup, instance, context):
    """Execute returns a queued, not-yet-done operation with a UUID-suffixed name."""
    execute_request = remote_execution_pb2.ExecuteRequest(
        instance_name='',
        action_digest=action_digest,
        skip_cache_lookup=skip_cache_lookup)
    operation = next(instance.Execute(execute_request, context))

    assert isinstance(operation, operations_pb2.Operation)

    metadata = remote_execution_pb2.ExecuteOperationMetadata()
    operation.metadata.Unpack(metadata)
    assert metadata.stage == OperationStage.QUEUED.value

    # Operation names are expected to end in a version-4 UUID:
    assert uuid.UUID(operation.name.split('/')[-1], version=4)
    assert operation.done is False
def test_cancel_operation(instance, controller, execute_request, context):
    """Cancelling an operation drives its metadata stage to COMPLETED."""
    job_name = controller.execution_instance.execute(
        execute_request.action_digest, execute_request.skip_cache_lookup)

    peer_queue = queue.Queue()
    operation_name = controller.execution_instance.register_job_peer(
        job_name, context.peer(), peer_queue)

    qualified_name = "{}/{}".format(instance_name, operation_name)

    cancel_request = operations_pb2.CancelOperationRequest(name=qualified_name)
    instance.CancelOperation(cancel_request, context)

    # Re-fetch the operation and verify the cancellation took effect:
    get_request = operations_pb2.GetOperationRequest(name=qualified_name)
    response = instance.GetOperation(get_request, context)

    operation_metadata = remote_execution_pb2.ExecuteOperationMetadata()
    response.metadata.Unpack(operation_metadata)
    assert operation_metadata.stage == OperationStage.COMPLETED.value
def lists(context, json):
    """Lists known operations, grouped by execution stage and sorted by name."""
    stub = operations_pb2_grpc.OperationsStub(context.channel)
    request = operations_pb2.ListOperationsRequest(name=context.instance_name)

    try:
        response = stub.ListOperations(request)
    except RpcError as e:
        click.echo('Error: {}'.format(e.details()), err=True)
        sys.exit(-1)

    if not response.operations:
        click.echo('Error: No operations to list.', err=True)
        return

    # Bucket operations per stage, preserving display order:
    display_stages = (OperationStage.CACHE_CHECK, OperationStage.QUEUED,
                      OperationStage.EXECUTING, OperationStage.COMPLETED)
    operations_map = OrderedDict((stage, []) for stage in display_stages)

    for operation in response.operations:
        metadata = remote_execution_pb2.ExecuteOperationMetadata()
        # The metadata is expected to be an ExecuteOperationMetadata message:
        assert operation.metadata.Is(metadata.DESCRIPTOR)
        operation.metadata.Unpack(metadata)

        operations_map[OperationStage(metadata.stage)].append(operation)

    for stage_operations in operations_map.values():
        stage_operations.sort(key=attrgetter('name'))

        for operation in stage_operations:
            if json:
                click.echo(json_format.MessageToJson(operation))
            else:
                _print_operation_status(operation)
def _print_operation_status(operation, print_details=False):
    """Prints a one-line, human-readable summary of an operation's status.

    Args:
        operation (operations_pb2.Operation): the operation to report on;
            its metadata must be an ``ExecuteOperationMetadata`` message.
        print_details (bool): when True and the operation has completed,
            also print the worker's execution timing metadata, indented.

    Raises:
        InvalidArgumentError: if the operation metadata is not an
            ``ExecuteOperationMetadata`` message.
    """
    metadata = remote_execution_pb2.ExecuteOperationMetadata()
    # The metadata is expected to be an ExecuteOperationMetadata message:
    if not operation.metadata.Is(metadata.DESCRIPTOR):
        raise InvalidArgumentError('Metadata is not an ExecuteOperationMetadata '
                                   'message')
    operation.metadata.Unpack(metadata)

    stage = OperationStage(metadata.stage)

    if not operation.done:
        if stage == OperationStage.CACHE_CHECK:
            click.echo('CacheCheck: {}: Querying action-cache (stage={})'.format(
                operation.name, metadata.stage))
        elif stage == OperationStage.QUEUED:
            click.echo('Queued: {}: Waiting for execution (stage={})'.format(
                operation.name, metadata.stage))
        elif stage == OperationStage.EXECUTING:
            click.echo('Executing: {}: Currently running (stage={})'.format(
                operation.name, metadata.stage))
        else:
            click.echo('Error: {}: In an invalid state (stage={})'.format(
                operation.name, metadata.stage), err=True)
        return

    assert stage == OperationStage.COMPLETED

    response = remote_execution_pb2.ExecuteResponse()
    # The response is expected to be an ExecutionResponse message:
    assert operation.response.Is(response.DESCRIPTOR)
    operation.response.Unpack(response)

    if response.status.code != code_pb2.OK:
        click.echo('Failure: {}: {} (code={})'.format(
            operation.name, response.status.message, response.status.code))
    else:
        if response.result.exit_code != 0:
            click.echo(
                'Success: {}: Completed with failure (stage={}, exit_code={})'.
                format(operation.name, metadata.stage, response.result.exit_code))
        else:
            # Typo fix: 'succesfully' -> 'successfully' in user-facing output.
            click.echo(
                'Success: {}: Completed successfully (stage={}, exit_code={})'.
                format(operation.name, metadata.stage, response.result.exit_code))

    if print_details:
        metadata = response.result.execution_metadata

        click.echo(indent('worker={}'.format(metadata.worker), ' '))

        queued = metadata.queued_timestamp.ToDatetime()
        click.echo(indent('queued_at={}'.format(queued), ' '))

        worker_start = metadata.worker_start_timestamp.ToDatetime()
        worker_completed = metadata.worker_completed_timestamp.ToDatetime()
        click.echo(
            indent('work_duration={}'.format(worker_completed - worker_start), ' '))

        fetch_start = metadata.input_fetch_start_timestamp.ToDatetime()
        fetch_completed = metadata.input_fetch_completed_timestamp.ToDatetime()
        click.echo(
            indent('fetch_duration={}'.format(fetch_completed - fetch_start), ' '))

        execution_start = metadata.execution_start_timestamp.ToDatetime()
        execution_completed = metadata.execution_completed_timestamp.ToDatetime()
        # Typo fix: 'exection_duration' -> 'execution_duration'.
        click.echo(
            indent(
                'execution_duration={}'.format(execution_completed - execution_start),
                ' '))

        upload_start = metadata.output_upload_start_timestamp.ToDatetime()
        upload_completed = metadata.output_upload_completed_timestamp.ToDatetime()
        click.echo(
            indent(
                'upload_duration={}'.format(upload_completed - upload_start), ' '))

        click.echo(
            indent('total_duration={}'.format(worker_completed - queued), ' '))
def __init__(self, do_not_cache, action_digest, platform_requirements=None, priority=0,
             name=None, operations=(), cancelled_operations=None, lease=None,
             stage=OperationStage.UNKNOWN.value, cancelled=False,
             queued_timestamp=None, queued_time_duration=None,
             worker_start_timestamp=None, worker_completed_timestamp=None,
             done=False, result=None, worker_name=None, n_tries=0):
    """Initializes a job from its action and, optionally, persisted state.

    Args:
        do_not_cache (bool): whether results may be stored in the action cache.
        action_digest: digest of the action this job executes.
        platform_requirements (dict, optional): worker platform requirements.
        priority (int): scheduling priority of the job.
        name (str, optional): job name; a fresh UUID4 is generated when absent.
        operations (iterable): existing operation messages attached to the job.
        cancelled_operations (set, optional): names of cancelled operations.
            Defaults to a fresh empty set per instance.
        lease: existing lease for the job, if any.
        stage (int): current ``OperationStage`` value of the job.
        cancelled (bool): whether the job (and its lease) were cancelled.
        queued_timestamp / queued_time_duration / worker_start_timestamp /
        worker_completed_timestamp: optional protobuf time values to restore.
        done (bool): whether the job has finished executing.
        result (ExecuteResponse, optional): existing execution response.
        worker_name (str, optional): name of the worker running the job.
        n_tries (int): number of execution attempts so far.
    """
    self.__logger = logging.getLogger(__name__)

    self._name = name or str(uuid.uuid4())
    self._priority = priority
    self._lease = lease

    self.__execute_response = result
    if result is None:
        self.__execute_response = remote_execution_pb2.ExecuteResponse()
    self.__operation_metadata = remote_execution_pb2.ExecuteOperationMetadata()

    self.__queued_timestamp = Timestamp()
    if queued_timestamp is not None:
        self.__queued_timestamp.CopyFrom(queued_timestamp)

    self.__queued_time_duration = Duration()
    if queued_time_duration is not None:
        self.__queued_time_duration.CopyFrom(queued_time_duration)

    self.__worker_start_timestamp = Timestamp()
    if worker_start_timestamp is not None:
        self.__worker_start_timestamp.CopyFrom(worker_start_timestamp)

    self.__worker_completed_timestamp = Timestamp()
    if worker_completed_timestamp is not None:
        self.__worker_completed_timestamp.CopyFrom(worker_completed_timestamp)

    self.__operations_by_name = {op.name: op
                                 for op in operations}  # Name to Operation 1:1 mapping
    # Bug fix: the previous default `cancelled_operations=set()` was a shared
    # mutable default; use a None sentinel and build a fresh set per instance.
    if cancelled_operations is None:
        cancelled_operations = set()
    self.__operations_cancelled = cancelled_operations
    self.__lease_cancelled = cancelled
    self.__job_cancelled = cancelled

    self.__operation_metadata.action_digest.CopyFrom(action_digest)
    self.__operation_metadata.stage = stage

    self._do_not_cache = do_not_cache
    self._n_tries = n_tries

    self._platform_requirements = platform_requirements \
        if platform_requirements else dict()

    self._done = done
    self.worker_name = worker_name