def list_pipelines(self, timeout=None) -> List[pipeline_types.PipelineInfo]:
        """
        Requests a list of pipelines from Clara.

        Returns:
            List of pipeline_types.PipelineInfo with details of each pipeline known to the server
        """
        if (self._channel is None) or (self._stub is None):
            raise Exception("Connection is currently closed. Please run reconnect() to reopen connection")

        request = pipelines_pb2.PipelinesListRequest(
            header=self.get_request_header()
        )

        response = self._stub.List(request, timeout=timeout)

        info_list = []

        for item in response:
            if (item.details is None) or (item.details.pipeline_id.value == ''):
                continue

            info = pipeline_types.PipelineInfo(
                pipeline_id=pipeline_types.PipelineId(item.details.pipeline_id.value),
                name=item.details.name,
                metadata=item.details.metadata
            )

            info_list.append(info)

        return info_list
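
# Usage sketch for list_pipelines (assumptions: `clara_ip_address` and
# `clara_port` point at a reachable Clara instance, as in the examples below):
pipelines_client = PipelinesClient(target=clara_ip_address, port=clara_port)
for info in pipelines_client.list_pipelines():
    print(info.pipeline_id.value, info.name)
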
    def create_pipeline(self, definition: List[pipeline_types.PipelineDefinition],
                        pipeline_id: pipeline_types.PipelineId = None, metadata: Mapping[str, str] = None,
                        timeout=None) -> pipeline_types.PipelineId:
        """
        Requests the creation of a new pipeline by Clara.

        Args:
            definition(List[pipeline_types.PipelineDefinition]): Definition from which to create the new pipeline.
            pipeline_id:  Optional argument to force a specific pipeline identifier when replicating deployments.
                    Use ONLY with a highly available primary-primary fail-over solution in place AND a full
                    understanding of what it does.
            metadata(Mapping[str, str]): Set of key/value pairs to be appended to the pipeline metadata. If a metadata
                    key in the request already exists in the pipeline record, or if duplicate keys are passed in the
                    request, the pipeline will not be updated and an error will be returned. Keys are compared using
                    case-insensitive comparator functions. The maximum allowed size of a metadata key is 128 bytes,
                    while the maximum allowed size of a metadata value is 256 bytes. The maximum allowed size for the
                    overall metadata of an individual pipeline is 4 megabytes.

        Returns:
            pipeline_types.PipelineId of newly created pipeline
        """
        if (self._channel is None) or (self._stub is None):
            raise Exception("Connection is currently closed. Please run reconnect() to reopen connection")

        if definition is None:
            raise Exception("Argument 'definition' must be initialized to a non-null list instance")

        request_list = []

        # If a pipeline identifier was given, convert it to its gRPC Identifier
        # form once, before building the per-file requests. Converting inside
        # the loop would fail on the second file, because the value would
        # already be a gRPC Identifier by then.
        if pipeline_id is not None:
            pipeline_id = pipeline_id.to_grpc_value()

        for item in definition:
            item_definition = pipelines_pb2.PipelineDefinitionFile(
                content=item.content,
                path=item.name
            )

            request = pipelines_pb2.PipelinesCreateRequest(
                definition=item_definition,
                pipeline_id=pipeline_id,
                header=self.get_request_header()
            )

            if metadata is not None:
                request.metadata.update(metadata)

            request_list.append(request)

        request_list = RequestIterator(request_list)

        response = self._stub.Create(
            request_list(),
            timeout=timeout
        )

        self.check_response_header(header=response.header)

        return pipeline_types.PipelineId(response.pipeline_id.value)
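
# Usage sketch for create_pipeline (assumption: `pipelines_client` is the
# connected PipelinesClient from the sketch above; the YAML path is illustrative):
definition = [
    pipeline_types.PipelineDefinition(name='pipeline.yaml',
                                      content=Path('pipeline.yaml').read_text())
]
new_pipeline_id = pipelines_client.create_pipeline(definition=definition)
print(new_pipeline_id.value)
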
    def get_status(self,
                   job_id: job_types.JobId,
                   timeout=None) -> job_types.JobDetails:
        """
        Get status of a job

        Args:
            job_id (job_types.JobId): Unique identifier of the job to get the status of.

        Returns:
            job_types.JobDetails including the status of a known job
        """

        if (self._channel is None) or (self._stub is None):
            raise Exception(
                "Connection is currently closed. Please run reconnect() to reopen connection"
            )

        if (job_id.value is None) or (job_id.value == ""):
            raise Exception(
                "Job identifier must be an instantiated, non-null instance")

        request = jobs_pb2.JobsStatusRequest(header=self.get_request_header(),
                                             job_id=job_id.to_grpc_value())

        response = self._stub.Status(request, timeout=timeout)

        self.check_response_header(header=response.header)

        resp_operator_details = response.operator_details
        operator_details = {}

        # Collect per-operator lifecycle timestamps and status, keyed by operator name.
        for item in resp_operator_details:
            operator_details[item.name] = {
                "created": item.created,
                "started": item.started,
                "stopped": item.stopped,
                "status": item.status,
            }

        result = job_types.JobDetails(
            job_id=job_types.JobId(response.job_id.value),
            job_priority=response.priority,
            job_state=response.state,
            job_status=response.status,
            name=response.name,
            payload_id=payload_types.PayloadId(response.payload_id.value),
            pipeline_id=pipeline_types.PipelineId(response.pipeline_id.value),
            date_created=self.get_timestamp(response.created),
            date_started=self.get_timestamp(response.started),
            date_stopped=self.get_timestamp(response.stopped),
            operator_details=operator_details,
            messages=response.messages,
            metadata=response.metadata)

        return result
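
# Usage sketch for get_status (assumption: `jobs_client` is a connected
# JobsClient and `job_id` identifies a previously created job):
details = jobs_client.get_status(job_id=job_id)
print(details.job_state, details.job_status, details.name)
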
    def pipeline_details(self, pipeline_id: pipeline_types.PipelineId, timeout=None) -> pipeline_types.PipelineDetails:
        """
        Requests details of a pipeline, identified by pipeline_types.PipelineId, from Clara.

        Args:
            pipeline_id (pipeline_types.PipelineId): Unique identifier of the pipeline.

        Returns:
            A pipeline_types.PipelineDetails instance with details on the pipeline specified by 'pipeline_id'
        """
        if (self._channel is None) or (self._stub is None):
            raise Exception("Connection is currently closed. Please run reconnect() to reopen connection")

        if pipeline_id.value is None or pipeline_id.value == "":
            raise Exception("Pipeline identifier argument must be initialized with a non-null, non-empty value")

        request = pipelines_pb2.PipelinesDetailsRequest(
            header=self.get_request_header(),
            pipeline_id=pipeline_id.to_grpc_value(),
        )

        response = self._stub.Details(request, timeout=timeout)

        responses = list(response)

        if len(responses) > 0:
            self.check_response_header(header=responses[0].header)

            result = pipeline_types.PipelineDetails(
                name=responses[0].name,
                pipeline_id=pipeline_types.PipelineId(responses[0].pipeline_id.value),
                metadata=responses[0].metadata
            )

            result_definition = []

            for resp in responses:
                result_definition.append(
                    pipeline_types.PipelineDefinition(
                        name=resp.name,
                        content=resp.definition
                    )
                )

            result.definition = result_definition

            return result

        return None
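
# Usage sketch for pipeline_details (assumption: `pipelines_client` and
# `new_pipeline_id` come from the sketches above):
details = pipelines_client.pipeline_details(pipeline_id=new_pipeline_id)
if details is not None:
    print(details.name, details.pipeline_id.value)
    for definition_file in details.definition:
        print(definition_file.name)
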
# Example 5

def test_create_job():
    requests = [
        jobs_pb2.JobsCreateRequest(
            header=BaseClient.get_request_header(),
            name='test job',
            pipeline_id=common_pb2.Identifier(
                value='92656d79fa414db6b294069c0e9e6df5'
            ),
            priority=jobs_pb2.JOB_PRIORITY_NORMAL
        )
    ]

    responses = [
        jobs_pb2.JobsCreateResponse(
            header=common_pb2.ResponseHeader(
                code=0,
                messages=[]),
            job_id=common_pb2.Identifier(
                value='432b274a8f754968888807fe1eba237b'
            ),
            payload_id=common_pb2.Identifier(
                value='7ac5c691e13d4f45894a3a70d9925936'
            )
        )
    ]

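    # Handler tuples take the form (method name, RPC type, (requests, responses));
    # the mock client presumably replays the canned responses for the stubbed RPC.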
    stub_method_handlers = [(
        'Create',
        'unary_unary',
        (
            requests,
            responses
        )
    )]

    MockClaraJobsServiceClient.stub_method_handlers = stub_method_handlers

    with MockClaraJobsServiceClient('localhost:50051') as client:
        job_info = client.create_job(
            job_name='test job',
            pipeline_id=pipeline_types.PipelineId('92656d79fa414db6b294069c0e9e6df5')
        )

        print(job_info.job_id.value, job_info.payload_id.value)

        assert job_info.job_id.value == '432b274a8f754968888807fe1eba237b'
        assert job_info.payload_id.value == '7ac5c691e13d4f45894a3a70d9925936'
# Example 6

def test_create_pipeline_with_id():
    pipeline_yaml = 'pipeline.yaml'

    requests = [
        pipelines_pb2.PipelinesCreateRequest(
            header=BaseClient.get_request_header(),
            pipeline_id=common_pb2.Identifier(
                value='92656d79fa414db6b294069c0e9e6df5'),
            definition=pipelines_pb2.PipelineDefinitionFile(
                path='pipeline.yaml', content=PIPELINE_TEXT))
    ]

    responses = [
        pipelines_pb2.PipelinesCreateResponse(
            header=common_pb2.ResponseHeader(code=0, messages=[]),
            pipeline_id=common_pb2.Identifier(
                value='92656d79fa414db6b294069c0e9e6df5'))
    ]

    stub_method_handlers = [('Create', 'stream_unary', (requests, responses))]

    # set handlers
    MockClaraPipelineServiceClient.stub_method_handlers = stub_method_handlers

    def_list = [
        pipeline_types.PipelineDefinition(name=pipeline_yaml,
                                          content=PIPELINE_TEXT)
    ]

    pipeline_id = pipeline_types.PipelineId('92656d79fa414db6b294069c0e9e6df5')

    with MockClaraPipelineServiceClient('localhost:50051') as client:
        pipeline_id = client.create_pipeline(definition=def_list,
                                             pipeline_id=pipeline_id)
        print(pipeline_id)
        assert pipeline_id.value == '92656d79fa414db6b294069c0e9e6df5'

# Example 7
jobs_client = JobsClient(target=clara_ip_address, port=clara_port)
payloads_client = PayloadsClient(target=clara_ip_address, port=clara_port)
pipeline_client = PipelinesClient(target=clara_ip_address, port=clara_port)

# Create list of pipeline_types.PipelineDefinition with local path to pipeline .yaml
file_path = "../spleen_pipeline.yaml"
definitions = [
    pipeline_types.PipelineDefinition(name=file_path,
                                      content=Path(file_path).read_text())
]

# Create Pipeline with definition list created
pipeline_id = pipeline_client.create_pipeline(definition=definitions)

# Create Job with newly created Pipeline
job_info = jobs_client.create_job(job_name="spleenjob",
                                  pipeline_id=pipeline_types.PipelineId(
                                      pipeline_id.value))
job_id = job_info.job_id
payload_id = job_info.payload_id

# Local path to directory of files to upload to the job's payload on the Server
input_path = "../app_spleen-input_v1/dcm"

# Go through files in directory and upload to the job using the payload identifier
for file in os.listdir(input_path):
    file_path = os.path.join(input_path, file)
    with open(file_path, 'rb') as fp:
        payloads_client.upload(payload_id=payload_id,
                               blob_name=file,
                               file_object=fp)

# Get a List of the jobs
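# The snippet above stops short of running the job; a hedged sketch of the
# remaining steps, using the JobsClient calls demonstrated in the next example:
job_token = jobs_client.start_job(job_id=job_id)
print(job_token.job_state, job_token.job_status)

job_list = jobs_client.list_jobs()
print(job_list)
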
# Example 8

jobs_client = JobsClient(target=clara_ip_address, port=clara_port)

# Creates Filter of Healthy Jobs - Additionally could filter by Pipeline Id, State, Completion Time, and Creation Time
job_filter = job_types.JobFilter(has_job_status=[job_types.JobStatus.Healthy])

# List Current Jobs with Optional Filter
job_list = jobs_client.list_jobs(job_filter=job_filter)
print(job_list)

# Identifier of created pipeline (ex. colon tumor segmentation)
colon_tumor_pipeline_id = "f9a843935e654a30beb9d1b8352bfaac"

# Create Job
job_info = jobs_client.create_job(
    job_name="colontumor",
    pipeline_id=pipeline_types.PipelineId(colon_tumor_pipeline_id))
print(job_info.job_id.value)

# Start Job
job_token = jobs_client.start_job(job_id=job_info.job_id)
print(job_token.job_state)
print(job_token.job_status)

# Get Status of Job from Identifier
job_details = jobs_client.get_status(job_id=job_token.job_id)

print(job_details.job_state)
print(job_details.job_status)

# Gets List of Operators
print(job_details.operator_details.keys())
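
# A sketch of inspecting per-operator details; get_status populates the keys
# "created", "started", "stopped", and "status" for each operator:
for operator_name, detail in job_details.operator_details.items():
    print(operator_name, detail["status"])
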
    def stream_jobs(self,
                    job_filter: job_types.JobFilter = None,
                    timeout=None):
        """
        Provides generator to stream current jobs on platform

        Args:
            job_filter (job_types.JobFilter): Optional filter used to limit the number of
            pipeline job records return

        Returns:
            list of job_types.JobInfo with known pipeline job details from the server.
        """

        if (self._channel is None) or (self._stub is None):
            raise Exception(
                "Connection is currently closed. Please run reconnect() to reopen connection"
            )

        empty = job_types.JobFilter()

        request = jobs_pb2.JobsListRequest(header=self.get_request_header())

        if job_filter is not None and job_filter != empty:

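            # Timestamps on the wire are whole seconds measured from
            # 0001-01-01 (datetime.MINYEAR), carrying the filter's tzinfo
            # through when one is present.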
            if job_filter.completed_before is not None:
                day_one = datetime.datetime(1, 1, 1)
                if job_filter.completed_before.tzinfo is not None \
                        and job_filter.completed_before.tzinfo.utcoffset(job_filter.completed_before) is not None:
                    day_one = datetime.datetime(
                        1, 1, 1, tzinfo=job_filter.completed_before.tzinfo)

                seconds = (job_filter.completed_before -
                           day_one).total_seconds()
                request.filter.completed_before.value = int(seconds)

            if job_filter.created_after is not None:
                day_one = datetime.datetime(1, 1, 1)
                if job_filter.created_after.tzinfo is not None \
                        and job_filter.created_after.tzinfo.utcoffset(job_filter.created_after) is not None:
                    day_one = datetime.datetime(
                        1, 1, 1, tzinfo=job_filter.created_after.tzinfo)

                seconds = (job_filter.created_after - day_one).total_seconds()
                request.filter.created_after.value = int(seconds)

            if job_filter.has_job_state:
                for state in job_filter.has_job_state:
                    if (state.value < job_types.JobState.Minimum.value) or \
                            (state.value > job_types.JobState.Maximum.value):
                        raise Exception(
                            "Job states in filter must be within " +
                            str(job_types.JobState.Minimum) + " and " +
                            str(job_types.JobState.Maximum) + ", found: " +
                            str(state))

                    request.filter.has_state.append(state.value)

            if job_filter.has_job_status:
                for status in job_filter.has_job_status:
                    if (status.value < job_types.JobStatus.Minimum.value) or \
                            (status.value > job_types.JobStatus.Maximum.value):
                        raise Exception(
                            "Job status in filter must be within " +
                            str(job_types.JobStatus.Minimum) + " and " +
                            str(job_types.JobStatus.Maximum) + ", found: " +
                            str(status))

                    request.filter.has_status.append(status.value)

            if job_filter.pipeline_ids:
                for pipe_id in job_filter.pipeline_ids:
                    request.filter.pipeline_id.append(pipe_id.to_grpc_value())

        response = self._stub.List(request, timeout=timeout)

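        # Validate the response header only once, on the first streamed item.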
        check_header = True

        for item in response:

            if check_header:
                self.check_response_header(header=item.header)
                check_header = False

            if (item.job_details is None) or (item.job_details.job_id.value == ''):
                continue

            info = job_types.JobInfo(
                job_id=job_types.JobId(item.job_details.job_id.value),
                job_priority=item.job_details.priority,
                job_state=item.job_details.state,
                job_status=item.job_details.status,
                name=item.job_details.job_name,
                payload_id=payload_types.PayloadId(
                    item.job_details.payload_id.value),
                pipeline_id=pipeline_types.PipelineId(
                    item.job_details.pipeline_id.value),
                date_created=self.get_timestamp(item.job_details.created),
                date_started=self.get_timestamp(item.job_details.started),
                date_stopped=self.get_timestamp(item.job_details.stopped),
                metadata=item.job_details.metadata)

            yield info
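
# Usage sketch for stream_jobs (assumption: `jobs_client` is a connected
# JobsClient; the filter mirrors the list_jobs example above):
job_filter = job_types.JobFilter(has_job_status=[job_types.JobStatus.Healthy])
for job_info in jobs_client.stream_jobs(job_filter=job_filter):
    print(job_info.job_id.value, job_info.name, job_info.job_status)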