Example #1
0
    def url(self):
        """
        The download URL for this Job's results.

        If `format` is not "download" or "email", `url` will be None.
        """
        # Which oneof field is set on the destination message decides
        # whether a URL exists at all.
        destination_name = which_has(self._message.destination)
        if destination_name in ("download", "email"):
            return self.BUCKET_PREFIX.format(self.id)
        return None
Example #2
0
def get_loader(output_destination: destinations_pb2.Destination):
    """
    Return the loader callable registered for ``output_destination``.

    The destination message is a protobuf ``oneof``; whichever field is set
    (per ``which_has``) selects the concrete destination submessage, and its
    Python type is the key into the ``LOADERS`` registry.

    Parameters
    ----------
    output_destination: destinations_pb2.Destination
        Destination message with exactly one oneof field set.

    Returns
    -------
    callable
        The loader registered for the destination's concrete type.

    Raises
    ------
    NotImplementedError
        If no loader is registered for the destination type.
    """
    specific_destination = getattr(output_destination,
                                   which_has(output_destination))
    try:
        return LOADERS[type(specific_destination)]
    except KeyError:
        # Suppress the internal KeyError context: the registry miss is an
        # implementation detail, not useful in the user-facing traceback.
        raise NotImplementedError(
            "Not possible to load results for output destination {}".format(
                type(specific_destination).__name__)) from None
Example #3
0
    def url(self):
        """
        The download URL for this Job's results.

        If `format` is not "download" or "email", `url` will be None.
        """
        # Only destinations that actually expose a URL field qualify.
        name = which_has(self._message.destination)
        if name not in ("download", "email"):
            return None
        specific_destination = getattr(self._message.destination, name)
        return specific_destination.result_url
Example #4
0
def download(job: Job):
    """
    Fetch a Job's results from its download URL.

    For a pyarrow-formatted job, the payload is deserialized (using the
    codec advertised in the response headers) and unmarshalled into the
    job's result type; for every other format the raw response bytes are
    returned unchanged.
    """
    response = requests.get(job.url)
    response.raise_for_status()
    # TODO error handling; likely the result has expired
    payload = response.content

    message = job._message
    specific_format = getattr(message.format, which_has(message.format))

    if not isinstance(specific_format, formats_pb2.Pyarrow):
        return payload

    # The serialization codec is carried out-of-band in a custom header.
    codec = response.headers["x-goog-meta-X-Arrow-Codec"]
    marshalled = deserialize_pyarrow(payload, codec)
    return unmarshal.unmarshal(job.result_type, marshalled)
Example #5
0
    def result_to_file(self, file, timeout=None, progress_bar=None):
        """
        Save the result of the job to a file. This blocks until the job is
        complete.

        Only the "download" destination can be written to a file.
        For destinations like "catalog", where the data is handed off
        to another service, you'll need to use that service to retrieve it.
        (In the "catalog" case, that's `Raster` and `Metadata`.)

        Parameters
        ----------
        file: path or file-like object
            Path or file where results will be written
        timeout: int, optional
            The number of seconds to wait for the result.
        progress_bar: bool, optional
            Flag to draw the progress bar. Default is to ``True`` if in
            Jupyter Notebook.

        Example
        -------
        >>> from descarteslabs.workflows import Job, Int
        >>> job = Job(Int(1), {}, format="json") # doctest: +SKIP
        >>> job.result_to_file("one.json") # doctest: +SKIP

        >>> import io
        >>> from descarteslabs.workflows import Job, Int
        >>> job = Job(Int(2), {}, format="json") # doctest: +SKIP
        >>> bytestream = io.BytesIO() # doctest: +SKIP
        >>> job.result_to_file(bytestream) # doctest: +SKIP
        >>> print(bytestream.read()) # doctest: +SKIP
        b'2'
        """
        destination_name = which_has(self._message.destination)
        if destination_name not in ("download", "email"):
            raise NotImplementedError(
                "Not possible to automatically write results to a file for "
                "output destination {}. You'll need to load the data and write it "
                "out yourself.".format(destination_name)
            )

        # We only ever *write* to `file`, so detect a file-like object by its
        # `write` attribute (a write-only stream need not expose `read`).
        if hasattr(file, "write"):
            close_file = False
        else:
            # assume it's a path; only close what we opened ourselves
            file = open(os.path.expanduser(file), "wb")
            close_file = True

        try:
            self.wait(timeout=timeout, progress_bar=progress_bar)

            response = requests.get(self.url, stream=True)
            response.raise_for_status()
            # TODO error handling; likely the result has expired

            # Have urllib3 transparently decompress while streaming, then
            # copy the raw stream to the file in chunks.
            response.raw.decode_content = True
            shutil.copyfileobj(response.raw, file)
            # https://stackoverflow.com/a/13137873/10519953

        finally:
            if close_file:
                file.close()