Example No. 1
    def test_as_completed(self):
        tasks = [FutureTask("group_id", str(n), client=self.client) for n in range(5)]
        response1 = [{'id': str(n), 'result_type': 'json'} for n in range(3)]
        response2 = [{'id': str(n), 'result_type': 'json'} for n in range(3, 5)]
        self.mock_response(responses.POST, {'results': response1})
        self.mock_response(responses.POST, {'results': response2})
        completed_tasks = list(as_completed(tasks, show_progress=False))
        self.assertEqual(5, len(completed_tasks))
        self.assertEqual(list(range(5)), [int(r._task_result['id']) for r in completed_tasks])
Example No. 2
    def test_as_completed(self):
        tasks = [
            FutureTask("group_id", str(n), client=self.client)
            for n in range(5)
        ]

        response1 = [{"id": str(n), "result_type": "json"} for n in range(3)]
        response2 = [{"id": str(n), "result_type": "json"} for n in range(3, 5)]

        self.mock_response(
            responses.GET,
            {
                "id": "foo",
                "queue": {
                    "pending": 3,
                    "successes": 0,
                    "failures": 0
                },
                "status": "running",
            },
        )
        self.mock_response(
            responses.GET,
            {
                "id": "foo",
                "queue": {
                    "pending": 3,
                    "successes": 0,
                    "failures": 0
                },
                "status": "running",
            },
        )

        self.mock_response(responses.POST, {"results": response1})
        self.mock_response(responses.POST, {"results": response2})

        completed_tasks = list(as_completed(tasks, show_progress=False))

        assert 5 == len(completed_tasks)
        assert list(range(5)) == [
            int(r._task_result["id"]) for r in completed_tasks
        ]
Example No. 3
    def test_as_completed_exception(self):
        tasks = [
            FutureTask("group_id", str(n), client=self.client)
            for n in range(5)
        ]

        response1 = [{"id": str(n), "result_type": "json"} for n in range(3)]
        response2 = [{"id": str(n), "result_type": "json"} for n in range(3, 5)]

        self.mock_response(
            responses.GET,
            {
                "id": "foo",
                "queue": {
                    "pending": 3,
                    "successes": 0,
                    "failures": 0
                },
                "status": "running",
            },
        )
        self.mock_response(
            responses.GET,
            {
                "id": "foo",
                "queue": {
                    "pending": 3,
                    "successes": 0,
                    "failures": 0
                },
                "status": "terminated",
            },
        )

        self.mock_response(responses.POST, {"results": response1})
        self.mock_response(responses.POST, {"results": response2})

        with pytest.raises(GroupTerminalException):
            list(as_completed(tasks, show_progress=False))
Example No. 4
File: job.py Project: wri/dl_jobs
    def _run_platform_tasks(self, async_func):
        # Submit one task per argument set in this job's args_list.
        self.tasks = async_func.map(self.args_list)
        nb_tasks = len(self.tasks)
        # Show at most four task ids, with an ellipsis if there are more.
        sample_ids = [t.tuid for t in self.tasks[:4]]
        if nb_tasks > 4:
            elps = ['...']
        else:
            elps = []
        self._print("group_id: {}".format(self.tasks[0].guid))
        self._print("nb_tasks: {}".format(nb_tasks))
        self._print("task_ids: {}".format(sample_ids + elps))
        self._response_divider(True, False)
        # Print each result as it completes; on failure, print the
        # exception and the task's log instead.
        for task in as_completed(self.tasks):
            if self.noisy:
                utils.vspace(1)
            if task.is_success:
                self._print_result(task.result)
            else:
                utils.line("*")
                self._print(task.exception, plain_text=True)
                self._print(task.log, plain_text=True)
                utils.line("*")
Example No. 5
    def test_as_completed_exception(self):
        tasks = [
            FutureTask("group_id", str(n), client=self.client)
            for n in range(5)
        ]

        response1 = [{'id': str(n), 'result_type': 'json'} for n in range(3)]
        response2 = [{'id': str(n), 'result_type': 'json'} for n in range(3, 5)]

        self.mock_response(
            responses.GET, {
                'id': 'foo',
                'queue': {
                    'pending': 3,
                    'successes': 0,
                    'failures': 0
                },
                'status': 'running'
            })
        self.mock_response(
            responses.GET, {
                'id': 'foo',
                'queue': {
                    'pending': 3,
                    'successes': 0,
                    'failures': 0
                },
                'status': 'terminated'
            })

        self.mock_response(responses.POST, {'results': response1})
        self.mock_response(responses.POST, {'results': response2})

        with self.assertRaises(GroupTerminalException):
            list(as_completed(tasks, show_progress=False))
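As the mocked GET responses above suggest, as_completed polls the task group's status between result fetches and raises GroupTerminalException once the group reports a terminal state. A minimal sketch of catching this in client code, assuming GroupTerminalException is importable from the same module as as_completed (adjust the import to your client version):

from descarteslabs.client.services.tasks import as_completed
# Assumption: GroupTerminalException lives alongside as_completed in
# this module; check your descarteslabs client version.
from descarteslabs.client.services.tasks import GroupTerminalException

completed = []
try:
    for task in as_completed(tasks, show_progress=False):
        completed.append(task)
except GroupTerminalException:
    # The group was terminated before all tasks finished; `completed`
    # holds the tasks that did come back.
    print("Group terminated; {} tasks completed".format(len(completed)))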
Example No. 6
from descarteslabs.client.services.tasks import Tasks, as_completed

at = Tasks()


def f(x, option=None):
    return "Hello World {} {}".format(x, option)


async_func = at.create_function(
    f,
    name='hello-world',
    image="us.gcr.io/dl-ci-cd/images/tasks/public/py2/default:v2018.07.25",
)

task1 = async_func(1)
task2 = async_func(2, option='hi')
print(task1.result)
print(task2.result)

tasks = async_func.map(range(100))
for task in as_completed(tasks):
    print(task.result)
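Failed tasks can be split off from successes while iterating. A minimal sketch building on the example above; task.args is an assumption here, taken from Example No. 7, where it holds the arguments of the invocation:

succeeded, failed = [], []
for task in as_completed(tasks, show_progress=False):
    (succeeded if task.is_success else failed).append(task)

# Hypothetical retry: resubmit each failed invocation with its original
# positional arguments (assumes task.args is that argument tuple).
retried = [async_func(*task.args) for task in failed]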
Example No. 7
def deploy(model_version, output_product):
    deploy_aoi = {
        "type": "Polygon",
        "coordinates": [[
            [-99.24164417538321, 26.138411465362807],
            [-93.37666136803256, 26.138411465362807],
            [-93.37666136803256, 31.060649553995205],
            [-99.24164417538321, 31.060649553995205],
            [-99.24164417538321, 26.138411465362807],
        ]],
    }

    # Make sure the output product exists
    try:
        dl.Catalog().get_product(output_product)
    except dl.client.exceptions.NotFoundError:
        print("Output product {} does not exist".format(output_product))
        return

    # Decompose our AOI into 1024x1024 pixel tiles at 90m resolution in UTM
    tiles = dl.scenes.DLTile.from_shape(deploy_aoi,
                                        resolution=90.0,
                                        tilesize=1024,
                                        pad=0)

    # Register our prediction function in the Tasks environment.
    #
    # We specify the resource requirements per worker (1 CPU & 2GB of RAM),
    # the environment (container with Python 3.7), and any extra PyPI
    # requirements (descarteslabs client and scikit-learn).
    tasks = dl.Tasks()
    run_model_remotely = tasks.create_function(
        run_model,
        name="example water model deployment",
        image="us.gcr.io/dl-ci-cd/images/tasks/public/py3.7/default:v2019.05.29",
        cpu=1.0,
        memory="2Gi",
        requirements=[
            "descarteslabs[complete]==0.19.0",
            "scikit-learn==0.21.1",
        ],
    )

    # Create a list with arguments of each invocation of run_model
    task_arguments = [(model_version, dltile.key, output_product)
                      for dltile in tiles]

    results = run_model_remotely.map(*zip(*task_arguments))
    print("Submitted {} tasks to task group {}...".format(
        len(tiles), run_model_remotely.group_id))

    # Iterate through task results as they complete.
    #
    # If some of the tasks failed, we will print the console output and the
    # arguments of that invocation.
    #
    # Note that this is for informational purposes only, and the tasks will
    # continue running if the script is interrupted at this point. You can use
    # https://monitor.descarteslabs.com/ to see the status of all running
    # task groups.
    for i, task in enumerate(as_completed(results, show_progress=False)):
        percent_complete = 100.0 * (i + 1) / len(results)
        print(
            "Progress update: {} completed out of {} ({:.2f}%) - last task took {:.2f}sec to {}"
            .format(
                i + 1,
                len(results),
                percent_complete,
                task.runtime,
                "succeed" if task.is_success else "fail",
            ))

        if not task.is_success:
            print("\nTASK FAILURE with arguments {}:\n{}".format(
                task.args, task.log.decode()))

    # Clean up the task group
    tasks.delete_group_by_id(run_model_remotely.group_id)
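A hypothetical invocation of deploy; both arguments are placeholders for a real model version and an existing Catalog product ID:

# Placeholder values only.
deploy("v1", "my-org:water-model-output")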
Example No. 8
from descarteslabs.client.services.tasks import Tasks, as_completed


def f():
    import tensorflow as tf
    hello = tf.constant('Hello, TensorFlow!')

    # Start tf session
    sess = tf.Session()

    # Run the op
    return sess.run(hello)


at = Tasks()

async_function = at.create_function(
    f,
    name="hello-tensorflow",
    image="us.gcr.io/dl-ci-cd/images/tasks/public/py2/default:v2018.07.25",
)

for task in as_completed([async_function()]):
    print(task.result)
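Task groups persist after the script exits. Mirroring the cleanup in Example No. 7, the group can be deleted once the result is in (a sketch reusing the at client and async_function from above):

# Remove the task group now that its only task has completed.
at.delete_group_by_id(async_function.group_id)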