Example #1
def scheduleThumbnailJob(file,
                         attachToType,
                         attachToId,
                         user,
                         width=0,
                         height=0,
                         crop=True):
    """
    Schedule a local thumbnail creation job and return it.
    """
    job = Job().createLocalJob(title='Generate thumbnail for %s' %
                               file['name'],
                               user=user,
                               type='thumbnails.create',
                               public=False,
                               module='girder.plugins.thumbnails.worker',
                               kwargs={
                                   'fileId': str(file['_id']),
                                   'width': width,
                                   'height': height,
                                   'crop': crop,
                                   'attachToType': attachToType,
                                   'attachToId': str(attachToId)
                               })
    Job().scheduleJob(job)
    return job
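
A minimal usage sketch (not from the source): how the helper above might be called, for instance from the thumbnails plugin's REST layer. The file, item, and user documents are assumed to be already-loaded Girder documents; the 128x128 crop values are illustrative.

# Hypothetical call site: queue creation of a cropped 128x128 thumbnail
# that will be attached to an item once the local job runs.
job = scheduleThumbnailJob(file, 'item', item['_id'], user,
                           width=128, height=128, crop=True)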
Example #2
    def testTaleImportBinderFail(self):
        with mock.patch("girder.plugins.wholetale.lib.pids_to_entities") as mock_pids:
            mock_pids.side_effect = ValueError
            resp = self.request(
                path="/tale/import",
                method="POST",
                user=self.user,
                params={
                    "url": "http://use.yt/upload/ef4cd901",
                    "spawn": False,
                    "imageId": self.image["_id"],
                    "asTale": True,
                    "taleKwargs": json.dumps({"title": "tale should fail"}),
                },
            )
            self.assertStatusOk(resp)
            tale = resp.json

            job = Job().findOne({"type": "wholetale.import_binder"})
            self.assertEqual(
                json.loads(job["kwargs"])["taleId"]["$oid"], tale["_id"]
            )

            for i in range(300):
                if job["status"] in {JobStatus.SUCCESS, JobStatus.ERROR}:
                    break
                time.sleep(0.1)
                job = Job().load(job["_id"], force=True)
            self.assertEqual(job["status"], JobStatus.ERROR)
            Job().remove(job)
        tale = Tale().load(tale["_id"], force=True)
        self.assertEqual(tale["status"], TaleStatus.ERROR)
        Tale().remove(tale)
Example #3
    def testWorkerStatusEndpoint(self):
        # Create a job to be handled by the worker plugin
        from girder.plugins.jobs.models.job import Job
        job = Job().createJob(title='title',
                              type='foo',
                              handler='worker_handler',
                              user=self.admin,
                              public=False,
                              args=(),
                              kwargs={})

        job['kwargs'] = {
            'jobInfo': utils.jobInfoSpec(job),
            'inputs': [
                utils.girderInputSpec(self.adminFolder, resourceType='folder')],
            'outputs': [
                utils.girderOutputSpec(self.adminFolder, token=self.adminToken)]
        }
        job = Job().save(job)
        self.assertEqual(job['status'], JobStatus.INACTIVE)

        # Schedule the job
        with mock.patch('celery.Celery') as celeryMock:
            instance = celeryMock.return_value
            instance.send_task.return_value = FakeAsyncResult()

            Job().scheduleJob(job)

        # Call the worker status endpoint
        resp = self.request('/worker/status', method='GET', user=self.admin)
        self.assertStatusOk(resp)
        self.assertHasKeys(resp.json,
                           ['report', 'stats', 'ping', 'active', 'reserved'])
Example #4
def schedule(event):
    """
    This is bound to the "jobs.schedule" event, and will be triggered any time
    a job is scheduled. This handler will process any job that has the
    handler field set to "worker_handler".
    """
    job = event.info
    if job['handler'] == 'worker_handler':
        task = job.get('celeryTaskName', 'girder_worker.run')

        # Set the job status to queued
        Job().updateJob(job, status=JobStatus.QUEUED)

        # Send the task to celery
        asyncResult = getCeleryApp().send_task(
            task, job['args'], job['kwargs'], queue=job.get('celeryQueue'), headers={
                'jobInfoSpec': jobInfoSpec(job, job.get('token', None)),
                'apiUrl': getWorkerApiUrl()
            })

        # Record the task ID from celery.
        Job().updateJob(job, otherFields={
            'celeryTaskId': asyncResult.task_id
        })

        # Stop event propagation since we have taken care of scheduling.
        event.stopPropagation()
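
A brief sketch (assumed wiring, mirroring Example #3): a job reaches the schedule() handler above by being created with handler='worker_handler' and then scheduled, which fires the 'jobs.schedule' event.

from girder.plugins.jobs.models.job import Job

# 'user' is an assumed Girder user document; args/kwargs are placeholders.
job = Job().createJob(title='Run a girder_worker task', type='example',
                      handler='worker_handler', user=user,
                      args=(), kwargs={})
Job().scheduleJob(job)  # emits 'jobs.schedule', handled by schedule(event) above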
Example #5
    def createThumbnails(self, params):
        self.requireParams(['spec'], params)
        try:
            spec = json.loads(params['spec'])
            if not isinstance(spec, list):
                raise ValueError()
        except ValueError:
            raise RestException('The spec parameter must be a JSON list.')
        maxThumbnailFiles = int(Setting().get(
            constants.PluginSettings.LARGE_IMAGE_MAX_THUMBNAIL_FILES))
        if maxThumbnailFiles <= 0:
            raise RestException('Thumbnail files are not enabled.')
        jobKwargs = {'spec': spec}
        if params.get('logInterval') is not None:
            jobKwargs['logInterval'] = float(params['logInterval'])
        job = Job().createLocalJob(
            module='girder.plugins.large_image.rest.large_image',
            function='createThumbnailsJob',
            kwargs=jobKwargs,
            title='Create large image thumbnail files.',
            type='large_image_create_thumbnails',
            user=self.getCurrentUser(),
            public=True,
            # NOTE: 'async' became a reserved keyword in Python 3.7; later
            # Girder releases renamed this parameter to 'asynchronous'.
            async=True,
        )
        Job().scheduleJob(job)
        return job
Example #6
    def testCopyWorkspaceFail(self):
        tale = Tale().createTale(
            self.image,
            [],
            creator=self.admin,
            title="tale one",
            public=True,
            config={"memLimit": "2g"},
        )

        job = Job().createLocalJob(
            title='Copy "{title}" workspace'.format(**tale),
            user=self.user,
            type="wholetale.copy_workspace",
            public=False,
            async=True,
            module="girder.plugins.wholetale.tasks.copy_workspace",
            args=(tale["workspaceId"], "non_existing"),
            kwargs={"user": self.user, "tale": tale},
        )
        Job().scheduleJob(job)
        for i in range(300):
            if job["status"] in {JobStatus.SUCCESS, JobStatus.ERROR}:
                break
            time.sleep(0.1)
            job = Job().load(job["_id"], force=True)
        self.assertEqual(job["status"], JobStatus.ERROR)
        Job().remove(job)
        tale = Tale().load(tale["_id"], force=True)
        self.assertEqual(tale["status"], TaleStatus.ERROR)
        Tale().remove(tale)
Example #7
    def testBuildFail(self):
        from girder.plugins.jobs.models.job import Job
        resp = self.request(path='/instance',
                            method='POST',
                            user=self.user,
                            params={
                                'taleId': str(self.tale_one['_id']),
                                'name': 'tale that will fail',
                                'spawn': False
                            })
        self.assertStatusOk(resp)
        instance = resp.json

        job = Job().createJob(title='Fake build job',
                              type='celery',
                              handler='worker_handler',
                              user=self.user,
                              public=False,
                              args=[str(self.tale_one['_id']), False],
                              kwargs={},
                              otherFields={
                                  'wt_notification_id': 'nonexisting',
                                  'instance_id': instance['_id']
                              })
        job = Job().save(job)
        self.assertEqual(job['status'], JobStatus.INACTIVE)
        Job().updateJob(job, log='job queued', status=JobStatus.QUEUED)
        Job().updateJob(job, log='job running', status=JobStatus.RUNNING)
        Job().updateJob(job, log='job failed', status=JobStatus.ERROR)
        instance = Instance().load(instance['_id'], force=True)
        self.assertEqual(instance['status'], InstanceStatus.ERROR)
        Instance().remove(instance)
Example #8
    def _waitForJobToBeRunning(self, job):
        from girder.plugins.jobs.constants import JobStatus
        from girder.plugins.jobs.models.job import Job
        job = Job().load(id=job['_id'], force=True)
        while job['status'] != JobStatus.RUNNING:
            time.sleep(0.01)
            job = Job().load(id=job['_id'], force=True)
        return job
Example #9
    def _postTileViaHttp(self, itemId, fileId, jobAction=None):
        """
        When we know we need to process a job, we have to use an actual http
        request rather than the normal simulated request to cherrypy.  This is
        required because cherrypy needs to know how it was reached so that
        girder_worker can reach it when done.

        :param itemId: the id of the item with the file to process.
        :param fileId: the id of the file that should be processed.
        :param jobAction: if 'delete', delete the job immediately.
        :returns: metadata from the tile if the conversion was successful,
                  False if it converted but didn't result in usable tiles, and
                  None if it failed.
        """
        from girder.plugins.jobs.models.job import Job

        headers = [('Accept', 'application/json')]
        self._buildHeaders(headers, None, self.admin, None, None, None)
        headers = {header[0]: header[1] for header in headers}
        req = requests.post('http://127.0.0.1:%d/api/v1/item/%s/tiles' %
                            (int(os.environ['GIRDER_PORT']), itemId),
                            headers=headers,
                            data={'fileId': fileId})
        self.assertEqual(req.status_code, 200)
        # If we ask to create the item again right away, we should be told that
        # either there is already a job running or the item has already been
        # added
        req = requests.post('http://127.0.0.1:%d/api/v1/item/%s/tiles' %
                            (int(os.environ['GIRDER_PORT']), itemId),
                            headers=headers,
                            data={'fileId': fileId})
        self.assertEqual(req.status_code, 400)
        self.assertTrue('Item already has' in req.json()['message']
                        or 'Item is scheduled' in req.json()['message'])

        if jobAction == 'delete':
            Job().remove(Job().find({}, sort=[('_id', SortDir.DESCENDING)])[0])

        starttime = time.time()
        resp = None
        while time.time() - starttime < 30:
            try:
                resp = self.request(path='/item/%s/tiles' % itemId,
                                    user=self.admin)
                self.assertStatusOk(resp)
                break
            except AssertionError as exc:
                if 'didn\'t meet requirements' in exc.args[0]:
                    return False
                if 'No large image file' in exc.args[0]:
                    return None
                self.assertIn('is still pending creation', exc.args[0])
            time.sleep(0.1)
        self.assertStatusOk(resp)
        return resp.json
Example #10
    def testTaleCopy(self):
        from girder.plugins.wholetale.models.tale import Tale
        from girder.plugins.wholetale.constants import TaleStatus
        from girder.plugins.jobs.models.job import Job
        from girder.plugins.jobs.constants import JobStatus
        tale = Tale().createTale(self.image, [],
                                 creator=self.admin,
                                 public=True)
        workspace = Tale().createWorkspace(tale)
        # The call below works around a bug; it will be addressed elsewhere.
        workspace = Folder().setPublic(workspace, True, save=True)

        adapter = assetstore_utilities.getAssetstoreAdapter(self.ws_assetstore)
        size = 101
        data = BytesIO(b' ' * size)
        files = []
        files.append(Upload().uploadFromFile(data,
                                             size,
                                             'file01.txt',
                                             parentType='folder',
                                             parent=workspace,
                                             assetstore=self.ws_assetstore))
        fullPath = adapter.fullPath(files[0])

        # Create a copy
        resp = self.request(path='/tale/{_id}/copy'.format(**tale),
                            method='POST',
                            user=self.user)
        self.assertStatusOk(resp)

        new_tale = resp.json
        self.assertFalse(new_tale['public'])
        self.assertEqual(new_tale['dataSet'], tale['dataSet'])
        self.assertEqual(new_tale['copyOfTale'], str(tale['_id']))
        self.assertEqual(new_tale['imageId'], str(tale['imageId']))
        self.assertEqual(new_tale['creatorId'], str(self.user['_id']))
        self.assertEqual(new_tale['status'], TaleStatus.PREPARING)

        copied_file_path = re.sub(workspace['name'], new_tale['_id'], fullPath)
        job = Job().findOne({'type': 'wholetale.copy_workspace'})
        for i in range(10):
            job = Job().load(job['_id'], force=True)
            if job['status'] == JobStatus.SUCCESS:
                break
            time.sleep(0.1)
        self.assertTrue(os.path.isfile(copied_file_path))
        resp = self.request(path='/tale/{_id}'.format(**new_tale),
                            method='GET',
                            user=self.user)
        self.assertStatusOk(resp)
        new_tale = resp.json
        self.assertEqual(new_tale['status'], TaleStatus.READY)

        Tale().remove(new_tale)
        Tale().remove(tale)
Example #11
    def testLocalJob(self):
        # Make sure local jobs still work
        from girder.plugins.jobs.models.job import Job
        job = Job().createLocalJob(title='local',
                                   type='local',
                                   user=self.users[0],
                                   module='plugin_tests.worker_test',
                                   function='local_job')

        Job().scheduleJob(job)

        job = Job().load(job['_id'], force=True, includeLog=True)
        self.assertIn('job ran', job['log'])
Example #12
    def delete(self, item, skipFileIds=None):
        deleted = False
        if 'largeImage' in item:
            job = None
            if 'jobId' in item['largeImage']:
                try:
                    job = Job().load(item['largeImage']['jobId'],
                                     force=True,
                                     exc=True)
                except ValidationException:
                    # The job has been deleted, but we still need to clean up
                    # the rest of the tile information
                    pass
            if (item['largeImage'].get('expected') and job
                    and job.get('status')
                    in (JobStatus.QUEUED, JobStatus.RUNNING)):
                # cannot cleanly remove the large image, since a conversion
                # job is currently in progress
                # TODO: cancel the job
                # TODO: return a failure error code
                return False

            # If this file was created by the worker job, delete it
            if 'jobId' in item['largeImage']:
                if job:
                    # TODO: does this eliminate all traces of the job?
                    # TODO: do we want to remove the original job?
                    Job().remove(job)
                del item['largeImage']['jobId']

            if 'originalId' in item['largeImage']:
                # The large image file should not be the original file
                assert item['largeImage']['originalId'] != \
                    item['largeImage'].get('fileId')

                if ('fileId' in item['largeImage'] and
                    (not skipFileIds
                     or item['largeImage']['fileId'] not in skipFileIds)):
                    file = File().load(id=item['largeImage']['fileId'],
                                       force=True)
                    if file:
                        File().remove(file)
                del item['largeImage']['originalId']

            del item['largeImage']

            item = self.save(item)
            deleted = True
        self.removeThumbnailFiles(item)
        return deleted
Example #13
    def copyTale(self, tale):
        user = self.getCurrentUser()
        image = self.model('image', 'wholetale').load(
            tale['imageId'], user=user, level=AccessType.READ, exc=True)
        default_author = ' '.join((user['firstName'], user['lastName']))
        new_tale = self._model.createTale(
            image,
            tale['dataSet'],
            creator=user,
            save=True,
            title=tale.get('title'),
            description=tale.get('description'),
            public=False,
            config=tale.get('config'),
            icon=image.get('icon', (
                'https://raw.githubusercontent.com/whole-tale/dashboard/'
                'master/public/images/whole_tale_logo.png')),
            illustration=tale.get('illustration', (
                'https://raw.githubusercontent.com/whole-tale/dashboard/'
                'master/public/images/demo-graph2.jpg')),
            authors=tale.get('authors', default_author),
            category=tale.get('category', 'science'),
            narrative=tale.get('narrative'),
            licenseSPDX=tale.get('licenseSPDX'),
            status=TaleStatus.PREPARING,
            relatedIdentifiers=tale.get('relatedIdentifiers'),
        )
        new_tale['copyOfTale'] = tale['_id']
        new_tale = self._model.save(new_tale)
        # asynchronously copy the workspace of a source Tale
        tale_workspaceId = self._model.createWorkspace(tale)['_id']
        new_tale_workspaceId = self._model.createWorkspace(new_tale)['_id']
        job = Job().createLocalJob(
            title='Copy "{title}" workspace'.format(**tale),
            user=user,
            type='wholetale.copy_workspace',
            public=False,
            async=True,
            module='girder.plugins.wholetale.tasks.copy_workspace',
            args=(tale_workspaceId, new_tale_workspaceId),
            kwargs={
                'user': user,
                'tale': new_tale
            })
        Job().scheduleJob(job)
        return new_tale
Example #14
def cancel(event):
    """
    This is bound to the "jobs.cancel" event, and will be triggered any time
    a job is canceled. This handler will process any job that has the
    handler field set to "worker_handler".
    """
    job = event.info
    if job['handler'] in ['worker_handler', 'celery_handler']:
        # Stop event propagation and prevent default, we are using a custom state
        event.stopPropagation().preventDefault()

        celeryTaskId = job.get('celeryTaskId')

        if celeryTaskId is None:
            msg = ("Unable to cancel Celery task. Job '%s' doesn't have a Celery task id."
                   % job['_id'])
            logger.warn(msg)
            return

        if job['status'] not in [CustomJobStatus.CANCELING, JobStatus.CANCELED,
                                 JobStatus.SUCCESS, JobStatus.ERROR]:
            # Set the job status to canceling
            Job().updateJob(job, status=CustomJobStatus.CANCELING)

            # Send the revoke request.
            asyncResult = AsyncResult(celeryTaskId, app=getCeleryApp())
            asyncResult.revoke()
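
A short sketch (assumed wiring): cancellation normally enters this handler through the jobs plugin, whose cancelJob() call (used in Examples #15 and #26) emits the 'jobs.cancel' event consumed above.

from girder.plugins.jobs.models.job import Job

job = Job().load(jobId, force=True)  # 'jobId' is a hypothetical job id
Job().cancelJob(job)                 # fires 'jobs.cancel' -> cancel(event) above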
Example #15
    def _createThumbnails(self, spec, cancel=False):
        from girder.plugins.jobs.constants import JobStatus
        from girder.plugins.jobs.models.job import Job

        params = {'spec': json.dumps(spec)}
        if cancel:
            params['logInterval'] = 0
        resp = self.request(method='PUT',
                            path='/large_image/thumbnails',
                            user=self.admin,
                            params=params)
        self.assertStatusOk(resp)
        job = resp.json
        if cancel:
            job = self._waitForJobToBeRunning(job)
            job = Job().cancelJob(job)

        starttime = time.time()
        while True:
            self.assertTrue(time.time() - starttime < 30)
            resp = self.request('/job/%s' % str(job['_id']))
            self.assertStatusOk(resp)
            if resp.json.get('status') == JobStatus.SUCCESS:
                return True
            if resp.json.get('status') == JobStatus.ERROR:
                return False
            if resp.json.get('status') == JobStatus.CANCELED:
                return 'canceled'
            time.sleep(0.1)
Example #16
    def startRun(self, run, entrypoint):
        user = self.getCurrentUser()

        if not entrypoint:
            entrypoint = "run.sh"

        runRoot = Folder().load(run['parentId'],
                                user=user,
                                level=AccessType.WRITE)
        tale = Tale().load(runRoot['meta']['taleId'],
                           user=user,
                           level=AccessType.READ)

        resource = {
            'type': 'wt_recorded_run',
            'tale_id': tale['_id'],
            'tale_title': tale['title']
        }

        token = Token().createToken(user=user, days=0.5)

        notification = init_progress(resource, user, 'Recorded run',
                                     'Initializing', RECORDED_RUN_STEP_TOTAL)

        rrTask = recorded_run.signature(
            args=[str(run['_id']),
                  str(tale['_id']), entrypoint],
            girder_job_other_fields={
                'wt_notification_id': str(notification['_id']),
            },
            girder_client_token=str(token['_id']),
        ).apply_async()

        return Job().filter(rrTask.job, user=user)
Example #17
    def _createHistogramJob(self, **kwargs):
        from girder.plugins.jobs.models.job import Job
        from girder.plugins.jobs.constants import JobStatus
        from girder.plugins.histogram.models.histogram import Histogram

        file, item = self._uploadFile('plugins/large_image/plugin_tests/test_files/test_L_8.png')

        token = Token().createToken(self.admin)

        doc = Histogram().createHistogramJob(item, file, user=self.admin,
                                             token=token, **kwargs)

        complete = (JobStatus.SUCCESS, JobStatus.ERROR, JobStatus.CANCELED)
        starttime = time.time()
        while True:
            self.assertTrue(time.time() - starttime < 30)
            job = Job().load(doc['_id'], user=self.admin, exc=True)
            if job.get('status') in complete:
                break
            time.sleep(0.1)
        if job.get('log'):
            print(job.get('log'))
        assert job.get('status') == JobStatus.SUCCESS

        return ObjectId(job['_id'])
Example #18
def jobInfoSpec(job, token=None, logPrint=True):
    """
    Build the jobInfo specification for a task to write status and log output
    back to a Girder job.

    :param job: The job document representing the worker task.
    :type job: dict
    :param token: The token to use. Creates a job token if not passed.
    :type token: str or dict
    :param logPrint: Whether standard output from the job should be written
        to the job log.
    """
    if token is None:
        token = Job().createJobToken(job)

    if isinstance(token, dict):
        token = token['_id']

    return {
        'method': 'PUT',
        'url': '/'.join((getWorkerApiUrl(), 'job', str(job['_id']))),
        'reference': str(job['_id']),
        'headers': {
            'Girder-Token': token
        },
        'logPrint': logPrint
    }
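
A minimal sketch (assumed usage, consistent with Examples #3 and #4): the spec returned above is embedded in the task payload so the remote worker can write status and log output back to the job over the REST API.

from girder.plugins.jobs.models.job import Job

# 'job' is an assumed job document created via Job().createJob(...).
job['kwargs']['jobInfo'] = jobInfoSpec(job)
job = Job().save(job)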
Example #19
def attachParentJob(event):
    """Attach parentJob before a model is saved."""
    job = event.info
    if job.get('celeryParentTaskId'):
        celeryParentTaskId = job['celeryParentTaskId']
        parentJob = Job().findOne({'celeryTaskId': celeryParentTaskId})
        event.info['parentId'] = parentJob['_id']
Example #20
def run(job):
    jobModel = Job()
    jobModel.updateJob(job, status=JobStatus.RUNNING)

    src_workspace_id, dest_workspace_id = job["args"]
    user = job["kwargs"]["user"]
    tale = job["kwargs"]["tale"]

    try:
        parent = Folder().load(src_workspace_id,
                               user=user,
                               exc=True,
                               level=AccessType.READ)
        workspace = Folder().load(dest_workspace_id, user=user, exc=True)
        Folder().copyFolderComponents(parent, workspace, user, None)
        tale["status"] = TaleStatus.READY
        Tale().updateTale(tale)
        jobModel.updateJob(job,
                           status=JobStatus.SUCCESS,
                           log="Copying finished")
    except Exception:
        tale["status"] = TaleStatus.ERROR
        Tale().updateTale(tale)
        t, val, tb = sys.exc_info()
        log = "%s: %s\n%s" % (t.__name__, repr(val), traceback.extract_tb(tb))
        jobModel.updateJob(job, status=JobStatus.ERROR, log=log)
        raise
Example #21
def _onUpload(event):
    """
    Look at uploads containing references related to this plugin. If found,
    they are used to link item task outputs back to a job document.
    """
    try:
        ref = json.loads(event.info.get('reference', ''))
    except ValueError:
        return

    if isinstance(ref, dict) and ref.get('type') == 'item_tasks.output':
        jobModel = Job()
        tokenModel = Token()
        token = event.info['currentToken']

        if tokenModel.hasScope(token, 'item_tasks.job_write:%s' % ref['jobId']):
            job = jobModel.load(ref['jobId'], force=True, exc=True)
        else:
            job = jobModel.load(
                ref['jobId'], level=AccessType.WRITE, user=event.info['currentUser'], exc=True)

        file = event.info['file']
        item = Item().load(file['itemId'], force=True)

        # Record a link to the output item on the job document
        jobModel.updateJob(job, otherFields={
            'itemTaskBindings.outputs.%s.itemId' % ref['id']: item['_id']
        })

        # Also add a link on the item back to the job that created it
        item['createdByJob'] = job['_id']
        Item().save(item)
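
A sketch of the upload reference that _onUpload() above expects; the values are hypothetical, but the field names mirror the checks in the handler (type, jobId, id).

import json

# 'job' is an assumed job document; 'output_name' is the output binding id
# used under itemTaskBindings.outputs in the job.
reference = json.dumps({
    'type': 'item_tasks.output',
    'jobId': str(job['_id']),
    'id': 'output_name',
})
# Passing this string as the 'reference' parameter of an upload triggers the
# handler above, which links the uploaded item back to the job.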
Example #22
def runSlicerCliTasksDescriptionForFolder(self, folder, image, args, pullImage,
                                          params):
    jobModel = Job()
    token = Token().createToken(days=3,
                                scope='item_task.set_task_spec.%s' %
                                folder['_id'],
                                user=self.getCurrentUser())
    job = jobModel.createJob(title='Read docker task specs: %s' % image,
                             type='folder.item_task_slicer_cli_description',
                             handler='worker_handler',
                             user=self.getCurrentUser())

    if args[-1:] == ['--xml']:
        args = args[:-1]

    jobOptions = {
        'itemTaskId': folder['_id'],
        'kwargs': {
            'task': {
                'mode': 'docker',
                'docker_image': image,
                'container_args': args + ['--xml'],
                'pull_image': pullImage,
                'outputs': [{
                    'id': '_stdout',
                    'format': 'text'
                }],
            },
            'outputs': {
                '_stdout': {
                    'mode': 'http',
                    'method': 'POST',
                    'format': 'text',
                    'url': '/'.join((
                        utils.getWorkerApiUrl(), 'folder', str(folder['_id']),
                        'item_task_slicer_cli_xml')),
                    'headers': {
                        'Girder-Token': token['_id']
                    },
                    'params': {
                        'image': image,
                        'args': json.dumps(args),
                        'pullImage': pullImage
                    }
                }
            },
            'jobInfo': utils.jobInfoSpec(job),
            'validate': False,
            'auto_convert': False
        }
    }
    job.update(jobOptions)

    job = jobModel.save(job)
    jobModel.scheduleJob(job)
    return job
Example #23
    def testTaleImportZip(self):
        image = self.model("image", "wholetale").createImage(
            name="Jupyter Classic",
            creator=self.user,
            public=True,
            config=dict(
                template="base.tpl",
                buildpack="PythonBuildPack",
                user="******",
                port=8888,
                urlPath="",
            ),
        )
        with mock.patch("fs.copy.copy_fs") as mock_copy:
            with open(
                os.path.join(DATA_PATH, "5c92fbd472a9910001fbff72.zip"), "rb"
            ) as fp:
                resp = self.request(
                    path="/tale/import",
                    method="POST",
                    user=self.user,
                    type="application/zip",
                    body=fp.read(),
                )

            self.assertStatusOk(resp)
            tale = resp.json

            from girder.plugins.jobs.models.job import Job

            job = Job().findOne({"type": "wholetale.import_tale"})
            self.assertEqual(
                json.loads(job["kwargs"])["taleId"]["$oid"], tale["_id"]
            )
            for i in range(300):
                if job["status"] in {JobStatus.SUCCESS, JobStatus.ERROR}:
                    break
                time.sleep(0.1)
                job = Job().load(job["_id"], force=True)
            self.assertEqual(job["status"], JobStatus.SUCCESS)
        mock_copy.assert_called_once()
        # TODO: make it more extensive...
        self.assertTrue(
            self.model("tale", "wholetale").findOne({"title": "Water Tale"}) is not None
        )
        self.model("image", "wholetale").remove(image)
Example #24
def load(info):
    events.bind('jobs.schedule', 'worker', schedule)
    events.bind('jobs.status.validate', 'worker', validateJobStatus)
    events.bind('jobs.status.validTransitions', 'worker', validTransitions)
    events.bind('jobs.cancel', 'worker', cancel)
    events.bind('model.job.save.after', 'worker', attachJobInfoSpec)
    events.bind('model.job.save', 'worker', attachParentJob)
    Job().exposeFields(AccessType.SITE_ADMIN, {'celeryTaskId', 'celeryQueue'})
Example #25
def _updateJob(event):
    """
    Called when a job is saved, updated, or removed.  If this is a large image
    job and it is ended, clean up after it.
    """
    from girder.plugins.jobs.constants import JobStatus
    from girder.plugins.jobs.models.job import Job

    job = (event.info['job'] if event.name == 'jobs.job.update.after'
           else event.info)
    meta = job.get('meta', {})
    if (meta.get('creator') != 'large_image' or not meta.get('itemId')
            or meta.get('task') != 'createImageItem'):
        return
    status = job['status']
    if event.name == 'model.job.remove' and status not in (JobStatus.ERROR,
                                                           JobStatus.CANCELED,
                                                           JobStatus.SUCCESS):
        status = JobStatus.CANCELED
    if status not in (JobStatus.ERROR, JobStatus.CANCELED, JobStatus.SUCCESS):
        return
    item = Item().load(meta['itemId'], force=True)
    if not item or 'largeImage' not in item:
        return
    if item.get('largeImage', {}).get('expected'):
        # We can get a SUCCESS message before we get the upload message, so
        # don't clear the expected status on success.
        if status != JobStatus.SUCCESS:
            del item['largeImage']['expected']
    notify = item.get('largeImage', {}).get('notify')
    msg = None
    if notify:
        del item['largeImage']['notify']
        if status == JobStatus.SUCCESS:
            msg = 'Large image created'
        elif status == JobStatus.CANCELED:
            msg = 'Large image creation canceled'
        else:  # ERROR
            msg = 'FAILED: Large image creation failed'
        msg += ' for item %s' % item['name']
    if (status in (JobStatus.ERROR, JobStatus.CANCELED)
            and 'largeImage' in item):
        del item['largeImage']
    Item().save(item)
    if msg and event.name != 'model.job.remove':
        Job().updateJob(job, progressMessage=msg)
    if notify:
        Notification().createNotification(
            type='large_image.finished_image_item',
            data={
                'job_id': job['_id'],
                'item_id': item['_id'],
                'success': status == JobStatus.SUCCESS,
                'status': status
            },
            user={'_id': job.get('userId')},
            expires=datetime.datetime.utcnow() +
            datetime.timedelta(seconds=30))
Example #26
    def deleteIncompleteTiles(self, params):
        result = {'removed': 0}
        while True:
            item = Item().findOne({'largeImage.expected': True})
            if not item:
                break
            job = Job().load(item['largeImage']['jobId'], force=True)
            if job and job.get('status') in (
                    JobStatus.QUEUED, JobStatus.RUNNING):
                job = Job().cancelJob(job)
            if job and job.get('status') in (
                    JobStatus.QUEUED, JobStatus.RUNNING):
                result['message'] = ('The job for item %s could not be '
                                     'canceled' % (str(item['_id'])))
                break
            ImageItem().delete(item)
            result['removed'] += 1
        return result
Example #27
    def executeTask(self, item, jobTitle, includeJobInfo, inputs, outputs):
        user = self.getCurrentUser()
        if jobTitle is None:
            jobTitle = item['name']
        task, handler = self._validateTask(item)

        jobModel = Job()
        job = jobModel.createJob(title=jobTitle,
                                 type='item_task',
                                 handler=handler,
                                 user=user)

        # If this is a user auth token, we make an IO-enabled token
        token = self.getCurrentToken()
        tokenModel = Token()
        if tokenModel.hasScope(token, TokenScope.USER_AUTH):
            token = tokenModel.createToken(user=user,
                                           days=7,
                                           scope=(TokenScope.DATA_READ,
                                                  TokenScope.DATA_WRITE))
            job['itemTaskTempToken'] = token['_id']

        token = tokenModel.addScope(token,
                                    'item_tasks.job_write:%s' % job['_id'])

        job.update({
            'itemTaskId': item['_id'],
            'itemTaskBindings': {
                'inputs': inputs,
                'outputs': outputs
            },
            'kwargs': {
                'task': task,
                'inputs': self._transformInputs(inputs, token),
                'outputs': self._transformOutputs(
                    outputs, token, job, task, item['_id']),
                'validate': False,
                'auto_convert': False,
                'cleanup': True
            }
        })

        if includeJobInfo:
            job['kwargs']['jobInfo'] = utils.jobInfoSpec(job)

        if 'itemTaskCeleryQueue' in item.get('meta', {}):
            job['celeryQueue'] = item['meta']['itemTaskCeleryQueue']

        job = jobModel.save(job)
        jobModel.scheduleJob(job)

        return job
Example #28
    def setUp(self):
        base.TestCase.setUp(self)

        self.users = [User().createUser(
            'usr' + str(n), 'passwd', 'tst', 'usr', '*****@*****.**' % n)
            for n in range(3)]

        from girder.plugins.jobs.models.job import Job
        self.jobModel = Job()
Example #29
    def testTaleImportZipFail(self):
        image = Image().createImage(
            name="Jupyter Classic",
            creator=self.user,
            public=True,
            config=dict(
                template="base.tpl",
                buildpack="PythonBuildPack",
                user="******",
                port=8888,
                urlPath="",
            ),
        )
        with mock.patch("girder.plugins.wholetale.lib.pids_to_entities") as mock_pids:
            mock_pids.side_effect = ValueError
            with open(
                os.path.join(DATA_PATH, "5c92fbd472a9910001fbff72.zip"), "rb"
            ) as fp:
                resp = self.request(
                    path="/tale/import",
                    method="POST",
                    user=self.user,
                    type="application/zip",
                    body=fp.read(),
                )

            self.assertStatusOk(resp)
            tale = resp.json

            job = Job().findOne({"type": "wholetale.import_tale"})
            self.assertEqual(
                json.loads(job["kwargs"])["taleId"]["$oid"], tale["_id"]
            )
            for i in range(300):
                if job["status"] in {JobStatus.SUCCESS, JobStatus.ERROR}:
                    break
                time.sleep(0.1)
                job = Job().load(job["_id"], force=True)
            self.assertEqual(job["status"], JobStatus.ERROR)
            Job().remove(job)
        tale = Tale().load(tale["_id"], force=True)
        self.assertEqual(tale["status"], TaleStatus.ERROR)
        Tale().remove(tale)
        Image().remove(image)
Example #30
def setMultiscaleMetaData(jobId, inputFolderId, outputFolderId):
    """Set the multiscale meta data for the jobId.

    Currently, we use this to keep track of the input and output
    folders.

    Returns the updated job.
    """
    # We want to update the job with some multiscale settings.
    # We will put them in the job's metadata.
    job = Job().findOne({'_id': jobId})
    multiscale_io = {
        'meta': {
            'multiscale_settings': {
                'inputFolderId': inputFolderId,
                'outputFolderId': outputFolderId
            }
        }
    }

    return Job().updateJob(job, otherFields=multiscale_io)
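
Hypothetical usage of the helper above, recording which folders a multiscale job read from and wrote to; 'jobId', 'inputFolder', and 'outputFolder' are assumed to exist.

job = setMultiscaleMetaData(jobId, inputFolder['_id'], outputFolder['_id'])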