def _loadMetadata(self):
        """
        Populate self._tiffInfo (and the cached tile/image dimensions) from
        the TIFF tags and core accessor functions of the current directory.
        """
        info = {}
        # Every TIFFTAG_* constant exposed by the tiff header module names a
        # field that can be queried on the open file.
        tag_names = (key for key in dir(libtiff_ctypes.tiff_h)
                     if key.startswith('TIFFTAG_'))
        for tag in tag_names:
            field = tag.split('_', 1)[1].lower()
            try:
                value = self._tiffFile.GetField(field)
            except TypeError as err:
                logger.debug('Loading field "%s" in directory number %d '
                             'resulted in TypeError - "%s"',
                             field, self._directoryNum, err)
                continue
            if value is not None:
                info[field] = value

        # Core accessor methods beyond the first two also contribute values.
        for name in self.CoreFunctions[2:]:
            accessor = getattr(self._tiffFile, name, None)
            if accessor is not None:
                value = accessor()
                if value:
                    info[name.lower()] = value
        self._tiffInfo = info
        self._tileWidth = info.get('tilewidth')
        self._tileHeight = info.get('tilelength')
        self._imageWidth = info.get('imagewidth')
        self._imageHeight = info.get('imagelength')
        self.parse_image_description(info.get('imagedescription', ''))
    def __init__(self, filePath, directoryNum):
        """
        Create a new reader for a tiled image file directory in a TIFF file.

        :param filePath: A path to a TIFF file on disk.
        :type filePath: str
        :param directoryNum: The number of the TIFF image file directory to
        open.
        :type directoryNum: int
        :raises: InvalidOperationTiffException or IOTiffException or
        ValidationTiffException
        """
        # TODO how many to keep in the cache
        # create local cache to store Jpeg tables and
        # getTileByteCountsType

        self.cache = LRUCache(10)

        self._tiffFile = None

        # _open attaches the underlying TIFF handle; _loadMetadata then reads
        # the directory's tags into self._tiffInfo.
        self._open(filePath, directoryNum)
        self._loadMetadata()
        logger.debug('TiffDirectory %d Information %r',
                     directoryNum, self._tiffInfo)
        try:
            self._validate()
        except ValidationTiffException:
            # Release the file handle before propagating a validation failure.
            self._close()
            raise
    def importFile(self, item, path, user, name=None, mimeType=None, **kwargs):
        """
        Import a single file from the filesystem into the assetstore.

        :param item: The parent item for the file.
        :type item: dict
        :param path: The path on the local filesystem.
        :type path: str
        :param user: The user to list as the creator of the file.
        :type user: dict
        :param name: Name for the file. Defaults to the basename of ``path``.
        :type name: str
        :param mimeType: MIME type of the file if known.
        :type mimeType: str
        :returns: The file document that was created.
        """
        logger.debug('Importing file %s to item %s on filesystem assetstore %s',
                     path, item['_id'], self.assetstore['_id'])
        fileStat = os.stat(path)
        fileName = name if name else os.path.basename(path)

        # Create the file record without saving so the import-specific fields
        # can be filled in before the single save below.
        fileDoc = File().createFile(
            name=fileName, creator=user, item=item, reuseExisting=True,
            assetstore=self.assetstore, mimeType=mimeType,
            size=fileStat.st_size, saveFile=False)
        fileDoc['path'] = os.path.abspath(os.path.expanduser(path))
        fileDoc['mtime'] = fileStat.st_mtime
        fileDoc['imported'] = True
        fileDoc = File().save(fileDoc)
        logger.debug('Imported file %s to item %s on filesystem assetstore %s',
                     path, item['_id'], self.assetstore['_id'])
        return fileDoc
    def adjustDBUri(cls, uri, *args, **kwargs):
        """
        Adjust a uri to match the form sqlite requires.  This can convert a
        Girder resource path to an appropriate physical file reference.

        :param uri: the uri to adjust.
        :returns: the adjusted uri
        """
        # The base adjustment is applied exactly once; a second call after the
        # ':////' rewrite would re-adjust an already-adjusted uri.
        uri = super(SqliteSAConnector, cls).adjustDBUri(uri, *args, **kwargs)
        if '://' in uri:
            # sqlite absolute file paths use four slashes after the scheme.
            uri = uri.split('://', 1)[0] + ':////' + uri.split('://', 1)[1].lstrip('/')
        # If we have a Girder resource path, convert it.  If this looks like a
        # file but doesn't exist, check if it is a resource path.  If this is
        # not a resource path to a file that we can read directly, treat this
        # the same as a missing file.
        if (':///' in uri and not os.path.exists(uri.split(':///', 1)[1])):
            resourcepath = path_util.lookUpPath(
                uri.split(':///', 1)[1], test=True, filter=False, force=True)
            if resourcepath and resourcepath['model'] == 'file':
                file = resourcepath['document']
                adapter = File().getAssetstoreAdapter(file)
                if hasattr(adapter, 'fullPath'):
                    filepath = adapter.fullPath(file)
                    if os.path.exists(filepath):
                        uri = uri.split(':///', 1)[0] + ':///' + filepath
                        log.debug('Using Girder file for SQLite database')
        return uri
Exemple #5
0
    def refine(self, params):
        """
        Set adjudications for an IQR session and refine its ranking.

        :param params: request parameters containing 'item' (the session
            item), 'pos_uuids' (positive descriptor UUIDs) and 'neg_uuids'
            (negative descriptor UUIDs or None).
        :returns: the session id (sid) that was refined.
        :raises RestException: if no positive UUIDs are given, the session is
            unknown (404), the indexes cannot be computed (500), or a UUID is
            not found in the descriptor index (404).
        """
        sid = str(params['item']['_id'])
        pos_uuids = params['pos_uuids']
        neg_uuids = params['neg_uuids'] if params['neg_uuids'] is not None else []

        if not pos_uuids:
            raise RestException('No positive UUIDs given.')

        with self.controller:
            if not self.controller.has_session_uuid(sid):
                raise RestException('Session ID %s not found.' % sid, 404)
            iqrs = self.controller.get_session(sid)
            iqrs.lock.acquire()  # lock BEFORE releasing controller

        try:
            descriptor_index = self._descriptorIndexFromSessionId(sid)
            neighbor_index = self._nearestNeighborIndex(sid, descriptor_index)

            if descriptor_index is None or neighbor_index is None:
                logger.error('Unable to compute descriptor or neighbor index from sid %s.' % sid)
                raise RestException('Unable to compute descriptor or neighbor index from sid %s.' % sid, 500)

            # Get appropriate descriptor elements from index for
            # setting new adjudication state.
            try:
                pos_descrs = set(descriptor_index.get_many_descriptors(pos_uuids))
                neg_descrs = set(descriptor_index.get_many_descriptors(neg_uuids))
            except KeyError as ex:
                # logger.warn is a deprecated alias; use warning.
                logger.warning(traceback.format_exc())
                raise RestException('Descriptor UUID %s not found in index.' % ex, 404)

            # If the adjudication sets changed, a new classifier should be
            # made upon the next classification request.
            diff_pos = pos_descrs.symmetric_difference(iqrs.positive_descriptors)
            diff_neg = neg_descrs.symmetric_difference(iqrs.negative_descriptors)

            if diff_pos or diff_neg:
                logger.debug("[%s] session Classifier dirty", sid)
                self.session_classifier_dirty[sid] = True

            logger.info("[%s] Setting adjudications", sid)
            iqrs.positive_descriptors = pos_descrs
            iqrs.negative_descriptors = neg_descrs

            logger.info("[%s] Updating working index", sid)
            iqrs.update_working_index(neighbor_index)

            logger.info("[%s] Refining", sid)
            iqrs.refine()

        finally:
            iqrs.lock.release()

        return sid
Exemple #6
0
    def __call__(self, op, path, *args, **kwargs):
        """
        Generically allow logging and error handling for any operation.

        :param op: operation to perform.
        :param path: path within the fuse (e.g., '', '/user', '/user/<name>',
            etc.).
        """
        logger.debug('-> %s %s %s', op, path, repr(args))
        # Placeholder shown in the exit log if the operation raises before a
        # result is assigned.
        ret = '[exception]'
        try:
            ret = getattr(self, op)(path, *args, **kwargs)
            return ret
        except Exception as e:
            # Log all exceptions and then reraise them
            # Expected not-found / no-access errors are logged quietly;
            # anything else gets a full traceback.
            if getattr(e, 'errno', None) in (errno.ENOENT, errno.EACCES):
                logger.debug('-- %s %r', op, e)
            else:
                logger.exception('-- %s', op)
            raise e
        finally:
            if op != 'read':
                logger.debug('<- %s %s', op, repr(ret))
            else:
                # Read results can be large; log only length and a short prefix.
                logger.debug('<- %s (length %d) %r', op, len(ret), ret[:16])
Exemple #7
0
 def __init__(self, operations, mountpoint, *args, **kwargs):
     """
     This wraps fuse.FUSE so that errors are logged rather than raising a
     RuntimeError exception.

     :param operations: the fuse operations class instance.
     :param mountpoint: the directory where the mount is created.
     """
     try:
         logger.debug('Mounting %s\n' % mountpoint)
         super(FUSELogError, self).__init__(operations, mountpoint, *args, **kwargs)
         logger.debug('Mounted %s\n' % mountpoint)
     except RuntimeError:
         logprint.error(
             'Failed to mount FUSE.  Does the mountpoint (%r) exist and is '
             'it empty?  Does the user have permission to create FUSE '
             'mounts?  It could be another FUSE mount issue, too.' % (
                 mountpoint, ))
         # Clear the stored mount information since the mount did not happen.
         Setting().unset(SettingKey.GIRDER_MOUNT_INFORMATION)
Exemple #8
0
 def __init__(self, operations, mountpoint, *args, **kwargs):
     """
     Wrap fuse.FUSE so that a failure to mount is logged instead of
     surfacing as a RuntimeError exception.

     :param operations: the fuse operations class instance.
     :param mountpoint: the directory where the mount is created.
     """
     try:
         logger.debug('Mounting %s\n' % mountpoint)
         super(FUSELogError, self).__init__(operations, mountpoint, *args, **kwargs)
         logger.debug('Mounted %s\n' % mountpoint)
     except RuntimeError:
         message = (
             'Failed to mount FUSE.  Does the mountpoint (%r) exist and is '
             'it empty?  Does the user have permission to create FUSE '
             'mounts?  It could be another FUSE mount issue, too.' %
             (mountpoint, ))
         logprint.error(message)
         # Clear the recorded mount information since the mount failed.
         Setting().unset(SettingKey.GIRDER_MOUNT_INFORMATION)
Exemple #9
0
def _get_attrs_via_head(obj, url):
    """
    Resolve a download's name and size, preferring values reported by a HEAD
    request over those recorded in the object.

    :param obj: a dict with at least 'filename' and 'filesize' keys.
    :param url: the url to issue a HEAD request against.
    :returns: a (name, size) tuple; the recorded values are returned
        unchanged if the HEAD request fails.
    """
    name = obj['filename']
    size = obj['filesize']
    try:
        req = Request(url)
        req.get_method = lambda: 'HEAD'
        resp = urlopen(req)
    except HTTPError as err:
        logger.debug(str(err))
        return name, size

    try:
        content_disposition = resp.getheader('Content-Disposition')
        if content_disposition:
            fname = _CNTDISP_REGEX.search(content_disposition)
            if fname:
                name = fname.groups()[0]

        content_length = resp.getheader('Content-Length')
        if content_length:
            size = int(content_length)
    finally:
        # The response body is never read, so close the response to avoid
        # leaking the underlying connection.
        resp.close()

    return name, size
Exemple #10
0
    def importFile(self, item, path, user, name=None, mimeType=None, **kwargs):
        """
        Import a single file from the filesystem into the assetstore.

        :param item: The parent item for the file.
        :type item: dict
        :param path: The path on the local filesystem.
        :type path: str
        :param user: The user to list as the creator of the file.
        :type user: dict
        :param name: Name for the file. Defaults to the basename of ``path``.
        :type name: str
        :param mimeType: MIME type of the file if known.
        :type mimeType: str
        :returns: The file document that was created.
        """
        logger.debug(
            'Importing file %s to item %s on filesystem assetstore %s', path,
            item['_id'], self.assetstore['_id'])
        stat = os.stat(path)
        name = name or os.path.basename(path)

        # Create the record unsaved so the import-specific fields below are
        # written in a single save.
        file = File().createFile(name=name,
                                 creator=user,
                                 item=item,
                                 reuseExisting=True,
                                 assetstore=self.assetstore,
                                 mimeType=mimeType,
                                 size=stat.st_size,
                                 saveFile=False)
        file['path'] = os.path.abspath(os.path.expanduser(path))
        file['mtime'] = stat.st_mtime
        file['imported'] = True
        file = File().save(file)
        logger.debug('Imported file %s to item %s on filesystem assetstore %s',
                     path, item['_id'], self.assetstore['_id'])
        return file
Exemple #11
0
def get_package_list(path, package=None, isChild=False):
    """
    Build a nested dict describing a data package's file structure.

    :param path: the path used to resolve the package pid (on recursion this
        is a child package's identifier).
    :param package: an existing dict to add entries to; a new dict is created
        when None.
    :param isChild: True when called recursively for a child package.
    :returns: the package dict, keyed by the primary metadata document title.
    """
    if package is None:
        package = {}

    package_pid = get_package_pid(path)
    logger.debug('Found package PID of {}.'.format(package_pid))

    docs = get_documents(package_pid)

    # Filter the Solr result by TYPE so we can construct the package
    metadata = extract_metadata_docs(docs)
    data = extract_data_docs(docs)
    children = extract_resource_docs(docs)

    # Determine the folder name. This is usually the title of the metadata file
    # in the package but when there are multiple metadata files in the package,
    # we need to figure out which one is the 'main' or 'documenting' one.
    primary_metadata = [doc for doc in metadata if 'documents' in doc]

    check_multiple_metadata(primary_metadata)

    # Non-primary metadata documents are treated as ordinary data files.
    data += [doc for doc in metadata if doc['identifier'] != primary_metadata[0]['identifier']]

    fileList = get_package_files(data, metadata, primary_metadata)

    # Add a new entry in the package structure
    # if isChild:
    #    package[-1][primary_metadata[0]['title']] = {'fileList': []}
    # else:
    package[primary_metadata[0]['title']] = {'fileList': []}

    package[primary_metadata[0]['title']]['fileList'].append(fileList)
    if children is not None and len(children) > 0:
        for child in children:
            get_package_list(child['identifier'], package[primary_metadata[0]['title']], True)
    return package
Exemple #12
0
def import_recursive(job):
    """
    Job entry point that recursively imports TCGA cohorts from the child
    folders of a root collection folder.

    :param job: the job document; job['kwargs'] must contain 'root' (the
        parent folder) and 'token'.
    """
    # Resolve the job model before the try block so the exception handler can
    # always record a failure (previously a NameError if an early line raised).
    jobModel = ModelImporter.model('job', 'jobs')
    try:
        root = job['kwargs']['root']
        token = job['kwargs']['token']

        userModel = ModelImporter.model('user')

        user = userModel.load(job['userId'], force=True)

        childModel = ModelImporter.model('cohort', 'digital_slide_archive')
        children = list(
            ModelImporter.model('folder').childFolders(root,
                                                       'collection',
                                                       user=user))
        count = len(children)
        progress = 0

        job = jobModel.updateJob(job,
                                 log='Started TCGA import\n',
                                 status=JobStatus.RUNNING,
                                 progressCurrent=progress,
                                 progressTotal=count)
        logger.info('Starting recursive TCGA import')

        for child in children:
            progress += 1
            try:
                msg = 'Importing "%s"' % child.get('name', '')
                job = jobModel.updateJob(job,
                                         log=msg,
                                         progressMessage=msg + '\n',
                                         progressCurrent=progress)
                logger.debug(msg)
                childModel.importDocument(child,
                                          recurse=True,
                                          user=user,
                                          token=token,
                                          job=job)
                job = jobModel.load(id=job['_id'], force=True)

                # handle any request to stop execution
                if (not job or job['status']
                        in (JobStatus.CANCELED, JobStatus.ERROR)):
                    logger.info('TCGA import job halted')
                    return

            except ValidationException:
                logger.warning('Failed to import %s' % child.get('name', ''))

        # Fixed: this previously repeated the 'Starting' message.
        logger.info('Finished recursive TCGA import')
        job = jobModel.updateJob(job,
                                 log='Finished TCGA import\n',
                                 status=JobStatus.SUCCESS,
                                 progressCurrent=count,
                                 progressMessage='Finished TCGA import')
    except Exception as e:
        logger.exception('Importing TCGA failed with %s' % str(e))
        job = jobModel.updateJob(job,
                                 log='Import failed with %s\n' % str(e),
                                 status=JobStatus.ERROR)
Exemple #13
0
 def createEmptyResource(self, name):
     """
     Log the request, then delegate resource creation to FolderResource.

     :param name: the name of the resource to create.
     :returns: the result of FolderResource.createEmptyResource.
     """
     # Use lazy %-args so the message is only formatted if debug is enabled.
     logger.debug('%s -> createEmptyResource(%s)', self.getRefUrl(), name)
     return FolderResource.createEmptyResource(self, name)
Exemple #14
0
    def __init__(self, path, **kwargs):
        """
        Initialize the tile class.  See the base class for other available
        parameters.

        :param path: a filesystem path for the tile source.
        :raises TileSourceException: if the file contains no usable tiled
            directories or lacks sufficient pyramid levels.
        """
        super(TiffFileTileSource, self).__init__(path, **kwargs)

        largeImagePath = self._getLargeImagePath()
        lastException = None
        # Associated images are smallish TIFF images that have an image
        # description and are not tiled.  They have their own TIFF directory.
        # Individual TIFF images can also have images embedded into their
        # directory as tags (this is a vendor-specific method of adding more
        # images into a file) -- those are stored in the individual
        # directories' _embeddedImages field.
        self._associatedImages = {}

        # Query all know directories in the tif file.  Only keep track of
        # directories that contain tiled images.
        alldir = []
        for directoryNum in itertools.count():  # pragma: no branch
            try:
                td = TiledTiffDirectory(largeImagePath, directoryNum)
            except ValidationTiffException as exc:
                # Not a tiled directory; it may still be an associated image.
                lastException = exc
                self._addAssociatedImage(largeImagePath, directoryNum)
                continue
            except TiffException as exc:
                if not lastException:
                    lastException = exc
                break
            # Skip directories without tile dimensions.
            if not td.tileWidth or not td.tileHeight:
                continue
            # Calculate the tile level, where 0 is a single tile, 1 is up to a
            # set of 2x2 tiles, 2 is 4x4, etc.
            level = int(
                math.ceil(
                    math.log(
                        max(
                            float(td.imageWidth) / td.tileWidth,
                            float(td.imageHeight) / td.tileHeight)) /
                    math.log(2)))
            if level < 0:
                continue
            # Store information for sorting with the directory.
            alldir.append((td.tileWidth * td.tileHeight, level,
                           td.imageWidth * td.imageHeight, directoryNum, td))
        # If there are no tiled images, raise an exception.
        if not len(alldir):
            msg = 'File %s didn\'t meet requirements for tile source: %s' % (
                largeImagePath, lastException)
            logger.debug(msg)
            raise TileSourceException(msg)
        # Sort the known directories by image area (width * height).  Given
        # equal area, sort by the level.
        alldir.sort()
        # The highest resolution image is our preferred image
        highest = alldir[-1][-1]
        directories = {}
        # Discard any images that use a different tiling scheme than our
        # preferred image
        for tdir in alldir:
            td = tdir[-1]
            level = tdir[1]
            if (td.tileWidth != highest.tileWidth
                    or td.tileHeight != highest.tileHeight):
                continue
            # If a layer's image is not a multiple of the tile size, it should
            # be near a power of two of the highest resolution image.
            if (((td.imageWidth % td.tileWidth)
                 and not nearPowerOfTwo(td.imageWidth, highest.imageWidth)) or
                ((td.imageHeight % td.tileHeight)
                 and not nearPowerOfTwo(td.imageHeight, highest.imageHeight))):
                continue
            directories[level] = td
        if not len(directories) or (len(directories) < 2
                                    and max(directories.keys()) + 1 > 4):
            raise TileSourceException(
                'Tiff image must have at least two levels.')

        # Sort the directories so that the highest resolution is the last one;
        # if a level is missing, put a None value in its place.
        self._tiffDirectories = [
            directories.get(key) for key in range(max(directories.keys()) + 1)
        ]
        self.tileWidth = highest.tileWidth
        self.tileHeight = highest.tileHeight
        self.levels = len(self._tiffDirectories)
        self.sizeX = highest.imageWidth
        self.sizeY = highest.imageHeight
Exemple #15
0
    def refine(self, params):
        """
        Set adjudications for an IQR session and refine its ranking.

        :param params: request parameters containing 'item' (the session
            item), 'pos_uuids' (positive descriptor UUIDs) and 'neg_uuids'
            (negative descriptor UUIDs or None).
        :returns: the session id (sid) that was refined.
        :raises RestException: if no positive UUIDs are given, the session is
            unknown (404), the indexes cannot be computed (500), or a UUID is
            not found in the descriptor set (404).
        """
        sid = str(params['item']['_id'])
        pos_uuids = params['pos_uuids']
        neg_uuids = params['neg_uuids'] if params[
            'neg_uuids'] is not None else []

        if not pos_uuids:
            raise RestException('No positive UUIDs given.')

        with self.controller:
            if not self.controller.has_session_uuid(sid):
                raise RestException('Session ID %s not found.' % sid, 404)
            iqrs = self.controller.get_session(sid)
            iqrs.lock.acquire()  # lock BEFORE releasing controller

        try:
            descriptor_set = self._descriptorSetFromSessionId(sid)
            neighbor_index = self._nearestNeighborIndex(sid, descriptor_set)

            if descriptor_set is None or neighbor_index is None:
                logger.error(
                    'Unable to compute descriptor or neighbor index from sid %s.'
                    % sid)
                raise RestException(
                    'Unable to compute descriptor or neighbor index from sid %s.'
                    % sid, 500)

            # Get appropriate descriptor elements from index for
            # setting new adjudication state.
            try:
                pos_descrs = set(
                    descriptor_set.get_many_descriptors(pos_uuids))
                neg_descrs = set(
                    descriptor_set.get_many_descriptors(neg_uuids))
            except KeyError as ex:
                # logger.warn is a deprecated alias; use warning.
                logger.warning(traceback.format_exc())
                raise RestException(
                    'Descriptor UUID %s not found in '
                    'descriptor set.' % ex, 404)

            # If the adjudication sets changed, a new classifier should be
            # made upon the next classification request.
            diff_pos = pos_descrs.symmetric_difference(
                iqrs.positive_descriptors)
            diff_neg = neg_descrs.symmetric_difference(
                iqrs.negative_descriptors)

            if diff_pos or diff_neg:
                logger.debug("[%s] session Classifier dirty", sid)
                self.session_classifier_dirty[sid] = True

            logger.info("[%s] Setting adjudications", sid)
            iqrs.positive_descriptors = pos_descrs
            iqrs.negative_descriptors = neg_descrs

            logger.info("[%s] Updating working index", sid)
            iqrs.update_working_set(neighbor_index)

            logger.info("[%s] Refining", sid)
            iqrs.refine()

        finally:
            iqrs.lock.release()

        return sid
Exemple #16
0
def genRESTEndPointsForSlicerCLIsForItem(restResource,
                                         cliItem,
                                         registerNamedRoute=False):
    """Generates REST end points for slicer CLIs placed in subdirectories of a
    given root directory and attaches them to a REST resource with the given
    name.

    For each CLI, it creates:
    * a GET Route (<apiURL>/`restResourceName`/<cliRelativePath>/xmlspec)
    that returns the xml spec of the CLI
    * a POST Route (<apiURL>/`restResourceName`/<cliRelativePath>/run)
    that runs the CLI

    It also creates a GET route (<apiURL>/`restResourceName`) that returns a
    list of relative routes to all CLIs attached to the generated REST resource

    Parameters
    ----------
    restResource : a dockerResource
        REST resource to which the end-points should be attached
    cliItem : CliItem
        the CLI item for which the run handler and routes are generated
    registerNamedRoute : bool
        when True, also register the run route under the CLI's restBasePath
        and name

    Returns
    -------
    restResource : the same resource that was passed in, with the endpoints
        attached (it is also returned when endpoint creation fails; the
        failure is only logged)

    """

    # validate restResource argument
    if not isinstance(restResource, Resource):
        raise Exception('restResource must be a Docker Resource')

    try:
        handler = genHandlerToRunDockerCLI(cliItem)

        # define CLI handler function
        cliRunHandler = boundHandler(restResource)(handler)

        cliRunHandlerName = 'run_%s' % cliItem._id
        setattr(restResource, cliRunHandlerName, cliRunHandler)

        restRunPath = ('cli', str(cliItem._id), 'run')
        restResource.route('POST', restRunPath, cliRunHandler)

        if registerNamedRoute:
            restNamedRunPath = (cliItem.restBasePath, cliItem.name, 'run')
            restResource.route('POST', restNamedRunPath, cliRunHandler)

        # undoFunction removes the routes and handler attribute added above.
        def undoFunction():
            try:
                restResource.removeRoute('POST', restRunPath, cliRunHandler)
                if registerNamedRoute:
                    restResource.removeRoute('POST', restNamedRunPath,
                                             cliRunHandler)
                delattr(restResource, cliRunHandlerName)
            except Exception:
                logger.exception('Failed to remove route')

        # store new rest endpoint
        restResource.storeEndpoints(cliItem.image, cliItem.name, undoFunction)

        logger.debug('Created REST endpoints for %s', cliItem.name)
    except Exception:
        logger.exception('Failed to create REST endpoints for %r',
                         cliItem.name)

    return restResource
Exemple #17
0
    def save(self, annotation, *args, **kwargs):
        """
        When saving an annotation, override the collection insert_one and
        replace_one methods so that we don't save the elements with the main
        annotation.  Still use the super class's save method, so that all of
        the triggers are fired as expected and cancelling and modifications can
        be done as needed.

        Because Mongo doesn't support transactions, a version number is stored
        with the annotation and with the associated elements.  This is used to
        add the new elements first, then update the annotation, and delete the
        old elements.  The allows version integrity if another thread queries
        the annotation at the same time.

        :param annotation: the annotation document to save.
        :returns: the saved document.  If it is a new document, the _id has
                  been added.
        """
        starttime = time.time()
        replace_one = self.collection.replace_one
        insert_one = self.collection.insert_one
        version = self.model('annotationelement',
                             'large_image').getNextVersionValue()
        if '_id' not in annotation:
            oldversion = None
        else:
            # We read the old version from the existing record, because we
            # don't want to trust that the input _version has not been altered
            # or is present.
            oldversion = self.collection.find_one({
                '_id': annotation['_id']
            }).get('_version')
        annotation['_version'] = version
        _elementQuery = annotation.pop('_elementQuery', None)

        def replaceElements(query, doc, *args, **kwargs):
            # Write the new elements, save the annotation without its
            # elements, then drop the elements of the prior version.
            self.model('annotationelement', 'large_image').updateElements(doc)
            elements = doc['annotation'].pop('elements', None)
            ret = replace_one(query, doc, *args, **kwargs)
            if elements:
                doc['annotation']['elements'] = elements
            self.model('annotationelement',
                       'large_image').removeOldElements(doc, oldversion)
            return ret

        def insertElements(doc, *args, **kwargs):
            elements = doc['annotation'].pop('elements', None)
            # When creating an annotation, there is a window of time where the
            # elements aren't set (this is unavoidable without database
            # transactions, as we need the annotation's id to set the
            # elements).
            ret = insert_one(doc, *args, **kwargs)
            if elements is not None:
                doc['annotation']['elements'] = elements
                self.model('annotationelement',
                           'large_image').updateElements(doc)
            # If we are inserting, we shouldn't have any old elements, so don't
            # bother removing them.
            return ret

        self.collection.replace_one = replaceElements
        self.collection.insert_one = insertElements
        try:
            result = super(Annotation, self).save(annotation, *args, **kwargs)
        finally:
            # Always restore the real collection methods, even if save raises;
            # otherwise later collection calls would reuse this call's
            # closures (and their captured oldversion).
            self.collection.replace_one = replace_one
            self.collection.insert_one = insert_one
        if _elementQuery:
            result['_elementQuery'] = _elementQuery
        logger.debug('Saved annotation in %5.3fs' % (time.time() - starttime))
        return result
def finalizeInstance(event):
    """
    Handle a job event by synchronizing an instance's status with its job.

    Two cases are handled:

    * Any job carrying an ``instance_id``: if the instance is still launching
      and the job errored, the instance is marked as errored.
    * 'Spawn Instance' jobs: the instance status is advanced according to the
      job status; on success the URL and container information reported by
      the celery task are recorded on the instance.

    :param event: a girder event whose ``info['job']`` is the updated job.
    """
    job = event.info['job']

    if job.get("instance_id"):
        instance = Instance().load(job["instance_id"], force=True)

        # A job failure during launch leaves the instance in an error state.
        if (instance["status"] == InstanceStatus.LAUNCHING
                and job["status"] == JobStatus.ERROR  # noqa
            ):
            instance["status"] = InstanceStatus.ERROR
            Instance().updateInstance(instance)

    if job['title'] == 'Spawn Instance' and job.get('status') is not None:
        status = int(job['status'])
        instance_id = job['args'][0]['instanceId']
        instance = Instance().load(instance_id, force=True, exc=True)
        update = True
        if (status == JobStatus.SUCCESS
                and instance["status"] == InstanceStatus.LAUNCHING  # noqa
            ):
            # The spawn task's result describes the running service (url,
            # container info, optional session id).
            service = getCeleryApp().AsyncResult(job['celeryTaskId']).get()
            # Only copy keys that the containerInfo schema allows.
            valid_keys = set(containerInfoSchema['properties'].keys())
            containerInfo = {key: service.get(key, '') for key in valid_keys}
            url = service.get('url', 'https://google.com')
            _wait_for_server(url)

            # Since _wait_for_server can potentially take some time,
            # we need to refresh the state of the instance
            instance = Instance().load(instance_id, force=True, exc=True)
            if instance["status"] != InstanceStatus.LAUNCHING:
                return  # bail

            # Preserve the imageId / current digest in containerInfo
            tale = Tale().load(instance['taleId'], force=True)
            containerInfo['imageId'] = tale['imageId']
            containerInfo['digest'] = tale['imageInfo']['digest']

            instance.update({
                'url': url,
                'status': InstanceStatus.RUNNING,
                'containerInfo': containerInfo,
            })
            if "sessionId" in service:
                instance["sessionId"] = ObjectId(service["sessionId"])
        elif (status == JobStatus.ERROR
              and instance["status"] != InstanceStatus.ERROR  # noqa
              ):
            instance['status'] = InstanceStatus.ERROR
        elif (status in (JobStatus.QUEUED, JobStatus.RUNNING)
              and instance["status"] != InstanceStatus.LAUNCHING  # noqa
              ):
            instance['status'] = InstanceStatus.LAUNCHING
        else:
            # No status transition applies; skip the database update.
            update = False

        if update:
            msg = "Updating instance ({_id}) in finalizeInstance".format(
                **instance)
            msg += " for job(id={_id}, status={status})".format(**job)
            logger.debug(msg)
            Instance().updateInstance(instance)
    def yieldElements(self, annotation, region=None, info=None):
        """
        Yield the elements of an annotation from the database one at a time.

        The optional ``region`` dictionary restricts and orders the results.
        Recognized keys are:

            left, right, top, bottom, low, high: spatial bounds in pixels; an
                element whose bounding box at least partially overlaps the
                requested area is included.
            minimumSize: the minimum size of an element to return.
            sort, sortdir: standard sort options.  The sort key can include
                size and details.
            limit: cap on the number of elements returned; defaults to no
                limit.
            offset: offset within the query at which to start yielding.  When
                maxDetails is used, subsequent queries should advance the
                offset by the number of elements actually returned, which
                varies with the details of the elements.
            maxDetails: cap on the cumulative details value of the yielded
                elements, applied in addition to limit.  The total may exceed
                maxDetails slightly, since the final element is yielded in
                full.

        :param annotation: the annotation to get elements for.  Modified.
        :param region: if present, a dictionary restricting which annotations
            are returned.
        :param info: optional dictionary updated with query metadata (count,
            offset, filter, sort, returned, details, and, when supplied,
            maxDetails and limit).
        """
        if info is None:
            info = {}
        if not region:
            region = {}
        query = {
            'annotationId': annotation.get('_annotationId', annotation['_id']),
            '_version': annotation['_version'],
        }
        # Translate recognized spatial keys into Mongo comparison clauses.
        for key, value in region.items():
            bboxEntry = self.bboxKeys.get(key)
            if not bboxEntry or not bboxEntry[1]:
                continue
            field, operator = bboxEntry[0], bboxEntry[1]
            # A non-positive lower bound matches everything; skip it.
            if operator == '$gte' and float(value) <= 0:
                continue
            query[field] = {operator: float(value)}
        sortSpec = region.get('sort')
        if sortSpec in self.bboxKeys:
            sortKey = self.bboxKeys[sortSpec][0]
        else:
            sortKey = sortSpec or '_id'
        sortDirection = SortDir.ASCENDING
        if region.get('sortdir'):
            sortDirection = int(region['sortdir'])
        limit = 0
        if region.get('limit'):
            limit = int(region['limit'])
        maxDetails = int(region.get('maxDetails') or 0)
        # Query no more entries than the tighter of limit and maxDetails
        # (each element contributes at least one detail).
        if maxDetails and (not limit or maxDetails < limit):
            queryLimit = maxDetails
        else:
            queryLimit = limit
        offset = 0
        if region.get('offset'):
            offset = int(region['offset'])
        logger.debug('element query %r for %r', query, region)
        elementCursor = self.find(
            query=query, sort=[(sortKey, sortDirection)], limit=queryLimit,
            offset=offset,
            fields={'_id': True, 'element': True, 'bbox.details': True})

        info.update({
            'count': elementCursor.count(),
            'offset': offset,
            'filter': query,
            'sort': [sortKey, sortDirection],
        })
        if maxDetails:
            info['maxDetails'] = maxDetails
        if limit:
            info['limit'] = limit
        returned = detailTotal = 0
        for record in elementCursor:
            element = record['element']
            element.setdefault('id', record['_id'])
            yield element
            returned += 1
            detailTotal += record.get('bbox', {}).get('details', 1)
            # Stop once the cumulative details reach the requested cap.
            if maxDetails and detailTotal >= maxDetails:
                break
        info['returned'] = returned
        info['details'] = detailTotal
Example #20
0
def get_package_pid(path):
    """
    Get the pid of a package from its path.

    :param path: the path identifying the package.
    :returns: the resource PID resolved from the package's initial PID.
    """
    initial_pid = find_initial_pid(path)
    # Lazy %-style args let logging skip formatting when DEBUG is disabled.
    logger.debug('Parsed initial PID of %s.', initial_pid)
    return find_resource_pid(initial_pid)
    def yieldElements(self, annotation, region=None, info=None):
        """
        Given an annotation, fetch the elements from the database.

        When a region is used to request specific element, the following
        keys can be specified:

            :left, right, top, bottom, low, high: the spatial area where
                elements are located, all in pixels.  If an element's bounding
                box is at least partially within the requested area, that
                element is included.
            :minimumSize: the minimum size of an element to return.
            :sort, sortdir: standard sort options.  The sort key can include
                size and details.
            :limit: limit the total number of elements by this value.  Defaults
                to no limit.
            :offset: the offset within the query to start returning values.  If
                maxDetails is used, to get subsequent sets of elements, the
                offset needs to be increased by the actual number of elements
                returned from a previous query, which will vary based on the
                details of the elements.
            :maxDetails: if specified, limit the total number of elements by
                the sum of their details values.  This is applied in addition
                to limit.  The sum of the details values of the elements may
                exceed maxDetails slightly (the sum of all but the last element
                will be less than maxDetails, but the last element may exceed
                the value).
            :centroids: if specified and true, only return the id, center of
                the bounding box, and bounding box size for each element.

        :param annotation: the annotation to get elements for.  Modified.
        :param region: if present, a dictionary restricting which annotations
            are returned.
        :param info: an optional dictionary that will be modified with
            additional query information, including count (total number of
            available elements), returned (number of elements in response),
            maxDetails (as specified by the region dictionary), details (sum of
            details returned), limit (as specified by region), centroids (a
            boolean based on the region specification).
        :returns: a list of elements.  If centroids were requested, each entry
            is a list with str(id), x, y, size.  Otherwise, each entry is the
            element record.
        """
        info = info if info is not None else {}
        region = region or {}
        query = {
            'annotationId': annotation.get('_annotationId', annotation['_id']),
            '_version': annotation['_version']
        }
        # Translate recognized spatial keys into Mongo comparison clauses.
        for key in region:
            if key in self.bboxKeys and self.bboxKeys[key][1]:
                # A non-positive lower bound matches everything; skip it.
                if self.bboxKeys[key][1] == '$gte' and float(region[key]) <= 0:
                    continue
                query[self.bboxKeys[key][0]] = {
                    self.bboxKeys[key][1]: float(region[key])
                }
        if region.get('sort') in self.bboxKeys:
            sortkey = self.bboxKeys[region['sort']][0]
        else:
            sortkey = region.get('sort') or '_id'
        sortdir = int(
            region['sortdir']) if region.get('sortdir') else SortDir.ASCENDING
        limit = int(region['limit']) if region.get('limit') else 0
        maxDetails = int(region.get('maxDetails') or 0)
        # Query no more entries than the tighter of limit and maxDetails
        # (each element contributes at least one detail).
        queryLimit = maxDetails if maxDetails and (
            not limit or maxDetails < limit) else limit
        offset = int(region['offset']) if region.get('offset') else 0
        logger.debug('element query %r for %r', query, region)
        fields = {'_id': True, 'element': True, 'bbox.details': True}
        centroids = str(region.get('centroids')).lower() == 'true'
        if centroids:
            # Centroid responses only need the element id, its display
            # properties, and the full bounding box.
            fields = {'_id': True, 'element.id': True, 'bbox': True}
            proplist = []
            propskeys = [
                'type', 'fillColor', 'lineColor', 'lineWidth', 'closed'
            ]
            for key in propskeys:
                fields['element.%s' % key] = True
            props = {}
            info['centroids'] = True
            info['props'] = proplist
            info['propskeys'] = propskeys
        elementCursor = self.find(query=query,
                                  sort=[(sortkey, sortdir)],
                                  limit=queryLimit,
                                  offset=offset,
                                  fields=fields)

        info.update({
            'count': elementCursor.count(),
            'offset': offset,
            'filter': query,
            'sort': [sortkey, sortdir],
        })
        details = count = 0
        if maxDetails:
            info['maxDetails'] = maxDetails
        if limit:
            info['limit'] = limit
        for entry in elementCursor:
            element = entry['element']
            element.setdefault('id', entry['_id'])
            if centroids:
                bbox = entry.get('bbox')
                if not bbox or 'lowx' not in bbox or 'size' not in bbox:
                    continue
                # Deduplicate display properties; each centroid references
                # its property tuple by index into info['props'].
                prop = tuple(element.get(key) for key in propskeys)
                if prop not in props:
                    props[prop] = len(props)
                    proplist.append(list(prop))
                # The element type is projected under 'element' (see the
                # fields dictionary above); the cursor document has no
                # top-level 'type' key, so the previous entry.get('type')
                # was always None and points were never given a zero size.
                yield [
                    str(element['id']), (bbox['lowx'] + bbox['highx']) / 2,
                    (bbox['lowy'] + bbox['highy']) / 2,
                    bbox['size'] if element.get('type') != 'point' else 0,
                    props[prop]
                ]
                details += 1
            else:
                yield element
                details += entry.get('bbox', {}).get('details', 1)
            count += 1
            if maxDetails and details >= maxDetails:
                break
        info['returned'] = count
        info['details'] = details
Example #22
0
    def addStyle(self, m, layerSrs, extent=None):
        """
        Attaches raster style option to mapnik raster layer and adds the layer
        to the mapnik map.

        The style is taken from ``self.style`` when present; otherwise one is
        derived from the bands' color interpretations, falling back to a
        default band of -1 when nothing matches.

        :param m: mapnik map.
        :param layerSrs: the layer projection
        :param extent: the extent to use for the mapnik layer.
        """
        # Default two-point color ramps used for bands that only have a
        # color interpretation and no explicit palette.
        interpColorTable = {
            'red': ['#000000', '#ff0000'],
            'green': ['#000000', '#00ff00'],
            'blue': ['#000000', '#0000ff'],
            'gray': ['#000000', '#ffffff'],
            'alpha': ['#ffffff00', '#ffffffff'],
        }
        bands = self.getBandInformation()
        style = []
        if hasattr(self, 'style'):
            # A style is either a dict with a 'bands' list or a single band
            # style dict; normalize to a list of band entries.
            styleBands = self.style['bands'] if 'bands' in self.style else [self.style]
            for styleBand in styleBands:

                # Copy so resolving the band number doesn't mutate the
                # caller's style dictionary.
                styleBand = styleBand.copy()
                styleBand['band'] = self._bandNumber(styleBand.get('band'))
                style.append(styleBand)
        if not len(style):
            # No explicit style: derive one from the band interpretations.
            for interp in ('red', 'green', 'blue', 'gray', 'palette', 'alpha'):
                band = self._bandNumber(interp, False)
                # If we don't have the requested band, or we only have alpha,
                # or this is gray or palette and we already added another band,
                # skip this interpretation.
                if (band is None or
                        (interp == 'alpha' and not len(style)) or
                        (interp in ('gray', 'palette') and len(style))):
                    continue
                if interp == 'palette':
                    style.append({'band': band, 'palette': 'colortable'})
                else:
                    style.append({
                        'band': band,
                        'palette': interpColorTable[interp],
                        'min': 'auto',
                        'max': 'auto',
                        'nodata': 'auto',
                        'scheme': 'linear',
                        # Alpha darkens; color bands brighten the composite.
                        'composite': 'multiply' if interp == 'alpha' else 'lighten'
                    })
        if not len(style):
            # Last resort: a sentinel band so at least one layer is added.
            style.append({'band': -1})
        logger.debug('mapnik addTile specified style: %r, used style %r',
                     getattr(self, 'style', None), style)
        for styleBand in style:
            if styleBand['band'] != -1:
                colorizer = self._colorizerFromStyle(styleBand)
                composite = getattr(mapnik.CompositeOp, styleBand.get('composite', 'lighten'))
                nodata = styleBand.get('nodata')
                if nodata == 'auto':
                    # NOTE(review): getBandInformation() appears to describe
                    # individual bands, so a top-level 'nodata' key may never
                    # exist here (yielding None) -- confirm intent.
                    nodata = bands.get('nodata')
            else:
                # The sentinel band gets no colorizer or compositing.
                colorizer = None
                composite = None
                nodata = None
            self._addStyleToMap(
                m, layerSrs, colorizer, styleBand['band'], extent, composite, nodata)
Example #23
0
 def createCollection(self, name):
     """
     Create a child collection named *name* under this folder resource.

     :param name: the name of the collection to create.
     """
     # Lazy %-style args let logging skip formatting when DEBUG is disabled.
     logger.debug('%s -> createCollection(%s)', self.getRefUrl(), name)
     FolderResource.createCollection(self, name)
Example #24
0
    def save(self, annotation, *args, **kwargs):
        """
        When saving an annotation, override the collection insert_one and
        replace_one methods so that we don't save the elements with the main
        annotation.  Still use the super class's save method, so that all of
        the triggers are fired as expected and cancelling and modifications can
        be done as needed.

        Because Mongo doesn't support transactions, a version number is stored
        with the annotation and with the associated elements.  This is used to
        add the new elements first, then update the annotation, and delete the
        old elements.  The allows version integrity if another thread queries
        the annotation at the same time.

        :param annotation: the annotation document to save.
        :returns: the saved document.  If it is a new document, the _id has
                  been added.
        """
        starttime = time.time()
        # Capture the unwrapped collection methods under the lock so two
        # concurrent saves don't capture each other's wrappers.
        with self._writeLock:
            replace_one = self.collection.replace_one
            insert_one = self.collection.insert_one
        version = Annotationelement().getNextVersionValue()
        if '_id' not in annotation:
            oldversion = None
        else:
            if '_annotationId' in annotation:
                annotation['_id'] = annotation['_annotationId']
            # We read the old version from the existing record, because we
            # don't want to trust that the input _version has not been altered
            # or is present.
            oldversion = self.collection.find_one(
                {'_id': annotation['_id']}).get('_version')
        annotation['_version'] = version
        _elementQuery = annotation.pop('_elementQuery', None)
        annotation.pop('_active', None)
        annotation.pop('_annotationId', None)

        def replaceElements(query, doc, *args, **kwargs):
            # Store the new elements before the annotation so that concurrent
            # readers never see an annotation version without its elements.
            Annotationelement().updateElements(doc)
            elements = doc['annotation'].pop('elements', None)
            if self._historyEnabled:
                # Preserve the prior annotation record as an inactive history
                # entry rather than discarding it.
                oldAnnotation = self.collection.find_one(query)
                if oldAnnotation:
                    oldAnnotation['_annotationId'] = oldAnnotation.pop('_id')
                    oldAnnotation['_active'] = False
                    insert_one(oldAnnotation)
            ret = replace_one(query, doc, *args, **kwargs)
            if elements:
                doc['annotation']['elements'] = elements
            if not self._historyEnabled:
                Annotationelement().removeOldElements(doc, oldversion)
            return ret

        def insertElements(doc, *args, **kwargs):
            # When creating an annotation, store the elements first, then store
            # the annotation without elements, then restore the elements.
            doc.setdefault('_id', ObjectId())
            if doc['annotation'].get('elements') is not None:
                Annotationelement().updateElements(doc)
            # If we are inserting, we shouldn't have any old elements, so don't
            # bother removing them.
            elements = doc['annotation'].pop('elements', None)
            ret = insert_one(doc, *args, **kwargs)
            if elements is not None:
                doc['annotation']['elements'] = elements
            return ret

        with self._writeLock:
            self.collection.replace_one = replaceElements
            self.collection.insert_one = insertElements
            try:
                result = super(Annotation, self).save(annotation, *args, **kwargs)
            finally:
                # Always restore the real collection methods, even if save
                # raises.
                self.collection.replace_one = replace_one
                self.collection.insert_one = insert_one
        if _elementQuery:
            result['_elementQuery'] = _elementQuery

        annotation.pop('groups', None)
        self.injectAnnotationGroupSet(annotation)

        # Lazy %-style args let logging skip formatting when DEBUG is off.
        logger.debug('Saved annotation in %5.3fs', time.time() - starttime)
        # 'async' became a reserved keyword in Python 3.7 (SyntaxError as a
        # keyword argument); girder renamed this events.trigger parameter to
        # 'asynchronous' accordingly.
        events.trigger('large_image.annotations.save_history', {
            'annotation': annotation
        }, asynchronous=True)
        return result
def genRESTEndPointsForSlicerCLIsInDockerCache(restResource, dockerCache):
    """Generates REST end points for slicer CLIs placed in subdirectories of a
    given root directory and attaches them to a REST resource with the given
    name.

    For each CLI, it creates:
    * a GET Route (<apiURL>/`restResourceName`/<cliRelativePath>/xmlspec)
    that returns the xml spec of the CLI
    * a POST Route (<apiURL>/`restResourceName`/<cliRelativePath>/run)
    that runs the CLI

    It also creates a GET route (<apiURL>/`restResourceName`) that returns a
    list of relative routes to all CLIs attached to the generated REST resource

    Parameters
    ----------
    restResource : a dockerResource
        REST resource to which the end-points should be attached
    dockerCache : DockerCache object representing data stored in settings

    Raises
    ------
    Exception
        if restResource is not a Resource instance.
    """

    dockerImages = dockerCache.getImageNames()
    # validate restResource argument
    if not isinstance(restResource, Resource):
        raise Exception('restResource must be a Docker Resource')

    for dimg in dockerImages:

        docker_image = dockerCache.getImageByName(dimg)
        # get CLI list
        cliListSpec = docker_image.getCLIListSpec()

        # The REST path prefix depends only on the image name, so compute it
        # once per image instead of once per CLI.
        restPath = dimg.replace(':', '_').replace('/', '_').replace('@', '_')

        # Add REST end-point for each CLI
        for cliRelPath in cliListSpec:
            # create a POST REST route that runs the CLI
            try:
                cliXML = docker_image.getCLIXML(cliRelPath)

                cliRunHandler = genHandlerToRunDockerCLI(
                    dimg, cliRelPath, cliXML, restResource)

            except Exception:
                # Skip this CLI but keep registering the remaining ones.
                logger.exception('Failed to create REST endpoints for %r',
                                 cliRelPath)
                continue

            cliSuffix = os.path.normpath(cliRelPath).replace(os.sep, '_')

            cliRunHandlerName = restPath + '_run_' + cliSuffix
            setattr(restResource, cliRunHandlerName, cliRunHandler)
            restResource.route('POST', (restPath, cliRelPath, 'run'),
                               getattr(restResource, cliRunHandlerName))

            # store new rest endpoint
            restResource.storeEndpoints(
                dimg, cliRelPath, 'run',
                ['POST', (restPath, cliRelPath, 'run'), cliRunHandlerName])

            # create GET REST route that returns the xml of the CLI
            try:
                cliGetXMLSpecHandler = genHandlerToGetDockerCLIXmlSpec(
                    cliRelPath, cliXML, restResource)
            except Exception:
                # NOTE: logger.exception already records the traceback; the
                # extra sys.exc_info details are kept for log parity.
                logger.exception('Failed to create REST endpoints for %s',
                                 cliRelPath)
                exc_type, exc_obj, exc_tb = sys.exc_info()
                fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
                logger.error('%r', [exc_type, fname, exc_tb.tb_lineno])
                continue

            cliGetXMLSpecHandlerName = restPath + '_get_xml_' + cliSuffix
            setattr(restResource, cliGetXMLSpecHandlerName,
                    cliGetXMLSpecHandler)
            restResource.route('GET', (
                restPath,
                cliRelPath,
                'xmlspec',
            ), getattr(restResource, cliGetXMLSpecHandlerName))

            restResource.storeEndpoints(dimg, cliRelPath, 'xmlspec', [
                'GET',
                (restPath, cliRelPath, 'xmlspec'), cliGetXMLSpecHandlerName
            ])
            logger.debug('Created REST endpoints for %s', cliRelPath)

    return restResource