Example #1
0
def location_precision_indexer(obj, **kw):
    """Catalog indexer: return the location precision computed for *obj*.

    Extra indexer keyword arguments are accepted and ignored.
    """
    precision = location_precision(obj)
    return precision
Example #2
0
    def place(self, context, vocabs=True):
        """Create a graph centered on a Place and its Feature.

        Describes the Place content object as both a Pleiades Place /
        skos:Concept and, via a companion "#this" resource, a spatial
        Feature for the real-world ancient place.  Published child Name
        and Location objects are graphed too, along with a
        representative point (or capgrid extents for rough locations),
        connected places, dcterms metadata, and references.

        :param context: the Place content object to graph.
        :param vocabs: when True, also embed concept triples for any
            referenced place-type vocabulary terms.
        :returns: the populated graph.
        """
        g = place_graph()

        # Use the public-facing URL: strip the virtual-host root, if
        # any, from the Zope-generated URL.
        purl = context.absolute_url()
        vh_root = context.REQUEST.environ.get('VH_ROOT')
        if vh_root:
            purl = purl.replace(vh_root, '')

        context_page = purl
        context_subj = URIRef(context_page)
        # "#this" denotes the real-world place, distinct from its page.
        feature_subj = URIRef(context_page + "#this")

        # Types: the page is both a Pleiades Place and a SKOS concept.
        g.add((context_subj, RDF.type, PLEIADES['Place']))
        g.add((context_subj, RDF.type, SKOS['Concept']))
        g.add((context_subj, SKOS['inScheme'],
               URIRef("https://pleiades.stoa.org/places")))

        # Triples concerning the real world ancient place.
        g.add((feature_subj, RDF.type, SPATIAL['Feature']))

        # primary topic
        g.add((feature_subj, FOAF['primaryTopicOf'], context_subj))

        # title as rdfs:label
        g.add((feature_subj, RDFS['label'], Literal(context.Title())))

        # description as rdfs:comment
        g.add((feature_subj, RDFS['comment'], Literal(context.Description())))

        # Alias the legacy http:// URI to the canonical https:// one.
        orig_url = context_page.replace('https://', 'http://')
        if orig_url and orig_url != context_page:
            g.add((context_subj, OWL['sameAs'], URIRef(orig_url)))

        g = self.dcterms(context, g)
        g = self.provenance(context, g, context_subj)

        # Place or feature types.
        place_types = get_vocabulary('place_types')
        place_types = {t['id']: t for t in place_types}
        url = self.portal.absolute_url() + '/vocabularies/place-types'
        pcats = set(filter(None, context.getPlaceType()))
        for pcat in pcats:
            item = place_types.get(pcat)
            if not item:
                # Unknown vocabulary term; nothing to graph.
                continue
            iurl = url + '/' + pcat
            g.add((context_subj, PLEIADES['hasFeatureType'], URIRef(iurl)))

            if vocabs:
                g = RegVocabGrapher(self.portal, self.request).concept(
                    'place-types', place_types[pcat], g)

        # Names as skos:altLabel.
        folder_path = "/".join(context.getPhysicalPath())
        brains = self.catalog(
            path={'query': folder_path, 'depth': 1},
            portal_type='Name',
            review_state='published')
        names = [b.getObject() for b in brains]

        for obj in names:
            name = Literal(
                obj.getNameAttested() or obj.getNameTransliterated(),
                obj.getNameLanguage() or None)
            g.add((context_subj, SKOS['altLabel'], name))

            name_subj = URIRef(context_page + "/" + obj.getId())
            g.add((context_subj, PLEIADES['hasName'], name_subj))
            g.add((name_subj, RDF.type, PLEIADES['Name']))

            # Legacy http:// alias for the name resource.
            orig_url = str(name_subj).replace('https://', 'http://')
            if orig_url and orig_url != str(name_subj):
                g.add((name_subj, OWL['sameAs'], URIRef(orig_url)))

            g = self.dcterms(obj, g)
            g = self.temporal(obj, g, name_subj, vocabs=vocabs)
            g = self.provenance(obj, g, name_subj)
            g = self.references(obj, g, name_subj)

            nameAttested = obj.getNameAttested()
            if nameAttested:
                g.add((name_subj, PLEIADES['nameAttested'],
                       Literal(nameAttested,
                               obj.getNameLanguage() or None)))

            # Romanized forms are stored comma-separated.
            for nr in obj.getNameTransliterated().split(','):
                nr = nr.strip()
                g.add((name_subj, PLEIADES['nameRomanized'], Literal(nr)))

        # Representative point, computed from published child Locations.
        xs = []
        ys = []
        folder_path = "/".join(context.getPhysicalPath())
        brains = self.catalog(
            path={'query': folder_path, 'depth': 1},
            portal_type='Location',
            review_state='published')
        locs = [b.getObject() for b in brains]
        features = [wrap(ob, 0) for ob in locs]

        # get representative point
        loc_prec = location_precision(context)
        if loc_prec == 'precise':
            # Centroid of the first geometry found wins; every geometry
            # still contributes to the fallback bounding box.
            repr_point = None
            for f in features:
                if f.geometry and hasattr(f.geometry, '__geo_interface__'):
                    shape = asShape(f.geometry)
                    b = shape.bounds
                    xs.extend([b[0], b[2]])
                    ys.extend([b[1], b[3]])
                    if repr_point is None:
                        repr_point = shape.centroid
            if xs and ys:
                bbox = [min(xs), min(ys), max(xs), max(ys)]
            else:
                bbox = None

            if repr_point:
                g.add((context_subj, GEO['lat'], Literal(repr_point.y)))
                g.add((context_subj, GEO['long'], Literal(repr_point.x)))
            elif bbox:
                # No centroid available: use the bbox center.
                g.add((context_subj, GEO['lat'],
                       Literal((bbox[1] + bbox[3]) / 2.0)))
                g.add((context_subj, GEO['long'],
                       Literal((bbox[0] + bbox[2]) / 2.0)))
        elif loc_prec == 'rough':
            # Rough locations reference capgrid cells; graph the grid
            # extents instead of a point.
            for loc in locs:
                ref = loc.getLocation()
                gridbase = "http://atlantides.org/capgrids/"
                if ref and ref.startswith(gridbase):
                    try:
                        # Identifier forms: <map> or <map>/<grid>[+<grid>...]
                        params = ref.rstrip("/")[len(gridbase):].split("/")
                        if len(params) == 1:
                            mapnum = params[0]
                            grids = [None]
                        elif len(params) == 2:
                            mapnum = params[0]
                            grids = [v.upper() for v in params[1].split("+")]
                        else:
                            log.error("Invalid location identifier %s" % ref)
                            continue
                        for grid in grids:
                            grid_uri = gridbase + mapnum + "#" + (grid
                                                                  or "this")
                            g.add((context_subj, OSSPATIAL['within'],
                                   URIRef(grid_uri)))

                            e = URIRef(grid_uri +
                                       "-extent")  # the grid's extent
                            g.add((e, RDF.type, OSGEO['AbstractGeometry']))
                            g.add((URIRef(grid_uri), OSGEO['extent'], e))
                            bounds = capgrids.box(mapnum, grid)
                            shape = box(*bounds)
                            g.add((e, OSGEO['asGeoJSON'],
                                   Literal(geojson.dumps(shape))))
                            g.add(
                                (e, OSGEO['asWKT'], Literal(wkt.dumps(shape))))
                    except (ValueError, TypeError):
                        # Best effort: a malformed grid reference must not
                        # abort the whole graph.
                        log.exception(
                            "Exception caught computing grid extent for %r",
                            loc)

        # Locations.
        for obj in locs:

            locn_subj = URIRef(context_page + "/" + obj.getId())
            g.add((context_subj, PLEIADES['hasLocation'], locn_subj))
            g.add((locn_subj, RDF.type, PLEIADES['Location']))

            g = self.dcterms(obj, g)
            g = self.temporal(obj, g, locn_subj, vocabs=vocabs)
            g = self.provenance(obj, g, locn_subj)
            g = self.references(obj, g, locn_subj)

            # Legacy http:// alias for the location resource.
            orig_url = str(locn_subj).replace('https://', 'http://')
            if orig_url and orig_url != str(locn_subj):
                g.add((locn_subj, OWL['sameAs'], URIRef(orig_url)))

            dc_locn = obj.getLocation()
            gridbase = "http://atlantides.org/capgrids/"

            if dc_locn and dc_locn.startswith(gridbase):
                # Capgrid reference: graph the overlapping grid extents.
                try:
                    params = dc_locn.rstrip("/")[len(gridbase):].split("/")
                    if len(params) == 1:
                        mapnum = params[0]
                        grids = [None]
                    elif len(params) == 2:
                        mapnum = params[0]
                        grids = [v.upper() for v in params[1].split("+")]
                    elif len(params) >= 3:
                        mapnum = params[0]
                        grids = [v.upper() for v in params[1:]]
                    else:
                        # Fixed: previously logged `ref`, which is not
                        # defined in this loop; report the value at hand.
                        log.error("Invalid location identifier %s" % dc_locn)
                        continue

                    for grid in grids:
                        grid_uri = gridbase + mapnum + "#" + (grid or "this")
                        bounds = capgrids.box(mapnum, grid)
                        shape = box(*bounds)

                        g.add((locn_subj, OSSPATIAL['partiallyOverlaps'],
                               URIRef(grid_uri)))

                        e = URIRef(grid_uri + "-extent")  # the grid's extent
                        g.add((e, RDF.type, OSGEO['AbstractGeometry']))
                        g.add((URIRef(grid_uri), OSGEO['extent'], e))
                        g.add((e, OSGEO['asGeoJSON'],
                               Literal(geojson.dumps(shape))))
                        g.add((e, OSGEO['asWKT'], Literal(wkt.dumps(shape))))

                except Exception:
                    # Was a bare except; keep the best-effort behavior but
                    # let SystemExit/KeyboardInterrupt propagate.
                    log.exception(
                        "Exception caught computing grid extent for %r", obj)

            else:
                try:
                    f = wrap(obj, 0)
                    if (f.geometry
                            and hasattr(f.geometry, '__geo_interface__')):
                        shape = asShape(f.geometry)
                        g.add((locn_subj, OSGEO['asGeoJSON'],
                               Literal(geojson.dumps(shape))))
                        g.add((locn_subj, OSGEO['asWKT'],
                               Literal(wkt.dumps(shape))))
                except Exception:
                    # Was a bare except; same rationale as above.
                    log.exception("Couldn't wrap and graph %r", obj)

        # Connections to other published places.
        for f in context.getConnectedPlaces():
            if self.wftool.getInfoFor(f, 'review_state') != 'published':
                continue
            furl = f.absolute_url()
            vh_root = context.REQUEST.environ.get('VH_ROOT')
            if vh_root:
                furl = furl.replace(vh_root, '')
            feature_obj = URIRef(furl + "#this")
            g.add((feature_subj, SPATIAL['C'], feature_obj))
            g.add((context_subj, RDFS['seeAlso'], URIRef(furl)))

        # dcterms:coverage
        coverage = geoContext(context)
        if coverage:
            g.add((context_subj, DCTERMS['coverage'], Literal(coverage)))

        g = self.references(context, g, context_subj)

        return g
Example #3
0
def location_precision_indexer(obj, **kw):
    """Catalog indexer returning the location precision for *obj*.

    Extra indexer keyword arguments are accepted and ignored.
    """
    return location_precision(obj)
Example #4
0
    def place(self, context, vocabs=True):
        """Create a graph centered on a Place and its Feature.

        Describes the Place content object as both a Pleiades Place /
        skos:Concept and, via a companion "#this" resource, a spatial
        Feature for the real-world ancient place.  Published child Name
        and Location objects are graphed too, ranked by their catalog
        ``average_rating``, along with a representative point (or
        capgrid extents for rough locations), connections, dcterms
        metadata, and references.

        :param context: the Place content object to graph.
        :param vocabs: when True, also embed concept triples for any
            referenced place-type vocabulary terms.
        :returns: the populated graph.
        """
        g = place_graph()

        # Use the public-facing URL: strip the virtual-host root, if
        # any, from the Zope-generated URL.
        purl = context.absolute_url()
        vh_root = context.REQUEST.environ.get('VH_ROOT')
        if vh_root:
            purl = purl.replace(vh_root, '')

        context_page = purl
        context_subj = URIRef(context_page)
        # "#this" denotes the real-world place, distinct from its page.
        feature_subj = URIRef(context_page + "#this")

        # Types: the page is both a Pleiades Place and a SKOS concept.
        g.add((context_subj, RDF.type, PLEIADES['Place']))
        g.add((context_subj, RDF.type, SKOS['Concept']))
        g.add((
            context_subj,
            SKOS['inScheme'],
            URIRef("http://pleiades.stoa.org/places")))

        # Triples concerning the real world ancient place.
        g.add((feature_subj, RDF.type, SPATIAL['Feature']))

        # primary topic
        g.add((
            feature_subj,
            FOAF['primaryTopicOf'],
            context_subj))

        # title as rdfs:label
        g.add((
            feature_subj,
            RDFS['label'],
            Literal(context.Title())))

        # description as rdfs:comment
        g.add((
            feature_subj,
            RDFS['comment'],
            Literal(context.Description())))

        g = self.dcterms(context, g)
        g = self.provenance(context, g, context_subj)

        # Place or feature types.
        place_types = self.vocabs['place-types']
        pcats = set(filter(None, context.getPlaceType()))
        for pcat in pcats:
            item = place_types.get(pcat)
            if not item:
                # Unknown vocabulary term; nothing to graph.
                continue
            iurl = item.absolute_url()
            vh_root = item.REQUEST.environ.get('VH_ROOT')
            if vh_root:
                iurl = iurl.replace(vh_root, '')
            g.add((
                context_subj,
                PLEIADES['hasFeatureType'],
                URIRef(iurl)))

            if vocabs:
                g = VocabGrapher(place_types, self.request).concept(
                    place_types[pcat], g)

        # Names, ranked by average catalog rating.
        folder_path = "/".join(context.getPhysicalPath())
        brains = self.catalog(
            path={'query': folder_path, 'depth': 1},
            portal_type='Name',
            review_state='published')
        objs = [b.getObject() for b in brains]
        name_ratings = [
            self.catalog.getIndexDataForRID(
                b.getRID())['average_rating'] for b in brains]
        rated_names = sorted(
            zip(name_ratings, objs),
            reverse=True)

        # Highest-rated name becomes the prefLabel, but only when it has
        # a positive rating; otherwise it is just another altLabel.
        for rating, obj in rated_names[:1]:
            name = Literal(
                obj.getNameAttested() or obj.getNameTransliterated(),
                obj.getNameLanguage() or None)
            if rating and rating[0] > 0.0:
                g.add((
                    context_subj,
                    SKOS['prefLabel'],
                    name))
            else:
                g.add((
                    context_subj,
                    SKOS['altLabel'],
                    name))

        # All remaining names are altLabels.
        for rating, obj in rated_names[1:]:
            name = Literal(
                obj.getNameAttested() or obj.getNameTransliterated(),
                obj.getNameLanguage() or None)
            g.add((
                context_subj,
                SKOS['altLabel'],
                name))

        # Names
        for rating, obj in rated_names:

            name_subj = URIRef(context_page + "/" + obj.getId())
            g.add((context_subj, PLEIADES['hasName'], name_subj))
            g.add((name_subj, RDF.type, PLEIADES['Name']))

            g = self.dcterms(obj, g)
            g = self.temporal(obj, g, name_subj, vocabs=vocabs)
            g = self.provenance(obj, g, name_subj)
            g = self.references(obj, g, name_subj)

            nameAttested = obj.getNameAttested()
            if nameAttested:
                g.add((
                    name_subj,
                    PLEIADES['nameAttested'],
                    Literal(nameAttested, obj.getNameLanguage() or None)))

            # Romanized forms are stored comma-separated.
            for nr in obj.getNameTransliterated().split(','):
                nr = nr.strip()
                g.add((name_subj, PLEIADES['nameRomanized'], Literal(nr)))

        ## representative point
        xs = []
        ys = []
        folder_path = "/".join(context.getPhysicalPath())
        brains = self.catalog(
            path={'query': folder_path, 'depth': 1},
            portal_type='Location',
            review_state='published')
        locs = [b.getObject() for b in brains]
        location_ratings = [
            self.catalog.getIndexDataForRID(
                b.getRID())['average_rating'] for b in brains]
        features = [wrap(ob, 0) for ob in locs]

        # get representative point
        loc_prec = location_precision(context)
        if loc_prec == 'precise':
            repr_point = None
            # The highest-rated, positively-rated geometry's centroid
            # wins; every geometry still contributes to the bbox.
            for r, f in sorted(zip(location_ratings, features), reverse=True):
                if f.geometry and hasattr(f.geometry, '__geo_interface__'):
                    shape = asShape(f.geometry)
                    b = shape.bounds
                    xs.extend([b[0], b[2]])
                    ys.extend([b[1], b[3]])
                    if repr_point is None and r and r[0] > 0.0:
                        repr_point = shape.centroid
            if xs and ys:
                bbox = [min(xs), min(ys), max(xs), max(ys)]
            else:
                bbox = None

            if repr_point:
                g.add((
                    context_subj,
                    GEO['lat'],
                    Literal(repr_point.y)))
                g.add((
                    context_subj,
                    GEO['long'],
                    Literal(repr_point.x)))
            elif bbox:
                # No rated centroid available: use the bbox center.
                g.add((
                    context_subj,
                    GEO['lat'],
                    Literal((bbox[1] + bbox[3]) / 2.0)))
                g.add((
                    context_subj,
                    GEO['long'],
                    Literal((bbox[0] + bbox[2]) / 2.0)))
        elif loc_prec == 'rough':
            # Rough locations reference capgrid cells; graph the grid
            # extents instead of a point.
            for loc in locs:
                ref = loc.getLocation()
                gridbase = "http://atlantides.org/capgrids/"
                if ref and ref.startswith(gridbase):
                    # Identifier forms: <map> or <map>/<grid>[+<grid>...]
                    params = ref.rstrip("/")[len(gridbase):].split("/")
                    if len(params) == 1:
                        mapnum = params[0]
                        grids = [None]
                    elif len(params) == 2:
                        mapnum = params[0]
                        grids = [v.upper() for v in params[1].split("+")]
                    else:
                        log.error("Invalid location identifier %s" % ref)
                        continue
                    for grid in grids:
                        grid_uri = gridbase + mapnum + "#" + (grid or "this")
                        try:
                            bounds = capgrids.box(mapnum, grid)
                        except (ValueError, TypeError):
                            # Robustness fix: a malformed grid reference
                            # previously crashed the whole graph here.
                            log.exception(
                                "Exception caught computing grid extent "
                                "for %r", loc)
                            continue
                        shape = box(*bounds)

                        g.add((
                            context_subj,
                            OSSPATIAL['within'],
                            URIRef(grid_uri)))

                        e = URIRef(grid_uri + "-extent")  # the grid's extent
                        g.add((e, RDF.type, OSGEO['AbstractGeometry']))
                        g.add((
                            URIRef(grid_uri),
                            OSGEO['extent'],
                            e))
                        g.add((
                            e,
                            OSGEO['asGeoJSON'],
                            Literal(geojson.dumps(shape))))
                        g.add((
                            e,
                            OSGEO['asWKT'],
                            Literal(wkt.dumps(shape))))

        # Locations
        for obj in locs:

            locn_subj = URIRef(context_page + "/" + obj.getId())
            g.add((context_subj, PLEIADES['hasLocation'], locn_subj))
            g.add((locn_subj, RDF.type, PLEIADES['Location']))

            g = self.dcterms(obj, g)
            g = self.temporal(obj, g, locn_subj, vocabs=vocabs)
            g = self.provenance(obj, g, locn_subj)
            g = self.references(obj, g, locn_subj)

            dc_locn = obj.getLocation()
            gridbase = "http://atlantides.org/capgrids/"

            if dc_locn and dc_locn.startswith(gridbase):
                # Capgrid reference: graph the overlapping grid extents.
                try:
                    params = dc_locn.rstrip("/")[len(gridbase):].split("/")
                    if len(params) == 1:
                        mapnum = params[0]
                        grids = [None]
                    elif len(params) == 2:
                        mapnum = params[0]
                        grids = [v.upper() for v in params[1].split("+")]
                    elif len(params) >= 3:
                        mapnum = params[0]
                        grids = [v.upper() for v in params[1:]]
                    else:
                        # Fixed: previously logged `ref`, which is not
                        # defined in this loop; report the value at hand.
                        log.error("Invalid location identifier %s" % dc_locn)
                        continue

                    for grid in grids:
                        grid_uri = gridbase + mapnum + "#" + (grid or "this")
                        bounds = capgrids.box(mapnum, grid)
                        shape = box(*bounds)

                        g.add((
                            locn_subj,
                            OSSPATIAL['partiallyOverlaps'],
                            URIRef(grid_uri)))

                        e = URIRef(grid_uri + "-extent")  # the grid's extent
                        g.add((e, RDF.type, OSGEO['AbstractGeometry']))
                        g.add((
                            URIRef(grid_uri),
                            OSGEO['extent'],
                            e))
                        g.add((
                            e,
                            OSGEO['asGeoJSON'],
                            Literal(geojson.dumps(shape))))
                        g.add((
                            e,
                            OSGEO['asWKT'],
                            Literal(wkt.dumps(shape))))

                except Exception:
                    # Was a bare except; keep the best-effort behavior but
                    # let SystemExit/KeyboardInterrupt propagate.
                    log.exception(
                        "Exception caught computing grid extent for %r", obj)

            else:
                try:
                    f = wrap(obj, 0)
                    if (f.geometry and
                            hasattr(f.geometry, '__geo_interface__')):
                        shape = asShape(f.geometry)
                        g.add((
                            locn_subj,
                            OSGEO['asGeoJSON'],
                            Literal(geojson.dumps(shape))))
                        g.add((
                            locn_subj,
                            OSGEO['asWKT'],
                            Literal(wkt.dumps(shape))))
                except Exception:
                    # Was a bare except; same rationale as above.
                    log.exception("Couldn't wrap and graph %r", obj)

        # Connections (both directions) to other published places.
        for f in (context.getConnections() +
                context.getConnections_from()):
            if self.wftool.getInfoFor(f, 'review_state') != 'published':
                continue
            furl = f.absolute_url()
            vh_root = context.REQUEST.environ.get('VH_ROOT')
            if vh_root:
                furl = furl.replace(vh_root, '')
            feature_obj = URIRef(furl + "#this")
            g.add((feature_subj, SPATIAL['C'], feature_obj))
            g.add((context_subj, RDFS['seeAlso'], URIRef(furl)))

        # dcterms:coverage
        coverage = geoContext(context)
        if coverage:
            g.add((
                context_subj,
                DCTERMS['coverage'],
                Literal(coverage)))

        g = self.references(context, g, context_subj)

        return g