Example 1
    def scheme(self, vocab_name):
        g = place_graph()
        vurl = self.portal.absolute_url() + '/vocabularies/%s' % vocab_name
        vh_root = self.request.environ.get('VH_ROOT')
        if vh_root:
            vurl = vurl.replace(vh_root, '')
        g.add((
            URIRef(vurl),
            RDF.type,
            SKOS['ConceptScheme']))

        # No dublin core in registry vocabs
        # hardcoding for now
        g.add((
            URIRef(vurl),
            DCTERMS['title'],
            Literal("Time Periods")))
        g.add((
            URIRef(vurl),
            DCTERMS['description'],
            Literal("Named time periods for the site.")))

        orig_url = vurl.replace('https://', 'http://')
        if orig_url and orig_url != vurl:
            g.add((URIRef(vurl), OWL['sameAs'], URIRef(orig_url)))

        key = vocab_name.replace('-', '_')
        vocab = get_vocabulary(key)
        for term in vocab:
            g = self.concept(vocab_name, term, g)

        return g
Example 2
 def getTimePeriods(self):
     """
     """
     time_periods = get_vocabulary('time_periods')
     time_periods_list = [p['id'] for p in time_periods]
     def timeperiod_index(period):
         if period in time_periods_list:
             index = time_periods_list.index(period)
         else:
             index = -1
         return index
     periods = []
     for name in self.getNames():
         for a in name.getAttestations():
             if a['timePeriod'] not in periods:
                 periods.append(a['timePeriod'])
     if hasattr(self, 'getLocations'):
         for l6n in self.getLocations():
             for a in l6n.getAttestations():
                 if a['timePeriod'] not in periods:
                     periods.append(a['timePeriod'])
     if hasattr(self, 'getFeatures'):
         for f in self.getFeatures():
             for p in f.getTimePeriods():
                 if p not in periods:
                     periods.append(p)
     return sorted([p for p in periods if p], key=timeperiod_index)
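The sort key above returns -1 for ids that are missing from the vocabulary, so unrecognized periods sort before all known ones instead of raising. A minimal sketch of that behavior, using hypothetical period ids:

time_periods_list = ['archaic', 'classical', 'hellenistic', 'roman']

def timeperiod_index(period):
    # Unknown periods get -1 and therefore sort first.
    return time_periods_list.index(period) if period in time_periods_list else -1

print(sorted(['roman', 'archaic', 'mystery'], key=timeperiod_index))
# -> ['mystery', 'archaic', 'roman']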
Example 3
    def temporal(self, context, g, subj, vocabs=True):

        periods = get_vocabulary('time_periods')
        periods = dict([(p['id'], p) for p in periods])
        purl = self.portal.absolute_url() + '/vocabularies/time-periods'
        vh_root = context.REQUEST.environ.get('VH_ROOT')
        if vh_root:
            purl = purl.replace(vh_root, '')

        for attestation in context.getAttestations():
            turl = purl + "/" + attestation['timePeriod']
            g.add((
                subj,
                PLEIADES['during'],
                URIRef(turl)))

            if vocabs:
                g = RegVocabGrapher(self.portal, self.request).concept(
                    'time-periods', periods[attestation['timePeriod']], g)

        span = TimeSpanWrapper(context).timeSpan
        if span:
            g.add((
                subj,
                PLEIADES['start_date'],
                Literal(span['start'])))
            g.add((
                subj,
                PLEIADES['end_date'],
                Literal(span['end'])))

        return g
Example 4
 def get_place_type_data(self):
     featureTypes = set(self.catalog.uniqueValuesFor('getFeatureType'))
     places_in_use = sorted(featureTypes)
     place_types = get_vocabulary('place_types')
     places = {p['id']: p['title'] for p in place_types}
     data = [{'id': p, 'title': places[p]}
             for p in places_in_use if p and p in places]
     return sorted(data, key=lambda k: k['title'].lower())
Example 5
def dump_catalog(context, portal_type, cschema, **extras):
    schema = cschema.copy()
    tp_vocab = get_vocabulary('time_periods')
    tp_ranges = periodRanges(tp_vocab)

    include_features = False
    kwextras = extras.copy()
    if 'include_features' in kwextras:
        include_features = True
        del kwextras['include_features']
    catalog = getToolByName(context, 'portal_catalog')
    if 'collection_path' in extras:
        collection = catalog(
            path={'query': extras['collection_path'], 'depth': 0}
            )[0].getObject()
        targets = collection.queryCatalog()
        results = []
        for target in targets:
            results += catalog(
                path=target.getPath(), portal_type=portal_type, **kwextras)
    else:
        query = {'portal_type': portal_type}
        if not include_features:
            query.update(
                path={'query': '/plone/places', 'depth': 2},
                review_state='published')
        query.update(kwextras)
        results = catalog(query)
    writer = UnicodeWriter(sys.stdout)
    keys = sorted(schema.keys())
    writer.writerow(keys)
    if include_features:
        schema['pid'] = getFeaturePID
    for b in results:

        # representative point
        try:
            lon, lat = map(float, b.reprPt[0])
            precision = b.reprPt[1]
            # Bind the current values as defaults so the callables don't
            # close over the loop variables.
            schema['reprLat'] = lambda a, b, lat=lat: str(lat)
            schema['reprLong'] = lambda a, b, lon=lon: str(lon)
            schema['reprLatLong'] = (
                lambda a, b, lat=lat, lon=lon: "%f,%f" % (lat, lon))
        except Exception:
            # NB: on failure, schema keeps the previous row's repr* callables.
            log.warn("Unlocated: %s" % b.getPath())

        # dates
        years = []
        for tp in getattr(b, 'getTimePeriods', []):
            if tp:
                years.extend(list(tp_ranges[tp]))
        if len(years) >= 2:
            dmin, dmax = min(years), max(years)
            schema['minDate'] = lambda a, b, dmin=dmin: str(dmin)
            schema['maxDate'] = lambda a, b, dmax=dmax: str(dmax)
            schema['timePeriodsRange'] = (
                lambda a, b, dmin=dmin, dmax=dmax: "%.1f,%.1f" % (dmin, dmax))

        writer.writerow([schema[k](b, catalog) or "" for k in keys])
Example 6
 def verb(self, ob):
     global ctype_dict
     if ctype_dict is None:
         vocabulary = get_vocabulary('relationship_types')
         ctype_dict = {t['id']: t['title'] for t in vocabulary}
     ctype = ob.getRelationshipType()
     if isinstance(ctype, list):
         ctype = ctype[0]
     return ctype_dict[ctype]
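The global statement implies a module-level cache that the excerpt does not show. A minimal sketch of the assumed missing piece:

# Assumed module-level declaration (not shown above): the cache starts
# empty, is filled on the first call to verb(), and is then reused for
# the life of the process.
ctype_dict = None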
Example 7
 def getSortedTemporalAttestations(self):
     time_periods = get_vocabulary('time_periods')
     time_periods_list = [p['id'] for p in time_periods]
     def timeperiod_index(attestation):
         if attestation['timePeriod'] in time_periods_list:
             index = time_periods_list.index(attestation['timePeriod'])
         else:
             index = -1
         return index
     return sorted(self.getAttestations(), key=timeperiod_index)
Example 8
 def displaySortedTemporalAttestations(self):
     time_periods = get_vocabulary('time_periods')
     time_periods_dict = {p['id']: p['title'] for p in time_periods}
     attestations = self.getSortedTemporalAttestations()
     vocab_c = TemporalAttestation.schema[
         'confidence'].vocabulary.getVocabularyDict(self)
     try:
         return [dict(timePeriod=time_periods_dict[a['timePeriod']],
                      confidence=vocab_c[a['confidence']])
                 for a in attestations]
     except KeyError:
         return []
Example 9
 def timeSpan(self):
     catalog = self.context.aq_parent
     vocab = get_vocabulary('time_periods')
     ranges = periodRanges(vocab)
     years = []
     tp = getattr(self.context, 'getTimePeriods', [])
     if callable(tp):
         values = tp()
     else:
         values = tp
     for val in values:
         if val and val in ranges:
             years.extend(list(ranges[val]))
     if len(years) >= 2:
         return {'start': int(min(years)), 'end': int(max(years))}
     else:
         return None
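A minimal sketch of how timeSpan() collapses periods into one span, assuming periodRanges() maps each period id to a (start_year, end_year) pair; the ids and years below are hypothetical:

ranges = {'archaic': (-750, -550), 'roman': (-30, 300)}  # hypothetical
years = []
for val in ['archaic', 'roman']:
    if val and val in ranges:
        years.extend(list(ranges[val]))
# The overall span is the min/max across all endpoints.
assert {'start': int(min(years)), 'end': int(max(years))} == \
    {'start': -750, 'end': 300}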
Example 10
    def temporalRange(self):
        # @@@ move to util function
        request = getRequest()
        if request is not None and hasattr(request, '_period_ranges'):
            period_ranges = request._period_ranges
        else:
            tp_vocab = get_vocabulary('time_periods')
            period_ranges = periodRanges(tp_vocab)
            if request is not None:
                request._period_ranges = period_ranges

        timePeriods = self.brain.getTimePeriods
        years = []
        for period in timePeriods:
            years.extend(list(period_ranges[period]))
        if len(years) >= 2:
            return min(years), max(years)
        else:
            return None
Example 11
    def temporalRange(self, period_ranges=None):
        """Nominal temporal range, not accounting for level of confidence"""
        # cache period ranges on request
        if period_ranges is None:
            request = getRequest()
            if request is not None and hasattr(request, '_period_ranges'):
                period_ranges = request._period_ranges
            else:
                vocab = get_vocabulary('time_periods')
                period_ranges = periodRanges(vocab)
                if request is not None:
                    request._period_ranges = period_ranges

        years = []
        for a in self.getAttestations():
            tp = a['timePeriod']
            if tp:
                years.extend(list(period_ranges[tp]))
        if len(years) >= 2:
            return min(years), max(years)
        else:
            return None
Example 12
    def temporal(self, context, g, subj, vocabs=True):

        periods = get_vocabulary('time_periods')
        periods = dict([(p['id'], p) for p in periods])
        purl = self.portal.absolute_url() + '/vocabularies/time-periods'
        vh_root = context.REQUEST.environ.get('VH_ROOT')
        if vh_root:
            purl = purl.replace(vh_root, '')

        for attestation in context.getAttestations():
            turl = purl + "/" + attestation['timePeriod']
            g.add((subj, PLEIADES['during'], URIRef(turl)))

            if vocabs:
                g = RegVocabGrapher(self.portal, self.request).concept(
                    'time-periods', periods[attestation['timePeriod']], g)

        span = TimeSpanWrapper(context).timeSpan
        if span:
            g.add((subj, PLEIADES['start_date'], Literal(span['start'])))
            g.add((subj, PLEIADES['end_date'], Literal(span['end'])))

        return g
Example 13
    def scheme(self, vocab_name):
        g = place_graph()
        vurl = self.portal.absolute_url() + '/vocabularies/%s' % vocab_name
        vh_root = self.request.environ.get('VH_ROOT')
        if vh_root:
            vurl = vurl.replace(vh_root, '')
        g.add((URIRef(vurl), RDF.type, SKOS['ConceptScheme']))

        # No dublin core in registry vocabs
        # hardcoding for now
        g.add((URIRef(vurl), DCTERMS['title'], Literal("Time Periods")))
        g.add((URIRef(vurl), DCTERMS['description'],
               Literal("Named time periods for the site.")))

        orig_url = vurl.replace('https://', 'http://')
        if orig_url and orig_url != vurl:
            g.add((URIRef(vurl), OWL['sameAs'], URIRef(orig_url)))

        key = vocab_name.replace('-', '_')
        vocab = get_vocabulary(key)
        for term in vocab:
            g = self.concept(vocab_name, term, g)

        return g
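Assuming place_graph() returns an rdflib Graph (consistent with the g.add() calls throughout), the finished scheme can be serialized for inspection; grapher below is a hypothetical instance of the class that defines scheme():

g = grapher.scheme('time-periods')
print(g.serialize(format='turtle'))  # rdflib serialization, e.g. as Turtle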
Example 14
 def terms(self):
     return get_vocabulary(self.vocabkey)
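Elsewhere in these examples the returned terms are read as t['id'] and t['title'], so each term is presumably a dict. A hypothetical return value:

# Inferred shape of get_vocabulary() output:
# [{'id': 'roman', 'title': 'Roman'},
#  {'id': 'classical', 'title': 'Classical'}, ...]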
Example 15
    def __call__(self):
        context = self.context
        atctool = getToolByName(context, 'portal_atct')
        vtool = getToolByName(context, 'portal_vocabularies')
        utils = getToolByName(context, 'plone_utils')

        # The indexes may or may not exist already; ignore errors either way.
        try:
            atctool.removeIndex("getFeatureType")
        except Exception:
            pass
        try:
            atctool.removeIndex("getTimePeriods")
        except Exception:
            pass

        try:
            atctool.addIndex('getFeatureType',
                             'Place/Feature Type',
                             'Type of ancient place or feature',
                             enabled=True)
        except Exception:
            pass
        try:
            atctool.addIndex('getTimePeriods',
                             'Time Periods',
                             'Attested time periods',
                             enabled=True)
        except Exception:
            pass

        v_times = [(t['id'], t['title'])
                   for t in get_vocabulary('time_periods')]
        v_types = [(t['id'], t['title'])
                   for t in get_vocabulary('place_types')]

        # [time]/[type]
        # v_times is a list of (id, title) pairs, so iterate it directly
        for ko, vo in v_times:
            tid = context.invokeFactory('Topic',
                                        id=utils.normalizeString(ko),
                                        title=vo)
            topic = context[tid]
            c = topic.addCriterion('getTimePeriods', 'ATSimpleStringCriterion')
            c.setValue(ko)
            c = topic.addCriterion('portal_type', 'ATPortalTypeCriterion')
            c.setValue('Place')
            topic.setSortCriterion('sortable_title', reversed=False)

            for ki, vi in v_types:
                sid = topic.invokeFactory('Topic',
                                          id=utils.normalizeString(ki),
                                          title=vi)
                subtopic = topic[sid]
                subtopic.setAcquireCriteria(True)
                c = subtopic.addCriterion('getFeatureType',
                                          'ATSimpleStringCriterion')
                c.setValue(ki)
                subtopic.setSortCriterion('sortable_title', reversed=False)

        # [type]/[time]
        for ko, vo in v_types:
            tid = context.invokeFactory('Topic',
                                        id=utils.normalizeString(ko),
                                        title=vo)
            topic = context[tid]
            c = topic.addCriterion('getFeatureType', 'ATSimpleStringCriterion')
            c.setValue(ko)
            c = topic.addCriterion('portal_type', 'ATPortalTypeCriterion')
            c.setValue('Place')
            topic.setSortCriterion('sortable_title', reversed=False)

            for ki, vi in v_times:
                sid = topic.invokeFactory('Topic',
                                          id=utils.normalizeString(ki),
                                          title=vi)
                subtopic = topic[sid]
                subtopic.setAcquireCriteria(True)
                c = subtopic.addCriterion('getTimePeriods',
                                          'ATSimpleStringCriterion')
                c.setValue(ki)
                subtopic.setSortCriterion('sortable_title', reversed=False)

        return 1
Example 16
    def place(self, context, vocabs=True):
        """Create a graph centered on a Place and its Feature."""
        g = place_graph()

        purl = context.absolute_url()
        vh_root = context.REQUEST.environ.get('VH_ROOT')
        if vh_root:
            purl = purl.replace(vh_root, '')

        context_page = purl
        context_subj = URIRef(context_page)
        feature_subj = URIRef(context_page + "#this")

        # Type
        g.add((context_subj, RDF.type, PLEIADES['Place']))

        g.add((context_subj, RDF.type, SKOS['Concept']))
        g.add((
            context_subj,
            SKOS['inScheme'],
            URIRef("https://pleiades.stoa.org/places")))

        # Triples concerning the real world ancient place.
        g.add((feature_subj, RDF.type, SPATIAL['Feature']))

        # primary topic
        g.add((
            feature_subj,
            FOAF['primaryTopicOf'],
            context_subj))

        # title as rdfs:label
        g.add((
            feature_subj,
            RDFS['label'],
            Literal(context.Title())))

        # description as rdfs:comment
        g.add((
            feature_subj,
            RDFS['comment'],
            Literal(context.Description())))

        orig_url = context_page.replace('https://', 'http://')
        if orig_url and orig_url != context_page:
            g.add((context_subj, OWL['sameAs'], URIRef(orig_url)))

        g = self.dcterms(context, g)
        g = self.provenance(context, g, context_subj)

        # Place or feature types

        place_types = get_vocabulary('place_types')
        place_types = dict([(t['id'], t) for t in place_types])
        url = self.portal.absolute_url() + '/vocabularies/place-types'
        vh_root = context.REQUEST.environ.get('VH_ROOT')
        pcats = set(filter(None, context.getPlaceType()))
        for pcat in pcats:
            item = place_types.get(pcat)
            if not item:
                continue
            iurl = url + '/' + pcat
            g.add((
                context_subj,
                PLEIADES['hasFeatureType'],
                URIRef(iurl)))

            if vocabs:
                g = RegVocabGrapher(self.portal, self.request).concept(
                    'place-types', place_types[pcat], g)

        # Names as skos:label and prefLabel
        folder_path = "/".join(context.getPhysicalPath())
        brains = self.catalog(
            path={'query': folder_path, 'depth': 1},
            portal_type='Name',
            review_state='published')
        names = [b.getObject() for b in brains]

        for obj in names:
            name = Literal(
                obj.getNameAttested() or obj.getNameTransliterated(),
                obj.getNameLanguage() or None)
            g.add((
                context_subj,
                SKOS['altLabel'],
                name))

            name_subj = URIRef(context_page + "/" + obj.getId())
            g.add((context_subj, PLEIADES['hasName'], name_subj))
            g.add((name_subj, RDF.type, PLEIADES['Name']))

            orig_url = str(name_subj).replace('https://', 'http://')
            if orig_url and orig_url != str(name_subj):
                g.add((name_subj, OWL['sameAs'], URIRef(orig_url)))

            g = self.dcterms(obj, g)

            g = self.temporal(obj, g, name_subj, vocabs=vocabs)
            g = self.provenance(obj, g, name_subj)
            g = self.references(obj, g, name_subj)

            nameAttested = obj.getNameAttested()
            if nameAttested:
                g.add((
                    name_subj,
                    PLEIADES['nameAttested'],
                    Literal(nameAttested, obj.getNameLanguage() or None)))

            for nr in obj.getNameTransliterated().split(','):
                nr = nr.strip()
                g.add((name_subj, PLEIADES['nameRomanized'], Literal(nr)))

        # representative point
        xs = []
        ys = []
        folder_path = "/".join(context.getPhysicalPath())
        brains = self.catalog(
            path={'query': folder_path, 'depth': 1},
            portal_type='Location',
            review_state='published')
        locs = [b.getObject() for b in brains]
        features = [wrap(ob, 0) for ob in locs]

        # get representative point
        loc_prec = location_precision(context)
        if loc_prec == 'precise':
            repr_point = None
            for f in features:
                if f.geometry and hasattr(f.geometry, '__geo_interface__'):
                    shape = asShape(f.geometry)
                    b = shape.bounds
                    xs.extend([b[0], b[2]])
                    ys.extend([b[1], b[3]])
                    if repr_point is None:
                        repr_point = shape.centroid
            if len(xs) * len(ys) > 0:
                bbox = [min(xs), min(ys), max(xs), max(ys)]
            else:
                bbox = None

            if repr_point:
                g.add((
                    context_subj,
                    GEO['lat'],
                    Literal(repr_point.y)))
                g.add((
                    context_subj,
                    GEO['long'],
                    Literal(repr_point.x)))
            elif bbox:
                g.add((
                    context_subj,
                    GEO['lat'],
                    Literal((bbox[1]+bbox[3])/2.0)))
                g.add((
                    context_subj,
                    GEO['long'],
                    Literal((bbox[0]+bbox[2])/2.0)))
        elif loc_prec == 'rough':
            for loc in locs:
                ref = loc.getLocation()
                gridbase = "http://atlantides.org/capgrids/"
                if ref and ref.startswith(gridbase):
                    try:
                        params = ref.rstrip("/")[len(gridbase):].split("/")
                        if len(params) == 1:
                            mapnum = params[0]
                            grids = [None]
                        elif len(params) == 2:
                            mapnum = params[0]
                            grids = [v.upper() for v in params[1].split("+")]
                        else:
                            log.error("Invalid location identifier %s" % ref)
                            continue
                        for grid in grids:
                            grid_uri = gridbase + mapnum + "#" + (grid or "this")
                            g.add((
                                context_subj,
                                OSSPATIAL['within'],
                                URIRef(grid_uri)))

                            e = URIRef(grid_uri + "-extent")  # the grid's extent
                            g.add((e, RDF.type, OSGEO['AbstractGeometry']))
                            g.add((
                                URIRef(grid_uri),
                                OSGEO['extent'],
                                e))
                            bounds = capgrids.box(mapnum, grid)
                            shape = box(*bounds)
                            g.add((
                                e,
                                OSGEO['asGeoJSON'],
                                Literal(geojson.dumps(shape))))
                            g.add((
                                e,
                                OSGEO['asWKT'],
                                Literal(wkt.dumps(shape))))
                    except (ValueError, TypeError):
                        log.exception("Exception caught computing grid extent for %r", loc)

        # Locations
        for obj in locs:

            locn_subj = URIRef(context_page + "/" + obj.getId())
            g.add((context_subj, PLEIADES['hasLocation'], locn_subj))
            g.add((locn_subj, RDF.type, PLEIADES['Location']))

            g = self.dcterms(obj, g)

            g = self.temporal(obj, g, locn_subj, vocabs=vocabs)
            g = self.provenance(obj, g, locn_subj)
            g = self.references(obj, g, locn_subj)

            orig_url = str(locn_subj).replace('https://', 'http://')
            if orig_url and orig_url != str(locn_subj):
                g.add((locn_subj, OWL['sameAs'], URIRef(orig_url)))

            dc_locn = obj.getLocation()
            gridbase = "http://atlantides.org/capgrids/"

            if dc_locn and dc_locn.startswith(gridbase):
                try:
                    params = dc_locn.rstrip("/")[len(gridbase):].split("/")
                    if len(params) == 1:
                        mapnum = params[0]
                        grids = [None]
                    elif len(params) == 2:
                        mapnum = params[0]
                        grids = [v.upper() for v in params[1].split("+")]
                    elif len(params) >= 3:
                        mapnum = params[0]
                        grids = [v.upper() for v in params[1:]]
                    else:
                        log.error("Invalid location identifier %s" % ref)
                        continue

                    for grid in grids:
                        grid_uri = gridbase + mapnum + "#" + (grid or "this")
                        bounds = capgrids.box(mapnum, grid)
                        shape = box(*bounds)

                        g.add((
                            locn_subj,
                            OSSPATIAL['partiallyOverlaps'],
                            URIRef(grid_uri)))

                        e = URIRef(grid_uri + "-extent") # the grid's extent
                        g.add((e, RDF.type, OSGEO['AbstractGeometry']))
                        g.add((
                            URIRef(grid_uri),
                            OSGEO['extent'],
                            e))
                        g.add((
                            e,
                            OSGEO['asGeoJSON'],
                            Literal(geojson.dumps(shape))))
                        g.add((
                            e,
                            OSGEO['asWKT'],
                            Literal(wkt.dumps(shape))))

                except Exception:
                    log.exception("Exception caught computing grid extent for %r", obj)

            else:
                try:
                    f = wrap(obj, 0)
                    if (f.geometry and
                            hasattr(f.geometry, '__geo_interface__')):
                        shape = asShape(f.geometry)
                        g.add((
                            locn_subj,
                            OSGEO['asGeoJSON'],
                            Literal(geojson.dumps(shape))))
                        g.add((
                            locn_subj,
                            OSGEO['asWKT'],
                            Literal(wkt.dumps(shape))))
                except Exception:
                    log.exception("Couldn't wrap and graph %r", obj)

        # connects with
        for f in context.getConnectedPlaces():
            if self.wftool.getInfoFor(f, 'review_state') != 'published':
                continue
            furl = f.absolute_url()
            vh_root = context.REQUEST.environ.get('VH_ROOT')
            if vh_root:
                furl = furl.replace(vh_root, '')
            feature_obj = URIRef(furl + "#this")
            g.add((feature_subj, SPATIAL['C'], feature_obj))
            g.add((context_subj, RDFS['seeAlso'], URIRef(furl)))

        # dcterms:coverage
        coverage = geoContext(context)
        if coverage:
            g.add((
                context_subj,
                DCTERMS['coverage'],
                Literal(coverage) ))

        g = self.references(context, g, context_subj)

        return g
Example 17
 def periodRanges(self):
     vocab = get_vocabulary('time_periods')
     return periodRanges(vocab)
Example 18
    def place(self, context, vocabs=True):
        """Create a graph centered on a Place and its Feature."""
        g = place_graph()

        purl = context.absolute_url()
        vh_root = context.REQUEST.environ.get('VH_ROOT')
        if vh_root:
            purl = purl.replace(vh_root, '')

        context_page = purl
        context_subj = URIRef(context_page)
        feature_subj = URIRef(context_page + "#this")

        # Type
        g.add((context_subj, RDF.type, PLEIADES['Place']))

        g.add((context_subj, RDF.type, SKOS['Concept']))
        g.add((context_subj, SKOS['inScheme'],
               URIRef("https://pleiades.stoa.org/places")))

        # Triples concerning the real world ancient place.
        g.add((feature_subj, RDF.type, SPATIAL['Feature']))

        # primary topic
        g.add((feature_subj, FOAF['primaryTopicOf'], context_subj))

        # title as rdfs:label
        g.add((feature_subj, RDFS['label'], Literal(context.Title())))

        # description as rdfs:comment
        g.add((feature_subj, RDFS['comment'], Literal(context.Description())))

        orig_url = context_page.replace('https://', 'http://')
        if orig_url and orig_url != context_page:
            g.add((context_subj, OWL['sameAs'], URIRef(orig_url)))

        g = self.dcterms(context, g)
        g = self.provenance(context, g, context_subj)

        # Place or feature types

        place_types = get_vocabulary('place_types')
        place_types = dict([(t['id'], t) for t in place_types])
        url = self.portal.absolute_url() + '/vocabularies/place-types'
        vh_root = context.REQUEST.environ.get('VH_ROOT')
        pcats = set(filter(None, context.getPlaceType()))
        for pcat in pcats:
            item = place_types.get(pcat)
            if not item:
                continue
            iurl = url + '/' + pcat
            g.add((context_subj, PLEIADES['hasFeatureType'], URIRef(iurl)))

            if vocabs:
                g = RegVocabGrapher(self.portal, self.request).concept(
                    'place-types', place_types[pcat], g)

        # Names as skos:label and prefLabel
        folder_path = "/".join(context.getPhysicalPath())
        brains = self.catalog(
            path={'query': folder_path, 'depth': 1},
            portal_type='Name',
            review_state='published')
        names = [b.getObject() for b in brains]

        for obj in names:
            name = Literal(
                obj.getNameAttested() or obj.getNameTransliterated(),
                obj.getNameLanguage() or None)
            g.add((context_subj, SKOS['altLabel'], name))

            name_subj = URIRef(context_page + "/" + obj.getId())
            g.add((context_subj, PLEIADES['hasName'], name_subj))
            g.add((name_subj, RDF.type, PLEIADES['Name']))

            orig_url = str(name_subj).replace('https://', 'http://')
            if orig_url and orig_url != str(name_subj):
                g.add((name_subj, OWL['sameAs'], URIRef(orig_url)))

            g = self.dcterms(obj, g)

            g = self.temporal(obj, g, name_subj, vocabs=vocabs)
            g = self.provenance(obj, g, name_subj)
            g = self.references(obj, g, name_subj)

            nameAttested = obj.getNameAttested()
            if nameAttested:
                g.add((name_subj, PLEIADES['nameAttested'],
                       Literal(nameAttested,
                               obj.getNameLanguage() or None)))

            for nr in obj.getNameTransliterated().split(','):
                nr = nr.strip()
                g.add((name_subj, PLEIADES['nameRomanized'], Literal(nr)))

        # representative point
        xs = []
        ys = []
        folder_path = "/".join(context.getPhysicalPath())
        brains = self.catalog(
            path={'query': folder_path, 'depth': 1},
            portal_type='Location',
            review_state='published')
        locs = [b.getObject() for b in brains]
        features = [wrap(ob, 0) for ob in locs]

        # get representative point
        loc_prec = location_precision(context)
        if loc_prec == 'precise':
            repr_point = None
            for f in features:
                if f.geometry and hasattr(f.geometry, '__geo_interface__'):
                    shape = asShape(f.geometry)
                    b = shape.bounds
                    xs.extend([b[0], b[2]])
                    ys.extend([b[1], b[3]])
                    if repr_point is None:
                        repr_point = shape.centroid
            if len(xs) * len(ys) > 0:
                bbox = [min(xs), min(ys), max(xs), max(ys)]
            else:
                bbox = None

            if repr_point:
                g.add((context_subj, GEO['lat'], Literal(repr_point.y)))
                g.add((context_subj, GEO['long'], Literal(repr_point.x)))
            elif bbox:
                g.add((context_subj, GEO['lat'],
                       Literal((bbox[1] + bbox[3]) / 2.0)))
                g.add((context_subj, GEO['long'],
                       Literal((bbox[0] + bbox[2]) / 2.0)))
        elif loc_prec == 'rough':
            for loc in locs:
                ref = loc.getLocation()
                gridbase = "http://atlantides.org/capgrids/"
                if ref and ref.startswith(gridbase):
                    try:
                        params = ref.rstrip("/")[len(gridbase):].split("/")
                        if len(params) == 1:
                            mapnum = params[0]
                            grids = [None]
                        elif len(params) == 2:
                            mapnum = params[0]
                            grids = [v.upper() for v in params[1].split("+")]
                        else:
                            log.error("Invalid location identifier %s" % ref)
                            continue
                        for grid in grids:
                            grid_uri = gridbase + mapnum + "#" + (grid or "this")
                            g.add((context_subj, OSSPATIAL['within'],
                                   URIRef(grid_uri)))

                            e = URIRef(grid_uri + "-extent")  # the grid's extent
                            g.add((e, RDF.type, OSGEO['AbstractGeometry']))
                            g.add((URIRef(grid_uri), OSGEO['extent'], e))
                            bounds = capgrids.box(mapnum, grid)
                            shape = box(*bounds)
                            g.add((e, OSGEO['asGeoJSON'],
                                   Literal(geojson.dumps(shape))))
                            g.add((e, OSGEO['asWKT'],
                                   Literal(wkt.dumps(shape))))
                    except (ValueError, TypeError):
                        log.exception(
                            "Exception caught computing grid extent for %r",
                            loc)

        # Locations
        for obj in locs:

            locn_subj = URIRef(context_page + "/" + obj.getId())
            g.add((context_subj, PLEIADES['hasLocation'], locn_subj))
            g.add((locn_subj, RDF.type, PLEIADES['Location']))

            g = self.dcterms(obj, g)

            g = self.temporal(obj, g, locn_subj, vocabs=vocabs)
            g = self.provenance(obj, g, locn_subj)
            g = self.references(obj, g, locn_subj)

            orig_url = str(locn_subj).replace('https://', 'http://')
            if orig_url and orig_url != str(locn_subj):
                g.add((locn_subj, OWL['sameAs'], URIRef(orig_url)))

            dc_locn = obj.getLocation()
            gridbase = "http://atlantides.org/capgrids/"

            if dc_locn and dc_locn.startswith(gridbase):
                try:
                    params = dc_locn.rstrip("/")[len(gridbase):].split("/")
                    if len(params) == 1:
                        mapnum = params[0]
                        grids = [None]
                    elif len(params) == 2:
                        mapnum = params[0]
                        grids = [v.upper() for v in params[1].split("+")]
                    elif len(params) >= 3:
                        mapnum = params[0]
                        grids = [v.upper() for v in params[1:]]
                    else:
                        log.error("Invalid location identifier %s" % ref)
                        continue

                    for grid in grids:
                        grid_uri = gridbase + mapnum + "#" + (grid or "this")
                        bounds = capgrids.box(mapnum, grid)
                        shape = box(*bounds)

                        g.add((locn_subj, OSSPATIAL['partiallyOverlaps'],
                               URIRef(grid_uri)))

                        e = URIRef(grid_uri + "-extent")  # the grid's extent
                        g.add((e, RDF.type, OSGEO['AbstractGeometry']))
                        g.add((URIRef(grid_uri), OSGEO['extent'], e))
                        g.add((e, OSGEO['asGeoJSON'],
                               Literal(geojson.dumps(shape))))
                        g.add((e, OSGEO['asWKT'], Literal(wkt.dumps(shape))))

                except Exception:
                    log.exception(
                        "Exception caught computing grid extent for %r", obj)

            else:
                try:
                    f = wrap(obj, 0)
                    if (f.geometry
                            and hasattr(f.geometry, '__geo_interface__')):
                        shape = asShape(f.geometry)
                        g.add((locn_subj, OSGEO['asGeoJSON'],
                               Literal(geojson.dumps(shape))))
                        g.add((locn_subj, OSGEO['asWKT'],
                               Literal(wkt.dumps(shape))))
                except Exception:
                    log.exception("Couldn't wrap and graph %r", obj)

        # connects with
        for f in context.getConnectedPlaces():
            if self.wftool.getInfoFor(f, 'review_state') != 'published':
                continue
            furl = f.absolute_url()
            vh_root = context.REQUEST.environ.get('VH_ROOT')
            if vh_root:
                furl = furl.replace(vh_root, '')
            feature_obj = URIRef(furl + "#this")
            g.add((feature_subj, SPATIAL['C'], feature_obj))
            g.add((context_subj, RDFS['seeAlso'], URIRef(furl)))

        # dcterms:coverage
        coverage = geoContext(context)
        if coverage:
            g.add((context_subj, DCTERMS['coverage'], Literal(coverage)))

        g = self.references(context, g, context_subj)

        return g