def local_person_uri(name):
    "Calculate a local URI for a person based on their name."
    slug = slugify(name)
    if slug in ProfileUris.name_conversions:
        slug = ProfileUris.name_conversions[slug]

    uri = local_uri(reverse('people:profile', args=[slug]))
    return uri
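# Illustrative call (a sketch; the exact slug depends on slugify and on any
# override in ProfileUris.name_conversions, and the final URI on the current
# Site configuration):
#   local_person_uri('Some Name')
#   # slugify -> 'some-name'; reverse('people:profile', args=['some-name'])
#   # gives the relative path, which local_uri expands to an absolute URI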
    def test_local_uri(self):
        site = Site.objects.get(id=2)
        with override_settings(SITE_ID=site.id):
            # no leading slash
            path = 'some/path'
            luri = local_uri(path)
            self.assertEqual('http://%s/%s' % (site.domain, path),
                             luri)
            # leading slash
            luri = local_uri('/' + path)
            self.assertEqual('http://%s/%s' % (site.domain, path),
                             luri)

        # site with subdomain
        site = Site.objects.get(id=3)
        with override_settings(SITE_ID=site.id):
            luri = local_uri(path)
            self.assertEqual('http://%s/%s' % (site.domain, path),
                             luri)

            # path starts with domain base path (i.e. generated by reverse)
            luri = local_uri('/belfast/' + path)
            self.assertEqual('http://%s/%s' % (site.domain, path),
                             luri)
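
        # A minimal summary of the local_uri contract exercised above (inferred
        # from the assertions, not from the implementation itself):
        #   - the current django.contrib.sites Site supplies the domain
        #   - a leading slash on the path is ignored
        #   - when the Site domain includes a base path (the '/belfast/' case),
        #     a reverse()-generated path repeating that base is not doubled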
def egograph_js(request, id):
    "Egograph information as JSON for a single person."
    uri = local_uri(reverse("people:profile", args=[id]), request)
    g = rdf_data()
    person = RdfPerson(g, rdflib.URIRef(uri))
    graph = person.ego_graph(radius=1, types=["Person", "Organization", "Place"])
    # annotate nodes in the graph with degree and centrality measures
    # NOTE: not a directional graph, so in/out degree not available
    graph = annotate_graph(
        graph, fields=["degree", "in_degree", "out_degree", "betweenness_centrality", "eigenvector_centrality"]
    )

    data = json_graph.node_link_data(graph)
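    # node_link_data returns a plain dict with 'nodes' and 'links' lists (plus
    # directed/multigraph/graph metadata), a common input format for d3-style
    # force layouts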
    return HttpResponse(json.dumps(data), content_type="application/json")
def profile(request, id):
    "Display a profile page for a single person associated with the Belfast Group."
    uri = local_uri(reverse("people:profile", args=[id]), request)
    g = rdf_data()
    uriref = rdflib.URIRef(uri)
    # check that the generated URI is actually a person in our rdf dataset;
    # if not, 404
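    # ((s, p, o) in g is rdflib's triple-membership test: true only when the
    # dataset contains that exact triple)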
    if (uriref, rdflib.RDF.type, rdfns.SCHEMA_ORG.Person) not in g:
        raise Http404
    person = RdfPerson(g, uriref)
    groupsheets = get_rdf_groupsheets(author=uri)  # TODO: move to rdfperson class

    return render(
        request,
        "people/profile.html",
        {"person": person, "groupsheets": groupsheets, "page_rdf_type": "schema:ProfilePage"},
    )
def egograph_node_info(request, id):
    """HTML snippet to provide information about a node in the egograph.
    Intended to be loaded and displayed via AJAX.

    Some overlap with :meth:`belfast.network.views.node_info`.
    """

    # id is the person to whom this node is connected
    uri = local_uri(reverse("people:profile", args=[id]), request)
    g = rdf_data()
    ego_person = RdfPerson(g, rdflib.URIRef(uri))

    # NOTE: some overlap here with networks node_info view

    # id query parameter identifies the node we want information about
    node_id = request.GET.get("id", None)
    if node_id is None:
        raise Http404

    node_uri = rdflib.URIRef(node_id)
    # TODO: better to get relations from gexf or from rdf?
    graph = gexf.read_gexf(settings.GEXF_DATA["full"])
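    # graph.node[node_id] is the node's attribute dict in the gexf-based
    # network (networkx 1.x API); an id missing from the full graph raises
    # KeyError here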
    node = graph.node[node_id]
    context = {"node": node}

    if node.get("type", None) == "Person":
        # init rdf person
        person = RdfPerson(rdf_data(), rdflib.URIRef(node_id))
        context["person"] = person

    # determine relation between node and ego-center
    rels = set(g.predicates(ego_person.identifier, node_uri))
    # TODO: may want to display other relationships?

    # special case: if "mentions", should be a poem; find for display/link
    if rdfns.SCHEMA_ORG.mentions in rels:
        txts = set(g.subjects(rdfns.SCHEMA_ORG.mentions, node_uri)) - set([ego_person.identifier])
        if txts:
            poems = [RdfPoem(g, p) for p in txts]
            # explicitly skip any non-poems, just in case
            context["poems"] = [p for p in poems if rdfns.FREEBASE["book/poem"] in p.rdf_types]

    return render(request, "network/node_info.html", context)
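# Illustrative request for egograph_node_info (a sketch; the real url pattern
# lives in urls.py): the path id selects the ego-center person, and the ?id=
# query parameter carries the full URI of the connected node, e.g.
# ?id=http://example.com/people/some-person/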
def list_groupsheets(request):
    '''View to display a list of all Group sheets from the RDF data.  Includes
    logic to generate and filter by facets for digital editions, authors, coverage
    dates, and source collections.

    Looks for a :class:`~django.contrib.flatpages.models.FlatPage` for this url,
    and if found, passes it to the template for display at the top of the list
    of Group sheets.
    '''

    # get flatpage for this url, if any
    flatpage = get_flatpage(request)

    url_args = {}
    filters = {}
    filter_digital = request.GET.get('edition', None)
    if filter_digital is not None:
        filters['has_url'] = True
        url_args['edition'] = 'digital'

    filter_author = request.GET.get('author', None)
    if filter_author is not None:
        # filter is in slug form; use that to build local uri
        url_args['author'] = filter_author
        author_uri = local_uri(reverse('people:profile', args=[filter_author]),
                               request)
        filters['author'] = author_uri

    filter_source = request.GET.get('source', None)
    if filter_source is not None:
        url_args['source'] = filter_source
        # TODO: preferred label / slugs / local identifier for these?
        # currently arg is the uri
        filters['source'] = filter_source

    filter_time = request.GET.get('dates', None)
    if filter_time is not None:
        url_args['dates'] = filter_time
        filters['coverage'] = filter_time

    results = get_rdf_groupsheets(**filters)
    # TODO: support source filter; make more django-like

    # generate labels/totals for 'facet' filters
    digital_count = 0
    authors = defaultdict(int)
    sources = defaultdict(int)
    time_periods = defaultdict(int)
    for r in results:
        # if not already filtered on digital, get a count
        if filter_digital is None and r.url:
            digital_count += 1
        if filter_author is None:
            # use author list to ensure *all* authors are listed properly
            for author in r.author_list:
                authors[author] += 1
        if filter_source is None:
            for s in r.sources:
                sources[s] += 1
        if filter_time is None:
            time_periods[r.coverage] += 1

    # generate lists of dicts for easy sorting in django template
    authors = [{'author': k, 'total': v} for k, v in authors.iteritems()]
    sources = [{'source': k, 'total': v} for k, v in sources.iteritems()]
    time_periods = [{'time_period': k, 'total': v} for k, v in time_periods.iteritems()]

    facets = {'digital': digital_count, 'authors': authors, 'sources': sources,
              'time_periods': time_periods}

    url_suffix = urllib.urlencode(url_args)
    # if not empty, prepend & for easy combination with other url args
    if url_suffix != '':
        url_suffix = '&%s' % url_suffix
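    # e.g. with author and digital-edition filters active, url_suffix would be
    # something like '&author=<slug>&edition=digital', ready to append to a
    # link that already carries a '?...' query string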

    # generate query args to remove individual filters
    filters = {}
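    # each entry maps a display label for an active filter to the query string
    # that drops just that filter (presumably used for 'remove this filter'
    # links in the template)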
    if filter_digital is not None:
        args = url_args.copy()
        del args['edition']
        filter_args = urllib.urlencode(args)
        filters['digital edition'] = '?' + filter_args
    if filter_author is not None:
        args = url_args.copy()
        del args['author']
        # pull author display name from results
        # TODO: init rdfperson and use that label instead?
        if results:
            for a in results[0].author_list:
                # groupsheets may have multiple authors, so make sure
                # we get the correct label for the active filter
                if str(a.identifier) == author_uri:
                    filters["%s, %s" % (a.lastname, a.firstname)] = '?' + urllib.urlencode(args)
                    break

    if filter_source is not None:
        args = url_args.copy()
        del args['source']
        if results:
            # pull source name from results (TODO: shorter labels)
            for s in results[0].sources:
                # groupsheets may have multiple sources, so make sure
                # we get the correct label
                if str(s.identifier) == filter_source:
                    filters[s.name] = '?' + urllib.urlencode(args)
                    break

    if filter_time is not None:
        args = url_args.copy()
        del args['dates']
        filters[filter_time] = '?' + urllib.urlencode(args)


    return render(request, 'groupsheets/list.html',
                  {'documents': results, 'facets': facets,
                   'url_suffix': url_suffix, 'filters': filters,
                   'flatpage': flatpage})
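# Facet links in the template presumably combine their own query arg with
# url_suffix (e.g. '?edition=digital' + url_suffix) so other active filters
# are preserved; this is inferred from how url_suffix is built above, not from
# the template itself.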