コード例 #1
0
ファイル: base.py プロジェクト: Priya70/pubrefdb
 def get_data_links(self, request):
     """Return the list of links data for the page.

     Includes the optional parent-site link (from configuration), the
     standard listing/search links, admin-only links when the logged-in
     user is an admin, one link per publication year, one link per
     principal investigator, and the misc listing links.
     """
     get_url = request.application.get_url
     links = []
     # Optional link back to an enclosing parent site, if configured.
     if configuration.PARENT_URL:
         link = dict(href=configuration.PARENT_URL,
                     title=configuration.PARENT_TITLE or 'Parent page')
         if configuration.PARENT_LOGO:
             link['image'] = configuration.PARENT_LOGO
         links.append(link)
     links.append(dict(title='Most recent',
                       href=get_url()))
     links.append(dict(title='Search',
                       href=get_url('search')))
     if self.is_login_admin():
         links.append(dict(title='Administration: Edit PI list',
                           href=get_url('pilist')))
         links.append(dict(title='Administration: Add publication',
                           href=get_url('publication')))
         # Space after ':' added for consistency with the titles above.
         links.append(dict(title='Administration: PubMed import',
                           href=get_url('pubmed', 'import')))
         links.append(dict(title='Administration: PubMed fetched',
                           href=get_url('pubmed', 'fetched')))
     years = self.get_years()
     # Most recent year first.
     for year in sorted(years, reverse=True):
         links.append(dict(title="Year (all PIs): %s" % year,
                           href=get_url('year', str(year)),
                           count=years[year]))
     try:
         doc = self.db['pilist']
     except couchdb.http.ResourceNotFound:
         pass                    # No PI list document: skip per-PI links.
     else:
         pis = doc['pis']
         # Case-insensitive sort by name; 'key=' works on both Python 2
         # and 3, unlike the previous cmp-based comparison function.
         pis.sort(key=lambda pi: pi['name'].lower())
         for pi in pis:
             name = pi['name']
             key = to_ascii(name).lower().replace(' ', '_')
             links.append(dict(title="Principal Investigator: %s" % name,
                               href=get_url('author', key)))
     links.append(dict(title='Recently modified',
                       href=get_url('modified')))
     links.append(dict(title='Incomplete info',
                       href=get_url('incomplete')))
     links.append(dict(title='Journals',
                       href=get_url('journals')))
     return links
コード例 #2
0
ファイル: base.py プロジェクト: Priya70/pubrefdb
 def normalize_publication(self, publication, get_url):
     """Normalize the contents of the publication document in place:
     - Rename key '_id' to 'iui' and '_rev' to 'rev'.
     - Remove the '_attachments' entry, if any.
     - Add the 'href' entry.
     - Add the 'alt_href' entry, if a slug is defined.
     - Add an 'href' entry to each author.
     Return the publication for convenience."""
     publication['iui'] = publication.pop('_id')
     publication['rev'] = publication.pop('_rev')
     publication['href'] = get_url(publication['iui'])
     slug = publication.get('slug')
     if slug:
         publication['alt_href'] = get_url(slug)
     publication.pop('_attachments', None)
     # Tolerate documents lacking an 'authors' entry.
     for author in publication.get('authors', []):
         name = get_author_name(author)
         # Same transform order as the PI-list links in get_data_links
         # (ASCII-fold, then lowercase, then replace spaces) so the
         # per-author URLs match; the previous order differed.
         key = to_ascii(name).lower().replace(' ', '_')
         author['href'] = get_url('author', key)
     return publication
コード例 #3
0
ファイル: database.py プロジェクト: Priya70/pubrefdb
def load_pilist(db):
    """Load the PI list document into the database, if not already done.

    The list is read from the CSV file 'data/pilist.csv', where each
    record is: name, affiliation-1, affiliation-2, ...
    If the CSV file is missing, an empty PI list document is stored.
    """
    if db.get('pilist'): return
    # Get the list from a CSV file
    doc = dict(_id='pilist',
               pis=[])
    pis = doc['pis']
    try:
        infile = open('data/pilist.csv')
    except IOError:
        pass                    # No CSV file: store an empty list.
    else:
        # 'with' ensures the file handle is closed; it was leaked before.
        with infile:
            for record in csv.reader(infile):
                if not record: continue   # blank line yields []
                name = record[0].strip()
                if not name: continue
                affiliations = [a.strip() for a in record[1:] if a.strip()]
                pis.append(dict(name=name,
                                normalized_name=to_ascii(name),
                                affiliation=', '.join(affiliations)))
    with MetadataSaver(db, doc):
        pass