Example #1
# Imports needed by this snippet; `utils`, `settings` and `get_application`
# come from the surrounding project.
import logging
import os
import sys

import tornado.ioloop


def main():
    if len(sys.argv) == 2:
        filepath = sys.argv[1]
    else:
        filepath = None
    utils.load_settings(filepath=filepath)
    utils.init_db()
    application = get_application()
    application.listen(settings["PORT"], xheaders=True)
    pid = os.getpid()
    if settings["PIDFILE"]:
        with open(settings["PIDFILE"], "w") as pf:
            pf.write(str(pid))
    logging.info(f"web server PID {pid} at {settings['BASE_URL']}")
    tornado.ioloop.IOLoop.instance().start()
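The snippet above depends on project helpers (`get_application`, `utils`, the `settings` dict). Below is a minimal, self-contained sketch of the same start-up pattern, assuming only the public Tornado API; the handler, port and PID-file path are placeholders.

import logging
import os

import tornado.ioloop
import tornado.web


class HelloHandler(tornado.web.RequestHandler):
    def get(self):
        self.write("hello")


def run(port=8885, pidfile=None):
    application = tornado.web.Application([(r"/", HelloHandler)])
    application.listen(port, xheaders=True)   # trust X-Real-Ip/X-Scheme headers from a proxy
    pid = os.getpid()
    if pidfile:
        # Record the PID so a wrapper script can stop or restart the server.
        with open(pidfile, "w") as pf:
            pf.write(str(pid))
    logging.info("web server PID %s on port %s", pid, port)
    tornado.ioloop.IOLoop.current().start()


if __name__ == "__main__":
    run()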
Example #2
                # Snippet begins mid-function: the enclosing loop of
                # add_label_to_publications(db, label, qualifier, identifiers)
                # is truncated above this line.
                with PublicationSaver(doc=publ, db=db) as saver:
                    labels = publ['labels'].copy()
                    labels[label] = qualifier
                    saver['labels'] = labels
                count += 1
    print("Label '%s/%s' added to %i publications" % (label, qualifier, count))
    for error in errors:
        print(error)


if __name__ == '__main__':
    parser = utils.get_command_line_parser(
        'Add a label to all publications in a list.')
    parser.add_argument('--label', action='store', dest='label',
                        default=None, help='label to add')
    parser.add_argument('--qualifier', action='store', dest='qualifier',
                        default=None, help='qualifier of label to add')
    parser.add_argument('--file', action='store', dest='idfile',
                        metavar='IDFILE',
                        help='path to file containing publication identifiers')
    args = parser.parse_args()
    utils.load_settings(filepath=args.settings)
    db = utils.get_db()
    identifiers = []
    with open(args.idfile) as infile:
        for line in infile:
            line = line.strip()
            if line:
                identifiers.append(line)
    print(len(identifiers), 'identifiers')
    add_label_to_publications(db, args.label, args.qualifier, identifiers)
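`PublicationSaver` is project code, but the pattern is recognisable: a context manager that wraps a document, accepts item assignment, and persists the document when the `with` block exits cleanly. A hypothetical sketch of such a saver, assuming the database object exposes `save(doc)` as in Example #3:

class DocumentSaver:
    "Hypothetical saver: collect changes to a document, write it back on exit."

    def __init__(self, doc, db):
        self.doc = doc
        self.db = db

    def __enter__(self):
        return self

    def __setitem__(self, key, value):
        self.doc[key] = value

    def __exit__(self, exc_type, exc, tb):
        if exc_type is None:
            self.db.save(self.doc)   # persist only if the block raised no exception
        return False                 # never swallow exceptions


# Usage mirroring the example above:
#     with DocumentSaver(doc=publ, db=db) as saver:
#         saver["labels"] = labels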
Example #3
            # Snippet begins mid-loop inside undump(db, filepath); the code that
            # reads each archive member into `itemdata` is truncated above.
            count_files += 1
        else:
            doc = json.loads(itemdata)
            atts = doc.pop('_attachments', dict())
            db.save(doc)
            count_items += 1
            for attname, attinfo in list(atts.items()):
                key = "{0}_att/{1}".format(doc['_id'], attname)
                attachments[key] = dict(filename=attname,
                                        content_type=attinfo['content_type'])
        if count_items % 100 == 0:
            logging.info("%s items loaded...", count_items)
    infile.close()
    designs.regenerate_indexes(db)
    logging.info("undumped %s items and %s files from %s", count_items,
                 count_files, filepath)


if __name__ == '__main__':
    parser = utils.get_command_line_parser('Load tar.gz dump file'
                                           ' into the database.')
    parser.add_argument('dumpfile',
                        metavar='FILE',
                        type=str,
                        help='Dump file to load into the database.')
    args = parser.parse_args()
    utils.load_settings(filepath=args.settings, ignore_logging_filepath=True)
    db = utils.get_db()
    utils.initialize(db)
    undump(db, args.dumpfile)
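The loop above starts mid-stream; the part that reads each archive member into `itemdata` is truncated. A hypothetical sketch of that missing step, using only the standard-library `tarfile` module:

import tarfile


def iter_dump(filepath):
    "Yield (member_name, bytes) for every regular file in a .tar.gz dump."
    with tarfile.open(filepath, mode="r:gz") as infile:
        for member in infile:
            if member.isfile():
                yield member.name, infile.extractfile(member).read()


if __name__ == "__main__":
    # "dump.tar.gz" is a placeholder path.
    for name, itemdata in iter_dump("dump.tar.gz"):
        print(name, len(itemdata))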
Example #4
def cli(settings, log):
    utils.load_settings(settings, log=log)
Example #5
    # Snippet begins mid-function: `union`, `function`, `identifier`, `left`
    # and `right` are pyparsing elements defined above the visible part.
    symdifference = pp.Literal("^").set_parse_action(_Symdifference)
    intersection = pp.Literal("#").set_parse_action(_Intersection)
    difference = pp.Literal("-").set_parse_action(_Difference)
    operator = union | symdifference | difference | intersection

    expression = pp.Forward()
    atom = function | identifier | pp.Group(left + expression + right)
    expression <<= atom + (operator + atom)[...]
    expression.set_parse_action(_Expression)
    expression.ignore("!" + pp.rest_of_line)
    return expression


if __name__ == "__main__":
    logging.getLogger().disabled = True
    utils.load_settings()
    db = utils.get_db()

    parser = get_parser()

    variables = dict(blah=Subset(db, year="2010"))

    line = """((label(Clinical Genomics Linköping) +
 label(Clinical Genomics Gothenburg)) +
 label(Clinical Genomics Lund) +
 label(Clinical Genomics Uppsala) +
 label(Clinical Genomics Stockholm) +
 label(Clinical Genomics Umeå) + 
 label(Clinical Genomics Örebro)) #
 (year(2020) + year(2019) +year(2018) + year(2017)) + blah"""
    print(">>>", get_subset(db, line, variables=variables))
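The grammar above relies on elements defined before the visible part of the snippet. Here is a toy, self-contained sketch of the same pyparsing pattern: a Forward expression, parenthesised groups, and a repeated "operator atom" tail. The grammar and input are made up for illustration.

import pyparsing as pp


def get_toy_parser():
    left = pp.Suppress("(")
    right = pp.Suppress(")")
    identifier = pp.Word(pp.alphas, pp.alphanums + "_")
    operator = pp.one_of("+ ^ # -")

    expression = pp.Forward()
    atom = identifier | pp.Group(left + expression + right)
    expression <<= atom + (operator + atom)[...]   # an atom followed by zero or more "operator atom"
    expression.ignore("!" + pp.rest_of_line)       # "!" starts a comment, as in the example
    return expression


print(get_toy_parser().parse_string("(a + b) # c", parse_all=True))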
Example #6
# Requires os, logging, tornado.web and tornado.ioloop; `settings`, `utils`,
# `uimodules` and the request-handler classes referenced below come from the
# surrounding project.
def main():
    args = get_args()
    utils.load_settings(filepath=args.settings)
    utils.initialize()

    url = tornado.web.url
    handlers = [
        url(r'/', Home, name='home'),
        url(r'/site/([^/]+)',
            tornado.web.StaticFileHandler, {'path': settings['SITE_DIR']},
            name='site'),
        url(r'/publication/(.+).json',
            PublicationJson,
            name='publication_json'),
        url(r'/publication/(.+)', Publication, name='publication'),
        url(r'/publications/(\d{4})', Publications, name='publications_year'),
        url(r'/publications/(\d{4}).json',
            PublicationsJson,
            name='publications_year_json'),
        url(r'/publications', Publications, name='publications'),
        url(r'/publications.json', PublicationsJson, name='publications_json'),
        url(r'/publications/csv', PublicationsCsv, name='publications_csv'),
        url(r'/publications/xlsx', PublicationsXlsx, name='publications_xlsx'),
        url(r'/publications/table/(\d{4})',
            PublicationsTable,
            name='publications_table_year'),
        url(r'/publications/table',
            PublicationsTable,
            name='publications_table'),
        url(r'/publications/acquired',
            PublicationsAcquired,
            name='publications_acquired'),
        url(r'/publications/no_pmid',
            PublicationsNoPmid,
            name='publications_no_pmid'),
        url(r'/publications/no_doi',
            PublicationsNoDoi,
            name='publications_no_doi'),
        url(r'/publications/no_label',
            PublicationsNoLabel,
            name='publications_no_label'),
        url(r'/publications/duplicates',
            PublicationsDuplicates,
            name='publications_duplicates'),
        url(r'/publications/modified',
            PublicationsModified,
            name='publications_modified'),
        url(r'/edit/([^/]+)', PublicationEdit, name='publication_edit'),
        url(r'/xrefs/([^/]+)', PublicationXrefs, name='publication_xrefs'),
        url(r'/add', PublicationAdd, name='publication_add'),
        url(r'/fetch', PublicationFetch, name='publication_fetch'),
        url(r'/blacklist/([^/]+)',
            PublicationBlacklist,
            name='publication_blacklist'),
        url(r'/acquire/([^/]+)',
            PublicationAcquire,
            name='publication_acquire'),
        url(r'/release/([^/]+)',
            PublicationRelease,
            name='publication_release'),
        url(r'/qc/([^/]+)', PublicationQc, name='publication_qc'),
        url(r'/update/([^/]+)/pmid',
            PublicationUpdatePmid,
            name='publication_update_pmid'),
        url(r'/update/([^/]+)/doi',
            PublicationUpdateDoi,
            name='publication_update_doi'),
        url(r'/journals', Journals, name='journals'),
        url(r'/journals.json', JournalsJson, name='journals_json'),
        url(r'/journal/([^/]+).json', JournalJson, name='journal_json'),
        url(r'/journal/([^/]+)', Journal, name='journal'),
        url(r'/journal/([^/]+)/edit', JournalEdit, name='journal_edit'),
        url(r'/labels', LabelsList, name='labels'),
        url(r'/labels.json', LabelsJson, name='labels_json'),
        url(r'/labels/table', LabelsTable, name='labels_table'),
        # These two label path patterns need to be checked first.
        url(r'/label/([^\.]+)/edit', LabelEdit, name='label_edit'),
        url(r'/label/([^\.]+)/merge', LabelMerge, name='label_merge'),
        url(r'/label/([^\.]+).json', LabelJson, name='label_json'),
        url(r'/label/([^\.]+)', Label, name='label'),
        url(r'/label', LabelAdd, name='label_add'),
        url(r'/account/reset', AccountReset, name='account_reset'),
        url(r'/account/password', AccountPassword, name='account_password'),
        url(r'/account/([^/]+).json', AccountJson, name='account_json'),
        url(r'/account/([^/]+)', Account, name='account'),
        url(r'/account/([^/]+)/edit', AccountEdit, name='account_edit'),
        url(r'/account/([^/]+)/disable',
            AccountDisable,
            name='account_disable'),
        url(r'/account/([^/]+)/enable', AccountEnable, name='account_enable'),
        url(r'/accounts', Accounts, name='accounts'),
        url(r'/accounts.json', AccountsJson, name='accounts_json'),
        url(r'/account', AccountAdd, name='account_add'),
        url(r'/search', Search, name='search'),
        url(r'/search.json', SearchJson, name='search_json'),
        url(r'/logs/([^/]+)', Logs, name='logs'),
        url(r'/contact', Contact, name='contact'),
        url(r'/settings', Settings, name='settings'),
        url(r'/login', Login, name='login'),
        url(r'/logout', Logout, name='logout'),
        url(r'/api/publication',
            ApiPublicationFetch,
            name='api_publication_fetch'),
    ]

    os.chdir(settings['ROOT'])
    application = tornado.web.Application(
        handlers=handlers,
        debug=settings.get('TORNADO_DEBUG', False),
        cookie_secret=settings['COOKIE_SECRET'],
        xsrf_cookies=True,
        ui_modules=uimodules,
        template_path='templates',
        static_path='static',
        login_url=r'/login')
    application.listen(settings['PORT'], xheaders=True)
    pid = os.getpid()
    logging.info("web server PID %s at %s", pid, settings['BASE_URL'])
    if args.pidfile:
        with open(args.pidfile, 'w') as pf:
            pf.write(str(pid))
    tornado.ioloop.IOLoop.instance().start()
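Each route above is given a name= so that handlers and templates can build URLs without hard-coding paths. A small sketch of Tornado's reverse routing, reusing one pattern from the table above; the handler here is a stand-in, not the project's Publication class.

import tornado.web


class PublicationStub(tornado.web.RequestHandler):
    def get(self, identifier):
        # Build a link back to this same route by name instead of hard-coding the path.
        self.write({"href": self.reverse_url("publication", identifier)})


application = tornado.web.Application(
    [tornado.web.url(r"/publication/(.+)", PublicationStub, name="publication")])

# Reverse routing outside a request, e.g. in tests or scripts:
assert application.reverse_url("publication", "abc123") == "/publication/abc123"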