def auth_route( username=None, password=None, shadow=False, crypted=False ):
    """Handle the /auth login form.

    ``shadow``/``crypted`` select how the submitted password is encoded
    (crypted wins over shadow when both are set).  On success a session is
    created and the browser is redirected to '/'; on failure it is sent back
    to '/' with a ``logerror`` code (1: missing/bad credentials, 2: account
    disabled, 3: external backend requires plain authentication).
    """
    ws.logger.info('/auth')
    # Both fields are mandatory; bounce straight back to the login page.
    if not username or not password:
        redirect('/?logerror=1')
    # Password transport mode; 'crypted' overrides 'shadow' if both are set.
    mode = 'plain'
    if shadow:
        mode = 'shadow'
    if crypted:
        mode = 'crypted'
    # Try to find user in database
    user = rights.get_user(username)
    # No such user, or it's an external one
    if not user or user.get('external', False):
        # Try to redirect authentication to the external backend
        if mode == 'plain':
            # 307 preserves the request method, so the credentials are
            # re-submitted to the external endpoint.
            response.status = 307
            response.set_header('Location', '/auth/external')
            # NOTE(review): urlencode() is called on bare strings here; if
            # this is urllib's dict-based urlencode it would raise — confirm
            # which urlencode is in scope (quote() may be intended).
            return 'username={0}&password={1}'.format(
                urlencode(username), urlencode(password)
            )
        else:
            #return HTTPError(403, 'Plain authentication required')
            redirect('/?logerror=3')
    # Local authentication: check if account is activated
    if not user.get('enable', False):
        #return HTTPError(403, 'This account is not enabled')
        redirect('/?logerror=2')
    # Verify the credentials against the local account database.
    user = check(mode=mode, user=user, password=password)
    if not user:
        redirect('/?logerror=1')
        #return HTTPError(403, 'Forbidden')
    # Credentials OK: open a session and go to the landing page.
    session.create(user)
    redirect('/')
def search(): q = request.query.q or '' print 'searching', q, 'in weibo' result = client.weibo_search("", q, 0, 20) pub_result = client.user_search("", q, 0, 20) return dict(query=q, encoded_query=urlencode({"q": result.query}), count=result.total_count, results_title='Weibo', results=[ dict(id=e.id, name=e.title, url="http://weibo.com/%s" % e.original_id, description=e.description, stats=dict((s.type, s.value) for s in e.stat), topics=e.topics.split(','), imgurl=e.imgurl) for e in result.entity ], extra_results_list=[ dict(title="Users", items=[ dict(text=pub.title, link="http://weibo.com/u/%s" % pub.original_id) for pub in pub_result.entity ]), ])
def search(): q = request.query.q or '' print 'searching', q, 'in patent' result = client.group_search("", q, 0, 20) pub_result = client.patent_search("", q, 0, 20) return dict( query=q, encoded_query=urlencode({"q": result.query}), count=result.total_count, results_title='Companies', results=[ dict(id=e.id, name=e.title, url="http://pminer.org/company.do?m=viewCompany&id=%s" % e.original_id, description=e.description, stats=dict((s.type, s.value) for s in e.stat), topics=e.topics.split(','), imgurl=e.imgurl) for e in result.entity ], extra_results_list=[ dict( title="Patents", items=[ dict( text=pub.title, link="http://pminer.org/patent.do?m=viewPatent&id=%s" % pub.original_id) for pub in pub_result.entity ]), ])
def search(dataset): q = request.query.q or '' print 'searching', q, 'in academic' result = client.author_search(dataset, q, 0, 20) pub_result = client.pub_search(dataset, q, 0, 20) return dict( query=q, encoded_query=urlencode({"q": result.query}), count=result.total_count, results_title='Experts', results=[ dict(id=e.id, name=e.title, url="http://arnetminer.org/person/-%s.html" % e.original_id, description=e.description, stats=dict((s.type, s.value) for s in e.stat), topics=e.topics.split(','), imgurl=e.imgurl) for e in result.entity ], extra_results_list=[ dict(title="Publications", items=[ dict(text=pub.title, link="http://arnetminer.org/publication/-%s.html" % pub.original_id) for pub in pub_result.entity ]), ])
def search(): q = request.query.q or '' print 'searching', q, 'in weibo' return dict(query=q, count=0, results=[], encoded_query=urlencode({"q": q}))
def search(): q = request.query.q or '' print 'searching', q, 'in weibo' return dict( query=q, count=0, results=[], encoded_query=urlencode({"q": q}) )
def search(): q = request.query.q or '' offset = int(request.query.offset or '0') count = int(request.query.count or '20') print 'searching', q, 'in academic' result = client.author_search("", q, offset, count) pub_result = client.pub_search("", q, 0, 20) return dict( query=q, encoded_query=urlencode({"q": result.query.encode('utf8')}), hotqueries=["data mining", "deep learning"], offset=offset, count=count, total_count=result.total_count, trends_enabled=True, influence_enabled=True, results_title='Experts', results=[ dict( id=e.id, name=e.title, url="http://arnetminer.org/person/-%s.html" % e.original_id, description=e.description, stats=dict( (s.type, s.value) for s in e.stat ), topics=[t.title() for t in e.topics.split(',') if t.strip()], imgurl=e.imgurl, integrated=network_integration.query(e.original_id) ) for e in result.entity ], extra_results_list=[ dict( title="Publications", items=[ dict( text=pub.title, link="http://arnetminer.org/publication/-%s.html" % pub.original_id, stats=dict( (s.type, s.value) for s in pub.stat ), authors=client.author_search_by_id("", list(pub.related_entity[0].id)).entity ) for pub in pub_result.entity ] ), ] )
def search(): q = request.query.q or '' offset = int(request.query.offset or '0') count = int(request.query.count or '20') print 'searching', q, 'in Enron' result = client.person_search(q, offset, count) mail_result = client.mail_search(q, 0, 20) return dict( query=q, encoded_query=urlencode({"q": result.query.encode('utf8')}), hotqueries=["data mining", "deep learning"], offset=offset, count=count, total_count=result.total_count, trends_enabled=False, influence_enabled=False, results_title='People', results=[ dict( id = e.original_id, name="%s <%s>" % (e.title, e.url), url="mailto:%s" % e.url, description=e.description, stats=dict( (s.type, s.value) for s in e.stat ), topics=[t.title() for t in e.topics.split(',') if t.strip()], imgurl="http://www.gravatar.com/avatar/" + gravatar(e.url) + "?d=identicon" ) for x,e in enumerate(result.entity) ], extra_results_list=[ dict( title="Mails", items=[ dict( id = "mail/%s" % e.original_id, text=e.title, link="mail/%s" % e.id, stats=dict( (s.type, s.value) for s in e.stat ), authors=[] ) for e in mail_result.entity ] ), ] )
def get_url(routename, **kwargs):
    '''Customized get_url that allows additional prefix args.

    Builds the URL for *routename* relative to SCRIPT_NAME, optionally wraps
    it through the module-level ``prefix`` redirector, and appends a
    timestamp query argument as a cache buster.  Pass ``redirect=True`` in
    kwargs to skip the prefix wrapping.
    '''
    # Renamed from `redirect`: that name shadows the bottle redirect helper
    # used elsewhere in this file.
    skip_prefix = kwargs.pop('redirect', False)
    scriptname = bottle.request.environ.get('SCRIPT_NAME', '').strip('/') + '/'
    location = app.router.build(routename, **kwargs).lstrip('/')
    url = bottle.urljoin(bottle.urljoin('/', scriptname), location)
    if prefix and not skip_prefix:
        url = prefix + '?' + bottle.urlencode({'q': url})
    # Cache buster: append `_=<timestamp>` with the right separator.
    sep = '&' if '?' in url else '?'
    url += '{}_={}'.format(sep, time.time())
    return url
def search(): q = request.query.q or '' print 'searching', q, 'in academic' result = client.searchAuthors(q) return dict( query=q, count=result.total_count, results=[ dict( id=a.naid, name=a.names[0], email=a.email ) for a in result.authors ], encoded_query=urlencode({"q": q}) )
def search(): q = request.query.q or "" offset = int(request.query.offset or "0") count = int(request.query.count or "20") print "searching", q, "in patent" result = client.group_search("", q, offset, count) pub_result = client.patent_search("", q, 0, 20) return dict( query=q, encoded_query=urlencode({"q": result.query.encode("utf8")}), hotqueries=["data mining", "search engine", "mobile phone"], offset=offset, count=count, total_count=result.total_count, trends_enabled=True, influence_enabled=True, results_title="Companies", results=[ dict( id=e.id, name=e.title, url="http://pminer.org/company.do?m=viewCompany&id=%s" % e.original_id, description=e.description, stats=dict((s.type, s.value) for s in e.stat), topics=e.topics.split(","), imgurl=e.imgurl, ) for e in result.entity ], extra_results_list=[ dict( title="Patents", items=[ dict( text=pub.title, link=r"http://patft1.uspto.gov/netacgi/nph-Parser?Sect1=PTO1&Sect2=HITOFF&d=PALL&p=1&u=/netahtml/PTO/srchnum.htm&r=1&f=G&l=50&s1=%22+7627620+%22.PN.&OS=PN/%22+7627620+%22&RS=PN/%22+7627620".replace( "7627620", pub.url ), stats=dict((s.type, s.value) for s in pub.stat), authors=client.inventor_search_by_id("", list(pub.related_entity[0].id)).entity, ) for pub in pub_result.entity ], ) ], )
def search(): q = request.query.q or "" offset = int(request.query.offset or "0") count = int(request.query.count or "20") print "searching", q, "in weibo" result = client.user_search("", q, 0, 20) pub_result = client.weibo_search("", q, 0, 20) return dict( query=q, encoded_query=urlencode({"q": result.query.encode("utf8")}), hotqueries=[u"高考", u"网络", u"星座", u"天气"], offset=offset, count=count, total_count=result.total_count, results_title="Users", results=[ dict( id=e.id, name=e.title, url="http://weibo.com/u/%s" % e.url, description=e.description, stats=dict((s.type, s.value) for s in e.stat), topics=e.topics.split(","), imgurl=e.imgurl, ) for e in result.entity ], extra_results_list=[ dict( title="Weibo", items=[ dict( text=pub.title, link="", stats=dict((s.type, s.value) for s in pub.stat), user=client.user_search_by_id("", list(pub.related_entity[0].id)).entity if len(pub.related_entity) else [], ) for pub in pub_result.entity ], ) ], )
def search(): q = request.query.q or '' offset = int(request.query.offset or '0') count = int(request.query.count or '20') print 'searching', q, 'in patent' result = client.group_search("", q, offset, count) pub_result = client.patent_search("", q, 0, 20) return dict( query=q, encoded_query=urlencode({"q": result.query.encode('utf8')}), hotqueries=["data mining", "search engine", "mobile phone"], offset=offset, count=count, total_count=result.total_count, trends_enabled=True, influence_enabled=True, results_title='Companies', results=[ dict(id=e.id, name=e.title, url="http://pminer.org/company.do?m=viewCompany&id=%s" % e.original_id, description=e.description, stats=dict((s.type, s.value) for s in e.stat), topics=e.topics.split(','), imgurl=e.imgurl) for e in result.entity ], extra_results_list=[ dict( title="Patents", items=[ dict( text=pub.title, link= r'http://patft1.uspto.gov/netacgi/nph-Parser?Sect1=PTO1&Sect2=HITOFF&d=PALL&p=1&u=/netahtml/PTO/srchnum.htm&r=1&f=G&l=50&s1=%22+7627620+%22.PN.&OS=PN/%22+7627620+%22&RS=PN/%22+7627620' .replace('7627620', pub.url), stats=dict((s.type, s.value) for s in pub.stat), authors=client.inventor_search_by_id( "", list(pub.related_entity[0].id)).entity) for pub in pub_result.entity ]), ])
def search(): q = request.query.q or '' offset = int(request.query.offset or '0') count = int(request.query.count or '20') print 'searching', q, 'in academic' result = client.author_search("", q, offset, count) pub_result = client.pub_search("", q, 0, 20) return dict( query=q, encoded_query=urlencode({"q": result.query.encode('utf8')}), hotqueries=["data mining", "deep learning"], offset=offset, count=count, total_count=result.total_count, trends_enabled=True, influence_enabled=True, results_title='Experts', results=[ dict(id=e.id, name=e.title, url="http://arnetminer.org/person/-%s.html" % e.original_id, description=e.description, stats=dict((s.type, s.value) for s in e.stat), topics=[t.title() for t in e.topics.split(',') if t.strip()], imgurl=e.imgurl, integrated=network_integration.query(e.original_id)) for e in result.entity ], extra_results_list=[ dict(title="Publications", items=[ dict(text=pub.title, link="http://arnetminer.org/publication/-%s.html" % pub.original_id, stats=dict((s.type, s.value) for s in pub.stat), authors=client.author_search_by_id( "", list(pub.related_entity[0].id)).entity) for pub in pub_result.entity ]), ])
def search(): q = request.query.q or '' print 'searching', q, 'in academic' result = client.author_search("", q, 0, 20) pub_result = client.pub_search("", q, 0, 20) return dict( query=q, encoded_query=urlencode({"q": result.query}), count=result.total_count, results_title='Experts', results=[ dict( id=e.id, name=e.title, url="http://arnetminer.org/person/-%s.html" % e.original_id, description=e.description, stats=dict( (s.type, s.value) for s in e.stat ), topics=e.topics.split(','), imgurl=e.imgurl, integrated=network_integration.query(e.original_id) ) for e in result.entity ], extra_results_list=[ dict( title="Publications", items=[ dict( text=pub.title, link="http://arnetminer.org/publication/-%s.html" % pub.original_id ) for pub in pub_result.entity ] ), ] )
def search(): q = request.query.q or '' offset = int(request.query.offset or '0') count = int(request.query.count or '20') print 'searching', q, 'in weibo' result = client.user_search("", q, 0, 20) pub_result = client.weibo_search("", q, 0, 20) return dict( query=q, encoded_query=urlencode({"q": result.query.encode('utf8')}), hotqueries=[u"高考", u"网络", u"星座", u"天气"], offset=offset, count=count, total_count=result.total_count, results_title='Users', results=[ dict(id=e.id, name=e.title, url="http://weibo.com/u/%s" % e.url, description=e.description, stats=dict((s.type, s.value) for s in e.stat), topics=e.topics.split(','), imgurl=e.imgurl) for e in result.entity ], extra_results_list=[ dict(title="Weibo", items=[ dict(text=pub.title, link="", stats=dict((s.type, s.value) for s in pub.stat), user=client.user_search_by_id( "", list(pub.related_entity[0].id)).entity if len(pub.related_entity) else []) for pub in pub_result.entity ]), ])
def search(): q = request.query.q or '' print 'searching', q, 'in patent' result = client.group_search("", q, 0, 20) pub_result = client.patent_search("", q, 0, 20) return dict( query=q, encoded_query=urlencode({"q": result.query}), count=result.total_count, results_title='Companies', results=[ dict( id=e.id, name=e.title, url="http://pminer.org/company.do?m=viewCompany&id=%s" % e.original_id, description=e.description, stats=dict( (s.type, s.value) for s in e.stat ), topics=e.topics.split(','), imgurl=e.imgurl ) for e in result.entity ], extra_results_list=[ dict( title="Patents", items=[ dict( text=pub.title, link="http://pminer.org/patent.do?m=viewPatent&id=%s" % pub.original_id ) for pub in pub_result.entity ] ), ] )
def search(): q = request.query.q or '' print 'searching', q, 'in weibo' result = client.weibo_search("", q, 0, 20) pub_result = client.user_search("", q, 0, 20) return dict( query=q, encoded_query=urlencode({"q": result.query}), count=result.total_count, results_title='Weibo', results=[ dict( id=e.id, name=e.title, url="http://weibo.com/%s" % e.original_id, description=e.description, stats=dict( (s.type, s.value) for s in e.stat ), topics=e.topics.split(','), imgurl=e.imgurl ) for e in result.entity ], extra_results_list=[ dict( title="Users", items=[ dict( text=pub.title, link="http://weibo.com/u/%s" % pub.original_id ) for pub in pub_result.entity ] ), ] )
def search(dataset): q = request.query.q or '' print 'searching', q, 'in academic' result = client.entity_search(dataset, q) print result return dict( query=q, encoded_query=urlencode({"q": result.query}), count=result.total_count, results_title=dataset, results=[ dict( id=e.id, name=e.title, url="link:entity/%s" % e.id, description=e.description, stats=dict( (s.type, s.value) for s in e.stat ), topics=e.topics, imgurl=e.imgurl ) for e in result.entities ], extra_results_list=[ { "title": extra_list.title, "items": [ { "title": item.title, "link": "link:%s/%s" % (extra_list.title, item.id) } for item in extra_list.item ] } for extra_list in result.extra_list ] )