def parse_counts(biom_file, tree_file=None):
    counts = []
    table = biom.load_table(biom_file)
    tree = None  # keep `tree` defined so the fallback check below cannot raise NameError
    if tree_file:
        tree = ParsedTree(
            tree_file, 'newick', taxon_name_re,
            lineage_prefixes=['k__', 'p__', 'c__', 'o__', 'f__', 'g__', 's__'])
        tree.set_index_clades(contains=['A', 'T', 'G', 'C'])
    for obs_id, samp_id in table.nonzero():
        lineage = parse_lineage(table, obs_id)
        if not lineage and tree:
            lineage = parse_lineage(table, obs_id, parsed_tree=tree,
                                    index_only=True)
        seq_var = parse_sequencing_variant(obs_id)
        count = table.get_value_by_ids(obs_id, samp_id)
        counts.append(
            Count(samp_id=samp_id, obs_id=obs_id, count=count,
                  seq_var=seq_var, lineage=lineage))
    return counts
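# Usage sketch for parse_counts (assumption: run in the same module, which
# already imports `biom` and defines Count, ParsedTree, parse_lineage, and
# parse_sequencing_variant; the file names below are placeholders and Count is
# assumed to be a namedtuple-like record with attribute access).
if __name__ == '__main__':
    # Without a tree, lineages come only from the BIOM observation metadata.
    counts = parse_counts('feature_table.biom')
    # With a Newick tree, observations lacking metadata fall back to
    # tree-indexed lineages.
    counts = parse_counts('feature_table.biom', tree_file='phylogeny.nwk')
    for c in counts[:5]:
        print(c.samp_id, c.obs_id, c.count, c.lineage)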
def get(self):
    self.echo('home.html', {
        'title': "首页",  # "Home"
        'topic_objs': Commomkvdb.get_topic_by_keys('recent-topic-home'),
        'site_counts': Count.get_site_counts(),
        'newest_node': Node.get_newest(),
        'recent_node': Node.get_recent_node(),
        'hot_node': Node.get_hot_node(),
        #'recent_topic_objs': Commomkvdb.get_comment_topic_by_keys('recent-topic-home'),
        'comment_topic_objs': Commomkvdb.get_comment_topic_by_keys('recent-comment-topic-home'),
    }, layout='_layout.html')
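# The ingestion loop below relies on session_scope(), which is not shown in
# this snippet. A minimal sketch of the usual SQLAlchemy "transactional scope"
# pattern it presumably wraps; the engine URL and Session wiring here are
# illustrative assumptions, not taken from the original project.
from contextlib import contextmanager

from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

engine = create_engine('sqlite:///:memory:')  # placeholder engine for illustration
Session = sessionmaker(bind=engine)


@contextmanager
def session_scope():
    """Provide a transactional scope: commit on success, roll back on error."""
    session = Session()
    try:
        yield session
        session.commit()
    except Exception:
        session.rollback()
        raise
    finally:
        session.close()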
with session_scope() as session_2:
    for exp_id, experiment in experiments.items():
        for subject in experiment.subjects:
            for sample in subject.samples:
                for prep in sample.preparations:
                    for workflow in prep.workflows:
                        try:
                            workflow.count_dict
                        except AttributeError:
                            # skip workflows that carry no count data
                            continue
                        for count in workflow.count_dict[sample.orig_sample_id]:
                            fact = Count(experiment=experiment,
                                         subject=subject,
                                         sample=sample,
                                         sample_site=sample.sampling_site,
                                         sample_time=sample.sampling_time,
                                         preparation=prep,
                                         workflow=workflow,
                                         lineage=count.lineage,
                                         seq_variant=count.seq_var,
                                         count=count.count)
                            session_2.add(fact)

end = time.time()
print("Main loop took: ", end - start)


def parser(session):
    """Parse individual prep and BIOM files when parsing a study.

    Note: This method will produce duplicates of sample, subject, and
    processing data each time a BIOM file is inserted. It also unfortunately
    duplicates count data (for each processing in the prep data file). This
def post(self, nodeid='1'):
    n_obj = Node.get_by_key('n-' + str(nodeid))
    if not n_obj:
        self.set_status(404)
        self.write('404')
        return
    errors = []
    author = str(self.get_cookie('username', ''))
    title = self.get_argument('title', '')
    content = self.get_argument('content', '')
    t_obj = TOPIC_DICT.copy()
    if title and content:
        if len(title) <= TITLE_MAX_S and len(content) <= CONTENT_MAX_S:
            int_time = int(time())
            #check spam
            u_topic_time = kv.get('u_topic_time:' + author)
            if u_topic_time:
                tlist = u_topic_time.split(',')
                if len(tlist) == MEMBER_RECENT_TOPIC and (int_time - int(tlist[-1])) < 3600:
                    # "403: don't post too frequently; please go back"
                    self.write(u'403:不要发帖太频繁了 <a href="/newpost/%s">请返回</a>' % nodeid)
                    return
            #check repeat
            content = textilize(content)
            #content = safe_encode(content)
            con_md5 = md5(content.encode('utf-8')).hexdigest()
            if mc.get('c_' + con_md5):
                # "403: no flooding; please go back"
                self.write(u'403:请勿灌水 <a href="/newpost/%s">请返回</a>' % nodeid)
                return
            else:
                mc.set('c_' + con_md5, '1', 36000)
            t_obj['title'] = title
            t_obj['nodeid'] = str(nodeid)
            t_obj['nodename'] = n_obj['name']
            t_obj['author'] = author
            t_obj['add'] = int_time
            t_obj['content'] = content
            if n_obj['count']:
                topic_id = int(n_obj['count']) + 1
            else:
                topic_id = 1
            if Topic.add(topic_id, t_obj):
                topic_key = 't-%s-%s' % (str(nodeid), str(topic_id))
                #node count +1
                n_obj['count'] = str(topic_id)
                Commomkvdb.save('n-' + str(nodeid), n_obj)
                #member recent +key
                #Member.add_key_rencent_topic(author, topic_key)
                rt_obj = kv.get('topic-' + author)
                if rt_obj:
                    olist = rt_obj.split(',')
                    if topic_key not in olist:
                        olist.insert(0, topic_key)
                        rt_obj = ','.join(olist[:MEMBER_RECENT_TOPIC])
                        kv.set('topic-' + author, rt_obj)
                else:
                    rt_obj = topic_key
                    kv.set('topic-' + author, topic_key)
                #recent in home +key
                Commomkvdb.add_key_rencent_topic('recent-topic-home', topic_key)
                #all topic counter +1
                Count.key_incr('all-topic-num')
                #hot node
                tqueue = TaskQueue('default')
                tqueue.add(Task('/task/hotnode/%s/%s' % ('n-' + str(nodeid), str(topic_id)), delay=5))
                #notifications
                mentions = findall_mentions(t_obj['content'], author)
                if mentions:
                    tqueue.add(Task('/task/mentions/' + topic_key, 'member=' + ','.join(mentions), delay=8))
                #set for check spam
                #u_topic_time = kv.get('u_topic_time:'+author)
                if u_topic_time:
                    tlist = u_topic_time.split(',')
                    if str(int_time) not in tlist:
                        tlist.insert(0, str(int_time))
                        u_topic_time = ','.join(tlist[:MEMBER_RECENT_TOPIC])
                        kv.set('u_topic_time:' + author, u_topic_time)
                else:
                    u_topic_time = str(int_time)
                    kv.set('u_topic_time:' + author, u_topic_time)
                ##set new sr_code
                cur_user = self.cur_user()
                code_list = [cur_user['code'], u_topic_time]
                u_comment_time = kv.get('u_comment_time:' + author)
                if u_comment_time:
                    code_list.append(u_comment_time)
                self.set_cookie('usercode', md5(''.join(code_list)).hexdigest(), path="/", expires_days=365)
                #del cache
                clear_cache_multi([
                    'get_topic_by_keys:recent-topic-home',
                    'get_topic_by_keys:topic-' + author,
                    'get_comment_topic_by_keys:recent-topic-home',
                    'get_comment_topic_by_keys:recent-comment-topic-home',
                    'cur_user:' + author,
                ])
                self.redirect('/' + topic_key)
                return
            else:
                errors.append("服务器出现错误,请稍后再试")  # "Server error; please try again later"
        else:
            t_obj['title'] = title
            t_obj['content'] = content
            # "Mind the maximum length of the title and content: %s %d"
            errors.append(u"注意标题和内容的最大字数:%s %d" % (len(title), len(content)))
    else:
        errors.append("标题和内容必填")  # "Title and content are required"
    self.echo('newpost.html', {
        'title': "发新帖子",  # "New post"
        'errors': errors,
        'n_obj': n_obj,
        't_obj': t_obj,
    }, layout='_layout.html')
def post(self, nodeid, topicid):
    author = str(self.get_cookie('username', ''))
    content = self.get_argument('content', '')
    if author and content and len(content) <= COMMENT_MAX_S:
        int_time = int(time())
        #check spam
        u_comment_time = kv.get('u_comment_time:' + author)
        if u_comment_time:
            tlist = u_comment_time.split(',')
            if len(tlist) == MEMBER_RECENT_TOPIC and (int_time - int(tlist[-1])) < 3600:
                # "403: don't reply too frequently; please go back"
                self.write(u'403:不要回复太频繁了 <a href="/t-%s-%s">请返回</a>' % (nodeid, topicid))
                return
        #check repeat
        content = textilize(content)
        #content = safe_encode(content)
        con_md5 = md5(content.encode('utf-8')).hexdigest()
        if mc.get('c_' + con_md5):
            # "403: no flooding; please go back"
            self.write(u'403:请勿灌水 <a href="/t-%s-%s">请返回</a>' % (nodeid, topicid))
            return
        else:
            mc.set('c_' + con_md5, '1', 36000)
        ##
        t_key = 't-%s-%s' % (str(nodeid), str(topicid))
        t_obj = Topic.get_by_key(t_key)
        if t_obj['cnum']:
            id_num = int(t_obj['cnum']) + 1
        else:
            id_num = 1
        c_key = 't-%s-%s-%d' % (str(nodeid), str(topicid), id_num)
        c_obj = COMMENT_DICT.copy()
        c_obj['author'] = author
        c_obj['add'] = int_time
        c_obj['content'] = content
        if Commomkvdb.save(c_key, c_obj):
            #topic comment count +1
            t_obj['cnum'] = id_num
            t_obj['reply'] = author
            t_obj['edit'] = int_time
            Commomkvdb.save(t_key, t_obj)
            #member recent +key
            #Member.add_key_rencent_comment_topic(author, t_key)
            rt_obj = kv.get('comment-topic-' + author)
            if rt_obj:
                olist = rt_obj.split(',')
                if t_key in olist:
                    olist.remove(t_key)
                olist.insert(0, t_key)
                kv.set('comment-topic-' + author, ','.join(olist[:MEMBER_RECENT_TOPIC]))
            else:
                kv.set('comment-topic-' + author, t_key)
            #recent comment in home +key
            Commomkvdb.add_key_rencent_topic('recent-comment-topic-home', t_key)
            #all comment counter +1
            Count.key_incr('all-comment-num')
            #notifications
            if t_obj['author'] != author:
                mentions = findall_mentions(c_obj['content'] + ' @%s ' % t_obj['author'], author)
            else:
                mentions = findall_mentions(c_obj['content'], author)
            if mentions:
                tqueue = TaskQueue('default')
                tqueue.add(Task('/task/mentions/' + t_key, 'member=' + ','.join(mentions), delay=5))
            #set for check spam
            #u_comment_time = kv.get('u_comment_time:'+author)
            if u_comment_time:
                tlist = u_comment_time.split(',')
                if str(int_time) not in tlist:
                    tlist.insert(0, str(int_time))
                    u_comment_time = ','.join(tlist[:MEMBER_RECENT_TOPIC])
                    kv.set('u_comment_time:' + author, u_comment_time)
            else:
                u_comment_time = str(int_time)
                kv.set('u_comment_time:' + author, u_comment_time)
            ##set new sr_code
            cur_user = self.cur_user()
            code_list = [cur_user['code']]
            u_topic_time = kv.get('u_topic_time:' + author)
            if u_topic_time:
                code_list.append(u_topic_time)
            if u_comment_time:
                code_list.append(u_comment_time)
            self.set_cookie('usercode', md5(''.join(code_list)).hexdigest(), path="/", expires_days=365)
            #del cache
            cachekeys = [
                'get_topic_by_keys:recent-comment-topic-home',
                'get_topic_by_keys:comment-topic-' + author,
                'get_comment_topic_by_keys:recent-topic-home',
                'get_comment_topic_by_keys:recent-comment-topic-home',
                'cur_user:' + author,
            ]
            tks = kv.get('recent-topic-home')
            if tks and t_key in tks.split(','):
                cachekeys.append('get_topic_by_keys:recent-topic-home')
            if id_num < EACH_PAGE_COMMENT_NUM:
                cachekeys.append('get_comments:%s:1' % t_key)
            else:
                cachekeys.append('get_comments:%s:%d' % (t_key, [i for i in range(1, id_num, EACH_PAGE_COMMENT_NUM)][-1]))
            clear_cache_multi(cachekeys)
            self.redirect('/' + t_key)
            return
    else:
        self.set_status(403)
        # "Error: 403 (please go back and fill in the content, or the content is too long)"
        self.write('错误: 403 (请返回填写内容 或 内容太长了)')
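# Both post handlers above throttle submissions with the same pattern: the
# user's last MEMBER_RECENT_TOPIC submission timestamps are kept as a
# comma-separated, newest-first string in the kv store, and a new submission
# is rejected when that window is full and its oldest entry is less than an
# hour old. A standalone sketch of that check (illustrative only; the function
# name and the constant's value are assumptions, not project code):
import time

MEMBER_RECENT_TOPIC = 10  # window size (value assumed); mirrors the constant used above


def too_frequent(recent_times_csv, now=None, window=3600):
    """Return True when the timestamp window is full and its oldest entry is within `window` seconds."""
    if not recent_times_csv:
        return False
    now = int(now if now is not None else time.time())
    tlist = recent_times_csv.split(',')
    return len(tlist) == MEMBER_RECENT_TOPIC and (now - int(tlist[-1])) < window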