def post(fposts, insieme):
    """Return the set of first words of the posts in *fposts* that contain
    any of the words in *insieme* (case-insensitive, whole-word match).

    fposts  -- path of a text file whose posts are separated by '<POST>'
    insieme -- iterable of keywords to search for

    Fixes: the result list was named `re`, shadowing the stdlib `re`
    module used above it, and `import re` ran on every loop iteration.
    """
    import re  # hoisted out of the loop; keeps the function self-contained

    with open(fposts) as f:
        testo = f.read().replace('\n', ' ')
    posts = [spost.strip() for spost in testo.split('<POST>')]

    keywords = [x.lower() for x in insieme]
    matching = []
    for com in posts:
        # Replace punctuation with spaces so every word is space-delimited.
        cleaned = re.sub(r'[^\w\s]', ' ', com)
        lowered = cleaned.lower()
        for key in keywords:
            # ' word ' matches only whole words, not substrings.
            if key.center(len(key) + 2) in lowered:
                matching.append(cleaned)

    # First word of every matching post, deduplicated.
    return {frase.split()[0] for frase in matching}
def parse_data(self):
    """Group spreadsheet rows into Command objects keyed by command name
    and language.

    Rows come from ``self.retrieve_all_values()``; row 0 (the header) is
    skipped, as is any row failing ``check_tuple_valid``.
    NOTE(review): the parameter name ``tuple`` shadows the builtin, and
    items are assigned into it (``tuple[1] = ...``), so rows are
    presumably lists, not actual tuples — confirm against the source of
    ``retrieve_all_values``. The local ``re`` shadows the stdlib module.
    """
    def get_command_response(tuple):
        # Columns 3 and 5 hold optional "nonsense" responses; 7 is the reply.
        nonsense = []
        if tuple[3] != '':
            nonsense.append(tuple[3])
        if tuple[5] != '':
            nonsense.append(tuple[5])
        return CommandResponse(nonsense, tuple[7])
    commands = {}
    re = []
    tuple_list = self.retrieve_all_values()
    for tuple in tuple_list[1:]:
        if not self.check_tuple_valid(tuple):
            continue
        # Check if humour.
        if tuple[2].lower()[6:] == self.command_type[1]:
            # Humour commands are marked with a trailing '@'.
            tuple[1] = tuple[1].strip() + '@'
        # Remove command type string.
        tuple[2] = tuple[2][:5]
        if tuple[1] not in commands:
            commands[tuple[1]] = {tuple[2]: [get_command_response(tuple)]}
        else:
            if tuple[2] in commands[tuple[1]]:
                commands[tuple[1]][tuple[2]].append(
                    get_command_response(tuple))
            else:
                commands[tuple[1]][tuple[2]] = [
                    get_command_response(tuple)
                ]
    for command, v in commands.items():
        for lang, response in v.items():
            re.append(Command(lang, command, response))
    return re
def segword_2(data): wordlis = sorted(wordlexicon, key=lambda i: len(i), reverse=True) # 按长度大小排序 # print(wordlis) re = [] for i in range(len(data)): s1 = data[i] #print(s1) s2 = '' maxlen = 5 w = s1[:maxlen] # 逆向 #print(w) while (w): if len(w) != 1: if w in wordlis: s2 = s2 + w + ' ' s1 = s1[len(w):] w = s1[:maxlen] #print(s2,s1) else: w = w[:len(w) - 1] #print(w) else: s2 = s2 + w + ' ' s1 = s1[len(w):] w = s1[:maxlen] #print(s2, s1) re.append(s2) print(re) return re
def segword(data): wordlis = sorted(wordlexicon, key=lambda i: len(i), reverse=True) # 按长度大小排序 #print(wordlis) re = [] for i in range(len(data)): s1 = data[i] #print(s1) s2 = '' maxlen = 5 w = s1[-maxlen:] # 逆向 while (w): if len(w) != 1: if w in wordlis: s2 = w + ' ' + s2 n = len(s1) - len(w) s1 = s1[0:n] w = s1[-maxlen:] #print(s2,s1) else: w = w[1:len(w)] #print(w) else: s2 = w + ' ' + s2 n = len(s1) - len(w) s1 = s1[0:n] w = s1[-maxlen:] #print(s2, s1) re.append(s2) print(re) return re
def cal_AIS(self, diagnose_description):
    """Score an (upper-cased) diagnosis description against every body
    part in ``self.criterion_obj``.

    A body part is considered only when none of its "exclude" keywords
    and at least one of its "include" keywords occur in the text; its
    score is the highest keyword-rule score whose keyword combinations
    all appear. Returns ``[[body_part, max_score], ...]`` or
    ``["NULL", 0]`` when nothing matched.
    NOTE(review): ``AIS`` and ``max_part`` are computed but never used;
    ``re`` shadows the stdlib module. Score keys are compared with
    ``>``, so they are presumably numeric — confirm.
    """
    AIS = {}
    re = []
    diagnose_description = diagnose_description.upper()
    for body_part, content in self.criterion_obj.get_critersions().items():
        # Skip body parts with any excluded keyword present.
        if any([True if i in diagnose_description else False for i in content["exclude"]]):
            continue
        # Require at least one included keyword.
        if not any([True if i in diagnose_description else False for i in content["include"]]):
            continue
        max_score = 0
        max_part = body_part
        for _score_, _keywords_ in content["score"].items():
            for _keyword_ in _keywords_:
                if isinstance(_keyword_[0][0], list):
                    # Nested form: list of alternative first keywords.
                    for i in _keyword_[0]:
                        for j in _keyword_[1]:
                            if i[0] in diagnose_description and j in diagnose_description and _score_ > max_score:
                                max_score = _score_
                else:
                    # Flat form: all first-group keywords must be present.
                    if all([True if m in diagnose_description else False for m in _keyword_[0]]):
                        for j in _keyword_[1]:
                            if j in diagnose_description and _score_ > max_score:
                                max_score = _score_
        re.append([body_part, max_score])
    return re if re else ["NULL", 0]
def LoadExpData(request):
    """Django view: for a GET request, list the samples assigned to a
    user (``userID`` query param) and mirror each into ``Share``.

    Side effects: overwrites a hard-coded username.txt file with the
    user's name and best-effort creates ``Share`` rows (failures are
    silently ignored — NOTE(review): bare except hides real errors).
    Returns JSON ``{"rows": [...], "total": n}``.
    """
    re = []
    if request.method == "GET":
        userID = request.GET.get('userID')
        user = Allusers.objects.values("username").filter(id=userID).all()
        # Samples assigned to this user and still active (status=1).
        sam_ids = Tension.objects.values("sample_id").filter(get_people=userID, status=1).all()
        with open(
                'D:/Download/Firefox/zhongzhijituan-master_testAndAssign/zhongzhijituan-master/username.txt',
                "r+") as f:
            f.seek(0)
            f.truncate()  # clear the file before writing the current user
            f.write(user[0]['username'])
        for sam_id in sam_ids:
            datas = Sample.objects.values(
                "sample_id", "sample_actual_id", "brand_grade",
                "d").filter(sample_id=sam_id['sample_id']).all()
            temp = datas[0]
            temp['username'] = user[0]['username']
            try:
                Share.objects.create(sample_actual_id=temp['sample_actual_id'],
                                     brand_grade=temp['brand_grade'],
                                     d=temp['d'],
                                     username=temp['username'],
                                     state=0)
            except:
                # Best-effort: ignore duplicates / constraint violations.
                pass
            re.append(temp)
    # NOTE(review): non-GET requests fall through and return empty rows —
    # confirm this placement against the original (source was collapsed).
    result = {"rows": re, "total": len(re)}
    return HttpResponse(json.dumps(result))
def diff_group_pps():
    """Refresh every group member's pp, diff against the stored values,
    and return human-readable messages for gains above 1.0 pp.

    Also persists the new values to MySQL.
    NOTE(review): the UPDATE format string has THREE placeholders
    (%s, %d, %d) but FOUR arguments (v[0], v[1], k, i) — string
    formatting raises TypeError, which the bare except swallows, so the
    DB row is likely never updated. Probably ``username="%s"`` was
    redacted to ``"******"``; confirm and restore the placeholder.
    """
    for uid in get_group_uids():
        get_one_pp(uid)  # refresh the live pp cache (global pp_dic)
    old_pp_dic = get_group_pps()
    re = []
    for i in range(len(pp_dic)):
        dic = pp_dic[i]          # i indexes the game mode
        old_dic = old_pp_dic[i]
        for (k, v) in dic.items():   # k = uid, v = (username, pp)
            if old_dic[k] != v:
                delta = float(v[1]) - float(old_dic[k][1])
                if delta > 1.0:
                    re.append("(" + mod[i] + ") " + v[0] + " +" + str(round(delta, 2)) +
                              "pp\nfrom " + str(old_dic[k][1]) + "pp to " + str(v[1]) + "pp")
                    try:
                        db = pymysql.connect(DB_IP, DB_USER, DB_PSWD, "osu")
                        cursor = db.cursor()
                        sql = """UPDATE group_pps SET username="******", pp="%s" where uid="%d" and mode="%d" """ % (
                            v[0], v[1], k, i)
                        cursor.execute(sql)
                        db.commit()
                        db.close()
                    except:
                        traceback.print_exc()
    return re
def gen_pretrain_targets(raw_tokens, id2word, max_predictions_per_seq):
    """Sample BERT-style masking targets for one token sequence.

    About 15% of the positions (at least 1, at most
    *max_predictions_per_seq*) are chosen at random; each becomes a
    BiReplacement with '<MASK>' (80%), the original token (10%), or a
    random *different* vocabulary word (10%).
    """
    assert max_predictions_per_seq > 0
    assert len(raw_tokens) > 0
    num_targets = min(max_predictions_per_seq,
                      max(1, int(round(len(raw_tokens) * 0.15))))
    targets = []
    seen_positions = set()
    for _ in range(num_targets):
        pos = np.random.randint(0, len(raw_tokens))
        if pos in seen_positions:
            # Duplicate draw: skip rather than re-sample, so we may end
            # up with fewer than num_targets replacements.
            continue
        seen_positions.add(pos)
        prob = np.random.uniform()
        if prob < 0.8:
            token = '<MASK>'                 # 80%: mask out
        elif prob < 0.9:
            token = raw_tokens[pos]          # 10%: keep the token itself
        else:
            # 10%: substitute a random word that differs from the original.
            while True:
                fake_pos = np.random.randint(0, len(id2word))
                token = id2word[fake_pos]
                if raw_tokens[pos] != token:
                    break
        targets.append(BiReplacement(position=pos, replace_token=token))
    return targets
def choose_constituents(self, complexity):
    """Choose dependent constituents for this node from the global
    ``grammar`` table.

    A dependent is added when it is mandatory ("M") or passes a random
    draw scaled by *complexity* (complexity < 1 disables optional
    dependents). Returns the list of constituents, or None when any
    required constituent cannot be filled from the vocabulary.
    NOTE(review): ``eval`` on grammar-table strings executes arbitrary
    code — safe only if the grammar file is fully trusted.
    """
    re = []
    factor = 1  # Limits option additions
    if complexity < 1:
        factor = 0
    for item in grammar:  # For each possible grammar relationship,
        itemname = item[1]
        iteminfo = item[0]
        for g in iteminfo:  # For possible form,
            if self.satisfies(g[0]):  # If the parent satisfies the rule
                if g[1] == "M" or (
                        random.random() < factor * float(g[1])
                ):  # If the dependent is mandatory or chosen at random,
                    constituent = Template(
                        g[2],
                        eval(g[3].replace("parent", "self")).split(","),
                        eval(g[4].replace("parent", "self")), g[5], self,
                        itemname).choose(
                        )  # Find a dependent word that works
                    if constituent is None:  # If no adequate vocabulary is selected
                        return None
                    re.append(constituent)
                    break
    return re
def genFunc(s, st, ed):
    """Translate one intermediate-code function (lines s[st:ed]) into the
    target instruction list, register-allocate it, and append the final
    code to the global ``final_out``.

    s[st] is the header: ``<kw> <name> <argnum> <retvar>``. Arguments are
    moved out of $a0..$aN into fresh temporaries; the return variable is
    pinned to $v0. Returns the pre-allocation instruction list.
    NOTE(review): ``rnum`` is read but not defined here — presumably a
    global updated by ``newplace()``; ``arglist`` is never used.
    """
    global trans, free_regs, init_free_regs
    re = []
    s0s = s[st].split(' ')
    argnum = int(s0s[2])
    arglist = []
    trans = {}          # source variable -> temporary/register mapping
    ret = s0s[3]
    free_regs = copy.deepcopy(init_free_regs)
    re.append(s0s[1] + ':')  # function label
    for i in range(0, argnum):
        trans[s[st + 1 + i]] = 'T%d' % newplace()
        re.append('move T%d $a%d' % (rnum, i))
        free_regs.remove('$a%d' % i)
    trans[ret] = '$v0'
    free_regs.remove('$v0')
    if '$a0' in free_regs:
        free_regs.remove('$a0')
    st_variables = get_st_variables_for_func(s0s[1])
    for i in range(st + argnum + 1, ed):
        re.extend(transexp(s[i], st_variables))
    # Guarantee a trailing return.
    re.extend(transexp('return', st_variables))
    # Register allocation over everything after the label.
    s_res, t_res, st_variables = assign(re[1:], st_variables)
    re = remove_useless_return(re)
    finalfunc = trans_final_func(re, s_res, t_res, st_variables)
    final_out.extend(finalfunc)
    return re
def get_blog(request):
    """Django view for the blog navigation.

    GET  -> JSON list of blog categories for the navigation bar.
    POST -> JSON table of contents for the category named in the
            request body's ``blogHref``: entries grouped by title,
            titles kept in first-appearance order.
    """
    if request.method == 'GET':
        # Navigation-bar blog category list.
        group = list(models.BlogGroup.objects.all().values('id', 'kind', 'href'))
        return HttpResponse(json.dumps(group))
    elif request.method == 'POST':
        # Blog table of contents. Performance to be optimised!
        href = json.loads(request.body.decode('utf-8'))['blogHref']
        item = list(models.BlogItem.objects.filter(href=href).order_by('item_order', 'content_order')
                    .values('title', 'name'))
        chosenItem = []   # blog entries of the current category
        chosenTitle = []  # section titles of the current category
        for i in item:
            chosenItem.append(i)
            chosenTitle.append(i['title'])
        # Deduplicate titles while preserving first-appearance order.
        titles = sorted(list(set(chosenTitle)), key=chosenTitle.index)
        re = []
        for title in titles:
            name = []
            for c in chosenItem:
                if c['title'] == title:
                    name.append(c['name'])
            re.append({
                'title': title,
                'name': name
            })
        return HttpResponse(json.dumps(re))
def lz_analyze(self, interval=100, analyze_count=2): cmd = "lz-analyze %d" % interval # send analyze per second self.p.stdin.write(cmd + "\n") sleep_per_try = interval / 100 tries = 0 success_count = 0 re = [] while tries <= analyze_count and self.p is not None: time.sleep(sleep_per_try) tries += 1 # Readline loop while True: s = self.stdout_thread.readline() #print s if (len(s) > 3): success_count += 1 re.append(s) a = re[len(re) - 1].split(" info move ") print a[0] # No output, so break readline loop and sleep and wait for more if s == "": print "success: %d" % success_count break if success_count: cmd = "" self.p.stdin.write(cmd + "\n") time.sleep(sleep_per_try) (so, se) = self.drain() print "stdout" print "".join(so) print "stderr" print "".join(se) return re raise Exception("Failed to send command '%s' to Leela" % (cmd))
def chew_code(src, i, aligned, p):
    """Expand assembly source through ``evaluate_row`` and optionally
    apply an s-register permutation *p*.

    src     -- raw text (comments stripped via P.cutoff_comments) or an
               already-split list of rows
    i       -- mul_add tag; when truthy a marker comment is prepended
    aligned -- forwarded to evaluate_row
    p       -- permutation; comment lines ('#'-prefixed) pass through
               unchanged, everything else goes through
               E.apply_s_permutation
    """
    if not isinstance(src, list):
        src = P.cutoff_comments(src)
    if i:
        rr = ['# mul_add %s' % i]
    else:
        rr = []
    for j in src:
        for k in evaluate_row(j, i, aligned):
            if k:
                rr.append(k)
            # 'jmp tail' ends this row's expansion early.
            if k == 'jmp tail':
                break
    if not p:
        return rr
    re = []
    for x in rr:
        if x[0] == '#':
            re.append(x)
        else:
            re.append(E.apply_s_permutation(x, p))
    return re
def change_lexi2dict(lexi: Tuple, ex_vocab: dict, ex_vectors: np.ndarray,
                     vocab: dict, vectors: np.ndarray) -> dict:
    """Index the phrase lexicons by the individual characters they contain.

    lexi       -- tuple of three {phrase: count} dicts, merged here
    ex_vocab   -- phrase -> object with an .index into *ex_vectors*
    ex_vectors -- phrase-level embedding matrix
    vocab      -- character -> object with an .index into *vectors*
    vectors    -- character-level embedding matrix

    Returns {char: {'r_words': [{'item', 'count'}, ...],
                    'r_words_v': [vector, ...], 'count': n}}.

    Fixes: removed the unused ``pinyin = Pinyin()`` construction (dead
    code, undefined dependency) and renamed the local ``re``, which
    shadowed the stdlib module.
    """
    # Merge the three phrase->count lexicons into one dict.
    lexi = dict(lexi[0], **lexi[1], **lexi[2])

    def _get_phrase_vector(item: str) -> np.ndarray:
        # Blend the mean of the per-character embeddings (90%) with the
        # phrase's own embedding (10%).
        index = ex_vocab.get(item).index
        ex_vector = ex_vectors[index]
        vector = np.array([vectors[vocab.get(s).index] for s in item])
        vector = np.mean(vector, axis=0)
        return 0.9 * vector + 0.1 * ex_vector

    word_dict = dict()
    for word in set(''.join(lexi.keys())):
        related = []
        related_vectors = []
        for (item, count) in lexi.items():
            if item.find(word) != -1:
                related.append({'item': item, 'count': count})
                related_vectors.append(_get_phrase_vector(item))
        word_dict[word] = {
            'r_words': related,
            'r_words_v': related_vectors,
            'count': len(related),
        }
    return word_dict
def parse_data(self):
    """Group spreadsheet rows into Command objects keyed by command name
    and language.

    Rows come from ``self.retrieve_all_values()``; row 0 (the header) is
    skipped, as is any row failing ``check_tuple_valid``.
    NOTE(review): the parameter name ``tuple`` shadows the builtin, and
    items are assigned into it (``tuple[1] = ...``), so rows are
    presumably lists, not actual tuples — confirm against the source of
    ``retrieve_all_values``. The local ``re`` shadows the stdlib module.
    """
    def get_command_response(tuple):
        # Columns 3 and 5 hold optional "nonsense" responses; 7 is the reply.
        nonsense = []
        if tuple[3] != '':
            nonsense.append(tuple[3])
        if tuple[5] != '':
            nonsense.append(tuple[5])
        return CommandResponse(nonsense, tuple[7])
    commands = {}
    re = []
    tuple_list = self.retrieve_all_values()
    for tuple in tuple_list[1:]:
        if not self.check_tuple_valid(tuple):
            continue
        # Check if humour.
        if tuple[2].lower()[6:] == self.command_type[1]:
            # Humour commands are marked with a trailing '@'.
            tuple[1] = tuple[1].strip() + '@'
        # Remove command type string.
        tuple[2] = tuple[2][:5]
        if tuple[1] not in commands:
            commands[tuple[1]] = {tuple[2]: [get_command_response(tuple)]}
        else:
            if tuple[2] in commands[tuple[1]]:
                commands[tuple[1]][tuple[2]].append(get_command_response(tuple))
            else:
                commands[tuple[1]][tuple[2]] = [get_command_response(tuple)]
    for command, v in commands.items():
        for lang, response in v.items():
            re.append(Command(lang, command, response))
    return re
def plantCalendarSchedulingSelect():
    '''
    Plant calendar: return all scheduling entries plus manually added
    calendar entries as one JSON list of {ID, start, title, color}
    event dicts for the calendar widget.
    :return: JSON string (or a JSON error message on failure)
    '''
    if request.method == 'GET':
        data = request.values
        try:
            re = []
            # Production scheduling entries (fixed green color).
            oclass = db_session.query(Scheduling).all()
            for oc in oclass:
                dir = {}
                dir['ID'] = oc.ID
                dir['start'] = oc.SchedulingTime
                dir['title'] = oc.PRName + ": 第" + oc.SchedulingNum[6:] + "批"
                dir['color'] = "#9FDABF"
                re.append(dir)
            # Manually maintained calendar entries.
            ocl = db_session.query(plantCalendarScheduling).all()
            for o in ocl:
                dic = {}
                dic['ID'] = str(o.ID)
                dic['start'] = str(o.start)
                # Only the part before the colon is shown as the title.
                dic['title'] = o.title.split(":")[0]
                dic['color'] = o.color
                re.append(dic)
            return json.dumps(re, cls=AlchemyEncoder, ensure_ascii=False)
        except Exception as e:
            logger.error(e)
            insertSyslog("error", "工厂日历查询报错Error:" + str(e), current_user.Name)
            return json.dumps("工厂日历查询报错", cls=AlchemyEncoder, ensure_ascii=False)
def get_re_jobid(jobid):
    """Python 2. Look a job id up in the sharded JOB_log_* MySQL tables
    and return its runs as
    [id, jobname, program, start, end, runtime, cnc, core, [nodes...]].

    The shard table is chosen by job-id range. Returns [] when nothing
    is found or on error.
    NOTE(review): SQL is built by string concatenation (injection risk
    if *jobid* is untrusted) and credentials are hard-coded.
    """
    conn = MySQLdb.connect(host='20.0.2.15', user='******', db='JOB', passwd='123456', port=3306)
    print 'connect succeed'
    resu = []
    try:
        cursor = conn.cursor()
        # Pick the shard table by job-id range.
        if int(jobid) <= 10000000:
            database = "JOB_log_all"
        elif int(jobid) <= 40500000:
            database = "JOB_log_4050"
        elif int(jobid) <= 41000000:
            database = "JOB_log_4100"
        elif int(jobid) <= 41500000:
            database = "JOB_log_4150"
        elif int(jobid) <= 42000000:
            database = "JOB_log_4200"
        else:
            database = "JOB_log"
        # Modified by tyzhang. To maintain all the trace in mysql.
        sql1 = "select JOBID,JOB_NAME,PROGRAM_NAME,STARTTIME,ENDTIME,RUNTIME,CNC,CORE,NODELIST from " + database + " where jobid=" + str(
            jobid) + " and RUNTIME<>0 order by PROGRAM_NAME"
        cursor.execute(sql1)
        result1 = cursor.fetchall()
        conn.commit()
        cursor.close()
        re = []
        for date in result1:
            re.append(date)
        re.sort()
        if len(re) < 1:
            print "NONE"
        else:
            for val in re:
                id = val[0]
                jname = val[1]
                pname = val[2]
                stime = str(val[3])
                etime = str(val[4])
                atime = val[5]
                cnc = val[6]
                core = val[7]
                node = val[8]
                node = node.split(",")  # NODELIST is a comma-separated string
                resu.append(
                    [id, jname, pname, stime, etime, atime, cnc, core, node])
    except Exception as e:
        print e
        conn.rollback()
    return resu
def ee_cal(racemic, enantiomer, max_in, noise_l):
    """Pair racemic and enantiomer transitions whose frequencies agree
    within +/-0.025, compute the enantiomeric excess per pair and show a
    scatter plot (frequency vs ee) titled with mean and stdev.

    racemic, enantiomer -- 2D arrays, column 0 = frequency, column 1 =
                           intensity (indexed as a[i, 0], so presumably
                           numpy arrays — confirm)
    NOTE(review): the loops run to len(...) - 1, skipping the last row
    of each array — looks like an off-by-one; confirm intent.
    This function only plots; it returns nothing.
    """
    re = []
    en = []
    for i in range(0, len(racemic) - 1):
        for j in range(0, len(enantiomer) - 1):
            # Frequency match within a +/-0.025 window.
            if enantiomer[j, 0] - 0.025 < racemic[i, 0] and racemic[
                    i, 0] < enantiomer[j, 0] + 0.025:
                en.append(enantiomer[j])
                re.append(racemic[i])
    re = np.array(re)
    en = np.array(en)
    print(np.shape(re))
    print(np.shape(en))
    # Normalise racemic intensities, then form the intensity ratio R.
    n_factor = re[:, 1] / max_in
    R = (noise_l * n_factor) / en[:, 1]
    ee = ((1 - R) / (1 + R))**0.5
    mean = statistics.mean(ee)
    st = statistics.stdev(ee)
    print("Mean is % s" % (round(mean, 4)),
          "Standard Deviation is % s" % (round(st, 4)))
    plt.title(("Mean is % s" % (round(mean, 4)),
               "Standard Deviation is % s" % (round(st, 4))))
    plt.xlabel('Freq')
    plt.ylabel('ee')
    plt.scatter(re[:, 0], ee, label="number of transitions % s" % (len(ee)))
    plt.legend(loc='best')
    #plt.ylim(0,1)
    plt.show()
def chew_code(src, amd, i, aligned, p):
    """Variant of chew_code with an *amd* flag forwarded to
    evaluate_row.

    When *i* is truthy a '# mul_add' marker (and, with *p*, a comment
    describing the s->W permutation) is prepended. Rows expanding to
    nothing ([None] or ['']) are dropped. With a permutation *p*,
    non-comment lines are rewritten via E.apply_s_permutation.
    """
    if not isinstance(src, list):
        src = P.cutoff_comments(src)
    if i:
        rr = ['# mul_add %s' % i]
        if p:
            # Record the permutation as a comment for traceability.
            e = '# '
            for x in range(len(p)):
                e += 's%X->W%X ' % (x, p[x])
            rr.append(e)
    else:
        rr = []
    for j in src:
        k = evaluate_row(j, i, amd, aligned)
        if k and (k != [None]) and (k != ['']):
            rr += k
    if p:
        re = []
        for x in rr:
            if x[0] == '#':
                re.append(x)
            else:
                re.append(E.apply_s_permutation(x, p))
        return re
    return rr
def remove_stopwords(text):
    """Remove a small fixed set of Persian stop words from *text* and
    return the remaining words joined by single spaces."""
    st_words = ["در", "از", "با", "که", "من", "ان"]
    kept = [token for token in text.split() if token not in st_words]
    return " ".join(kept)
def _handleUrl(url, i):
    """Python 2. Fetch a Tieba thread page and render its posts as
    '【author】 : content' paragraphs.

    Returns '' when the page cannot be fetched; posts by the global
    ``extraId`` and empty posts are skipped. *i* is currently unused
    (only referenced in the commented-out title line).
    """
    try:
        f = urllib2.urlopen(url, data=None, timeout=40)
        data = f.read()
    except:
        data = None
    if data is None:
        return ''
    soup = BeautifulSoup(data)
    #title = soup.select('.core_title_txt')[0].string
    names = soup.select('.d_name')
    names = map(lambda name: name.a.string, names)
    contts = soup.select('cc')
    contts = map(lambda contt: contt.div.contents, contts)
    re = [];
    for c in contts:
        txt = _getContent(c)
        re.append(txt)
    title_txt = ""  ## "\n\n"+str(i)+title+"\n\n" "\n\n" + '##'*40 + + url + "\n\n"
    txt = ''
    for k, v in zip(names, re):
        this_t = str(v).strip()
        if k == extraId:
            continue
        if v is None or this_t == '':
            pass
        else:
            txt += "【" + str(k) + "】 : \n\t\t" + this_t + "\n\n"  #+
    return txt
def getneighbor(personlist, text):
    '''
    Merge adjacent words that represent people.

    personlist -- list of (person_word, index) pairs, where *index* is
                  the position of the mention in the space-split text
    text       -- the sentence the indices refer to

    Returns one dict per person with keys '-3'..'3': the mention itself
    at '0' and the neighbouring words, or None when the window runs past
    either end of the sentence.

    Bug fix: the original tested ``pos - k > 0`` for the left
    neighbours, which wrongly discarded a valid word at index 0
    (e.g. a mention at position 3 lost its '-3' neighbour).
    '''
    words = text.split(' ')
    neighbors = []
    for person, pos in personlist:
        window = {'0': person}
        for offset in (-3, -2, -1, 1, 2, 3):
            target = pos + offset
            # Index 0 is a valid neighbour; also bound-check the right side.
            if 0 <= target < len(words):
                window[str(offset)] = words[target]
            else:
                window[str(offset)] = None
        neighbors.append(window)
    return neighbors
def _get_relation_set(self, drs): drs = drs.split() re = [] for i in range(len(drs)): item = drs[i] if item in self.ignore: pass elif self.kp.match(item): pass elif self.pp.match(item): pass elif self.xp.match(item): pass elif self.ep.match(item): pass elif self.sp.match(item): pass else: if i + 3 < len(drs) and drs[i + 3] == ")": re.append(" ".join(drs[i:i + 4])) #re.append(" ".join([drs[i], "X", "X",")"])) elif i + 2 < len(drs) and drs[i + 2] == ")": re.append(" ".join(drs[i:i + 3])) #re.append(" ".join([drs[i], "X",")"])) return re
def posseg(content, POS=()):
    """Return the words of *content* whose part-of-speech tag is in *POS*.

    Fixes: the default ``POS=[]`` was a shared mutable default; replaced
    with an immutable empty tuple (membership semantics unchanged).
    NOTE(review): this function shadows the tagger module it calls —
    ``posseg.cut`` resolves to this function object, not to e.g.
    ``jieba.posseg.cut``, and raises AttributeError as written. Rename
    the function or import the tagger under an alias
    (``import jieba.posseg as pseg``) — confirm against callers.
    """
    pos = posseg.cut(content)
    result = []  # renamed from `re`, which shadowed the stdlib module
    for x in pos:
        # print(x.word, x.flag)
        if x.flag in POS:
            result.append(x.word)
    return result
def getListMajor(major):
    """Split a compound major string on commas and the conjunctions
    'and' / 'or', and map every part through getMajor.

    Returns ["N/A"] for the placeholder values "N/A" and "NA".

    Fixes: the original shadowed the builtin ``list`` and rebound the
    name ``re`` (the module it had just used) to the result list.
    """
    import re
    if major == "N/A" or major == "NA":
        return ["N/A"]
    parts = re.split(', | and | or |And ', major)
    return [getMajor(m) for m in parts]
def getDets(word, wordsPro):
    """Return the determiner options for *word*: always 'null' and
    'the'; singular nouns additionally get 'an' or 'a' depending on
    isAorAn."""
    dets = ['null', 'the']
    if not isPlural(word):
        dets.append('an' if isAorAn(word, wordsPro) else 'a')
    return dets
def get_head_parral_words(head, parse, words):
    """Collect the words standing in a COO (coordination) relation to
    the word at index *head*.

    parse -- per-word arcs, arc[0] = 1-based governor index,
             arc[1] = relation label
    Returns a list of (index, word) pairs.
    """
    return [(idx, words[idx]) for idx, arc in enumerate(parse)
            if arc[0] == head + 1 and arc[1] == 'COO']
def getResForList(predList, threshold):
    """Binarise predictions: 1 where the score exceeds *threshold*,
    otherwise 0."""
    return [1 if score > threshold else 0 for score in predList]
def get_project_headers(lines):
    """Parse Visual Studio solution 'Project(...)' header lines into
    slnProjInfo records, skipping 'EndProject' markers and lines too
    short to hold a header."""
    headers = []
    for line in lines:
        if line.strip() == 'EndProject' or len(line) < 10:
            continue
        guids = parse_methods.guidre.findall(line)
        names = parse_methods.namere.findall(line)
        headers.append(slnProjInfo(names[0], names[1], guids[1], guids[0]))
    return headers
def on_test_end(self, trainer, pl_module):
    """Lightning callback hook: print this run's mean test accuracy and
    append it to a pickled per-model history file
    (``<model_name>.p``), creating the file on first run.

    NOTE(review): the pickle file handles are never closed explicitly;
    prefer ``with open(...)`` blocks.
    """
    # Mean of the accuracies accumulated in self.output by earlier hooks.
    print(np.array(self.output['test_acc']).mean())
    if osp.exists(f"{pl_module.model_name}.p"):
        # Extend the existing history list.
        re = pickle.load(open(f"{pl_module.model_name}.p", "rb"))
        re.append(np.array(self.output['test_acc']).mean())
        pickle.dump(re, open(f"{pl_module.model_name}.p", "wb"))
    else:
        # First run: start a new history list.
        re = [np.array(self.output['test_acc']).mean()]
        pickle.dump(re, open(f"{pl_module.model_name}.p", "wb"))
def line_num(text_lines):
    """Character spans of each line: returns a tuple of [start, end]
    offsets, where *end* is the inclusive index of the line's last
    character in the concatenated text."""
    spans = []
    offset = 0
    for line in text_lines:
        spans.append([offset, offset + len(line) - 1])
        offset += len(line)
    return tuple(spans)
def polrealpifag(vol, vol1, vol2, vol3):
    """Collect {key: value} singleton dicts from *vol* whose key equals
    vol3[0] and whose value also appears in some dict of *vol1* under
    the key vol2[0]. Duplicate matches are kept, in scan order."""
    matches = []
    wanted_key = vol3[0]
    lookup_key = vol2[0]
    for entry in vol:
        for key, value in entry.items():
            for other in vol1:
                for other_key, other_value in other.items():
                    if (other_key == lookup_key and other_value == value
                            and key == wanted_key):
                        matches.append(dict([(key, value)]))
    return matches
def MV(ans_list):
    """Majority vote.

    ans_list[i][j] is user j's answer to question i; for each question
    the most common answer is returned.
    """
    return [Counter(answers).most_common(1)[0][0] for answers in ans_list]
def read_target(path):
    """Read target entries from *path*, one per line, skipping empty
    lines and lines starting with '#'."""
    with open(path, 'r') as f:
        content = f.read()
    #re.append('http://' + line)
    return [line for line in content.split('\n')
            if line != '' and line[:1] != '#']
def t2v(text):
    """Vectorise *text* into a (1, 20, 50) float tensor.

    Each lower-cased token known to the global ``w2v`` model contributes
    its 50-d embedding; the sequence is zero-padded to at least 20
    vectors and truncated to exactly 20.
    """
    vecs = [w2v[tok] for tok in tokener(text.lower()) if w2v.isinkey(tok)]
    while len(vecs) < 20:
        vecs.append(torch.zeros(50, dtype=torch.float32))
    return torch.cat(vecs, 0).reshape(1, -1, 50)[:, :20, :]
def xml2dict(b):
    """Extract the <loc> URL of every <url> element from sitemap-style
    XML text *b* and return them as a list."""
    doc = xml.dom.minidom.parseString(b)
    urls = []
    for url_node in doc.getElementsByTagName("url"):
        loc = url_node.getElementsByTagName("loc")[0]
        urls.append(loc.childNodes[0].data)
    return urls
def get_path(self, values=None):
    """Render the parsed route back into a path string.

    Static parts are emitted verbatim; dynamic parts (converter set)
    are looked up in *values* and stringified via util.unicode_type.
    """
    if values is None:
        values = {}
    parts = [util.unicode_type(values[data]) if converter else data
             for converter, args, data in self.route]
    return ''.join(parts)
def get_itemids(self, pname, destid):
    """See documentation in folder.py

    Map each local contact id to its remote id for the (pname, destid)
    sync profile.

    Bug fix: the original did ``re.append({locid: remid})`` — calling
    ``append`` on the stdlib ``re`` module (AttributeError at runtime)
    while the ``ret`` dict it returned was never populated. The mapping
    is now stored in ``ret``. (``iteritems()`` was also replaced with
    ``items()``, which works on both Python 2 and 3.)
    """
    ret = {}
    stag = self.get_config().make_sync_label(pname, destid)
    for locid, con in self.get_contacts().items():
        if stag in con.get_sync_tags():
            t, remid = con.get_sync_tags(stag)[0]
            ret[locid] = remid
    return ret
def commands(self, *coms):
    """Run each command (an argv list) via subprocess.call, log success
    or failure, and return the list of return codes."""
    codes = []
    for command in coms:
        code = subprocess.call(command)
        codes.append(code)
        pretty = ' '.join(command)
        if code:
            logger.error('run %s failed(%d)' % (pretty, code))
        else:
            logger.info('run %s success' % pretty)
    return codes
def get_st_variables_for_func(funcname):
    """Collect the local (stack) variables declared for *funcname* from
    the global source lines ``s``.

    Scans the ``defnum`` declaration lines after offset ``lnum``;
    'localdef' entries get size 1, 'localdefarr' entries get their
    declared size and are also recorded in the global ``ARRAYS``.
    Returns [(name, size), ...] and fills the global ``size_table``.
    NOTE(review): scalars store an int in size_table but arrays store
    the size as a *string* (``ss[3]``) — confirm whether that
    inconsistency is intentional.
    """
    #defs = s[1:defnum+1]
    re = []
    for i in range(0, defnum):
        ss = s[i + 1 + lnum].split(' ')
        if ss[0] == 'localdef' and ss[1] == funcname:
            re.append((ss[2], 1))
            size_table[ss[2]] = 1
        if ss[0] == 'localdefarr' and ss[1] == funcname:
            re.append((ss[2], int(ss[3])))
            size_table[ss[2]] = ss[3]
            ARRAYS.append(ss[2])
    return re
def search_case(switch_list):
    """Find 'case' and 'default' labels inside switch bodies.

    switch_list -- [(switch_body_text, absolute_offset), ...] as
                   produced by search_switch
    Returns [[label_start, colon_end, 'case'|'default', ordinal,
    'y'|'n'], ...] where the last element says whether a 'break' was
    found before the next label ('y') or not ('n'). A label only counts
    when bounded by plausible C token separators on both sides.
    NOTE(review): two branches of the separator checks compare against
    an identical-looking ' ' twice — one of them was presumably a tab
    ('\\t') before the source was collapsed; confirm.
    """
    re = []
    for i in switch_list:
        case_begin = 0
        case_end = 0
        case_side = 0  # ordinal of the label within this switch
        while 1:
            case_begin = i[0].find('case', case_end)
            case_end = i[0].find(':', case_begin + 4)
            if case_begin == -1:
                break
            # Separator check on both sides of the 'case' keyword.
            if ((i[0][case_begin-1] == '\n' or i[0][case_begin-1] == ' ' or i[0][case_begin-1] == ' ' or
                 i[0][case_begin-1] == '}' or i[0][case_begin-1] == ';' or i[0][case_begin-1] == ')') and
                (i[0][case_begin+4] == ' ' or i[0][case_begin+4] == ' ' or i[0][case_begin+4] == '\n' or
                 i[0][case_begin+4] == '{' or i[0][case_begin+4] == '(')):
                re.append([case_begin - 1 + i[1], case_end + i[1], 'case', case_side])
                case_side += 1
        default_begin = i[0].find('default')
        default_end = i[0].find(':', default_begin + 7)
        if default_begin != -1:
            if ((i[0][default_begin-1] == '\n' or i[0][default_begin-1] == ' ' or i[0][default_begin-1] == ' ' or
                 i[0][default_begin-1] == '}' or i[0][default_begin-1] == ';' or i[0][default_begin-1] == ')') and
                (i[0][default_begin+7] == ' ' or i[0][default_begin+7] == ' ' or i[0][default_begin+7] == '\n' or
                 i[0][default_begin+7] == ':')):
                re.append([default_begin - 1 + i[1], default_end + i[1], 'default', case_side])
        if len(re) != 0:
            # Mark every label with whether a 'break' precedes the next one.
            for j in range(0, len(re)):
                if j != len(re) - 1:
                    if i[0].find('break', re[j][1], re[j+1][0]) != -1:
                        re[j].append('y')
                    else:
                        re[j].append('n')
                if j == len(re) - 1:
                    if i[0].find('break', re[j][1]) != -1:
                        re[j].append('y')
                    else:
                        re[j].append('n')
    return re
def __get_categories(self):
    """Python 2. Return the distinct category ids from the ``url``
    table, ordered ascending.

    NOTE(review): on failure the exception is printed and the method
    implicitly returns None — callers must handle both a list and None.
    """
    cursor = None
    try:
        cursor = self.__conn.cursor()
        sql = r"select distinct id from url order by id"
        cursor.execute(sql)
        dbset = cursor.fetchall()
        re = []
        for row in dbset:
            re.append(row[0])
        cursor.close()
        return re
    except Exception as e:
        # Ensure the cursor is released even on error.
        if cursor:
            cursor.close()
        print e.message
def getDiskInfo():
    """Build a JSON-fragment string reporting the usage percentage of
    every mounted partition.

    Returns a string of the form
    '"disk_percent":"[\\'mountpath=/,42.0\\', ...]",' — i.e. the repr of
    a Python list embedded in a hand-built JSON snippet.
    """
    x = []
    info = psutil.disk_partitions()
    #print info
    for parttion in info:
        x.append(parttion[1])  # index 1 is the mountpoint
    re = []
    for y in x:
        w = ''
        w = "mountpath=" + str(y) + ","
        #w = w + str(psutil.disk_usage(y))
        w = w + str(psutil.disk_usage(y)[3])  # index 3 is percent used
        re.append(w)
    disk_percent = '\"disk_percent\":\"' + str(re) + '\",'
    return disk_percent
def appsetting(request):
    """Python 2 Django view: render appsetting.html with one dict per
    Progame record (id, AppName, ProName, Domain, AppType).

    NOTE(review): the local ``project`` list is created but never used.
    """
    now = datetime.datetime.now()
    print "now is %s" % now
    Progames = Progame.objects.all()
    re = []
    for t in Progames:
        Re = {}
        project = []
        Re['id'] = t.id
        Re['AppName'] = t.AppName
        Re['ProName'] = t.ProName
        Re['Domain'] = t.Domain
        Re['AppType'] = t.Apptype
        print "Re is %s" % Re
        re.append(Re)
    return render_to_response('appsetting.html', {'Res': re})
def closure(self):
    """Expand the grammar item at the dot position.

    When the symbol after the dot is a non-terminal, return one fresh
    StateItem per production of that symbol (memoised in the class-level
    StateItem.dic); otherwise — or when the dot sits at the end of the
    statement — return an empty list.
    """
    expansion = []
    if self.dot == len(self.stmt):
        return expansion  # dot at the end: nothing to expand
    word = self.stmt[self.dot]
    if word in non_terminal_set:
        if word in StateItem.dic:
            return StateItem.dic[word]  # already expanded once
        for production in bnf_dic[word]:
            expansion.append(StateItem(word, production, 0))
        StateItem.dic[word] = expansion
    return expansion
def search_return(text):
    '''Find 'return' and 'exit' statements in C source *text*.

    Returns a tuple of (start, end, 'return'|'exit') spans, where
    *start* is the index of the separator character before the keyword
    and *end* the index of the terminating ';'. A keyword only counts
    when bounded by plausible token separators on both sides.
    NOTE(review): 'return' spans are appended as tuples but 'exit'
    spans as lists — confirm whether callers rely on that.
    '''
    re = []
    return_begin = 0
    return_end = 0
    while 1:
        temp = []
        return_begin = text.find('return', return_end)
        return_end = text.find(';', return_begin + 6)
        if return_begin == -1:
            break
        if ((text[return_begin-1] == '\n' or text[return_begin-1] == ' ' or text[return_begin-1] == ' ' or
             text[return_begin-1] == '}' or text[return_begin-1] == ';' or text[return_begin-1] == ')') and
            (text[return_begin+6] == ' ' or text[return_begin+6] == '(' or text[return_begin+6] == ' ')):
            temp.append(return_begin - 1)
            temp.append(return_end)
            temp.append('return')
            re.append(tuple(temp))
    exit_begin = 0
    exit_end = 0
    while 1:
        temp = []
        exit_begin = text.find('exit', exit_end)
        exit_end = text.find(';', exit_begin + 4)
        if exit_begin == -1:
            break
        if ((text[exit_begin-1] == '\n' or text[exit_begin-1] == ' ' or text[exit_begin-1] == ' ' or
             text[exit_begin-1] == '}' or text[exit_begin-1] == ';' or text[exit_begin-1] == ')') and
            (text[exit_begin+4] == ' ' or text[exit_begin+4] == '(' or text[exit_begin+4] == ' ')):
            temp.append(exit_begin - 1)
            temp.append(exit_end)
            temp.append('exit')
            re.append(temp)
    return tuple(re)
def cmd_filter(self, _subject, course_id=-1, course_name="", id=-1, name="", before="", after="", new=""):
    """Filter terms of self.courses by subject and the optional
    criteria; every filter uses its default as a "disabled" sentinel
    (-1 / empty string).

    _subject -- space-separated subject keys; each course dict must
                hold a term list under each subject key
    Blacklisted courses (COURSE_BLACKLIST and the per-subject
    ``<SUBJECT>_BLACKLIST`` globals) are skipped.
    NOTE(review): ``eval(subject.upper() + "_BLACKLIST")`` executes
    arbitrary code if *_subject* is user-controlled — replace with a
    dict lookup. Also shadows builtin ``id`` and stdlib ``re``.
    """
    re = []
    subjects = _subject.split(' ')
    for course in self.courses:
        if course["id"] not in COURSE_BLACKLIST:
            # -1 means "no course_id filter".
            if not (course_id != -1 and course_id != course["id"]):
                for subject in subjects:
                    if course["id"] not in eval(subject.upper() + "_BLACKLIST"):
                        for term in course[subject]:
                            if not (id != -1 and id != term["id"]):
                                if not (name != "" and name not in term["title"]):
                                    if not (course_name != "" and course_name not in course["name"]):
                                        if not (before and term["time_stamp"] > before):
                                            if not (after and term["time_stamp"] < after):
                                                if not (new and not (term["state"] == "new" or term["state"] == "not_finish")):
                                                    re.append(term)
    return re
def parse_data(self):
    """Group sheet rows into {command: {lang: [responses]}} and wrap
    each (lang, command) group in an NlpAction.

    Row 0 (the header) is skipped, as is every row that fails
    check_tuple_valid. Row layout: [command, lang, response, ...].
    """
    grouped = {}
    for row in self.retrieve_all_values()[1:]:
        if not self.check_tuple_valid(row):
            continue
        command, lang, response = row[0], row[1], row[2]
        grouped.setdefault(command, {}).setdefault(lang, []).append(response)
    actions = []
    for command, langs in grouped.items():
        for lang, responses in langs.items():
            actions.append(NlpAction(lang, command, responses))
    return actions
def search_while(text):
    """Find while-loop condition spans in C source *text*.

    For each properly separated 'while' keyword, the span covers its
    balanced parenthesised condition; each span is tagged 'do' when a
    ';' follows the closing paren (do-while) and 'while' otherwise.
    Returns a tuple of [start, end, tag] lists.
    """
    re = []
    while_begin = 0
    while_end = 0
    while 1:
        temp = []
        while_begin = text.find('while', while_end)
        while_end = text.find('(', while_begin + 5)
        if while_begin == -1:
            break
        # Keyword must be bounded by plausible token separators.
        if ((text[while_begin-1] == '\n' or text[while_begin-1] == ' ' or text[while_begin-1] == ' ' or
             text[while_begin-1] == ';' or text[while_begin-1] == '}' or text[while_begin-1] == ')') and
            (text[while_begin+5] == ' ' or text[while_begin+5] == ' ' or text[while_begin+5] == '(')):
            while_begin = while_end
            lift = 0   # '(' count
            right = 0  # ')' count
            end = 0
            for i in range(while_begin, len(text) - 1):
                if text[i] == '(':
                    lift += 1
                elif text[i] == ')':
                    right += 1
                    end = i
                # Balanced: condition parentheses are closed.
                if lift != 0 and right != 0:
                    if lift == right:
                        temp.append(while_begin - 1)
                        temp.append(end)
                        re.append(temp)
                        break
    for x in re:
        # A ';' right after the condition means do-while.
        temp = text[x[1] + 1:].lstrip()
        if temp.startswith(';'):
            x.append('do')
        else:
            x.append('while')
    return tuple(re)
def __query_by_url(self, url):
    """Python 2. Fetch the rows of the ``url`` table matching *url* and
    wrap them in DBRowUrl objects.

    NOTE(review): the SQL is built with string interpolation — an
    injection risk if *url* is untrusted; use a parameterised query
    (``cursor.execute(sql, (url,))``). On error the exception is printed
    and the method implicitly returns None.
    """
    cursor = None
    try:
        cursor = self.__conn.cursor()
        sql = r"select * from url where url='%s'" % url
        cursor.execute(sql)
        dbset = cursor.fetchall()
        re = []
        for row in dbset:
            # Column order: id, title, url, post_time, category_id, create_time.
            item = DBRowUrl()
            item.id = row[0]
            item.title = row[1]
            item.url = row[2]
            item.post_time = time.localtime(row[3])
            item.category_id = row[4]
            item.create_time = time.localtime(row[5])
            re.append(item)
        cursor.close()
        return re
    except Exception as e:
        if cursor:
            cursor.close()
        print e.message
def search_if(text):
    """Find if-statement condition spans in C source *text*.

    For each properly separated 'if' keyword, the span covers its
    balanced parenthesised condition. Returns a tuple of
    [start, end, 'if'] lists.
    """
    re = []
    if_begin = 0
    if_end = 0
    while 1:
        temp = []
        if_begin = text.find('if', if_end)
        if_end = text.find('(', if_begin + 2)
        if if_begin == -1:
            break
        # Keyword must be bounded by plausible token separators.
        if ((text[if_begin-1] == '\n' or text[if_begin-1] == ' ' or text[if_begin-1] == ' ' or
             text[if_begin-1] == ';' or text[if_begin-1] == '}' or text[if_begin-1] == ')') and
            (text[if_begin+2] == ' ' or text[if_begin+2] == ' ' or text[if_begin+2] == '(')):
            if_begin = if_end
            lift = 0   # '(' count
            right = 0  # ')' count
            end = 0
            for i in range(if_begin, len(text) - 1):
                if text[i] == '(':
                    lift += 1
                elif text[i] == ')':
                    right += 1
                    end = i
                # Balanced: condition parentheses are closed.
                if lift != 0 and right != 0:
                    if lift == right:
                        temp.append(if_begin - 1)
                        temp.append(end)
                        temp.append('if')
                        re.append(temp)
                        break
    return tuple(re)
def search_for(text):
    """Find for-loop header spans in C source *text*.

    For each properly separated 'for' keyword, the span covers its
    balanced parenthesised header. Returns a tuple of
    [start, end, 'for'] lists.
    """
    re = []
    for_begin = 0
    for_end = 0
    while 1:
        temp = []
        for_begin = text.find('for', for_end)
        for_end = text.find('(', for_begin + 3)
        if for_begin == -1:
            break
        # Keyword must be bounded by plausible token separators.
        if ((text[for_begin-1] == '\n' or text[for_begin-1] == ' ' or text[for_begin-1] == ' ' or
             text[for_begin-1] == ';' or text[for_begin-1] == '}' or text[for_begin-1] == ')') and
            (text[for_begin+3] == ' ' or text[for_begin+3] == ' ' or text[for_begin+3] == '(')):
            #ifBegin=ifEnd
            lift = 0   # '(' count
            right = 0  # ')' count
            end = 0
            for i in range(for_begin, len(text) - 1):
                if text[i] == '(':
                    lift += 1
                elif text[i] == ')':
                    right += 1
                    end = i
                # Balanced: header parentheses are closed.
                if lift != 0 and right != 0:
                    if lift == right:
                        temp.append(for_begin - 1)
                        temp.append(end)
                        temp.append('for')
                        re.append(temp)
                        break
    return tuple(re)
def search_switch(text):
    """Find switch bodies in C source *text*.

    For each properly separated 'switch' keyword, the brace-balanced
    body is located. Returns a tuple of [body_text, start, end] lists —
    the format expected by search_case.
    NOTE(review): ``isBegin`` is initialised but never used.
    """
    re = []
    switch_begin = 0
    switch_end = 0
    while 1:
        temp = []
        switch_begin = text.find('switch', switch_end)
        switch_end = text.find('{', switch_begin + 6)
        if switch_begin == -1:
            break
        # Keyword must be bounded by plausible token separators.
        if ((text[switch_begin-1] == '\n' or text[switch_begin-1] == ' ' or text[switch_begin-1] == ' ' or
             text[switch_begin-1] == ';' or text[switch_begin-1] == '}' or text[switch_begin-1] == ';' or
             text[switch_begin-1] == ')') and
            (text[switch_begin+6] == ' ' or text[switch_begin+6] == ' ' or text[switch_begin+6] == '(')):
            temp.append(switch_end)
            lift = 0   # '{' count
            right = 0  # '}' count
            isBegin = False
            end = 0
            for i in range(switch_end, len(text) - 1):
                if text[i] == '{':
                    lift += 1
                elif text[i] == '}':
                    right += 1
                    end = i
                # Balanced: the switch body is closed.
                if lift != 0 and right != 0:
                    if lift == right:
                        temp.append(end)
                        re.append([text[temp[0]:temp[1] + 1], temp[0], temp[1]])
                        break
    return tuple(re)
def _handleUrl(url, i):
    """Python 2. Fetch a Tieba thread page and return
    [title_header, body_text], where the body concatenates up to 50
    non-empty post contents.

    Returns ['', ''] when the page cannot be fetched. *i* is used only
    as a running number in the title header.
    """
    try:
        f = urllib2.urlopen(url, data=None, timeout=40)
        data = f.read()
    except:
        data = None
    if data is None:
        return ['', '']
    #data = open('D:/work/work_desk/now/1.html').read()
    soup = BeautifulSoup(data)
    title = soup.select('.core_title_txt')[0].string
    names = soup.select('.d_name')
    names = map(lambda name: name.a.string, names)
    contts = soup.select('cc')
    contts = map(lambda contt: contt.div.contents, contts)
    re = [];
    for c in contts:
        txt = _getContent(c)
        re.append(txt)
    title_txt = "\n\n" + str(i) + title + "\n\n"  ## "\n\n" + '##'*40 + + url + "\n\n"
    print title_txt
    txt = ''
    i = 0  # reused as the post counter from here on
    for k, v in zip(names, re):
        if v is None or str(v).strip() == '':
            pass
        else:
            txt = txt + str(v).strip() + "\n\n"  #+ str(k) + " : "
            i = i + 1
        if i > 50:
            break
    return [title_txt, txt]
def parse_rule(rule):
    """Parse a rule and return it as list of tuples in the form
    ``(converter, arguments, variable)``. If the converter is `None`
    it's a static url part, otherwise it's a dynamic one.

    based on werkzeug.routing

    Raises ValueError on a duplicated variable name or malformed
    trailing rule text. Depends on the module-level ``_rule_re``
    pattern, whose groups are 'static', 'variable', 'converter' and
    'args'.
    """
    pos = 0
    end = len(rule)
    do_match = _rule_re.match
    used_names = set()
    re = []
    while pos < end:
        m = do_match(rule, pos)
        if m is None:
            break
        data = m.groupdict()
        if data['static']:
            # Literal text preceding the next placeholder.
            re.append((None, None, data['static']))
        variable = data['variable']
        converter = data['converter'] or 'str'
        # if isinstance(converter, util.basestring_type):
        # TODO create hook for custom converts
        #     converter = {'str': converter_default,
        #                  'int': converter_int,
        #                  'uint': converter_uint,
        #                  }[converter]
        if variable in used_names:
            raise ValueError('variable name %r used twice.' % variable)
        used_names.add(variable)
        re.append((converter, data['args'] or None, variable))
        pos = m.end()
    if pos < end:
        # Anything the pattern couldn't consume must be pure static text.
        remaining = rule[pos:]
        if '>' in remaining or '<' in remaining:
            raise ValueError('malformed url rule: %r' % rule)
        re.append((None, None, remaining))
    return re
def is_path_list(value):
    """Validate a comma-separated list of paths: run is_path on every
    element and rejoin the per-element results with commas."""
    checked = [is_path(p) for p in split_list(value)]
    return ','.join(checked)
def is_ip_list(value):
    """Validate a comma-separated list of IPs: run is_ip on every
    element and rejoin the per-element results with commas."""
    checked = [is_ip(e) for e in split_list(value)]
    return ','.join(checked)
def is_ip_or_netmask_list(value):
    """Validate a comma-separated list of IPs/netmasks: run
    is_ip_or_netmask on every element and rejoin the results with
    commas."""
    checked = [is_ip_or_netmask(e) for e in split_list(value)]
    return ','.join(checked)
def build_soap_call_file(method, arguments, encoding=SOAP_ENCODING,
                         envelope_attrib=None, typed=None):
    """ Builds a soap call.

    @param method: method for the soap call. If set to None, the method
    element will be omitted and arguments will be added directly to the
    body (error message)
    @param arguments: arguments for the call
    @param encoding: encoding for the call
    @param envelope_attrib: envelope attribute
    @param typed: True if typed

    @type method: string or None
    @type arguments: dict or ElementTree.Element
    @type encoding: string
    @type envelope_attrib: list
    @type typed: boolean or None

    @return: soap call
    @rtype: string
    """
    envelope = ElementTree.Element("s:Envelope")
    if envelope_attrib:
        for n in envelope_attrib:
            envelope.attrib.update({n[0]: n[1]})
    else:
        envelope.attrib.update(
            {'xmlns:s': "http://schemas.xmlsoap.org/soap/envelope/"})

    # Sonos services require this credentials header:
    #   <s:Header>
    #     <credentials xmlns="http://www.sonos.com/Services/1.1">
    #       <deviceProvider>Sonos</deviceProvider>
    #     </credentials>
    #   </s:Header>
    header = ElementTree.SubElement(envelope, "s:Header")
    credentials = ElementTree.SubElement(header, 'credentials')
    credentials.attrib.update({'xmlns': "http://www.sonos.com/Services/1.1"})
    deviceProvider = ElementTree.SubElement(credentials, "deviceProvider")
    deviceProvider.text = 'Sonos'

    body = ElementTree.SubElement(envelope, "s:Body")

    if method:
        # RadioTime does not appear to cater for a namespace prefix on the
        # method name (note that it returns the default response for a call
        # it can't process, so for getMetadata it returns the root metadata)
        if method.startswith('{') and method.rfind('}') > 1:
            ns, method_name = method[1:].split('}')
        else:
            ns = None
            method_name = method
        # 'method_el' — the original named this 're', shadowing the stdlib module.
        method_el = ElementTree.SubElement(body, method_name)
        if ns:
            method_el.attrib.update({'xmlns': ns})
        if encoding:
            method_el.set("%sencodingStyle" % NS_SOAP_ENV, encoding)
    else:
        method_el = body

    if isinstance(arguments, dict):
        type_map = {str: 'xsd:string',
                    unicode: 'xsd:string',
                    int: 'xsd:int',
                    long: 'xsd:int',
                    float: 'xsd:float',
                    bool: 'xsd:boolean'}

        for arg_name, arg_val in arguments.iteritems():
            # (The original re-looked the value up with
            # `if arg_name in arguments: arg_val = arguments[arg_name]`,
            # which is always true while iterating iteritems() — removed.)
            arg_type = type_map[type(arg_val)]
            # NOTE: unicode values are deliberately NOT encoded to utf-8 here;
            # tostring(..., 'utf-8') below would encode them a second time.
            if arg_type == 'xsd:int' or arg_type == 'xsd:float':
                arg_val = str(arg_val)
            if arg_type == 'xsd:boolean':
                arg_val = '1' if arg_val else '0'

            e = ElementTree.SubElement(method_el, arg_name)
            if typed and arg_type:
                # BUGFIX: the original tested isinstance(type, QName) — the
                # builtin `type`, never a QName — so the guard was a no-op.
                # Test the actual value being wrapped instead.
                if not isinstance(arg_type, ElementTree.QName):
                    arg_type = ElementTree.QName(
                        "http://www.w3.org/1999/XMLSchema", arg_type)
                e.set('%stype' % NS_XSI, arg_type)
            e.text = arg_val
    else:
        method_el.append(arguments)

    # preamble = """<?xml version="1.0" encoding="utf-8"?>"""
    preamble = ""  # TODO: CHANGE THIS BACK?
    return '%s%s' % (preamble, ElementTree.tostring(envelope, 'utf-8'))
def build_soap_call(method, arguments, encoding=SOAP_ENCODING,
                    envelope_attrib=None, typed=None):
    """ Builds a soap call.

    @param method: method for the soap call. If set to None, the method
    element will be omitted and arguments will be added directly to the
    body (error message)
    @param arguments: arguments for the call
    @param encoding: encoding for the call
    @param envelope_attrib: envelope attribute
    @param typed: True if typed

    @type method: string or None
    @type arguments: dict or ElementTree.Element
    @type encoding: string
    @type envelope_attrib: list
    @type typed: boolean or None

    @return: soap call
    @rtype: string
    """
    envelope = ElementTree.Element("s:Envelope")
    if envelope_attrib:
        for n in envelope_attrib:
            envelope.attrib.update({n[0]: n[1]})
    else:
        envelope.attrib.update(
            {'s:encodingStyle': "http://schemas.xmlsoap.org/soap/encoding/"})
        envelope.attrib.update(
            {'xmlns:s': "http://schemas.xmlsoap.org/soap/envelope/"})

    body = ElementTree.SubElement(envelope, "s:Body")

    if method:
        # 'method_el' — the original named this 're', shadowing the stdlib module.
        method_el = ElementTree.SubElement(body, method)
        if encoding:
            method_el.set("%sencodingStyle" % NS_SOAP_ENV, encoding)
    else:
        method_el = body

    # append the arguments
    if isinstance(arguments, dict):
        type_map = {str: 'xsd:string',
                    unicode: 'xsd:string',
                    int: 'xsd:int',
                    long: 'xsd:int',
                    float: 'xsd:float',
                    bool: 'xsd:boolean'}

        for arg_name, arg_val in arguments.iteritems():
            arg_type = type_map[type(arg_val)]
            # NOTE: unicode values are deliberately NOT encoded to utf-8 here;
            # tostring(..., 'utf-8') below would encode them a second time.
            if arg_type == 'xsd:int' or arg_type == 'xsd:float':
                arg_val = str(arg_val)
            if arg_type == 'xsd:boolean':
                arg_val = '1' if arg_val else '0'

            e = ElementTree.SubElement(method_el, arg_name)
            if typed and arg_type:
                # BUGFIX: the original tested isinstance(type, QName) — the
                # builtin `type`, never a QName — so the guard was a no-op.
                # Test the actual value being wrapped instead.
                if not isinstance(arg_type, ElementTree.QName):
                    arg_type = ElementTree.QName(
                        "http://www.w3.org/1999/XMLSchema", arg_type)
                e.set('%stype' % NS_XSI, arg_type)
            e.text = arg_val
    else:
        method_el.append(arguments)

    preamble = """<?xml version="1.0" encoding="utf-8"?>"""
    return '%s%s' % (preamble, ElementTree.tostring(envelope, 'utf-8'))