def Read(self, filename):
    """Read ATOM/HETATM records from a PDB file into self.records / self.crds.

    Fields are taken from fixed columns; coordinates end up in a Numeric
    array of shape (natoms, 3).  NOTE(review): some slices here (anum at
    [7:11], rnum at [23:26]) are one column off the official PDB spec --
    confirm against the files actually being read.
    """
    pdbfile = open(filename)
    sys.stderr.write("Opened '%s' for reading as PDB\n" % filename)
    while 1:
        line = pdbfile.readline()
        if line == '':
            break
        if line[0:4] == 'ATOM' or line[0:6] == 'HETATM':
            # int()/float() replace the long-deprecated string.atoi/atof
            type = line[0:6].strip()
            anum = int(line[7:11].strip())
            atom = line[12:17].strip()
            residue = line[17:20].strip()
            chain = line[21:22].strip()
            rnum = int(line[23:26].strip())
            x = float(line[31:38])
            y = float(line[39:46])
            z = float(line[47:54])
            self.records.append(PDBRecord(type, anum, atom, residue, chain, rnum))
            self.crds.append((x, y, z))
    pdbfile.close()   # the original leaked the handle
    self.crds = Numeric.array(self.crds)
def LoadFile(self, FileName):
    """Load an XYZ-format file: atom count on line 1, a title line, then
    one "name x y z" record per atom.

    Populates self.NumAtom and the parallel lists AtomName / AtomId /
    AtomPos / AtomVel (velocities start as default Vectors).
    """
    self.FileName = FileName
    # 'with' closes the handle (the original leaked it)
    with open(str(FileName), 'r') as fp:
        lines = fp.readlines()
    NumAtom = int(lines[0])          # int() replaces deprecated string.atoi
    self.NumAtom = NumAtom
    self.AtomName = [None] * NumAtom
    self.AtomId = [None] * NumAtom
    self.AtomPos = [None] * NumAtom
    self.AtomVel = [None] * NumAtom
    for i in range(NumAtom):
        self.AtomId[i] = i + 1
        # lines[i+2]: skip the count line and the title line
        name, xs, ys, zs = lines[i + 2].split()[0:4]
        self.AtomName[i] = name
        # float() replaces deprecated string.atof
        self.AtomPos[i] = Vector(float(xs), float(ys), float(zs))
        self.AtomVel[i] = Vector()
def extractDM(Shash, Dhash, outfile):
    """Write a matrix of double-mutant interaction energies to *outfile*.

    Shash maps single mutations to fitness (fraction folded); Dhash maps
    'mut1-mut2' keys to double-mutant fitness.  Mutations with fitness > 1
    are counted as beneficial and skipped as backgrounds.
    NOTE(review): several alternative fitness formulas are left commented
    out below; the active path computes ddG at 296 K via R*T*log-odds.
    """
    outfile = open(outfile, 'w')
    Smuts = sorted(Shash.keys())
    outfile.write('Background'+"\t"+'Fit'+"\t"+"\t".join(Smuts)+"\n")
    countben = 0
    for mut1 in Smuts:
        # fitness > 1: beneficial background, skipped
        if Shash[mut1] > 1: countben += 1; continue
        outfile.write(mut1+"\t"+str(Shash[mut1]))
        for mut2 in Smuts:
            Dmut = ''
            fit = ''  #fit actually means fraction folded
            Smut1Fit = fit2energy(cap(floor(atof(Shash[mut1]))))
            Smut2Fit = fit2energy(cap(floor(atof(Shash[mut2]))))
            # the double mutant may be keyed in either order
            if Dhash.has_key(mut1+'-'+mut2): Dmut = mut1+'-'+mut2
            elif Dhash.has_key(mut2+'-'+mut1): Dmut = mut2+'-'+mut1
            if Dmut != '':
                #fit = fit2energy(cap(floor(atof(Dhash[Dmut]))))/Smut1Fit/Smut2Fit*Shash[mut1] #
                fit = Dhash[Dmut]/Shash[mut2] #Another way of calculating, this way without capping and flooring matches anders
                # fraction-folded outside (0, 1) has no defined ddG
                if fit >= 1 or fit <= 0: fit = 'NA'
                else: fit = -1.9858775*296/1000*log((1-fit)/fit) - (-1.9858775*296/1000*log((1-Shash[mut1])/Shash[mut1]))
            # else: fit = 'NA'
            outfile.write("\t"+str(fit))
        outfile.write("\n")
    outfile.close()
    print "There is a total of %d beneficial mutations" % countben
def HexamerFeatures(seq, hash_matrix):
    """Score a nucleotide sequence against a hexamer (dicodon) table.

    For each of the three reading frames, averages hash_matrix scores over
    the hexamers formed by consecutive codon pairs.  Returns
    (Mscore, score_distance): the best frame's average and the mean gap
    between it and the other frames.  Sequences shorter than one hexamer
    return (0, 0).
    """
    if len(seq) < 6:
        return (0, 0)
    frame_sequence = list(seq)
    frame_seq_length = len(frame_sequence)
    hexamer_re = re.compile('[atcg]{6}')   # hoisted: compiled once, not per pair
    CDS_array = []
    for o in range(0, 3):
        # space-separated codon string for this frame offset
        frame_TempStr = InitCodonSeq(o, frame_seq_length - 2, 3, frame_sequence)
        frame_array = frame_TempStr.split(' ')
        frame_array.pop()                  # drop the trailing empty token
        other_num = 0.0
        frame_array_Len = len(frame_array) - 1
        for j in range(frame_array_Len):
            hexamer = frame_array[j] + frame_array[j + 1]
            if hexamer_re.match(hexamer):
                # float() replaces the deprecated string.atof
                other_num += float(hash_matrix[hexamer])
        # original divisor: number of pairs plus 2 (kept for parity)
        frame_array_Len = frame_array_Len + 2
        other_num = other_num / frame_array_Len
        CDS_array.append(other_num)
    Mscore = max(CDS_array)
    score_distance = 0
    for m in range(0, 3):
        score_distance += Mscore - CDS_array[m]
    score_distance = score_distance / float(2)
    return (Mscore, score_distance)
def _read_data(self, fp, pos):
    """Parse a sparse-matrix section starting at file offset *pos*.

    First line: "nrow ncol nnz"; each following line: "i j [v [v_imag]]"
    with 1-based indices.  Fills self.irow / self.jcol (converted to
    0-based) and, when self.dtype is set, self.values.  Returns the
    number of entries read.
    """
    fp.seek(pos)
    size = fp.readline().split()
    # int() replaces the deprecated string.atoi
    self.nrow = int(size[0])
    self.ncol = int(size[1])
    self.shape = (self.nrow, self.ncol)
    self.nnz = int(size[2])
    # plain int/float/complex: np.int, np.float and np.complex were
    # deprecated aliases for the builtins and were removed in NumPy 1.24
    self.irow = np.empty(self.nnz, dtype=int)
    self.jcol = np.empty(self.nnz, dtype=int)
    if self.dtype is not None:
        self.values = np.empty(self.nnz, dtype=self.dtype)
    # Read in data
    k = 0
    for line in fp:                 # stream instead of materializing readlines()
        line = line.split()
        self.irow[k] = int(line[0]) - 1   # to 0-based
        self.jcol[k] = int(line[1]) - 1
        if self.dtype == int:
            self.values[k] = int(line[2])
        elif self.dtype == float:
            self.values[k] = float(line[2])
        elif self.dtype == complex:
            self.values[k] = complex(float(line[2]), float(line[3]))
        k += 1
    return k
def get_detailed_store(html, store_cat):
    """Scrape every <li> store entry out of *html* and insert it into the DB.

    Returns the list of store entry dicts.  Relies on module globals
    brand_id / brandname_e / brandname_c / data / gs / db -- presumably set
    by the surrounding crawler; TODO confirm.
    """
    store_list = []
    start = 0
    while True:
        sub_html, start, end = common.extract_closure(html, ur"<li\b", ur"</li>")
        if end == 0:
            break
        # page fragment for a single store
        html = html[end:]
        entry = common.init_store_entry(brand_id, brandname_e, brandname_c)
        m = re.findall(ur'<div class="store-title -h3a">(.+?)</div>', sub_html)
        if len(m) > 0:
            entry[common.name_e] = common.reformat_addr(m[0])
        m = re.findall(ur'<div class="store-address">(.+?)</div>', sub_html, re.S)
        if len(m) > 0:
            addr = common.reformat_addr(m[0])
            # is the last comma-separated term a telephone number?
            terms = addr.split(", ")
            tel = common.extract_tel(terms[-1])
            if tel != "":
                addr = ", ".join(terms[:-1])
                entry[common.tel] = tel
            entry[common.addr_e] = addr
        # store type
        # store_type = [store_cat]
        type_html, type_start, type_end = common.extract_closure(sub_html, ur'<ul class="service-list">', ur"</ul>")
        if type_end != 0:
            store_type = [m for m in re.findall(ur'<li class="service-item">(.+?)</li>', type_html)]
            store_type.insert(0, store_cat)
            entry[common.store_type] = ", ".join(store_type)
        else:
            entry[common.store_type] = store_cat
        # latitude / longitude
        m = re.findall(ur'data-latitude="(-?\d+\.\d+)"', sub_html)
        if len(m) > 0:
            entry[common.lat] = string.atof(m[0])
        m = re.findall(ur'data-longitude="(-?\d+\.\d+)"', sub_html)
        if len(m) > 0:
            entry[common.lng] = string.atof(m[0])
        entry[common.city_e] = common.extract_city(data[common.city_e])[0]
        entry[common.country_e] = common.reformat_addr(data[common.country_e]).strip().upper()
        gs.field_sense(entry)
        print "%s: Found store: %s, %s (%s, %s, %s)" % (
            brandname_e, entry[common.name_e], entry[common.addr_e], entry[common.city_e],
            entry[common.country_e], entry[common.continent_e], )
        db.insert_record(entry, "stores")
        store_list.append(entry)
    return store_list
def dealline(rdlin, sepin):
    """Split *rdlin* on *sepin* and return fields 1-4 as floats (x0, y0, x1, y1).

    Field 0 is ignored (presumably a label/id -- confirm with callers).
    """
    fields = rdlin.split(sepin)
    # float() replaces the deprecated string.atof
    x0 = float(fields[1])
    y0 = float(fields[2])
    x1 = float(fields[3])
    y1 = float(fields[4])
    return x0, y0, x1, y1
def load(self, filename):
    """Load point sequences from a TSV file into self.seqs.

    The header names the columns; fields starting with "f" or "g" are
    feature columns, plus mandatory "seq" (sequence id), "ptn", "x", "y".
    Consecutive rows with the same seq id form one Sequence.
    NOTE(review): in the original mangled source the placement of
    "pre_seq = dr[seq_idx]" was ambiguous; it is placed at loop level here
    so sequence breaks are actually detected -- confirm against history.
    """
    self.seqs = []
    idx = 0
    fp = open(filename, "r")
    header = fp.readline().strip("\n").split("\t")
    # indices and names of feature columns
    feaidx = [header.index(fld) for fld in header if fld.startswith("f") or fld.startswith("g")]
    self.featname = [fld for fld in header if fld.startswith("f") or fld.startswith("g")]
    seq_idx = header.index("seq")
    pnt_idx = header.index("ptn")
    x_idx = header.index("x")
    y_idx = header.index("y")
    seq = Sequence()
    pre_seq = -1   # sentinel: no previous sequence id seen yet
    for line in fp.readlines():
        pt = myPoint()
        line = line.strip("\n")
        dr = line.split('\t')
        pt.x = string.atof(dr[x_idx])
        pt.y = string.atof(dr[y_idx])
        for f in feaidx:
            pt.addvalue(header[f], string.atof(dr[f]))
        # a change of seq id closes the current Sequence
        if (dr[seq_idx] != pre_seq and pre_seq != -1):
            self.seqs.append(seq)
            seq = Sequence()
        pre_seq = dr[seq_idx]
        seq.addPoint(pt)
    # flush the last open sequence
    self.seqs.append(seq)
def signOR(ntot, ns, curs):
    """Count whether the fetched SNP odds-ratio rows contain a significant one.

    Increments ntot by one per call; increments ns by one if any row has a
    confidence interval wholly above or below 1, or (when no CI) a p-value
    <= 0.05.  Returns (ntot, ns).
    NOTE(review): splitting CI on "-" breaks for negative bounds like
    "-0.5-1.2" -- confirm the CI column format.
    """
    ORv = {}
    snpor = curs.fetchall()
    desc = curs.description
    ntot += 1
    founds = 0;
    for OR in snpor:
        ### check all CI
        # map column names onto this row's values
        for (name, value) in zip(desc, OR):
            ORv[name[0]] = value
        pval = ORv['Pvalue']
        ci = ORv['CI']
        # print pval
        #print ci
        if founds == 0:
            if ci is not None:
                # print 'y',ci
                # CI formatted "lo-hi"; significant if it excludes 1
                c0 = string.atof(ci.split("-", 1)[0])
                c1 = string.atof(ci.split("-", 1)[1])
                if (c1 > 1 and c0 > 1) or (c0 < 1 and c1 < 1):
                    founds += 1
            elif pval is not None and pval <= 0.05:
                #print 'z', pval
                founds += 1
            # elif pval is None and ci is not None:
        else:
            break
    ns += founds
    return (ntot, ns)
def process_item(self, item, spider):
    """Scrapy pipeline hook: append per-house price rows for the 5i5j
    spider to ershoufang5i5j.csv.

    Items from other spiders, or items without price history, pass
    through unchanged.  Prices are per-unit values multiplied by the
    house area (houseArea carries a two-character unit suffix).
    """
    if spider.name != 'ershoufang5i5j':
        return item
    # 'housePrice' may be missing entirely; .get() avoids a KeyError
    if not item.get('housePrice'):
        return item
    self.file = open('ershoufang5i5j.csv', 'ab')
    try:
        csvWriter = csv.writer(self.file)
        for house in item['housePrice']:
            house_area = item['houseArea'][:-2]        # strip the unit suffix
            price_chengjiao_tmp = item['housePrice'][house]['price_chengjiao']
            price_guapai_tmp = item['housePrice'][house]['price_guapai']
            # float() replaces the deprecated string.atof
            price_chengjiao = float(price_chengjiao_tmp) * float(house_area)
            price_guapai = float(price_guapai_tmp) * float(house_area)
            house_name = item['houseName'].strip()
            line = (house, house_name, item['houseCity'], price_chengjiao,
                    price_guapai, house_area, item['houseAddress'],
                    item['houseBaiduLatitude'], item['houseBaiduLongitude'],
                    item['houseTitle'])
            csvWriter.writerow(line)
    finally:
        # the original never closed the CSV file
        self.file.close()
    return item
def monitor(url, price_accpet):
    """Poll a JD.com product page once a minute and mail when the
    promotional price drops below *price_accpet*.

    Sets the module-global subject_header from the page title; fetches the
    live price from p.3.cn by SKU id.  Runs until the price condition
    fires, then sends mail and returns.
    """
    idx = 0
    timenow = time.strftime("%Y-%m-%d %X", time.localtime())
    while True:
        #for idx in range(COUNTER):
        try:
            page = urllib2.urlopen(url, timeout=1000)
            # page is GB2312-encoded; normalize through unicode
            page = unicode(page.read(), "gb2312", "ignore").encode("gb2312", "ignore")
            soup = BeautifulSoup(page, fromEncoding="gb18030")
            global subject_header
            subject_header = soup.html.head.title.string
            print subject_header
            # drop the trailing bracketed segment from the title
            subject_header = ''.join(subject_header.encode('utf-8').split('【')[:-1])
            print subject_header
            # SKU id is the last path component without extension
            skuid = url.split('/')[-1].split('.')[0]
            f = urllib2.urlopen('http://p.3.cn/prices/get?skuid=J_'+skuid, timeout=5)
        except Exception, ex:
            print ex, 'timenow:%s,couldnot open this %s' % (timenow, url)
            continue
        price = json.loads(f.read())
        f.close()
        #print price_promotion
        price_promotion = price[0]['p']
        print price_promotion, price_accpet
        if string.atof(price_promotion) < string.atof(price_accpet):
            message = ''.join(['价格降低到 ', (price_promotion.encode('utf-8'))])
            subject_header = ''.join([subject_header, message])
            print subject_header, '----', message
            send_mail(message)
            break
        time.sleep(60)
def __handle_value(self, value, iskey=False):
    """Serialize *value* into a 1-byte-tagged binary record by inferred type.

    Tags: T datetime, D date, I int, F float, A volume@price, M interned
    key, S raw string.  Years are offset by 1850 before taking the date
    ordinal -- presumably to keep it within an unsigned short; TODO confirm.
    """
    tp = self.__value_type(value)
    if tp == "datetime":
        year, month, day = atoi(value[:4])-1850, atoi(value[5:7]), atoi(value[8:10])
        hour, minute, second = atoi(value[11:13]), atoi(value[14:16]), atoi(value[17:19])
        d = date(year=year, month=month, day=day).toordinal()
        # seconds since midnight
        t = (hour*60+minute)*60+second
        return "T"+struct.pack("H", d)+struct.pack("I", t)
    elif tp == "date":
        year, month, day = atoi(value[:4])-1850, atoi(value[5:7]), atoi(value[8:10])
        d = date(year=year, month=month, day=day).toordinal()
        return "D"+struct.pack("H", d)
    elif tp == "int":
        return "I"+struct.pack("I", atoi(value))
    elif tp == "float":
        return "F"+struct.pack("f", atof(value))
    elif tp == "@":
        # "volume@price" pairs, e.g. "100@12.34"
        pattern = re.compile("(\d+)@(\d+\.\d+)")
        match = pattern.match(value)
        volume, price = atoi(match.group(1)), atof(match.group(2))
        return "A"+struct.pack("I", volume)+"@"+struct.pack("f", price)
    else:
        if iskey:
            # intern repeated key strings into a one-byte id table
            if not self.table_key.has_key(value):
                self.table_key[value] = len(self.table_key)+1
            return "M"+struct.pack("B", self.table_key[value])[0]
        else:
            return "S"+str(value)
def gui_updateMarkers(self):
    """Copy every marker's settings from the GUI widgets back into the
    graphics-method Marker object (self.GM.Marker).

    Each self.guiMarkers[i] is a tuple of widgets: text fields are read
    with .get(0.0, Tkinter.END), option menus with .getcurselection().
    """
    M = self.GM.Marker
    M.equalize()
    n = M.number
    for i in range(n):
        gm = self.guiMarkers[i]
        # checkbox variable: 0 -> marker disabled
        s = self.guiVar[i][1].get()
        if s == 0:
            M.status[i] = 'off'
        else:
            M.status[i] = 'on'
        a = gm[3].getcurselection()
        M.size[i] = string.atoi(gm[5].get(0.0, Tkinter.END))
        M.symbol[i] = gm[3].getcurselection()
        M.color[i] = gm[4].get()
        # [:-1] drops the trailing newline Tkinter Text widgets append
        M.id[i] = gm[6].get(0.0, Tkinter.END)[:-1]
        M.id_size[i] = string.atoi(gm[7].get(0.0, Tkinter.END))
        x = string.atof(gm[8].get(0.0, Tkinter.END))
        y = string.atof(gm[9].get(0.0, Tkinter.END))
        M.xoffset[i] = x
        M.yoffset[i] = y
        M.id_color[i] = gm[10].get()
        M.id_font[i] = string.atoi(gm[11].getcurselection())
        M.line[i] = string.lower(gm[12].getcurselection())
        # 'none' in the menu means no connecting line at all
        if M.line[i] == 'none':
            M.line[i] = None
        M.line_type[i] = gm[13].getcurselection()
        M.line_size[i] = string.atof(gm[14].get(0.0, Tkinter.END))
        M.line_color[i] = gm[15].get()
def loadData(self, fName):
    """Load a Christofides-Mingozzi-Toth CVRP instance from *fName*.

    Lines with more than 3 fields are the instance header (field 1 =
    vehicle capacity, field 2 = maximum route duration); lines with 2 or
    3 fields are nodes: "x y [demand]".  Node 0 is the depot.
    """
    # a CVRP has a demand for each node
    self.nodeAttributes += ['demand']
    self.globalAttributes += ['capacity', 'maximum duration']
    self.nodes = []
    self.attributes = {}
    cpt = 0
    # open() + 'with' replace the removed Python 2 file() builtin and
    # guarantee the handle is closed (the original leaked it)
    with open(fName) as fh:
        for line in fh:
            line = line.split()
            if len(line) > 3:
                self.attributes['directed'] = False
                # int()/float() replace the deprecated string.atoi/atof
                self.attributes['capacity'] = int(line[1])
                self.attributes['maximum duration'] = int(line[2])
            elif len(line) >= 2:
                thisNode = {}
                thisNode['index'] = cpt
                thisNode['label'] = str(cpt)
                thisNode['is depot'] = True if cpt == 0 else False
                thisNode['demand'] = int(line[2]) if len(line) > 2 else 0
                thisNode['x'] = float(line[0])
                thisNode['y'] = float(line[1])
                self.nodes.append(thisNode)
                cpt += 1
            else:
                continue
def rescale_canvas_cb(self, value):
    """Scale-widget callback: re-zoom the canvas to 10**value.

    *value* arrives as a string (Tk passes scale positions as text).  The
    zoom is only applied when it differs from the current log10 zoom by
    more than half the scale's resolution, avoiding churn on tiny moves.
    """
    value = float(value)                  # float() replaces deprecated string.atof
    cur_value = math.log10(self.current_zoom_factor)
    if abs(cur_value - value) > .5 * float(self.zscale['resolution']):
        factor = math.pow(10.0, value) / self.current_zoom_factor
        self.zoom_by_factor(factor, 0)
def set_to_sequence():
    """Ask the user for a frame range and retarget the selected Read nodes
    at a '#'-padded image sequence.

    Pops up a two-field panel (Start/End); values containing '.' are
    parsed as floats, otherwise as ints.  Returns False if the panel is
    cancelled, True otherwise.
    """
    sl_nodes = nuke.selectedNodes()
    sl = nuke.Panel('Set Sequence')
    sl.addSingleLineInput('Start', '')
    sl.addSingleLineInput('End', '')
    ret = sl.show()
    if not ret:
        return False

    def _parse(text):
        # float()/int() replace the deprecated string.atof/atoi
        return float(text) if '.' in text else int(text)

    i_start = _parse(sl.value('Start'))
    i_end = _parse(sl.value('End'))
    for node in sl_nodes:
        source_file = node['file'].getValue()
        dir_name = os.path.dirname(source_file)
        base_name = os.path.basename(source_file)
        name_list = base_name.split('.')
        # replace the frame-number field with a '#' pad of equal width
        name_list[-2] = '#' * len(name_list[-2])
        new_name = os.path.join(dir_name, '.'.join(name_list))
        node['file'].setValue(new_name.replace('\\', '/'))
        node['first'].setValue(i_start)
        node['last'].setValue(i_end)
        node['on_error'].setValue(1)
    return True
def usageAct(self):
    """Collect CPU usage for one VM into self.__dataSlot["cpu"].

    Shells out to `top` for the process CPU%, `virsh cpu-stats` for the
    total used CPU time, and `virsh vcpuinfo` for per-vCPU times (joined
    comma-separated into core_time).  Missing command output falls back
    to 0.
    """
    name = self.__name
    pid = self.__pid
    # shell pipelines; $9 of top is the CPU% column
    useAge = "top -bn 1 -p %s |grep %s | awk '{print $9}'" % (pid, pid)
    vcpustime = "virsh vcpuinfo %s |grep 'CPU time'|awk '{split($3,b,\"s\");print b[1]}'" % (name)
    usedtime = "virsh cpu-stats %s --total|awk 'NR>2{print $2}'|awk 'BEGIN{sum=0}{sum+=$1}END{print sum}'" % (name)
    files = os.popen(useAge)
    useAgeTmp = files.readline()
    if useAgeTmp:
        useAge = string.atof(useAgeTmp)
    else:
        useAge = 0
    files = os.popen(usedtime)
    usedTimeTmp = files.readline()
    if usedTimeTmp:
        usedTime = string.atof(usedTimeTmp)
    else:
        usedTime = 0
    files = os.popen(vcpustime)
    sts = ""
    # one line per vCPU; strip the newline and join with commas
    while True:
        strs = files.readline()
        if strs:
            sts += strs[:-1] + ","
        else:
            break
    self.__dataSlot["cpu"]["core_time"] = sts
    self.__dataSlot["cpu"]["usedTime"] = "%.3f" % (usedTime)
    self.__dataSlot["cpu"]["useAge"] = useAge
def main(opt):
    """Collect VQ-vs-culprit points from a VCF and plot them.

    opt[0] is the VCF path (optionally .gz); opt[1], if present, is the
    output figure name.  Records tagged _TRAIN_SITE go into trainData,
    everything else into newdata, both keyed by the CU (culprit) field.
    """
    vcfInfile = opt[0]
    if vcfInfile[-3:] == '.gz':
        I = os.popen('gzip -dc %s' % vcfInfile)
    else:
        # BUG FIX: the original called open(opt.vcfInfile) -- opt is a
        # sequence, so uncompressed input raised AttributeError.
        I = open(vcfInfile)
    newdata, trainData = {}, {}
    while 1:
        # read in bounded batches to cap memory on huge VCFs
        lines = I.readlines(100000)
        if not lines:
            break
        for line in lines:
            if re.search(r'^#', line):
                continue
            col = line.strip('\n').split()
            # INFO column: ';'-separated key=value pairs
            vcfinfo = {d.split('=')[0]: d.split('=')[1]
                       for d in col[7].split(';') if len(d.split('=')) == 2}
            # float() replaces the deprecated string.atof
            vq = float(vcfinfo['VQ'])
            culprit = float(vcfinfo[vcfinfo['CU']])
            if vcfinfo['CU'] not in newdata:
                newdata[vcfinfo['CU']] = []
            if vcfinfo['CU'] not in trainData:
                trainData[vcfinfo['CU']] = []
            if re.search(r'_TRAIN_SITE', col[7]):
                trainData[vcfinfo['CU']].append([vq, culprit])
            else:
                newdata[vcfinfo['CU']].append([vq, culprit])
    I.close()
    figName = 'fig'
    if len(opt) > 1:
        figName = opt[1]
    Draw(newdata, trainData, figName)
def inputsqx1(databasename, sqxfilename, rampid, verid):
    """Import a vertical-curve design file into the project database.

    Reads station/elevation pairs from *sqxfilename* (first line is a
    header and skipped) and stores them as the vertical profile
    (rampid, verid), replacing any existing one.
    """
    theproject = incadproject()
    theproject.openproject(databasename)
    if not theproject.haveramp(rampid):
        print("\n错误,未找到指定的匝道")
    if theproject.havevertical(rampid, verid):
        grd = theproject.vertical(rampid, verid)
        grd.removeall()
    else:
        grd = theproject.newvertical(rampid, verid, "旧路地面线作为交点")
    index = 0
    # open() + 'with' replace the removed Python 2 file() builtin
    with open(sqxfilename) as fi:
        for lines in fi:
            if index != 0:   # '!=' replaces the removed '<>' operator; skip header
                di = lines.split()
                ch = float(di[0])     # station    (float() replaces string.atof)
                high = float(di[1])   # elevation
                grd.append(ch, high, 0)
                print("读入桩号/高程: %s %s" % (ch, high))
            index = index + 1
    grd.cal()
    grd.save()
    theproject.closeproject()
    print("共读入交点< %s >" % index)
    print("完成读入纬地设计线输入")
def read_positions(self):
    """Read per-atom position records (POSCAR/CHGCAR style) into self.geom.

    For each species, reads self.species[s] lines via self.line(); an
    8-field record carries movability flags (xm, ym, zm), a 5-field one
    does not and defaults them to 'T'.  Atom numbers are kept as-is
    (no renumbering).
    """
    j = 1
    for s in self.species:
        for i in range(0, self.species[s]):
            # the first record's line is already current; advance afterwards
            if j > 1:
                self.getline()
            # end if
            # POSCAR or CHGCAR
            try:
                (x, y, z, xm, ym, zm, no, symb) = self.line()
            except:
                (x, y, z, no, symb) = self.line()
                xm = ym = zm = 'T'
            # end try
            x = string.atof(x)
            y = string.atof(y)
            z = string.atof(z)
            no = string.atoi(no)
            # without renumbering
            self.geom.add(AtomPos(symbol=symb, no=no, vec=[x, y, z], moveable=[xm, ym, zm]), False)
            if self.debug:
                print " %05d Process:" % self.lc(), "%4d" % j, "%3d" % i, "%2s" % s, self.line()
            # end if
            j += 1
def get_md(request):
    """Django view helper: build latest machine metrics for the instances
    named in request.GET["query"] (comma-separated ids).

    Reads '$'-separated records from Redis: cpu%, mem free, mem total,
    "k:netin", "k:netout".  Failed instances map to None.
    NOTE(review): only the stime == "latest" branch is visible here and no
    value is returned -- the function appears truncated; confirm upstream.
    """
    def total_IO(s):
        # s is "#"-separated "key:value" pairs; sum the values
        total = 0
        list_by_sharp = s.split("#")
        for l in list_by_sharp:
            v = string.atof(l.split(":")[1])
            total = total + v
        return total
    result = {}
    rediscli = ThRedisClient("localhost")
    qin = request.GET["query"].split(",")
    tstart = request.GET["stime"]
    if tstart == "latest":
        for id in qin:
            temp = {}
            try:
                iinfo = rediscli.get1byinstance(id, -1).split("$")
                temp["cpu"] = iinfo[0] + "%"
                # used / total, clamped to 100
                mem_usage = round((string.atof(iinfo[2]) - string.atof(iinfo[1])) / string.atof(iinfo[2]) * 100, 2)
                temp["mem"] = mem_usage if mem_usage <= 100 else 100
                # bytes -> MB
                temp["netin"] = string.atoi(iinfo[3].split(":")[1]) / 1024 / 1024
                temp["netout"] = string.atoi(iinfo[4].split(":")[1]) / 1024 / 1024
                result[id] = temp
            except Exception, e:
                result[id] = None
def single(infile_path='Doc/SMutList', outfile_path='result/SingleSub.txt'):
    """Tabulate single substitutions and return their fitness values.

    Reads the mutation list (tab-separated; lines containing 'Mut' are
    headers and skipped), writes a per-substitution summary table to
    *outfile_path*, and returns {WTaa+pos+Mutaa: fitness}, where fitness
    is the selection frequency divided by the input (DNA) frequency.

    The paths are parameters defaulting to the original hard-coded ones,
    so existing callers are unaffected.
    """
    Shash = {}
    # context managers close both files even on error; iterating the file
    # object replaces the removed Python 2 xreadlines()
    with open(infile_path, 'r') as infile, open(outfile_path, 'w') as outfile:
        header = "\t".join(['Substitution-WTaa', 'Substitution-Pos',
                            'Substitution-Mutaa', 'InputCount',
                            'SelectionCount(SumOfTriplicates)'])
        outfile.write(header + "\n")
        for line in infile:
            if 'Mut' in line:
                continue
            fields = line.rstrip().rsplit("\t")
            mut = fields[0]            # e.g. "A5G"
            DNA = fields[1]
            SEL = fields[6]
            wtaa = mut[0]
            pos = str(int(mut[1:-1]) + 1)   # shift to 1-based numbering
            mutaa = mut[-1]
            outfile.write("\t".join([wtaa, pos, mutaa, DNA, SEL]) + "\n")
            # float() replaces the deprecated string.atof; fields 7 and 12
            # are the input and selection totals
            DNAfreq = float(DNA) / float(fields[7])
            Selfreq = float(SEL) / float(fields[12])
            Shash[wtaa + pos + mutaa] = Selfreq / DNAfreq
    return Shash
def parseGSA(self, words):
    """Parse an NMEA GSA sentence (DOP and active satellites).

    words[1] = fix mode (1 none / 2 = 2D / 3 = 3D), words[2:13] = PRNs of
    satellites used in the solution, words[14:17] = PDOP/HDOP/VDOP.
    Marks satellites used/unused (with a 2-cycle grace period via
    gsaLast) and updates the DOP fields; VDOP of 0 means "unknown".
    """
    if len(words[1]):
        # int()/float() replace the deprecated string.atoi/atof
        (self.mode, self.LATLON) = self.update(self.mode, int(words[1]), self.LATLON)
        self.fix = ["none", "2d", "3d"][int(words[1]) - 1]
    else:
        self.fix = "none"
    usedSatellites = [int(i) for i in words[2:13] if i != ""]
    # .items() replaces iteritems() (removed in Python 3)
    for prn, sat in self.sats.items():
        if prn in usedSatellites:
            sat.used = True
            sat.gsaLast = self.gsaLast
        elif self.gsaLast - sat.gsaLast > 2:
            # not listed for more than 2 cycles: drop from the used set
            sat.used = False
    self.gsaLast += 1
    if words[14] != "":
        self.pdop = float(words[14])
    if words[15] != "":
        self.hdop = float(words[15])
    if words[16] != "":
        self.vdop = float(words[16])
        if self.vdop == 0:
            self.vdop = None
def fromXYZrecord(cls, aline):
    """Alternate constructor: build an atom from one XYZ-file record
    of the form "name x y z [...]".  Extra columns are ignored.
    """
    # str.split + float() replace the deprecated string.split/string.atof
    name, xs, ys, zs = aline.split()[0:4]
    return cls(name, [float(xs), float(ys), float(zs)])
def do_lat_lon(self, words): if len(words[0]) == 0 or len(words[1]) == 0: # empty strings? return if words[0][-1] == "N": words[0] = words[0][:-1] words[1] = "N" if words[0][-1] == "S": words[0] = words[0][:-1] words[1] = "S" if words[2][-1] == "E": words[2] = words[2][:-1] words[3] = "E" if words[2][-1] == "W": words[2] = words[2][:-1] words[3] = "W" if len(words[0]): lat = string.atof(words[0]) frac, intpart = math.modf(lat / 100.0) lat = intpart + frac * 100.0 / 60.0 if words[1] == "S": lat = -lat (self.lat, self.LATLON) = self.update(self.lat, lat, self.LATLON) if len(words[2]): lon = string.atof(words[2]) frac, intpart = math.modf(lon / 100.0) lon = intpart + frac * 100.0 / 60.0 if words[3] == "W": lon = -lon (self.lon, self.LATLON) = self.update(self.lon, lon, self.LATLON)
def AUTOatof(input_string):
    """Robust float parse for AUTO output.

    AUTO sometimes emits malformed floats: "x.xxxxxxxE" (exponent lost;
    treated as e+00) or "x.xxxxxxxx-yyy" (underflowed exponent; treated
    as 0.0).  Anything else unparseable is reported and mapped to 0.0.
    """
    try:
        return float(input_string)        # float() replaces deprecated string.atof
    except ValueError:
        pass
    try:
        if input_string[-1] == "E":
            # e.g. "0.0000000E": assume a zero exponent
            return float(input_string.strip()[0:-1])
        if input_string[-4] == "-":
            # e.g. "x.xxxxxxxxx-yyy": exponent underflow, call it zero
            return 0.0
        print("Encountered value I don't understand")
        print(input_string)
        print("Setting to 0")
        return 0.0
    except Exception:
        print("Encountered value which raises an exception while processing!!!")
        print(input_string)
        print("Setting to 0")
        return 0.0
def TF_rotate(self, c=[]):
    """Transformation: rotate all atoms around the position of atom *n*.

    c = [symbol, atom_index, ux, uy, uz, angle]: the rotation axis u and
    angle t, with the pivot at atom n (whose symbol must match c[0]).
    Works in cartesian coordinates, then converts back.  Returns self.
    NOTE(review): mutable default argument c=[] is shared across calls --
    harmless while callers always pass c, but worth fixing.
    """
    s = c[0]
    n = string.atoi(c[1])
    # direction
    u = numpy.array([0.0, 0.0, 0.0])
    u[0] = string.atof(c[2])
    u[1] = string.atof(c[3])
    u[2] = string.atof(c[4])
    # angle
    t = string.atof(c[5])
    self.cart()
    atom = self.get(n)
    # sanity check: caller-specified symbol must match the pivot atom
    if atom.symbol != s:
        raise Warning("Symbol mismatch")
    # end if
    origo = atom.position
    print " TF/rotate origo"
    atom.info()
    print " TF/rotate", u, t
    R = ROT(u, t)
    # copy atoms
    for atom in self.atoms:
        # rotate each atom's offset from the pivot
        dv = atom.position - origo
        dv = numpy.dot(R, dv)
        atom.position = dv + origo
    # end for
    self.direct()
    return self
def down(each, j, data):
    """Fetch monthly climate data for city id *each* from worldweather.cn
    and write it into row *j* of worksheet-like *data*.

    Columns: 0 city name, 1 member name, 2 city id, then four columns per
    month (min temp, max temp, rainfall, rain days).  Values failing the
    module-level func() check are written as 0 -- presumably a numeric
    validity test; TODO confirm.
    """
    url = 'http://www.worldweather.cn/zh/json/'+str(each)+'_zh.xml'
    html = urllib2.urlopen(url).read()
    con = json.loads(html)['city']
    name = con['cityName']
    memName = con['member']['memName']
    data.write(j, 0, name)
    data.write(j, 1, memName)
    data.write(j, 2, each)
    month_list = con['climate']['climateMonth']
    list_len = len(month_list)
    for i in range(list_len):
        #print i['month'],i['minTemp'],i['maxTemp'],i['rainfall'],i['raindays']
        minTemp = month_list[i]['minTemp']
        maxTemp = month_list[i]['maxTemp']
        rainfall = month_list[i]['rainfall']
        raindays = month_list[i]['raindays']
        print month_list[i]['month'], minTemp, maxTemp, rainfall, raindays
        # invalid values default to 0
        if not func(minTemp):
            minTemp = 0
        if not func(maxTemp):
            maxTemp = 0
        if not func(rainfall):
            rainfall = 0
        if not func(raindays):
            raindays = 0
        data.write(j, 4*i+3, string.atof(minTemp))
        data.write(j, 4*i+4, string.atof(maxTemp))
        data.write(j, 4*i+5, string.atof(rainfall))
        data.write(j, 4*i+6, string.atof(raindays))
def storeTimedCurrentCostDatav2(self, reftimestamp, ccdb, hist):
    """Persist a CurrentCost v2 history block into the database.

    *hist* carries month keys m01..m12, day keys d01..d31 and even hour
    keys h02..h26 (each hour slot is stored against an offset of
    key-minus-2 hours from *reftimestamp*).  Months/days are ints,
    hours are floats.
    """
    global trc
    trc.FunctionEntry("storeTimedCurrentCostDatav2")
    # months m01..m12 -- "%02d" key formatting replaces the original pair
    # of hand-split loops; int() replaces the deprecated string.atoi
    for i in range(1, 13):
        ccdb.StoreMonthData(self.GetOldMonth(reftimestamp, i),
                            int(hist['mths']["m%02d" % i]))
    # days d01..d31
    for i in range(1, 32):
        ccdb.StoreDayData(self.GetOldDay(reftimestamp, i),
                          int(hist['days']["d%02d" % i]))
    # hours h02..h26 stepping 2; float() replaces the deprecated string.atof
    for i in range(2, 27, 2):
        ccdb.StoreHourData(self.GetOldHour(reftimestamp, i - 2),
                           float(hist['hrs']["h%02d" % i]))
    trc.FunctionExit("storeTimedCurrentCostDatav2")
def __init__(self, iptFile, ctFile, truncTime):
    """Load an epidemic (label, I, N, R per line) truncated at *truncTime*
    and the matching traced-contacts XML document.

    Individuals notified (N) after truncTime are dropped; removal times R
    beyond truncTime are clipped to it.  Contacts in the XML whose id is
    not among the kept labels are removed from the DOM.
    """
    # Read epidemic and truncate to truncTime
    self.infectives = []
    self.labels = []
    epiFile = open(iptFile, 'r')
    for line in epiFile:
        toks = line.split()
        label = atoi(toks[0])
        I = atof(toks[1])   # infection time
        N = atof(toks[2])   # notification time
        R = atof(toks[3])   # removal time
        if N <= truncTime:
            # Take individuals who have been notified by truncTime
            if R > truncTime:
                # If R > truncTime, set R = truncTime
                R = truncTime
            self.infectives.append(Infective(label, I, N, R))
            self.labels.append(label)
    epiFile.close()
    # Read in XML
    conFile = Uri.OsPathToUri(ctFile)
    xmlSrc = DefaultFactory.fromUri(conFile, stripElements=[(EMPTY_NAMESPACE, '*', 1)])
    self.doc = NonvalidatingReader.parse(xmlSrc)
    # Remove from the contact DOM any contact info
    # for individuals that are not present in labels
    self.labels = set(self.labels)
    for contact in self.doc.documentElement.xpath(u'tc:contact', explicitNss={u'tc': u'tracedcontacts'}):
        contactLabel = atoi(contact.getAttributeNS(None, u'id'))
        if contactLabel not in self.labels:
            self.doc.documentElement.removeChild(contact)
def CompareRates(opath, level, varname, input_rates, index_html, ratetype=""): if options.latex: ofile = open( opath + 'compare_' + ratetype + varname + '_' + level + '.tex', 'w') else: ofile = open( opath + 'compare_' + ratetype + varname + '_' + level + '.html', 'w') table = ResultTable() if options.do_plots: hname_frac = ratetype + varname + '_fraction_' + level hist_frac = GetHist(hname_frac, varname, 'frac') hname_sfrac = ratetype + varname + '_sigma_fraction_' + level hist_sfrac = GetHist(hname_sfrac, varname, 'sigma_frac') canv_frac = GetCanvas(hname_frac, hname_frac) #canv_sfrac = GetCanvas(hname_sfrac, hname_sfrac) hname_chfrac = ratetype + varname + '_chain_fraction_' + level hist_chfrac = GetHist(hname_chfrac, varname, 'chain_frac') canv_chfrac = GetCanvas(hname_chfrac, hname_chfrac, 5000, 2500) chfrac_maxchains = 0 column_labels = [] if options.labels != '': column_labels = options.labels.split(',') else: for i in xrange(0, len(input_rates)): column_labels.append(str(i)) if len(column_labels) != len(input_rates): print "Number of column labels must be equal to the number of inputs" for i in xrange(0, len(input_rates)): column_labels.append(str(i)) # Write headers counter = -1 table.add_header('Chain') table.add_column_type('chain_name') table.add_display(True) for result in input_rates: counter += 1 label1 = column_labels[counter] table.add_header(label1) table.add_column_type('rate') table.add_display(True) table.add_header('%s_PS' % label1) table.add_column_type('rate_ps') table.add_display((options.show_pspt or options.show_ps)) table.add_header('%s_PT' % label1) table.add_column_type('rate_pt') table.add_display(options.show_pspt) if options.ratio_columns != '': for col in options.ratio_columns.split(','): label_denom = column_labels[int(col) - 1] counter = 0 # counts the results for rate in input_rates: counter += 1 label_num = column_labels[counter - 1] if counter == int(col): # Don't make raio with self continue table.add_header('%s/%s' % 
(label_num, label_denom)) table.add_column_type('rate_ratio') table.add_display(True) table.add_header('%s-%s' % (label_num, label_denom)) table.add_column_type('rate_diff') table.add_display(options.show_diff) table.add_header('rate_isZero') table.add_column_type('rate_isZero') table.add_display(False) # Loop over all results and get a complete list of triggers all_chains = set() for result in input_rates: #for chain in result.GetChainNames(level) : # all_chains.add(chain) for ch in result.GetChains(level): if string.count(ratetype, "Groups"): if string.count(ch.GetName(), "str"): pass #print "STREAM",ch.GetName(),ch.IsGroup() if ch.IsGroup(): if options.regex != '': if re.match(options.regex, ch.GetName()): all_chains.add(ch.GetName()) else: all_chains.add(ch.GetName()) else: if not ch.IsGroup(): if options.regex != '': if re.match(options.regex, ch.GetName()): all_chains.add(ch.GetName()) else: all_chains.add(ch.GetName()) rateTable = [] # Write values for chain in all_chains: # Check for PS of 0 and -1 # Check ALL input rates for this condition skip_chain = False if options.hide_zero_PS: rate_counter = 0 skip_counter = 0 thischain = result.GetChain(chain) for result in input_rates: rate_counter += 1 var = getattr(thischain, "prescale") if var == None: var = getattr(thischain, "chain_prescale") if var == 0: skip_counter += 1 if var < 0: skip_counter += 1 if skip_counter == rate_counter: skip_chain = True if skip_chain: continue table.add_entry('Chain', chain) counter = -1 isZero = False if varname == 'rate': # Set isZero to true now. 
Set it to false if at least one of the inputs has a non zero rate isZero = True for result in input_rates: counter += 1 if chain not in result.GetChainNames(level): nblanks = 1 table.add_entry('%s_PS' % column_labels[counter], 0) table.add_entry('%s_PT' % column_labels[counter], 0) for i in xrange(0, nblanks): table.add_entry(column_labels[counter + i], " ") continue thischain = result.GetChain(chain) Var = getattr(thischain, varname) # Check for zero rate if varname == 'rate': if Var > 0: isZero = False Err = 0 if varname == 'rate': Err = getattr(thischain, varname + "err") if options.use_mhz: Var *= 1000 Err *= 1000 scale_factor = 1 if options.scale != '': scales = options.scale.split(',') scale_counter = -1 for scale in scales: scale_counter += 1 if counter == scale_counter: scale_factor = string.atof(scale) if varname == "rate": Var = Var * scale_factor Err = Err * scale_factor if options.hlt_rej and level != 'L1': try: # Ignoring error on lower chain (probably smaller and correlated) lower_chain = thischain.GetLowerChain() if not result.HasChain(lower_chain): raise TrigCostAnalysis.VarNotAvailable( "Empty lower chain for " + chain) Var_lower = result.GetChain(lower_chain).GetAttrWithCheck( varname) if Var_lower > 0: Var /= Var_lower Err /= Var_lower else: Var = 0 Err = 0 if Var > 0: print "Zero lower chain (1)?! 
", chain, lower_chain, Var, Var_lower except TrigCostAnalysis.VarNotAvailable, e: Var = 0 Err = 0 print "Missing lower chain for ", chain ps = thischain.GetPrescale() pt = thischain.GetAttrWithDefault("passthrough", -1) if level != 'L1' and result.source != 'XML_prediction': lowerchain = thischain.GetAttrWithDefault("lowerchain", "none") if lowerchain != 'none': if result.HasChain(lowerchain): lchain = result.GetChain(thischain.lowerchain) ps *= lchain.prescale if level == 'EF': if result.HasChain(lchain.lowerchain): llchain = result.GetChain(lchain.lowerchain) ps *= llchain.prescale table.add_entry('%s_PS' % column_labels[counter], '%.2f' % ps) table.add_entry('%s_PT' % column_labels[counter], '%.2f' % pt) if options.show_err: table.add_entry(column_labels[counter], '%6.2f +- %6.2f' % (Var, Err)) else: table.add_entry(column_labels[counter], '%6.2f' % Var) # Write ratios and differences if options.ratio_columns != '': for col in options.ratio_columns.split(','): counter = 0 # counts the results ratio_rate = input_rates[int(col) - 1] label_denom = column_labels[int(col) - 1] for rate in input_rates: counter += 1 label_num = column_labels[counter - 1] if counter == int(col): # Don't make raio with self continue # Get labels if chain not in ratio_rate.GetChainNames(level) \ or chain not in rate.GetChainNames(level) : nblanks = 1 for i in xrange(0, nblanks): table.add_entry('%s/%s' % (label_num, label_denom), " ") table.add_entry('%s-%s' % (label_num, label_denom), " ") continue Var1 = rate.GetChain(chain).GetAttrWithCheck(varname) Var2 = ratio_rate.GetChain(chain).GetAttrWithCheck(varname) Err1 = 0 Err2 = 0 if varname == 'rate': Err1 = rate.GetChain(chain).GetAttrWithCheck(varname + "err") Err2 = ratio_rate.GetChain(chain).GetAttrWithCheck( varname + "err") scale_factor_1 = 1 scale_factor_2 = 1 if options.scale != '': scales = options.scale.split(',') scale_counter = -1 for scale in scales: scale_counter += 1 if int(col) == scale_counter: scale_factor_1 = 
string.atof(scale) if counter == scale_counter: scale_factor_2 = string.atof(scale) if options.apply_PS: if rate.GetChain(chain).GetAttrWithCheck( "prescale") > 0: scale_factor_1 = scale_factor_1 / rate.GetChain( chain).GetAttrWithCheck("prescale") if ratio_rate.GetChain(chain).GetAttrWithCheck( "prescale") > 0: scale_factor_2 = scale_factor_2 / ratio_rate.GetChain( chain).GetAttrWithCheck("prescale") if options.hlt_rej and level != 'L1': # Ignoring error on lower chain (probably smaller and correlated) try: lower_chain1 = rate.GetChain(chain).GetLowerChain() if not result.HasChain(lower_chain1): raise TrigCostAnalysis.VarNotAvailable( "Empty lower chain for " + chain) Var1_lower = rate.GetChain( lower_chain).GetAttrWithCheck(varname) lower_chain2 = ratio_rate.GetChain( chain).GetLowerChain() if not result.HasChain(lower_chain2): raise TrigCostAnalysis.VarNotAvailable( "Empty lower chain for " + chain) Var2_lower = ratio_rate.GetChain( lower_chain2).GetAttrWithCheck(varname) if Var1_lower > 0 and Var2_lower > 0: Var1 /= Var1_lower Err1 /= Var1_lower Var2 /= Var2_lower Err2 /= Var2_lower else: Var1 = 0 Var2 = 0 Err1 = 0 Err2 = 0 if Var1_lower > 0 and Var1 > 0: print "Zero lower chain (2)?! ", chain, Var1, Var1_lower if Var2_lower > 0 and Var2 > 0: print "Zero lower chain (3)?! 
", chain, Var2, Var2_lower except TrigCostAnalysis.VarNotAvailable, e: pass if varname == "rate": Var1 = Var1 * scale_factor_1 Err1 = Err1 * scale_factor_1 Var2 = Var2 * scale_factor_2 Err2 = Err2 * scale_factor_2 diff = Var1 - Var2 ratio = 0.0 if Var2 != 0.0: ratio = Var1 / Var2 ratio_err = 0.0 if Var1 != 0.0 and Var2 != 0.0: ratio_err = ratio * math.sqrt((Err1 / Var1) * (Err1 / Var1) + (Err2 / Var2) * (Err2 / Var2)) ratio_diff = 0.0 if Err1 != 0.0: ratio_diff = (Var1 - Var2) / Err1 if options.do_plots and Var1 != 0.0 and Var2 != 0.0: if hist_frac != None: hist_frac.Fill(ratio) if hist_sfrac != None: hist_sfrac.Fill(ratio_diff) if hist_chfrac != None: chfrac_maxchains += 2 hist_chfrac.SetBinContent(chfrac_maxchains, ratio) hist_chfrac.SetBinError(chfrac_maxchains, ratio_err) hist_chfrac.GetXaxis().SetBinLabel( chfrac_maxchains, chain) # print "%d %s"% (chfrac_maxchains,chain) if options.show_err: table.add_entry('%s/%s' % (label_num, label_denom), '%6.2f +- %6.2f' % (ratio, ratio_err)) else: table.add_entry('%s/%s' % (label_num, label_denom), '%6.2f' % ratio) table.add_entry('%s-%s' % (label_num, label_denom), '%6.2f' % diff)
def make_point(column, mb_inball, tg_inball, mb_inball_hr, tg_inball_hr, match_type, match_showtype, rgg, dxgg, match_nowscore):
    """Settle one football bet and return its result record.

    column          -- market identifier (e.g. 'match_bzm' = standard home win)
    mb_inball       -- full-time home goals (string, as stored)
    tg_inball       -- full-time away goals (string)
    mb_inball_hr    -- half-time home goals (string)
    tg_inball_hr    -- half-time away goals (string)
    match_type      -- 2 means an in-running ("live") bet; the score at bet
                      time (match_nowscore, "h:a") is then subtracted first
    match_showtype  -- 'h': handicap is added to the away score (home team
                      concedes); otherwise it is added to the home score
    rgg             -- handicap line, possibly a split line like "0.5/1"
    dxgg            -- over/under line, possibly a split line
    match_nowscore  -- score when the bet was placed, or 'unneed'

    Returns a dict with keys column / ben_add / status / mb_inball / tg_inball.
    NOTE(review): status codes appear to be 1=win, 2=lose, 3=void(cancelled),
    4=half-win, 5=half-lose, 8=push; ben_add seems to flag handicap and
    over/under markets -- confirm against the payout code.
    """
    print mb_inball
    print tg_inball
    print mb_inball_hr
    print tg_inball_hr
    # Scores arrive as strings; convert numeric ones ('-1' marks a cancelled match).
    if (mb_inball.isdigit() or mb_inball == '-1'):
        mb_inball = int(mb_inball)
    if (tg_inball.isdigit() or tg_inball == '-1'):
        tg_inball = int(tg_inball)
    if (mb_inball_hr.isdigit() or mb_inball_hr == '-1'):
        mb_inball_hr = int(mb_inball_hr)
    if (tg_inball_hr.isdigit() or tg_inball_hr == '-1'):
        tg_inball_hr = int(tg_inball_hr)
    if (mb_inball < 0):  # full match cancelled
        return {
            "column": column,
            "ben_add": 0,
            "status": 3,
            "mb_inball": str(mb_inball),
            "tg_inball": str(tg_inball)
        }
    elif (mb_inball == "" and mb_inball_hr < 0):  # first half cancelled
        return {
            "column": column,
            "ben_add": 0,
            "status": 3,
            "mb_inball": str(mb_inball_hr),
            "tg_inball": str(tg_inball_hr)
        }
    elif (mb_inball == "" and mb_inball_hr == ""):  # no result at all
        return {
            "column": column,
            "ben_add": 0,
            "status": 3,
            "mb_inball": str(mb_inball_hr),
            "tg_inball": str(tg_inball_hr)
        }
    ben_add = 0
    status = 2  # default: lose
    print column
    if (column == 'match_bzm'):  # standard 1X2 -- home win
        if (mb_inball > tg_inball):
            status = 1
    elif (column == 'match_bzg'):  # standard 1X2 -- away win
        if (mb_inball < tg_inball):
            status = 1
    elif (column == 'match_bzh'):  # standard 1X2 -- draw
        if (mb_inball == tg_inball):
            status = 1
    elif (column == 'match_ho'):  # full-time handicap, home side
        m = rgg.split('/')  # handicap line; "a/b" means a split line
        ben_add = 1
        temp = 0
        if (len(m) == 2):
            # Split line: settle each half of the stake separately and
            # accumulate 1 / 0.5 / 0 per half into temp.
            for k in m:
                k = string.atof(k)
                if (match_showtype.lower() == 'h'):  # home gives the handicap
                    mb_temp = mb_inball
                    tg_temp = tg_inball + k
                else:
                    mb_temp = mb_inball + k
                    tg_temp = tg_inball
                if (match_type == 2 and not match_nowscore == 'unneed'):
                    # Live bet: subtract the score at the time of the bet.
                    n = match_nowscore.split(':')
                    n[0] = string.atof(n[0])
                    n[1] = string.atof(n[1])
                    mb_temp -= n[0]
                    tg_temp -= n[1]
                if (mb_temp > tg_temp):
                    temp += 1
                elif (mb_temp == tg_temp):
                    temp += 0.5
                else:
                    temp += 0
            if (temp == 0.5):
                status = 5
            elif (temp == 1.5):
                status = 4
            elif (temp == 2):
                status = 1
            elif (temp == 0):
                status = 2
        else:
            # Single line.
            rgg = string.atof(rgg)
            if (match_showtype.lower() == 'h'):
                mb_temp = mb_inball
                tg_temp = tg_inball + rgg
            else:
                mb_temp = mb_inball + rgg
                tg_temp = tg_inball
            if (match_type == 2 and not match_nowscore == 'unneed'):
                # Live bet: subtract the score at the time of the bet.
                n = match_nowscore.split(':')
                n[0] = string.atof(n[0])
                n[1] = string.atof(n[1])
                mb_temp -= n[0]
                tg_temp -= n[1]
            if (mb_temp > tg_temp):
                status = 1
            elif (mb_temp == tg_temp):
                status = 8
            else:
                status = 2
    elif (column == 'match_ao'):  # full-time handicap, away side
        m = rgg.split('/')
        ben_add = 1
        temp = 0
        if (len(m) == 2):
            for k in m:
                k = string.atof(k)
                if (match_showtype.lower() == 'h'):
                    mb_temp = mb_inball
                    tg_temp = tg_inball + k
                else:
                    mb_temp = mb_inball + k
                    tg_temp = tg_inball
                if (match_type == 2 and not match_nowscore == 'unneed'):
                    # Live bet: subtract the score at the time of the bet.
                    n = match_nowscore.split(':')
                    n[0] = string.atof(n[0])
                    n[1] = string.atof(n[1])
                    mb_temp -= n[0]
                    tg_temp -= n[1]
                if (mb_temp < tg_temp):
                    temp += 1
                elif (mb_temp == tg_temp):
                    temp += 0.5
                else:
                    temp += 0
            if (temp == 0.5):
                status = 5
            elif (temp == 1.5):
                status = 4
            elif (temp == 2):
                status = 1
            elif (temp == 0):
                status = 2
        else:
            rgg = string.atof(rgg)
            if (match_showtype.lower() == 'h'):
                mb_temp = mb_inball
                tg_temp = tg_inball + rgg
            else:
                mb_temp = mb_inball + rgg
                tg_temp = tg_inball
            if (match_type == 2 and not match_nowscore == 'unneed'):
                # Live bet: subtract the score at the time of the bet.
                n = match_nowscore.split(':')
                n[0] = string.atof(n[0])
                n[1] = string.atof(n[1])
                mb_temp -= n[0]
                tg_temp -= n[1]
            if (mb_temp < tg_temp):
                status = 1
            elif (mb_temp == tg_temp):
                status = 8
            else:
                status = 2
    #---------------------------over/under and odd/even markets
    elif (column == 'match_dxdpl'):  # over
        m = dxgg.split('/')
        ben_add = 1
        total = mb_inball + tg_inball
        temp = 0
        if (len(m) == 2):
            for t in m:
                t = string.atof(t)
                if (total > t):
                    temp += 1
                elif (total == t):
                    temp += 0.5
            if (temp == 0.5):
                status = 5
            elif (temp == 1.5):
                status = 4
            elif (temp == 2):
                status = 1
            elif (temp == 0):
                status = 2
        else:
            dxgg = string.atof(dxgg)
            if (total > dxgg):
                status = 1
            elif (total == dxgg):
                status = 8
            else:
                status = 2
    elif (column == 'match_dxxpl'):  # under
        m = dxgg.split('/')
        ben_add = 1
        total = mb_inball + tg_inball
        print mb_inball
        print tg_inball
        temp = 0
        if (len(m) == 2):
            for t in m:
                t = string.atof(t)
                if (total < t):
                    temp += 1
                elif (total == t):
                    temp += 0.5
            if (temp == 0.5):
                status = 5
            elif (temp == 1.5):
                status = 4
            elif (temp == 2):
                status = 1
            elif (temp == 0):
                status = 2
        else:
            dxgg = string.atof(dxgg)
            if (total < dxgg):
                status = 1
            elif (total == dxgg):
                status = 8
            else:
                status = 2
    elif (column == 'match_dsdpl'):  # total goals odd
        if ((mb_inball + tg_inball) % 2 == 1):
            status = 1
    elif (column == 'match_dsspl'):  # total goals even
        if ((mb_inball + tg_inball) % 2 == 0):
            status = 1
    #--------------------------------------first-half 1X2
    elif (column == 'match_bmdy'):  # first-half home win
        if (mb_inball_hr > tg_inball_hr):
            status = 1
        mb_inball = mb_inball_hr
        tg_inball = tg_inball_hr
    elif (column == 'match_bgdy'):  # first-half away win
        if (mb_inball_hr < tg_inball_hr):
            status = 1
        mb_inball = mb_inball_hr
        tg_inball = tg_inball_hr
    elif (column == 'match_bhdy'):  # first-half draw
        if (mb_inball_hr == tg_inball_hr):
            status = 1
        mb_inball = mb_inball_hr
        tg_inball = tg_inball_hr
    #-------------------------------------
    elif (column == 'match_bho'):  # first-half handicap, home side
        m = rgg.split('/')
        ben_add = 1
        temp = 0
        if (len(m) == 2):
            for k in m:
                k = string.atof(k)
                if (match_showtype.lower() == 'h'):
                    mb_temp = mb_inball_hr
                    tg_temp = tg_inball_hr + k
                else:
                    mb_temp = mb_inball_hr + k
                    tg_temp = tg_inball_hr
                if (match_type == 2 and not match_nowscore == 'unneed'):
                    # Live bet: subtract the score at the time of the bet.
                    n = match_nowscore.split(':')
                    n[0] = string.atof(n[0])
                    n[1] = string.atof(n[1])
                    mb_temp -= n[0]
                    tg_temp -= n[1]
                if (mb_temp > tg_temp):
                    temp += 1
                elif (mb_temp == tg_temp):
                    temp += 0.5
            if (temp == 0.5):
                status = 5
            elif (temp == 1.5):
                status = 4
            elif (temp == 2):
                status = 1
            elif (temp == 0):
                status = 2
        else:
            rgg = string.atof(rgg)
            if (match_showtype.lower() == 'h'):
                mb_temp = mb_inball_hr
                tg_temp = tg_inball_hr + rgg
            else:
                mb_temp = mb_inball_hr + rgg
                tg_temp = tg_inball_hr
            if (match_type == 2 and not match_nowscore == 'unneed'):
                # Live bet: subtract the score at the time of the bet.
                n = match_nowscore.split(':')
                n[0] = string.atof(n[0])
                n[1] = string.atof(n[1])
                mb_temp -= n[0]
                tg_temp -= n[1]
            if (mb_temp > tg_temp):
                status = 1
            elif (mb_temp == tg_temp):
                status = 8
            else:
                status = 2
        mb_inball = mb_inball_hr
        tg_inball = tg_inball_hr
    elif (column == 'match_bao'):  # first-half handicap, away side
        m = rgg.split('/')
        ben_add = 1
        temp = 0
        if (len(m) == 2):
            for k in m:
                k = string.atof(k)
                if (match_showtype.lower() == 'h'):
                    mb_temp = mb_inball_hr
                    tg_temp = tg_inball_hr + k
                else:
                    mb_temp = mb_inball_hr + k
                    tg_temp = tg_inball_hr
                if (match_type == 2 and not match_nowscore == 'unneed'):
                    # Live bet: subtract the score at the time of the bet.
                    # NOTE(review): this branch truncates with int() while the
                    # other branches subtract the float -- confirm intent.
                    n = match_nowscore.split(':')
                    n[0] = string.atof(n[0])
                    n[1] = string.atof(n[1])
                    mb_temp -= int(n[0])
                    tg_temp -= int(n[1])
                if (mb_temp < tg_temp):
                    temp += 1
                elif (mb_temp == tg_temp):
                    temp += 0.5
                else:
                    temp += 0
            if (temp == 0.5):
                status = 5
            elif (temp == 1.5):
                status = 4
            elif (temp == 2):
                status = 1
            elif (temp == 0):
                status = 2
        else:
            rgg = string.atof(rgg)
            if (match_showtype.lower() == 'h'):
                mb_temp = mb_inball_hr
                tg_temp = tg_inball_hr + rgg
            else:
                mb_temp = mb_inball_hr + rgg
                tg_temp = tg_inball_hr
            if (match_type == 2 and not match_nowscore == 'unneed'):
                # Live bet: subtract the score at the time of the bet.
                n = match_nowscore.split(':')
                n[0] = string.atof(n[0])
                n[1] = string.atof(n[1])
                mb_temp -= n[0]
                tg_temp -= n[1]
            if (mb_temp < tg_temp):
                status = 1
            elif (mb_temp == tg_temp):
                status = 8
            else:
                status = 2
        mb_inball = mb_inball_hr
        tg_inball = tg_inball_hr
    #------------------------------------------------
    elif (column == 'match_bdpl'):  # first-half over
        m = dxgg.split('/')
        ben_add = 1
        temp = 0
        total = mb_inball_hr + tg_inball_hr
        if (len(m) == 2):
            for t in m:
                t = string.atof(t)
                if (total > t):
                    temp += 1
                elif (total == t):
                    temp += 0.5
            if (temp == 0.5):
                status = 5
            elif (temp == 1.5):
                status = 4
            elif (temp == 2):
                status = 1
            elif (temp == 0):
                status = 2
        else:
            dxgg = string.atof(dxgg)
            if (total > dxgg):
                status = 1
            elif (total == dxgg):
                status = 8
            else:
                status = 2
        mb_inball = mb_inball_hr
        tg_inball = tg_inball_hr
    elif (column == 'match_bxpl'):  # first-half under
        m = dxgg.split('/')
        ben_add = 1
        total = mb_inball_hr + tg_inball_hr
        temp = 0
        if (len(m) == 2):
            for t in m:
                t = string.atof(t)
                if (total < t):
                    temp += 1
                elif (total == t):
                    temp += 0.5
                else:
                    temp += 0
            if (temp == 0.5):
                status = 5
            elif (temp == 1.5):
                status = 4
            elif (temp == 2):
                status = 1
            elif (temp == 0):
                status = 2
        else:
            dxgg = string.atof(dxgg)
            if (total < dxgg):
                status = 1
            elif (total == dxgg):
                status = 8
            else:
                status = 2
        mb_inball = mb_inball_hr
        tg_inball = tg_inball_hr
    #---------------------correct-score markets (full time); bdXY = home X : away Y
    elif (column == 'match_bd10'):  # correct score
        if ((mb_inball == 1) and (tg_inball == 0)):
            status = 1
    elif (column == 'match_bd20'):  # correct score
        if ((mb_inball == 2) and (tg_inball == 0)):
            status = 1
    elif (column == 'match_bd21'):  # correct score
        if ((mb_inball == 2) and (tg_inball == 1)):
            status = 1
    elif (column == 'match_bd30'):  # correct score
        if ((mb_inball == 3) and (tg_inball == 0)):
            status = 1
    elif (column == 'match_bd31'):  # correct score
        if ((mb_inball == 3) and (tg_inball == 1)):
            status = 1
    elif (column == 'match_bd32'):  # correct score
        if ((mb_inball == 3) and (tg_inball == 2)):
            status = 1
    elif (column == 'match_bd40'):  # correct score
        if ((mb_inball == 4) and (tg_inball == 0)):
            status = 1
    elif (column == 'match_bd41'):  # correct score
        if ((mb_inball == 4) and (tg_inball == 1)):
            status = 1
    elif (column == 'match_bd42'):  # correct score
        if ((mb_inball == 4) and (tg_inball == 2)):
            status = 1
    elif (column == 'match_bd43'):  # correct score
        if ((mb_inball == 4) and (tg_inball == 3)):
            status = 1
    elif (column == 'match_bd00'):  # correct score
        if ((mb_inball == 0) and (tg_inball == 0)):
            status = 1
    elif (column == 'match_bd11'):  # correct score
        if ((mb_inball == 1) and (tg_inball == 1)):
            status = 1
    elif (column == 'match_bd22'):  # correct score
        if ((mb_inball == 2) and (tg_inball == 2)):
            status = 1
    elif (column == 'match_bd33'):  # correct score
        if ((mb_inball == 3) and (tg_inball == 3)):
            status = 1
    elif (column == 'match_bd44'):  # correct score
        if ((mb_inball == 4) and (tg_inball == 4)):
            status = 1
    elif (column == 'match_bdup5'):  # 5+ goals by either side
        if ((mb_inball >= 5) or (tg_inball >= 5)):
            status = 1
    # bdgXY = away-win correct scores (home Y : away X)
    elif (column == 'match_bdg10'):
        if ((mb_inball == 0) and (tg_inball == 1)):
            status = 1
    elif (column == 'match_bdg20'):
        if ((mb_inball == 0) and (tg_inball == 2)):
            status = 1
    elif (column == 'match_bdg21'):
        if ((mb_inball == 1) and (tg_inball == 2)):
            status = 1
    elif (column == 'match_bdg30'):
        if ((mb_inball == 0) and (tg_inball == 3)):
            status = 1
    elif (column == 'match_bdg31'):
        if ((mb_inball == 1) and (tg_inball == 3)):
            status = 1
    elif (column == 'match_bdg32'):
        if ((mb_inball == 2) and (tg_inball == 3)):
            status = 1
    elif (column == 'match_bdg40'):
        if ((mb_inball == 0) and (tg_inball == 4)):
            status = 1
    elif (column == 'match_bdg41'):
        if ((mb_inball == 1) and (tg_inball == 4)):
            status = 1
    elif (column == 'match_bdg42'):
        if ((mb_inball == 2) and (tg_inball == 4)):
            status = 1
    elif (column == 'match_bdg43'):
        if ((mb_inball == 3) and (tg_inball == 4)):
            status = 1
    # Same correct-score markets, judged on the half-time score.
    elif (column == 'match_hr_bd10'):
        if ((mb_inball_hr == 1) and (tg_inball_hr == 0)):
            status = 1
    elif (column == 'match_hr_bd20'):
        if ((mb_inball_hr == 2) and (tg_inball_hr == 0)):
            status = 1
    elif (column == 'match_hr_bd21'):
        if ((mb_inball_hr == 2) and (tg_inball_hr == 1)):
            status = 1
    elif (column == 'match_hr_bd30'):
        if ((mb_inball_hr == 3) and (tg_inball_hr == 0)):
            status = 1
    elif (column == 'match_hr_bd31'):
        if ((mb_inball_hr == 3) and (tg_inball_hr == 1)):
            status = 1
    elif (column == 'match_hr_bd32'):
        if ((mb_inball_hr == 3) and (tg_inball_hr == 2)):
            status = 1
    elif (column == 'match_hr_bd40'):
        if ((mb_inball_hr == 4) and (tg_inball_hr == 0)):
            status = 1
    elif (column == 'match_hr_bd41'):
        if ((mb_inball_hr == 4) and (tg_inball_hr == 1)):
            status = 1
    elif (column == 'match_hr_bd42'):
        if ((mb_inball_hr == 4) and (tg_inball_hr == 2)):
            status = 1
    elif (column == 'match_hr_bd43'):
        if ((mb_inball_hr == 4) and (tg_inball_hr == 3)):
            status = 1
    elif (column == 'match_hr_bd00'):
        if ((mb_inball_hr == 0) and (tg_inball_hr == 0)):
            status = 1
    elif (column == 'match_hr_bd11'):
        if ((mb_inball_hr == 1) and (tg_inball_hr == 1)):
            status = 1
    elif (column == 'match_hr_bd22'):
        if ((mb_inball_hr == 2) and (tg_inball_hr == 2)):
            status = 1
    elif (column == 'match_hr_bd33'):
        if ((mb_inball_hr == 3) and (tg_inball_hr == 3)):
            status = 1
    elif (column == 'match_hr_bd44'):
        if ((mb_inball_hr == 4) and (tg_inball_hr == 4)):
            status = 1
    elif (column == 'match_hr_bdup5'):
        if ((mb_inball_hr >= 5) or (tg_inball_hr >= 5)):
            status = 1
    elif (column == 'match_hr_bdg10'):
        if ((mb_inball_hr == 0) and (tg_inball_hr == 1)):
            status = 1
    elif (column == 'match_hr_bdg20'):
        if ((mb_inball_hr == 0) and (tg_inball_hr == 2)):
            status = 1
    elif (column == 'match_hr_bdg21'):
        if ((mb_inball_hr == 1) and (tg_inball_hr == 2)):
            status = 1
    elif (column == 'match_hr_bdg30'):
        if ((mb_inball_hr == 0) and (tg_inball_hr == 3)):
            status = 1
    elif (column == 'match_hr_bdg31'):
        if ((mb_inball_hr == 1) and (tg_inball_hr == 3)):
            status = 1
    elif (column == 'match_hr_bdg32'):
        if ((mb_inball_hr == 2) and (tg_inball_hr == 3)):
            status = 1
    elif (column == 'match_hr_bdg40'):
        if ((mb_inball_hr == 0) and (tg_inball_hr == 4)):
            status = 1
    elif (column == 'match_hr_bdg41'):
        if ((mb_inball_hr == 1) and (tg_inball_hr == 4)):
            status = 1
    elif (column == 'match_hr_bdg42'):
        if ((mb_inball_hr == 2) and (tg_inball_hr == 4)):
            status = 1
    elif (column == 'match_hr_bdg43'):
        if ((mb_inball_hr == 3) and (tg_inball_hr == 4)):
            status = 1
    #------------------total-goals markets
    elif (column == 'match_total01pl'):  # 0-1 goals
        total = mb_inball + tg_inball
        if ((total >= 0) and (total <= 1)):
            status = 1
    elif (column == 'match_total23pl'):  # 2-3 goals
        total = mb_inball + tg_inball
        if ((total >= 2) and (total <= 3)):
            status = 1
    elif (column == 'match_total46pl'):  # 4-6 goals
        total = mb_inball + tg_inball
        if ((total >= 4) and (total <= 6)):
            status = 1
    elif (column == 'match_total7uppl'):  # 7+ goals
        total = mb_inball + tg_inball
        if ((total >= 7)):
            status = 1
    #------------------half-time/full-time double result; scores arrive as "half/full"
    elif (column == 'match_bqmm'):  # home/home
        mb_inball = mb_inball.split('/')
        tg_inball = tg_inball.split('/')
        if ((mb_inball[1] > tg_inball[1]) and (mb_inball[0] > tg_inball[0])):
            status = 1
        mb_inball = int(mb_inball[1])
        tg_inball = int(tg_inball[1])
    elif (column == 'match_bqmh'):  # home/draw
        mb_inball = mb_inball.split('/')
        tg_inball = tg_inball.split('/')
        if ((mb_inball[1] == tg_inball[1]) and (mb_inball[0] > tg_inball[0])):
            status = 1
        mb_inball = int(mb_inball[1])
        tg_inball = int(tg_inball[1])
    elif (column == 'match_bqmg'):  # home/away
        mb_inball = mb_inball.split('/')
        tg_inball = tg_inball.split('/')
        if ((mb_inball[1] < tg_inball[1]) and (mb_inball[0] > tg_inball[0])):
            status = 1
        mb_inball = int(mb_inball[1])
        tg_inball = int(tg_inball[1])
    elif (column == 'match_bqhm'):  # draw/home
        mb_inball = mb_inball.split('/')
        tg_inball = tg_inball.split('/')
        if ((mb_inball[1] > tg_inball[1]) and (mb_inball[0] == tg_inball[0])):
            status = 1
        mb_inball = int(mb_inball[1])
        tg_inball = int(tg_inball[1])
    elif (column == 'match_bqhh'):  # draw/draw
        mb_inball = mb_inball.split('/')
        tg_inball = tg_inball.split('/')
        if ((mb_inball[1] == tg_inball[1]) and (mb_inball[0] == tg_inball[0])):
            status = 1
        mb_inball = int(mb_inball[1])
        tg_inball = int(tg_inball[1])
    elif (column == 'match_bqhg'):  # draw/away
        mb_inball = mb_inball.split('/')
        tg_inball = tg_inball.split('/')
        if ((mb_inball[1] < tg_inball[1]) and (mb_inball[0] == tg_inball[0])):
            status = 1
        mb_inball = int(mb_inball[1])
        tg_inball = int(tg_inball[1])
    elif (column == 'match_bqgm'):  # away/home
        mb_inball = mb_inball.split('/')
        tg_inball = tg_inball.split('/')
        if ((mb_inball[1] > tg_inball[1]) and (mb_inball[0] < tg_inball[0])):
            status = 1
        mb_inball = int(mb_inball[1])
        tg_inball = int(tg_inball[1])
    elif (column == 'match_bqgh'):  # away/draw
        mb_inball = mb_inball.split('/')
        tg_inball = tg_inball.split('/')
        if ((mb_inball[1] == tg_inball[1]) and (mb_inball[0] < tg_inball[0])):
            status = 1
        mb_inball = int(mb_inball[1])
        tg_inball = int(tg_inball[1])
    elif (column == 'match_bqgg'):  # away/away
        mb_inball = mb_inball.split('/')
        tg_inball = tg_inball.split('/')
        if ((mb_inball[1] < tg_inball[1]) and (mb_inball[0] < tg_inball[0])):
            status = 1
        mb_inball = int(mb_inball[1])
        tg_inball = int(tg_inball[1])
    # NOTE(review): '%d' formatting assumes mb_inball/tg_inball are ints here;
    # markets that never converted them (scores left as strings) would raise.
    date = {
        "column": column,
        "ben_add": ben_add,
        "status": status,
        "mb_inball": '%d' % mb_inball,
        "tg_inball": '%d' % tg_inball
    }
    return date
    #end of model
def main(argv): qFaLen = LoadFaLen(argv[1]) figPrefix = 'test' if len(argv) > 2: figPrefix = argv[2] if argv[0][-3:] == '.gz': I = os.popen('gzip -dc %s' % argv[0]) else: I = open(argv[0]) data, distance, nr, aa, bb = [], {}, {}, {}, {} s = set() print '#Chr\tPosition\tDistance\tLeftIden\tRightIden\tAveIden\tN-Ratio\tAA' while 1: # VCF format lines = I.readlines(100000) if not lines: break for line in lines: col = line.strip('\n').split() if re.search(r'^#CHROM', line): col2sam = {i + 9: sam for i, sam in enumerate(col[9:])} if re.search(r'^#', line): continue key = col[0] + ':' + col[1] if key in s: continue s.add(key) #if re.search(r'^PASS', col[6] ) : continue if not re.search(r'^PASS', col[6]): continue #if not re.search(r'_TRAIN_SITE', col[7]) : continue fmat = {k: i for i, k in enumerate(col[8].split(':'))} if 'VS' not in fmat or 'QR' not in fmat: continue annotations = [] for i, sample in enumerate(col[9:]): sampleId = col2sam[9 + i] if len(sample.split(':')[fmat['AA']].split(',')) != 4: print >> sys.stderr, '[WARNING] %s\n%s' % ( line, sample.split(':')[fmat['AA']]) continue qr = sample.split(':')[fmat['QR']].split(',')[-1] if qr == '.': continue qId, qSta, qEnd = qr.split('-') qSta = string.atoi(qSta) qEnd = string.atoi(qEnd) if sampleId not in qFaLen: raise ValueError( '[ERROR] The sample name $s(in vcf) is not in the name of Fa list.' % sampleId) if qId not in qFaLen[sampleId]: raise ValueError('[ERROR]', qId, 'is not been found in file', opt.qFalen, '\n') qSta = int(qSta * 100 / qFaLen[sampleId][qId] + 0.5) qEnd = int(qEnd * 100 / qFaLen[sampleId][qId] + 0.5) if qSta > 100 or qEnd > 100: raise ValueError( '[ERROR] Query size Overflow! 
sample : %s; scaffold : %s' % (sampleId, qId)) leg = qSta if 100 - qEnd < qSta: leg = qEnd nn = string.atof(sample.split(':')[fmat['FN']]) n = int(1000 * nn + 0.5) / 10.0 alt = string.atoi(sample.split(':')[fmat['AA']].split(',') [1]) # Alternate perfect bot = string.atoi(sample.split(':')[fmat['AA']].split(',') [3]) # Both imperfect pro = string.atoi( sample.split(':')[fmat['RP']].split(',')[0]) # Proper Pair ipr = string.atoi(sample.split(':')[fmat['RP']].split(',') [1]) # ImProper Pair annotations.append([leg, n, alt, bot, pro, ipr]) leg, n, alt, bot, pro, ipr = np.median(annotations, axis=0) if leg not in distance: distance[leg] = [0, 0] if n not in nr: nr[n] = [0, 0] if alt not in aa: aa[alt] = [0, 0] if bot not in bb: bb[bot] = [0, 0] distance[leg][0] += 1 nr[n][0] += 1 aa[alt][0] += 1 bb[bot][0] += 1 data.append([leg, alt, pro, ipr, n, bot]) I.close() data = np.array(data) print >> sys.stderr, '\nPosition\tALTernatePerfect\tLeftIdentity\tRightIdentity\tAveIden\tNRatio\tBothImperfect' print >> sys.stderr, 'Means: ', data.mean(axis=0), '\nstd : ', data.std( axis=0), '\nMedian: ', np.median(data, axis=0) print >> sys.stderr, '25 Percentile:', np.percentile( data, 25, axis=0), '\n50 Percentile:', np.percentile( data, 50, axis=0), '\n75 Percentile:', np.percentile(data, 75, axis=0) DrawFig( figPrefix, \ np.array (Accum( distance )), \ np.array (Accum( nr, True )), \ np.array (Accum( aa )), \ np.array (Accum( bb )) )
time = 0 while (1): if C * (F + S) > X * F: time += X / S print 'Case #%r: %.7f' % (count, time) return else: time += C / S S += F def parseline(line): return line.rstrip().split(' ') if __name__ == "__main__": if len(sys.argv) == 1: fin = open('input.txt') else: fin = open(sys.argv[1]) count = string.atoi(fin.readline().rstrip()) count = 1 for line in fin: data = parseline(line) C = string.atof(data[0]) F = string.atof(data[1]) X = string.atof(data[2]) calTime(C, F, X, count) count += 1
def handleSamplingParams(self, params):
    """Return *params* as a list with the first three items kept verbatim
    and every remaining item coerced to float.

    Fix: string.atof() was removed in Python 3; float() is its exact
    equivalent (string.atof was a thin wrapper around float) and works on
    Python 2 as well, so the local ``import string`` is no longer needed.
    """
    return list(params[:3]) + [float(str(s)) for s in params[3:]]
# Split the readings in file1 into four interleaved series a..d: column 5 of
# every non-empty line is parsed as a float and routed by line index mod 4.
# NOTE(review): file1 and name are defined earlier in this script; the
# position of the i counter increment (per data line) is inferred -- confirm.
# Fixes: string.atof() was removed in Python 3 (it was a wrapper around
# float()); local variable 'list' shadowed the builtin and was renamed.
i = 0
a = []
b = []
c = []
d = []
while True:
    line = file1.readline()
    if not line:
        break
    fields = line.split()
    if len(fields) > 0:
        if i % 4 == 0:
            a.append(float(fields[5]))
        elif i % 4 == 1:
            b.append(float(fields[5]))
        elif i % 4 == 2:
            c.append(float(fields[5]))
        else:
            d.append(float(fields[5]))
        i += 1
outfile = open(name + ".dat", 'w')
i = 0
# # Compare the demo tixwidgets.py to the original Tcl program and you will # appreciate the advantages. # import string from Tkinter import * from Tkinter import _flatten, _cnfmerge, _default_root # WARNING - TkVersion is a limited precision floating point number if TkVersion < 3.999: raise ImportError, "This version of Tix.py requires Tk 4.0 or higher" import _tkinter # If this fails your Python may not be configured for Tk TixVersion = string.atof( tkinter.TIX_VERSION ) # If this fails your Python may not be configured for Tix # WARNING - TixVersion is a limited precision floating point number # Some more constants (for consistency with Tkinter) WINDOW = 'window' TEXT = 'text' IMAGETEXT = 'imagetext' # BEWARE - this is implemented by copying some code from the Widget class # in Tkinter (to override Widget initialization) and is therefore # liable to break. class TixWidget(Widget): """A TixWidget class is used to package all (or most) Tix widgets.
def apply(self):
    """Read the legend X/Y positions from the two label entry widgets into
    the global settings dict DD, then redraw the plot.

    Fix: string.atof() was removed in Python 3; float() is its exact
    equivalent and also works on Python 2.
    """
    DD['legX'] = float(self.labels[0].get())
    DD['legY'] = float(self.labels[1].get())
    replot()
def Read(filename, asFloat=1, savespace=1, doByteSwap=1, compliant=1, allHDU=1, HDU=None, memmap=None):
    """Read a FITS file and return [header0, data0, header1, data1, ...].

    Each header entry is {'raw': 80-char card list, 'parsed': key->value dict}.
    if savespace is set, the array will maintain its type if asfloat is set.
    If doByteSwap is not set, no byteswap will be done if little endian - if
    this is the case, the file is not actually fits compliant.
    if memmap is used, the file is memmapped rather than read. If this is
    used, it is recommended that memmap="r" and that the file is
    non-byte-swapped. If allHDU==0 then will only read the current HDU.
    If HDU is given, only that HDU index has its data read (others skipped).
    filename may be a path (globbed; first match used) or an open file object.
    """
    if type(filename) == type(""):
        # Path given: expand ~ and glob; load the first match.
        flist = glob.glob(os.path.expanduser(filename))
        if len(flist) == 0:
            raise Exception("No files found to load")
        else:
            filename = flist[0]
            if len(flist) > 1:
                print "Ignoring %s" % str(flist[1:])
                print "Loading %s" % flist[0]
        file = open(filename, "rb")
        filelen = os.path.getsize(filename)
    else:
        # Already an open file object; measure its length by seeking.
        file = filename
        filename = str(filename)
        cur = file.tell()
        file.seek(0, 2)  #go to end of file
        filelen = file.tell()
        file.seek(cur, 0)
    done = 0
    returnVal = []
    hduno = 0
    while done == 0:
        rawHeader = []
        header = {}
        # FITS headers come in 2880-byte blocks of 36 80-char "cards".
        buffer = file.read(2880)
        if buffer[:6] != 'SIMPLE' and buffer[:8] != "XTENSION":
            if compliant:
                print "Warning - non compliant FITS file - error reading the %dth HDU - this should start with SIMPLE or XTENSION, but starts with:" % (
                    len(returnVal) / 2 + 1)
                print buffer[:80]
                # NOTE(review): 'error' is not defined in this function --
                # presumably a module-level string; confirm it exists.
                raise Exception(error + 'Not a simple fits file: %s' % filename)
            else:
                print "WARNING - non compliant FITS file for %dth HDU" % (
                    len(returnVal) / 2 + 1)
                return returnVal
        while (1):
            # Parse one 2880-byte header block card by card.
            for char in range(0, 2880, 80):
                line = buffer[char:char + 80]
                rawHeader.append(line)
                key = string.strip(line[:8]).strip("\0")
                if key:
                    val = line[9:]
                    val = string.strip(val).strip("\0")
                    if val:
                        if val[0] == "'":
                            # Quoted string value: take up to the closing quote.
                            try:
                                pos = string.index(val, "'", 1)
                                val = val[1:pos]
                            except:
                                val = val[1:]
                        else:
                            # Strip trailing comment introduced by '/'.
                            pos = string.find(val, '/')
                            if pos != -1:
                                val = val[:pos]
                    # Repeated keys (e.g. COMMENT/continuations) are concatenated.
                    if header.has_key(key):
                        header[key] += val
                    else:
                        header[key] = val
            if header.has_key('END'):
                break
            buffer = file.read(2880)
        naxis = string.atoi(header['NAXIS'])
        shape = []
        for i in range(1, naxis + 1):
            shape.append(string.atoi(header['NAXIS%d' % i]))
        shape.reverse()  # FITS axes are fastest-first; numpy wants slowest-first
        if len(shape) > 0:
            numPix = 1
            for i in shape:
                numPix = numPix * i
        else:
            numPix = 0
        # Map FITS BITPIX to a numpy dtype (negative conventional values =
        # floats; -16/-8 used here as unsigned variants).
        bitpix = string.atoi(header['BITPIX'])
        if bitpix == 8:
            typ = numpy.uint8
        elif bitpix == 16:
            typ = numpy.int16
        elif bitpix == 32:
            typ = numpy.int32
        elif bitpix == -32:
            typ = numpy.float32
            bitpix = 32
        elif bitpix == -64:
            typ = numpy.float64
            bitpix = 64
        elif bitpix == -16:
            typ = numpy.uint16
            bitpix = 16
        elif bitpix == -8:
            typ = numpy.uint8
            bitpix = 8
        elif bitpix == 64:
            typ = numpy.int64
            bitpix = 64
        else:
            print("Unknown BITPIX: %d" % bitpix)
        numByte = numPix * bitpix / 8
        if HDU is None or hduno == HDU:
            if memmap is None:
                data = numpy.fromfile(file, typ, count=numPix)
            else:
                nel = reduce(lambda x, y: x * y, shape)
                data = numpy.memmap(filename, dtype=typ, mode=memmap,
                                    offset=file.tell())[:nel]
                file.seek(bitpix / 8 * nel, 1)
        else:
            # Not the requested HDU: skip its (block-padded) data section.
            file.seek(int((numByte + 2880 - 1) // 2880) * 2880, 1)
            data = None
        #data = file.read(numByte)
        #data = numpy.fromstring(data, dtype=typ)
        #data.savespace(1)
        if data is not None:
            if len(shape) > 0:
                data.shape = shape
            if numpy.little_endian and doByteSwap:
                # UNORDERD='T' marks data already in native (unswapped) order.
                if header.has_key("UNORDERD") and header["UNORDERD"] == 'T':
                    pass
                else:
                    data.byteswap(True)
            if asFloat:
                # Apply the linear scaling declared in the header.
                bscale = string.atof(header.get('BSCALE', '1.0'))
                bzero = string.atof(header.get('BZERO', '0.0'))
                if savespace:
                    if bscale != 1:
                        data *= bscale  #array(bscale,typecode=typ)
                    if bzero != 0:
                        data += bzero  #array(bzero,typecode=typ)
                else:
                    data = data * bscale + bzero
        returnVal.append({'raw': rawHeader, 'parsed': header})
        returnVal.append(data)
        # Consume the padding up to the next 2880-byte boundary.
        ntoread = 2880 - numByte % 2880
        if ntoread != 0 and ntoread != 2880:
            file.read(ntoread)
        #print "Read 1 hdu at %d/%d"%(file.tell(),filelen)
        if file.tell() == filelen or (allHDU == 0 and data is not None):
            done = 1
        hduno += 1
    return returnVal  #( { 'raw' : rawHeader, 'parsed' : header}, data )
def parseFValue(self, d):
    """Parse a numeric string that may contain comma separators
    (e.g. '1,234.5') and return it as a float.

    Fix: the original split on ',' and re-joined the pieces before calling
    string.atof(), which was removed in Python 3; stripping the commas with
    str.replace and calling float() is the exact equivalent.
    """
    return float(d.replace(',', ''))
# Merge BLAST hits with Kraken classifications.
# First pass: collect, per read, the taxon ids of its best-scoring BLAST hits.
# NOTE(review): the path variables and output handling are defined elsewhere
# in this script.
kraken_fp = open(kraken_output_M_S_path)
kraken_fp_start = kraken_fp.tell()  # remember start so the file can be rescanned
blast_fp = open(kraken_output_M_S_blastout_path)
out_fp = open(output_path, 'w')
blast_reads_taxid_dic = {}  # read id -> list of taxon ids of accepted hits
best_eval_dic = {}  # read id -> e-value of the first hit seen for that read
while True:
    line = blast_fp.readline()
    if not line:
        break
    linesplit = line.split('\t')
    taxon_id = linesplit[3]
    read_id = linesplit[0]
    e_value = linesplit[13].strip()
    e_value = string.atof(e_value)
    if blast_reads_taxid_dic.has_key(read_id):
        # NOTE(review): best_eval_dic is never updated after the first hit,
        # so "best" here means "first seen"; later hits are kept only if
        # their e-value is <= that first one -- confirm this is intended.
        best_eval = best_eval_dic[read_id]
        if e_value <= best_eval:
            blast_reads_taxid_dic[read_id].append(taxon_id)
    else:
        blast_reads_taxid_dic[read_id] = [taxon_id]
        best_eval_dic[read_id] = e_value
# Second pass: walk the Kraken output (read id in column 1, taxon in column 2).
while True:
    line = kraken_fp.readline()
    if not line:
        break
    linesplit = line.split('\t')
    read_id = linesplit[1]
    taxon_id = linesplit[2]
# Rewrite '4.out' into '4.outdoor', keeping column 0 and multiplying
# column 5 by 1000 on every line.
# Fixes: string.atof() was removed in Python 3 (it was just float());
# the local name 'list' shadowed the builtin and was renamed.
file = open('4.out')
file2 = open('4.outdoor', 'w')
import string
while True:
    line = file.readline()
    if not line:
        break
    fields = line.split()
    newline = fields[0] + ' ' + str(float(fields[5]) * 1000) + '\n'
    file2.write(newline)
def getLastSolutionFromWekaTopKAllOne(rRate, inputFile, outputFile, K, buysRate, MINCLICK):
    """Build a RecSys-style submission file from Weka classifier output.

    Phase 1 reads the Weka prediction dump (inputFile): every session
    predicted "yes" with confidence above rRate is marked as a buyer.
    Phase 2 walks the click log, additionally marking one-click sessions
    whose item's historical one-click buy rate exceeds buysRate, and for
    each buyer session writes "sessionId;item1,item2,..." with up to K
    items chosen by click count (items need >1 click and a buy frequency
    above MINCLICK; a one-click session just emits its single item).

    BUG FIX: the one-click branch used ``clickInfo_[0][0]`` -- the loop
    variable of a loop that has not run yet (NameError on first use, or a
    stale value leaked from a previous session). The intended value is
    ``clickInfo[0][0]``, the item id of the session's only clicked item.
    """
    file_out = open(outputFile + "-" + str(K) + "-" + str(buysRate) + "-" + str(MINCLICK) + ".dat", 'w')
    file_read = open(inputFile)
    sessionIds = getTestSessionIds('D:\Data\\feature\\yoochoose-test-sessionId.dat')
    buysFrequentness = getBuysFrequentness('D:\Data\yoochoose-buys-analyse-sorted.dat')
    start = -1  # -1 until the "inst#" header line is seen, then row counter
    recieveRate = rRate
    buys = {}  # session id -> 1 for sessions predicted to buy
    for line in file_read:
        if line.find("inst#") != -1:
            start = 0
            continue
        if start == -1:
            continue
        if len(line) > 5:
            data = (line.split(":")[2]).split(" ")
            answer = data[0]
            rate = string.atof(data[1])
            if answer == "yes" and rate > recieveRate:
                buys[sessionIds[start]] = 1
            start += 1
    file_click = open('D:\Data\yoochoose-test.dat')
    sessionId = "-1"
    itemId = "-1"
    clickInfo = {}  # item id -> click count within the current session
    oneClickBuysRate = getDicFromFile('D:\Data\yoochoose-buysRate-oneClick.dat')
    eachNumber = 0  # clicks in the current session
    oneNumberBuy = 0  # one-click sessions emitted as buys
    oneNumberAll = 0  # all one-click sessions
    items = []
    for line in file_click:
        data = line.split(",")
        if data[0] != sessionId:
            # Session boundary: flush the previous session (if any).
            if sessionId != "-1":
                if eachNumber == 1:
                    oneNumberAll += 1
                    # One-click rescue: mark as buyer if the single item's
                    # one-click buy rate is high enough.
                    if oneClickBuysRate.has_key(itemId) and oneClickBuysRate[itemId] > buysRate:
                        buys[sessionId] = 1
                    #if buysFrequentness.has_key(itemId) and buysFrequentness[itemId]>10:
                    #    buys[sessionId] = 1
                if buys.has_key(sessionId):
                    if eachNumber == 1:
                        oneNumberBuy += 1
                    outString = sessionId + ";"
                    # Items sorted by click count, most clicked first.
                    clickInfo = sorted(clickInfo.iteritems(), key=lambda s: s[1], reverse=True)
                    choosedNumber = 0
                    if eachNumber == 1:
                        # FIXED: was clickInfo_[0][0] (undefined loop variable).
                        outString += clickInfo[0][0]
                    else:
                        for clickInfo_ in clickInfo:
                            if clickInfo_[1] > 1 and buysFrequentness.has_key(clickInfo_[0]) and buysFrequentness[clickInfo_[0]] > MINCLICK:
                                if choosedNumber == 0:
                                    outString += clickInfo_[0]
                                else:
                                    outString += "," + clickInfo_[0]
                                choosedNumber += 1
                                if choosedNumber == K:
                                    break
                    # Only write sessions for which at least one item was chosen.
                    if outString != (sessionId + ";"):
                        file_out.write(outString + "\n")
            clickInfo = {}
            eachNumber = 0
        if clickInfo.has_key(data[2]):
            clickInfo[data[2]] += 1
        else:
            clickInfo[data[2]] = 1
        sessionId = data[0]
        itemId = data[2]
        eachNumber += 1
    print [oneNumberBuy, oneNumberAll, 1.0 * oneNumberBuy / oneNumberAll]  #[5, 314545, 1.589597672829007e-05]
    file_out.close()
def ReadXmlFile(filename):
    """Parse a trigger-rates XML file.

    Returns (group_list, trig_map):
      trig_map   -- trigger name -> Trigger (name/rate/rate_err/prescale)
      group_list -- group name -> group, with cumu_group / rev_cumu_group
                    indexed by the trailing integer of each cumulative entry.
    NOTE(review): Trigger and group are classes defined elsewhere in this
    module.
    """
    def GetData(nodelist, name):
        # Return the text content of the unique child element called *name*;
        # raise if the element is missing or has more than one text node.
        for val in nodelist:
            if val.nodeName != name:
                continue
            svalue = ''
            icount = int(0)
            for s in val.childNodes:
                icount = icount + 1
                svalue = s.data
            if icount == 1:
                return svalue
            else:
                raise Exception('Wrong child list')
        raise Exception('Failed to find data')

    # create and initialize parser
    myf = open(filename)
    dom = xml.dom.minidom.parse(myf)
    trig_map = dict()
    group_list = dict()
    for top in dom.getElementsByTagName('trigger'):
        #Get triggers, store info
        for lev in top.getElementsByTagName('level'):
            for trsig in lev.getElementsByTagName('signature'):
                tr = Trigger()
                tr.name = GetData(trsig.childNodes, 'sig_name')
                tr.rate = GetData(trsig.childNodes, 'rate')
                tr.rerr = GetData(trsig.childNodes, 'rate_err')
                tr.PS = GetData(trsig.childNodes, 'prescale')
                trig_map[tr.name] = tr
        #Get groups
        for sig in top.getElementsByTagName('cplx_signature'):
            if GetData(sig.childNodes, 'type') == 'Group':
                gr = group()
                gr.name = GetData(sig.childNodes, 'sig_name')
                gr.rate = string.atof(GetData(sig.childNodes, 'rate'))
                group_list[gr.name] = gr
        #Now get cumulative rates
        for sig in top.getElementsByTagName('cplx_signature'):
            if GetData(sig.childNodes, 'type').find('Cumulative') != -1:
                cumu_name = GetData(sig.childNodes, 'sig_name')
                # Group name is everything before "_cumu..."; the trailing
                # "_<int>" gives the cumulative index.
                group_name = cumu_name[:cumu_name.find('cumu') - 1]
                cumu_index = -1
                try:
                    cumu_index = int(cumu_name.split('_')[-1])
                except ValueError:
                    print 'ERROR - Trailing character on cumulative group is not an integer'
                    continue
                cumutr = group()
                cumutr.name = cumu_name
                cumutr.rate = GetData(sig.childNodes, 'rate')
                #Get triggers in this group
                for comp in sig.getElementsByTagName('components'):
                    for compsig in comp.getElementsByTagName('sig'):
                        cumutr.triggers.append(trig_map[GetData(
                            compsig.childNodes, 'sig_name')])
                # Sanity check: index N must contain exactly N+1 triggers.
                if len(cumutr.triggers) != cumu_index + 1:
                    print 'Error in parsing triggers for group ' + cumu_name
                    continue
                # "rev" entries go to the reverse-cumulative table.
                if cumu_name.count('rev') > 0:
                    group_list[group_name].rev_cumu_group[cumu_index] = cumutr
                else:
                    group_list[group_name].cumu_group[cumu_index] = cumutr
    return group_list, trig_map
symtol = 0 # tolerance for symmetry check for opt in options: if opt[0] == '-3': quad = 0 elif opt[0] == '-4': quad = 1 elif opt[0] == '-o': order = string.atoi(opt[1]) if order != 1 and order != 2: sys.stderr.write("order must be 1 or 2!\n") sys.exit(1) elif opt[0] == '-n': meshsize = string.atoi(opt[1]) elif opt[0] == '-t': symtol = string.atof(opt[1]) hexmaterial = newMaterial("2d-elastic", HexagonalElasticity("hex", 1.0, 0.5, 0.0, 0.0, 0.0), Orientation("unrotated", EulerAngle(0, 0, 0))) mesh = Mesh() mesh.activate_equation(forcebalance_eqn) #mesh.activate_equation(plane_stress_eqn) displacement.set_in_plane(mesh, 1) mesh.define_field(displacement) mesh.activate_field(displacement) #mesh.activate_field(displacement.out_of_plane())
data['brandname_c']) store_id = string.atoi(s['physicalStoreId']) if store_id in store_map: item = store_map[store_id] cm.dump( 'Duplicated: %s, %s' % (item[cm.addr_e], item[cm.country_e]), log_name) continue entry[cm.country_e] = cm.html2plain(s['country']).strip().upper() entry[cm.city_e] = cm.html2plain(s['city']).strip().upper() entry[cm.addr_e] = cm.reformat_addr(s['address']) val = str(s['latitude']) if val is not None and val.strip() != '': entry[cm.lat] = string.atof(val) val = str(s['longitude']) if val is not None and val.strip() != '': entry[cm.lng] = string.atof(val) entry[cm.name_e] = cm.html2plain(s['name']) entry[cm.tel] = s['phone1'].strip() if s['phone1'] else '' entry[cm.zip_code] = s['postalCode'].strip() entry[cm.store_type] = s['sections'].strip() gs.field_sense(entry) ret = gs.addr_sense(entry[cm.addr_e], entry[cm.country_e]) if ret[1] is not None and entry[cm.province_e] == '': entry[cm.province_e] = ret[1] if ret[2] is not None and entry[cm.city_e] == '': entry[cm.city_e] = ret[2]
def run(cluster, detect):
    """Collect spectroscopic redshifts for *cluster* and write an LDAC
    catalog into the SUBARU photometry directory for band *detect*.

    Reads ./spec/<cluster>.zcat and ./nedspec/<cluster>.zcat, keeps the
    longer of the two, converts sexagesimal coordinates to degrees,
    filters to redshifts in (0, 3), and pipes the result through the
    external `asctoldac` tool.
    Returns True if a catalog was produced, False if no zcat was found.
    """
    import os
    import MySQLdb
    import os, sys, anydbm, time
    #from config import datb, dataloc
    #from config_bonn import cluster
    #cluster = sys.argv[1]
    #detect = sys.argv[2]
    SUBARUDIR = '/nfs/slac/g/ki/ki05/anja/SUBARU/'
    output = SUBARUDIR + cluster + '/PHOTOMETRY_' + detect + '_aper/' + cluster + 'spec.cat'
    #print datb + cluster
    import lib
    os.system("mkdir " + cluster)
    import string
    #if string.find(cluster,'MACS') != -1:
    #    name = cluster[4:8]
    #else:
    #    name = cluster
    from glob import glob
    # Load both candidate catalogs; missing files yield empty lists.
    file = './spec/' + cluster + '.zcat'
    if glob(file):
        zcat_harald = open(file, 'r').readlines()
    else:
        zcat_harald = []
    file = './nedspec/' + cluster + '.zcat'
    if glob(file):
        zcat_ned = open(file, 'r').readlines()
    else:
        zcat_ned = []
    if len(zcat_harald) or len(zcat_ned):
        # Use whichever catalog has more entries.
        if len(zcat_harald) > len(zcat_ned):
            zcat = zcat_harald
        else:
            zcat = zcat_ned
        import re
        op = open('op', 'w')
        import os
        SeqNr = 0
        for line in zcat:
            import re
            # Tokenise; drop the empty token produced by leading whitespace.
            ll = re.split('\s+', line)
            if ll[0] == '':
                ll = ll[1:]
            import string
            # Pad so columns line up when the id column is missing.
            # NOTE(review): the ':' tests below distinguish sexagesimal from
            # decimal coordinates -- confirm against the zcat format spec.
            if string.find(ll[0], ':') != 0:
                ll = [''] + ll
            temp = ['', 'temp']
            #print len(ll), 'length'
            print ll
            if string.find(ll[1], ':') != -1:
                for ele in ll[1:]:
                    temp.append(ele)
                ll = temp
                #print ll
            print ll
            id = ll[1]  #line[0:6]
            agalra = ll[2]  #line[8:20]
            agaldec = ll[3]  #line[22:34]
            z = ll[4]  #line[37:43]
            if string.find(agalra, ':') != -1:
                # Sexagesimal HH:MM:SS / sDD:MM:SS -> decimal degrees.
                #print id, agalra, agaldec, z
                rlist = ['', '', '']
                dlist = ['', '', '']
                rlist[0] = agalra[0:2]
                rlist[1] = agalra[3:5]
                rlist[2] = agalra[6:]
                dsign = agaldec[0]
                dmul = float(dsign + '1')  # +1.0 or -1.0 from the sign char
                dlist[0] = agaldec[1:3]
                dlist[1] = agaldec[4:6]
                dlist[2] = agaldec[7:]
                import string
                #print rlist, dlist, dsign
                radeg = (360 / 24.0) * string.atof(rlist[0]) + (
                    360.0 / (24.0 * 60)) * string.atof(rlist[1]) + (
                        360.0 / (24.0 * 60.0 * 60.0)) * string.atof(rlist[2])
                spectrara = radeg
                decdeg = dmul * (string.atof(dlist[0]) +
                                 (1 / 60.0) * string.atof(dlist[1]) +
                                 string.atof(dlist[2]) * (1 / (60.0 * 60.0)))
            else:
                radeg = float(agalra)
                decdeg = float(agaldec)
            spectradec = decdeg
            spectraz = z
            label = id
            #decdiff =decdeg - 24.0695846901
            #radiff = radeg - 215.925761617995
            #print decdiff, radiff
            #op.write(str(radiff) + " " + str(decdiff)+ "\n")
            # An all-whitespace label gets a placeholder.
            p = re.compile('\S')
            m = p.findall(label)
            if len(m) == 0:
                label = 'nolab'
            SeqNr += 1
            # Keep only clean redshifts: no '?', non-empty, not -1, in (0,3).
            if string.find(
                    spectraz, '?'
            ) == -1 and spectraz != '' and spectraz != '-1' and 0 < float(
                    spectraz) < 3:
                print radeg, decdeg, spectraz
                op.write(
                    str(SeqNr) + ' ' + str(radeg) + " " + str(decdeg) + " " +
                    str(spectraz) + "\n")
        op.close()
        # Convert the ASCII table to an LDAC catalog.
        os.system('asctoldac -i op -o ' + output + ' -c ./photconf/zspec.conf')
        return True
    else:
        return False
tree = string.rstrip(allLines[2]) outFileBre.write(tree) outFileBre.write("\nttag !;") i = 4 while i < (numberLines - 1): allLines_splitted = string.split(allLines[i]) #get pbs values and make sure are clean pbsValues_splitted = allLines_splitted[2].split(",") pbsValues_splitted.remove(";") #get BS BS = 0 #Bremer support for item in pbsValues_splitted: a = string.atof(item) #get PBSi BS += a #get PBSi and absPBSi PBS = 0 c = 0 for item in pbsValues_splitted: PBSi = string.atof(item) #get PBSi absPBSi = abs(PBSi) #get absPBSi b = (absPBSi - BS) c += b #Suma|PBSi| - BS if (BS != 0): pci2 = c / BS #(Suma|PBSi| - BS)/BS
def fetch_stores(data): global national_added url = data['url'] try: body = cm.get_data(url) except Exception: cm.dump('Error in fetching stores: %s' % url, log_name) return [] store_list = [] if national_added: pat = re.compile(ur'<div\s+class\s*=\s*"retailer_address"') else: pat = re.compile( ur'<div\s+class\s*=\s*"retailer_address(\s+national)?"') national_added = True for m in re.finditer(pat, body): entry = cm.init_store_entry(data['brand_id'], data['brandname_e'], data['brandname_c']) entry[cm.country_e] = data['country'] entry[cm.province_e] = data['state'] entry[cm.city_e] = cm.extract_city(data['city'])[0] if u'national' in m.group(): entry[cm.store_type] = u'National Distributor' else: entry[cm.store_type] = u'Retailer' sub = cm.extract_closure(body[m.start():], ur'<div\b', ur'</div>')[0] name_list = [ cm.reformat_addr(tmp) for tmp in re.findall(ur'<h3 itemprop="name">([^<>]+)</h3>', sub) ] entry[cm.name_e] = ', '.join(name_list) addr_list = [ cm.reformat_addr(tmp) for tmp in re.findall( ur'<span itemprop="street-address">([^<>]+)</span>', sub) ] city_addr = '' m1 = re.search(ur'<span itemprop="locality">([^<>]+)</span>', sub) if m1 is not None: city_addr = cm.html2plain(m1.group(1)).strip() m1 = re.search(ur'<span itemprop="postal-code">([^<>]+)</span>', sub) if m1 is not None: entry[cm.zip_code] = cm.html2plain(m1.group(1)).strip() if city_addr != '': city_addr = u'%s %s' % (entry[cm.zip_code], city_addr) if city_addr != '': addr_list.append(city_addr) m1 = re.search(ur'<span itemprop="region">([^<>]+)</span>', sub) if m1 is not None: addr_list.append(cm.html2plain(m1.group(1)).strip()) if entry[cm.province_e] == '': entry[cm.province_e] = cm.html2plain( m1.group(1)).strip().upper() entry[cm.addr_e] = ', '.join(addr_list) m1 = re.search(ur'<span itemprop="tel">([^<>]+)</span>', sub) if m1 is not None: entry[cm.tel] = m1.group(1).strip() m1 = re.search(ur'Fax\s*:\s*([^<>]+)', sub) if m1 is not None: entry[cm.fax] = m1.group(1).strip() m1 = 
re.search(ur'll=(-?\d+\.\d+),(-?\d+\.\d+)', sub) if m1 is not None: entry[cm.lat] = string.atof(m1.group(1)) entry[cm.lng] = string.atof(m1.group(2)) gs.field_sense(entry) ret = gs.addr_sense(entry[cm.addr_e], entry[cm.country_e]) if ret[1] is not None and entry[cm.province_e] == '': entry[cm.province_e] = ret[1] if ret[2] is not None and entry[cm.city_e] == '': entry[cm.city_e] = ret[2] gs.field_sense(entry) cm.dump( '(%s / %d) Found store: %s, %s (%s, %s)' % (data['brandname_e'], data['brand_id'], entry[cm.name_e], entry[cm.addr_e], entry[cm.country_e], entry[cm.continent_e]), log_name) db.insert_record(entry, 'stores') store_list.append(entry) return store_list
raw = json.loads(cm.get_data(url)) except Exception, e: cm.dump('Error in fetching stores: %s' % url, log_name) return () store_list = [] for s in raw: entry = cm.init_store_entry(data['brand_id'], data['brandname_e'], data['brandname_c']) entry[cm.addr_e] = cm.reformat_addr(s['address']) entry[cm.city_e] = cm.extract_city(s['city'])[0] entry[cm.country_e] = cm.html2plain(s['country']).strip().upper() entry[cm.name_e] = cm.html2plain(s['name']).strip().upper() entry[cm.hours] = cm.reformat_addr(s['hours']) try: entry[cm.lat] = string.atof(s['latitude']) except (ValueError, KeyError, TypeError) as e: cm.dump('Error in fetching lat-lng: %s' % str(e), log_name) try: entry[cm.lng] = string.atof(s['longitude']) except (ValueError, KeyError, TypeError) as e: cm.dump('Error in fetching lat-lng: %s' % str(e), log_name) entry[cm.store_class] = s['public_type'].strip() entry[cm.tel] = s['telf'].strip() entry[cm.zip_code] = s['zip'].strip() gs.field_sense(entry) ret = gs.addr_sense(entry[cm.addr_e], entry[cm.country_e]) if ret[1] is not None and entry[cm.province_e] == '': entry[cm.province_e] = ret[1] if ret[2] is not None and entry[cm.city_e] == '':
def _scaleCommand(self, strVal): if not self.fScaleCommand: return # convert scale val to float self.set(string.atof(strVal)) """
# --- Age-string option parsing (fragment; `a`, `fatal` and `Gb_showErr`
# are defined earlier in the file) ----------------------------------------
Gstr_ageInput = a
if Gstr_ageInput == "-x":
    fatal('ageSpec')
# An age string must be exactly 4 characters: 3 digits plus a unit letter.
if len(Gstr_ageInput) != 4:
    if Gb_showErr:
        print >> sys.stderr, "Invalid length of <ageString>. Must be of form 'XXXM' where"
        print >> sys.stderr, "'X' is a number and 'A' is either 'D', 'W', 'M', or 'Y'."
        print >> sys.stderr, "\n"
        print >> sys.stderr, "Examples of valid <ageStrings>: 034D, 002W, 007Y, etc."
        print >> sys.stderr, "\n"
    fatal('ageStringLen')
# Split into the numeric part and the unit factor.
Gstr_ageString = Gstr_ageInput[0:3]
Gstr_ageFact = Gstr_ageInput[3]
if Gstr_ageFact != 'D' and Gstr_ageFact != 'M' and Gstr_ageFact != 'Y' and Gstr_ageFact != 'W':
    fatal('ageStringF')
Gf_ageInput = string.atof(Gstr_ageString)
Gf = Gf_ageInput
# Convert to days: D=1, W=7, M=30.42 (mean month), Y=365.25 (Julian year).
Gf_ageInDays = {
    'D': lambda Gf: Gf * 1.0,
    'W': lambda Gf: Gf * 7.0,
    'M': lambda Gf: Gf * 30.42,
    'Y': lambda Gf: Gf * 365.25
}[Gstr_ageFact](Gf)
# Trailing comma suppresses the newline (Python 2 print statement).
print "%d" % Gf_ageInDays,
sys.exit(0)
# Scalar metrics pulled from the parsed results table (row -> metric).
ANC = first_result_lines[15][2]
NLS = first_result_lines[16][2]
# Echo the headline micro-averaged metrics.  NOTE(review): the
# precision/recall variables printed here are defined earlier in the
# enclosing scope -- not visible in this fragment.
print "precision_micro - " + str(precision_micro)
print "recall_micro - " + str(recall_micro)
print "p8_precision_micro - " + str(p8_precision_micro)
print "r8_recall_micro - " + str(r8_recall_micro)
print "p5_precision_micro - " + str(p5_precision_micro)
print "r5_recall_micro - " + str(r5_recall_micro)
print "mediate_size_micro - " + str(mediate_size_micro)
print "sample_cluster_num_micro - " + str(sample_cluster_num_micro)
print "ANC - " + str(ANC)
print "NLS - " + str(NLS)
# Accumulate plot series: rows 0..8, column 2 hold precision/recall pairs.
# NOTE(review): the _9/_8/_7/_6 suffixes presumably denote thresholds
# 0.9 .. 0.6 -- confirm against the code that writes first_result_lines.
y_value_precision_micro.append(
    string.atof(first_result_lines[0][2]))
y_value_recall_micro.append(string.atof(first_result_lines[1][2]))
y_value_9_precision_micro.append(
    string.atof(first_result_lines[2][2]))
y_value_9_recall_micro.append(string.atof(
    first_result_lines[3][2]))
y_value_8_precision_micro.append(
    string.atof(first_result_lines[4][2]))
y_value_8_recall_micro.append(string.atof(
    first_result_lines[5][2]))
y_value_7_precision_micro.append(
    string.atof(first_result_lines[6][2]))
y_value_7_recall_micro.append(string.atof(
    first_result_lines[7][2]))
y_value_6_precision_micro.append(
    string.atof(first_result_lines[8][2]))
sys.exit(-1) #for ix in argvs: # if '.vasp' in ix: # print ' Read ', ix, '. It is converted to ctrl file now.' ext2 = argvs[1] print ext2 # ext1=re.sub('POSCAR_','',ix) # ext2=re.sub('.vasp','',ext1) # break # elif '.cif' in ix: # ext1 = re.sub('.cif','',ix) # ext2 = re.sub('.vasp','',ext1) # break for ix in argvs: if '--alat=' in ix: alatin = string.atof(eval(re.sub('--alat=', '', ix))) else: alatin = None ratioa = 1.00000000000 titleinput = 'ctrls.' + ext2 vaspread = open(argvs[1]).read().split('\n') plat1 = vaspread[2].split() plat2 = vaspread[3].split() plat3 = vaspread[4].split() print ' ' + vaspread[7] alat_val = convctrl.vasp2ctrl_alat(vaspread) #unit if (alatin): ratioa = alatin / alat_val #conversion by given --alat=alatin all_atom, NBAS_val = convctrl.vasp2ctrl_atomcount(vaspread) #atom_list = convctrl.vasp2ctrl_atom(vaspread,alat_val,NBAS_val)
def stockCheckUpdate(request): try: if isTokenExpired(request): json2Dict = json.loads(request.body) identifier = json2Dict['identifier'] stocks = TakeStockOrder.objects.filter(identifier=identifier) if len(stocks) > 0: stock = stocks[0] else: stockUpdate = setStatus(300, {}) return HttpResponse(json.dumps(stockUpdate), content_type='application/json') if 'warehouseID' in json2Dict: if isValid(json2Dict['warehouseID']): warehouse_id = int(json2Dict['warehouseID']) stock.warehouse_id = warehouse_id if 'personID' in json2Dict: if isValid(json2Dict['personID']): person_id = int(json2Dict['personID']) stock.person_id = person_id if 'originator' in json2Dict: if isValid(json2Dict['originator']): originator = json2Dict['originator'] stock.originator = originator if 'financeReviewer' in json2Dict: if isValid(json2Dict['financeReviewer']): finance_reviewer = json2Dict['financeReviewer'] stock.finance_reviewer = finance_reviewer if 'managerReviewer' in json2Dict: if isValid(json2Dict['managerReviewer']): manager_reviewer = json2Dict['managerReviewer'] stock.manager_reviewer = manager_reviewer if 'summary' in json2Dict: if isValid(json2Dict['summary']): summary = json2Dict['summary'] stock.summary = summary if 'state' in json2Dict: if isValid(json2Dict['state']): state = int(json2Dict['state']) stock.state = state if 'printNum' in json2Dict: if isValid(json2Dict['printNum']): print_num = int(json2Dict['printNum']) stock.print_num = print_num stock.save() if 'stockCommodities' in json2Dict: stockCommodities = json2Dict['stockCommodities'] for stockCommodity in stockCommodities: if 'stockCommoditiyID' in stockCommodity: stockCommoditiyID = stockCommodity['stockCommoditiyID'] stockCommodityObjs = TakeStockOrderCommodity.objects.filter( id=stockCommoditiyID) if len(stockCommodityObjs) > 0: stockCommodityObj = stockCommodityObjs[0] else: stockUpdate = setStatus(300, {}) return HttpResponse( json.dumps(stockUpdate), content_type='application/json') else: continue if 
'commoditySpecificationID' in stockCommodity: if isValid(stockCommodity['commoditySpecificationID']): commodity_specification_id = int( stockCommodity['commoditySpecificationID']) stockCommodityObj.commodity_specification_id = commodity_specification_id if 'inventoryNum' in stockCommodity: if isValid(stockCommodity['inventoryNum']): inventory_num = int(stockCommodity['inventoryNum']) stockCommodityObj.inventory_num = inventory_num if 'realNum' in stockCommodity: if isValid(stockCommodity['realNum']): real_num = int(stockCommodity['realNum']) stockCommodityObj.real_num = real_num if 'profitOrLossNum' in stockCommodity: if isValid(stockCommodity['profitOrLossNum']): profit_or_loss_num = int( stockCommodity['profitOrLossNum']) stockCommodityObj.profit_or_loss_num = profit_or_loss_num if 'advanceMoney' in stockCommodity: if isValid(stockCommodity['advanceMoney']): unit_price = atof(stockCommodity['advanceMoney']) stockCommodityObj.unit_price = unit_price if 'money' in stockCommodity: if isValid(stockCommodity['money']): money = atof(stockCommodity['money']) stockCommodityObj.money = money stockCommodityObj.save() stockJSON = getStock(stock) stockCheckUpdate = setStatus(200, stockJSON) else: return notTokenExpired() except Exception, e: logErr = basic_log.Logger('error') logErr.log(traceback.format_exc()) transaction.rollback() stockCheckUpdate = setStatus(500, traceback.format_exc())
def fetch_stores(data): url = data['store_url'] param = { 'store_country': data['country_code'], 'store_city': data['city_code'] } try: body = cm.post_data(url, param) except Exception: cm.dump('Error in fetching stores: %s, %s' % (url, param), log_name) return [] store_list = [] for s in re.findall(ur'<marker\b([^<>]+)/\s*>', body): entry = cm.init_store_entry(data['brand_id'], data['brandname_e'], data['brandname_c']) m = re.search(ur'store_name="([^"]+)"', s) if m is not None: entry[cm.name_e] = cm.reformat_addr(m.group(1)) entry[cm.country_e] = data['country_code'] entry[cm.city_e] = data['city'] addr_list = [] for key in ['store_mall_name', 'store_address', 'store_zip_code']: m = re.search(ur'%s="([^"]+)"' % key, s) if m is not None: tmp = cm.reformat_addr(m.group(1)) if tmp != '': addr_list.append(tmp) entry[cm.addr_e] = ', '.join(addr_list) m = re.search(ur'store_zip_code="([^"]+)"', s) if m is not None: entry[cm.zip_code] = m.group(1).strip() m = re.search(ur'store_telephone="([^"]+)"', s) if m is not None: entry[cm.tel] = m.group(1).strip() m = re.search(ur'store_fax="([^"]+)"', s) if m is not None: entry[cm.fax] = m.group(1).strip() m = re.search(ur'store_email="([^"]+)"', s) if m is not None: entry[cm.email] = m.group(1).strip() m = re.search(ur'store_latitude="([^"]+)"', s) if m is not None: entry[cm.lat] = string.atof(m.group(1).strip()) m = re.search(ur'store_longitude="([^"]+)"', s) if m is not None: entry[cm.lng] = string.atof(m.group(1).strip()) gs.field_sense(entry) ret = gs.addr_sense(entry[cm.addr_e], entry[cm.country_e]) if ret[1] is not None and entry[cm.province_e] == '': entry[cm.province_e] = ret[1] gs.field_sense(entry) cm.dump( '(%s / %d) Found store: %s, %s (%s, %s)' % (data['brandname_e'], data['brand_id'], entry[cm.name_e], entry[cm.addr_e], entry[cm.country_e], entry[cm.continent_e]), log_name) db.insert_record(entry, 'stores') store_list.append(entry)
m3 = re.search(ur'<div class="address1">([^<>]+)', m2) val = cm.html2plain(m3.group(1)).strip() if m3 else '' if val != '': addr_list.append(val) m3 = re.search(ur'<div class="address2">([^<>]+)', m2) val = cm.html2plain(m3.group(1)).strip() if m3 else '' if val != '': addr_list.append(val) entry[cm.addr_e] = ', '.join(addr_list) m3 = re.search(ur'<div class="zipcode">([^<>]+)', m2) entry[cm.zip_code] = m3.group(1).strip() if m3 else '' try: m3 = re.search(ur'<div class="latitude">([^<>]+)', m2) entry[cm.lat] = string.atof(m3.group(1)) if m3 else '' except (ValueError, KeyError, TypeError) as e: cm.dump('Error in fetching lat: %s' % str(e), log_name) try: m3 = re.search(ur'<div class="longitude">([^<>]+)', m2) entry[cm.lng] = string.atof(m3.group(1)) if m3 else '' except (ValueError, KeyError, TypeError) as e: cm.dump('Error in fetching lng: %s' % str(e), log_name) m3 = re.search(ur'<a href="([^"]+)"\s*>DETAILS', m2) if m3: d = data.copy() d['url'] = m3.group(1) entry = fetch_store_details(d, entry) gs.field_sense(entry)
def fetch_stores(data): print '(%s/%d) Found city: %s' % (data['brandname_e'], data['brand_id'], data['city_e']) url = data['url'] try: html = cm.get_data(url) except Exception: print 'Error occured: %s' % url dump_data = {'level': 0, 'time': cm.format_time(), 'data': {'url': url}, 'brand_id': data['brand_id']} cm.dump(dump_data) return [] # 处理重定向 m = re.search('<h2>Object moved to <a href="(.+?)">', html) if m is not None: data['url'] = data['host'] + m.group(1) return fetch_countries(data) m = re.search('var\s+data\s*=\s*', html) if m is None: return [] sub, start, end = cm.extract_closure(html[m.end():], r'\[', r'\]') if end == 0: return [] store_list = [] for s in json.loads(sub): entry = cm.init_store_entry(data['brand_id'], data['brandname_e'], data['brandname_c']) name = s['Name'] if cm.is_cjk(name): entry[cm.name_c] = name else: entry[cm.name_e] = name entry[cm.addr_e] = cm.html2plain(s['Street']) entry[cm.city_e] = cm.extract_city(data['city_e'])[0] entry[cm.country_e] = data['country_e'] entry[cm.province_e] = data['province_e'] pat = re.compile(ur'tel[\.: ]*', re.I) entry[cm.tel] = re.sub(pat, '', s['Phone']).strip() pat = re.compile(ur'fax[\.: ]*', re.I) entry[cm.fax] = re.sub(pat, '', s['Fax']).strip() entry[cm.email] = s['Email'].strip() entry[cm.url] = s['Website'].strip() coord = s['LatLng'] if coord is not None and len(coord) >= 2: if coord[0] is not None: entry[cm.lat] = string.atof(coord[0]) if coord[1] is not None: entry[cm.lng] = string.atof(coord[1]) gs.field_sense(entry) ret = gs.addr_sense(entry[cm.addr_e], entry[cm.country_e]) if ret[1] is not None and entry[cm.province_e] == '': entry[cm.province_e] = ret[1] if ret[2] is not None and entry[cm.city_e] == '': entry[cm.city_e] = ret[2] gs.field_sense(entry) print '(%s/%d) Found store: %s, %s (%s, %s)' % (data['brandname_e'], data['brand_id'], entry[cm.name_e], entry[cm.addr_e], entry[cm.country_e], entry[cm.continent_e]) store_list.append(entry) db.insert_record(entry, 'stores') return 
store_list
def modularity(vector_dict, edge_dict):
    """Compute the Newman modularity Q of a partitioned weighted graph.

    vector_dict -- node -> community label
    edge_dict   -- node -> list of "neighbour:weight" strings (each
                   undirected edge is expected to appear in both
                   endpoints' lists)
    Returns Q = sum over communities of (sum_in/m - (sum_tot/m)**2),
    where m is the total weight summed over every adjacency entry.

    Fixes vs. the original: float() replaces string.atof() (removed in
    Python 3), range replaces xrange, and the inner degree loop no longer
    shadows the community loop variable.
    """
    Q = 0.0
    # m: total weight over all adjacency lists (each undirected edge
    # counted twice, matching the conventional 2m normalisation).
    m = 0
    for node in edge_dict:
        for link in edge_dict[node]:
            parts = link.strip().split(":")
            m += float(parts[1].strip())

    # community label -> list of member nodes
    community_dict = {}
    for node in vector_dict:
        community_dict.setdefault(vector_dict[node], []).append(node)

    # For each community accumulate the internal weight (sum_in) and the
    # total degree of its members (sum_tot).
    for comm in community_dict:
        sum_in = 0.0
        sum_tot = 0.0
        members = community_dict[comm]
        if len(members) == 1:
            # Single-node community: only a self-loop can contribute.
            weights = {}
            for link in edge_dict[members[0]]:
                parts = link.strip().split(":")
                weights[parts[0]] = parts[1]
            if members[0] in weights:
                sum_in = float(weights[members[0]])
        else:
            for j in range(len(members)):
                # neighbour -> weight for member j
                weights = {}
                for link in edge_dict[members[j]]:
                    parts = link.strip().split(":")
                    weights[parts[0]] = parts[1]
                for k in range(len(members)):
                    if members[k] in weights:
                        sum_in += float(weights[members[k]])
        for member in members:
            for link in edge_dict[member]:
                parts = link.strip().split(":")
                sum_tot += float(parts[1])
        Q += ((sum_in / m) - (sum_tot / m) * (sum_tot / m))
    return Q
def stockCheckInsert(request): try: if isTokenExpired(request): json2Dict = json.loads(request.body) take_stock_date = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())) identifier = 'TSO-' + take_stock_date[0:10] + '-' if 'warehouseID' in json2Dict: if isValid(json2Dict['warehouseID']): warehouse_id = int(json2Dict['warehouseID']) else: warehouse_id = 0 else: warehouse_id = 0 if 'personID' in json2Dict: if isValid(json2Dict['personID']): person_id = int(json2Dict['personID']) else: person_id = 0 else: person_id = 0 if 'originator' in json2Dict: if isValid(json2Dict['originator']): originator = json2Dict['originator'] else: originator = None else: warehouse_id = None if 'financeReviewer' in json2Dict: if isValid(json2Dict['financeReviewer']): finance_reviewer = json2Dict['financeReviewer'] else: finance_reviewer = None else: finance_reviewer = None if 'managerReviewer' in json2Dict: if isValid(json2Dict['managerReviewer']): manager_reviewer = json2Dict['managerReviewer'] else: manager_reviewer = None else: manager_reviewer = None if 'summary' in json2Dict: if isValid(json2Dict['summary']): summary = json2Dict['summary'] else: summary = None else: summary = None if 'state' in json2Dict: if isValid(json2Dict['state']): state = int(json2Dict['state']) else: state = 0 else: state = 0 if 'printNum' in json2Dict: if isValid(json2Dict['printNum']): print_num = int(json2Dict['printNum']) else: print_num = 0 else: print_num = 0 is_delete = 0 stock = TakeStockOrder(None, take_stock_date, identifier, warehouse_id, person_id, originator, finance_reviewer, manager_reviewer, summary, state, print_num, is_delete) stock.save() stock.identifier = identifier + str(stock.id) stock.save() if 'stockCommodities' in json2Dict: stockCommodities = json2Dict['stockCommodities'] for stockCommodity in stockCommodities: take_stock_order_id = stock.id if 'commoditySpecificationID' in stockCommodity: if isValid(stockCommodity['commoditySpecificationID']): commodity_specification_id 
= int( stockCommodity['commoditySpecificationID']) else: commodity_specification_id = 0 else: commodity_specification_id = 0 if 'inventoryNum' in stockCommodity: if isValid(stockCommodity['inventoryNum']): inventory_num = int(stockCommodity['inventoryNum']) else: inventory_num = 0 else: inventory_num = 0 if 'realNum' in stockCommodity: if isValid(stockCommodity['realNum']): real_num = int(stockCommodity['realNum']) else: real_num = 0 else: real_num = 0 if 'profitOrLossNum' in stockCommodity: if isValid(stockCommodity['profitOrLossNum']): profit_or_loss_num = int( stockCommodity['profitOrLossNum']) else: profit_or_loss_num = 0 else: profit_or_loss_num = 0 if 'unitPrice' in stockCommodity: if isValid(stockCommodity['unitPrice']): unit_price = atof(stockCommodity['unitPrice']) else: unit_price = 0 else: unit_price = 0 if 'money' in stockCommodity: if isValid(stockCommodity['money']): money = atof(stockCommodity['money']) else: money = 0 else: money = 0 stockCommoditiObj = TakeStockOrderCommodity( None, take_stock_order_id, commodity_specification_id, inventory_num, real_num, profit_or_loss_num, unit_price, money) stockCommoditiObj.save() stockJSON = getStock(stock) stockCheckInsert = setStatus(200, stockJSON) else: return notTokenExpired() except Exception, e: logErr = basic_log.Logger('error') logErr.log(traceback.format_exc()) transaction.rollback() stockCheckInsert = setStatus(500, traceback.format_exc())