def __init__(self, foldrname):
    """Wrap a folder on disk: record its path, glob every entry inside
    it, and build the PDB bookkeeping lists via ``self.__manifest__()``.
    """
    self.path = foldrname
    # every entry (files and sub-dirs) directly inside the folder
    self.files = g(os.path.join(foldrname, '*'))
    self.name = ""
    self.pdbs = []     # presumably filled by __manifest__() -- confirm there
    self.pdbs_nf = []  # presumably "not found" PDBs -- confirm in __manifest__()
    self.__manifest__()
def energies2dict(prefix):
    """Collect per-residue FoldX energy files into one pickled dict.

    Globs every ``energies_<res>_*.txt`` in the current directory, parses
    each with ``parse_energies`` and dumps ``{residue: energies}`` to
    ``<prefix>_energies.pckl``.
    """
    nrg = {}
    for f in g('energies_*.txt'):
        # the residue id sits between the first and second underscore
        res = f[f.find('_') + 1:]
        res = res[:res.find('_')]
        nrg[res] = parse_energies(f)
    # FIX: use a context manager -- the original opened the pickle file
    # inline and never closed the handle
    with open(prefix + '_energies.pckl', 'wb') as fh:
        p.dump(nrg, fh)
def energies2dict(prefix):
    """Collect per-residue FoldX energy files into one pickled dict.

    Globs every ``energies_<res>_*.txt`` in the current directory, parses
    each with ``parse_energies`` and dumps ``{residue: energies}`` to
    ``<prefix>_energies.pckl``.

    NOTE(review): this definition is duplicated elsewhere in the file;
    consider keeping a single copy.
    """
    nrg = {}
    for f in g('energies_*.txt'):
        # the residue id sits between the first and second underscore
        res = f[f.find('_') + 1:]
        res = res[:res.find('_')]
        nrg[res] = parse_energies(f)
    # FIX: use a context manager -- the original opened the pickle file
    # inline and never closed the handle
    with open(prefix + '_energies.pckl', 'wb') as fh:
        p.dump(nrg, fh)
def main(path):
    """Merge every ``*json`` file matching ``<path>*json`` into one dict
    keyed by bare file name (no directory, no extension), persist the
    merged dict with ``save_json`` and return it.
    """
    merged = {}
    for fname in g(path + "*json"):
        key = fname.split('/')[-1].split('.')[0]
        with open(fname) as handle:
            merged[key] = load(handle)
    save_json(merged, path)
    return merged
def glob(basepath, expression):
    """Properly globs even when the base path has globbing metacharacters.

    Each ``* [ ] ?`` character in *basepath* is wrapped in a one-character
    class (``[*]``, ``[[]``, ...) so it matches literally, then the escaped
    base is joined with *expression* and handed to the real glob.
    """
    escaped = "".join(
        "[" + ch + "]" if ch in "*[]?" else ch
        for ch in basepath
    )
    return g(os.path.join(escaped, expression))
def traverse(self):
    """Scan ``self.path`` for category sub-folders, classify each as a
    false positive ('fp' in the name) or true positive, and load every
    ``group*`` sub-folder into ``self.folders`` / ``self.pdbs``.
    """
    # Iterate over a copy: categories without a real directory are
    # removed from self.categories in place.
    for cat in [x for x in self.categories]:
        path = os.path.join(self.path, cat)
        if (not os.path.isdir(path)):
            self.categories.remove(cat)
        else:
            self.folders[(path, cat)] = []
    for path, cat in self.folders:
        groups = g(os.path.join(path, 'group*'))
        # category names containing 'fp' are treated as false positives
        if ('fp' in cat):
            self.falsepositives.append((path, cat))
        else:
            self.truepositives.append((path, cat))
        for group in groups:
            groupf = sabmarkFolder(group)
            self.folders[(path, cat)].append(groupf)
            self.pdbs.extend(groupf.pdbs)
def my_maps(settings, project):
    """Generate a JPG map for every shapefile of *project*.

    Derives map type (hazard / sensitivity / vulnerability), territorial
    level (canton / distrito) and a Spanish title from each shapefile
    name, then calls ``genMaps``.

    NOTE(review): if a shapefile name matches none of the type patterns,
    ``ptype`` (and for some names ``title``) is never assigned and the
    ``genMaps`` call would raise NameError -- confirm file naming upstream.
    """
    shpDir = settings["repository.path"] + "/maps/" + project + "/"
    shapefiles = g(shpDir + "*.shp")
    for shp in shapefiles:
        # try:
        field = ""
        tp, dt, yy, mm = "", "", "", ""
        shp_p = shp.split("/")[-1]  # bare file name
        print(">>>>>>>>" + shp_p)
        # territorial level from the file name
        if "canton" in shp_p:
            dt = "Cantonal"
        elif "distrito" in shp_p:
            dt = "Distrital"
        # map type and the attribute field used for colouring
        if "hazard" in shp_p:
            tp = "Amenaza"
            ptype = "hazard"
            field = "classCode"
        elif "sensitivity" in shp_p:
            tp = "Sensibilidad"
            ptype = "sensitivity"
            field = "classCode"
        elif "vulnerability" in shp_p:
            tp = "Vulnerabilidad"
            ptype = "vulnerability"
            field = "comCode"
        if "vulnerability" in shp_p or "hazard" in shp_p:
            # names end in -YYYYMM.shp: split out year and month
            yy = shp_p.split("-")[-1][:4]
            mm = shp_p.split("-")[-1][:-4][4:]
            months = [
                "", "Enero", "Febrero", "Marzo", "Abril", "Mayo", "Junio",
                "Julio", "Agosto", "Setiembre", "Octubre", "Noviembre",
                "Diciembre",
            ]
            title = "%s %s para %s del %s" % (tp, dt, months[int(mm)], yy)
        elif "sensitivity" in shp_p:
            title = "Sensibilidad %s" % dt
        # output JPG sits next to the shapefile (same name, .jpg)
        genMaps(shp, shp[:-3] + "jpg", title, field, ptype)
# Aperture-correction photometry script: build a SExtractor parameter
# file with 30 apertures (1..30 px), then for each v*_sci.fits science
# frame select a few bright, clean, point-like sources.
napers = 30   # number of apertures
a0 = 1        # first aperture (pixels)
sep = 1       # aperture step (pixels)
apers = range(a0, sep * napers + a0, sep)
# comma-separated aperture list for SExtractor's -PHOT_APERTURES
apers_string = ""
for aper in apers:
    apers_string += str(aper) + ","
apers_string = apers_string[:-1]  # drop trailing comma
os.chdir(workdir)
# write the SExtractor output-catalogue parameter file
parfilename = "apercorr.param"
paramfile = open(parfilename, "w")
paramfile.write("ALPHA_J2000\nDELTA_J2000\nXWIN_IMAGE\nYWIN_IMAGE\nMAG_AUTO \
\nFLUX_RADIUS\nMAG_APER(" + str(len(apers)) + ")\nMAGERR_APER(" + str(len(apers)) + ")\
\nNUMBER\nFLAGS\nELONGATION\nFWHM_IMAGE")
paramfile.close()
files = sorted(g("v*_sci.fits"))
skips = 1  # process every `skips`-th frame
for f in files[::skips]:
    catname = f[:10] + "apercorr.fits"
    #print " ".join([sextractor,f+"[0]","-PARAMETERS_NAME",parfilename,"-FILTER_NAME",det_filt,"-PHOT_APERTURES",apers_string,
    #"-CATALOG_TYPE","FITS_LDAC","-CATALOG_NAME",catname])
    #s.call([sextractor,f+"[0]","-PARAMETERS_NAME",parfilename,"-FILTER_NAME",det_filt,"-PHOT_APERTURES",apers_string,
    #"-CATALOG_TYPE","FITS_LDAC","-CATALOG_NAME",catname,"-BACKPHOTO_TYPE","LOCAL"])
    hdu = p.open(catname)   # NOTE(review): here `p` looks like a FITS reader -- confirm import
    data = hdu[2].data      # LDAC catalogues keep the table in HDU 2
    # quality cuts: resolved, sane magnitude, unflagged, point-like
    data = data[data["FLUX_RADIUS"] > 1]
    data = data[data["MAG_AUTO"] < 50]
    od = dc(data)           # keep a copy before the stricter cuts
    data = data[data["FLAGS"] == 0]
    data = data[data["FLUX_RADIUS"] < 2]
    # 3rd..5th brightest surviving sources (sorted by MAG_AUTO)
    selected_data = np.sort(data, order="MAG_AUTO")[2:5]
# -*- coding: utf-8 -*- # # 统计当前文件夹下的 js 一共有多少行 from glob import glob as g if __name__ == "__main__": print "lines: %d" % sum(len(open(fn).readlines()) for fn in g("*.js")) raw_input("")
def formSummary(self, fname):
    """Build the survey-summary dict for schema *fname*.

    Aggregates counts, respondent-sex percentages, farm types, land
    tenure, per-day submission counts, diversity indexes (via ``sdi``),
    species coverage, per-district sensitivity, and base64-encoded map
    images for the report template.

    FIX(review): literal identity comparisons (``is 0``, ``is "Y"`` ...)
    replaced with ``==`` -- ``is`` on int/str literals relies on CPython
    interning and is a SyntaxWarning since Python 3.8.
    """
    data = {}
    records = self.request.dbsession.execute(
        "select count(*) as total from %s.maintable" % fname).fetchone()
    data["count"] = records.total  # total number of surveys
    if data["count"] == 0:  # FIX: was `is 0`
        return data
    # distinct provinces / cantons / districts covered
    records = self.request.dbsession.execute(
        "select count(DISTINCT provincia) as prov,count(DISTINCT canton) as cant,count(DISTINCT distrito) as dist from %s.maintable"
        % fname).fetchone()
    data["prov"] = records.prov
    data["cant"] = records.cant
    data["dist"] = records.dist
    # percentage of male and female respondents
    records = self.request.dbsession.execute(
        "select count(respondentsex) as c, respondentsex as s from %s.maintable group by respondentsex"
        % fname).fetchall()
    for r in records:
        if r.s == "F":
            data["F"] = "%.1f" % (100 / int(data["count"]) * r.c)
        else:
            data["M"] = "%.1f" % (100 / int(data["count"]) * r.c)
    # farm types
    records = self.request.dbsession.execute(
        "SELECT i_d as id,grow_crops as gc,livestock_owners as lo FROM %s.maintable;"
        % fname)
    fa = 0  # crop farms
    fg = 0  # livestock farms
    fm = 0  # mixed farms
    na = 0  # none of the above
    for r in records:
        if r.gc == "Y" and r.lo == "Y":      # FIX: was `is "Y"`
            fm += 1
        elif r.gc == "N" and r.lo == "Y":
            fg += 1
        elif r.gc == "Y" and r.lo == "N":
            fa += 1
        else:
            na += 1
    data["fincas"] = {
        "fm": "%.1f" % (100 / int(data["count"]) * fm),
        "fg": "%.1f" % (100 / int(data["count"]) * fg),
        "fa": "%.1f" % (100 / int(data["count"]) * fa),
        "na": "%.1f" % (100 / int(data["count"]) * na)
    }
    # land tenure: borrowed / owned / rented percentages
    records = self.request.dbsession.execute(
        "select count(m.land_tenure) as ln,l.land_tenure_des as des from %s.maintable as m, %s.lkpland_tenure as l where m.land_tenure = l.land_tenure_cod group by m.land_tenure;"
        % (fname, fname))
    rent = 0
    data["own"] = []
    for r in records:
        if "prestada" in r.des:
            data["own"].append(
                ["Prestadas", "%.1f" % (100 / int(data["count"]) * r.ln)])
        elif "propia" in r.des:
            data["own"].append(
                ["Propias", "%.1f" % (100 / int(data["count"]) * r.ln)])
        else:
            rent += r.ln  # everything else counts as rented
    data["own"].append(
        ["Alquiladas", "%.1f" % (100 / int(data["count"]) * rent)])
    # submissions per day
    records = self.request.dbsession.execute(
        "select DATE(endtime_auto) mydate, count(*) as total from %s.maintable group by mydate order by mydate;"
        % fname)
    data["n_days"] = []
    data["days"] = []
    for r in records:
        data["n_days"].append(r.total)
        data["days"].append([r.mydate, r.total])
    data["max"] = max(data["n_days"])
    data["mean"] = "%.1f" % (np.mean(data["n_days"]))
    for m in data["days"]:
        if m[1] == data["max"]:
            data["max_date"] = m[0]
    # diversity index (sdi) per district and overall
    records = self.request.dbsession.execute(
        "select m.distrito as d,dc.distrito_des as de, m.crops as c, m.livestock as l from %s.maintable as m, %s.lkpdistrito as dc where m.distrito = dc.distrito_cod;"
        % (fname, fname))
    sdiDict = {}                          # per-district species counts
    sdiGen = {"A": {}, "P": {}, "G": {}}  # A=crops, P=livestock, G=global
    distD = {}                            # district code -> display name
    for i in records:
        if i.d not in distD.keys():
            # store code and name for each district
            distD[i.d] = i.de.title()
        if i.d not in sdiDict.keys():
            sdiDict[i.d] = {}
        if i.c is not None:
            for v in i.c.split(" "):
                if v not in sdiDict[i.d].keys():
                    sdiDict[i.d][v] = 0
                    sdiGen["A"][v] = 0
                sdiDict[i.d][v] += 1
                sdiGen["A"][v] += 1
        if i.l is not None:
            for v in i.l.split(" "):
                if v not in sdiDict[i.d].keys():
                    sdiDict[i.d][v] = 0
                    sdiGen["P"][v] = 0
                sdiDict[i.d][v] += 1
                sdiGen["P"][v] += 1
    data["sdi"] = []
    for sd in sdiDict:
        data["sdi"].append([distD[sd], sdi(sdiDict[sd])])
    data["sdi"] = sorted(data["sdi"], key=lambda x: int(x[1]), reverse=True)
    data["sdiA"] = "%.2f" % sdi(sdiGen["A"])
    data["sdiP"] = "%.2f" % sdi(sdiGen["P"])
    sdiGen["P"].update(sdiGen["A"])  # merge for the global index
    data["sdiG"] = "%.2f" % sdi(sdiGen["P"])
    # species observed vs species available in the lookup lists
    records = self.request.dbsession.execute(
        "SELECT (SELECT COUNT(DISTINCT livestock) FROM %s.maintable_msel_livestock) + (SELECT COUNT(DISTINCT crops) FROM %s.maintable_msel_crops) AS tot, (SELECT COUNT(lkpc.crop_list_cod) FROM %s.lkpcrop_list AS lkpc)+(SELECT COUNT(lkp.livestock_list_cod) FROM %s.lkplivestock_list AS lkp) AS ind;"
        % (fname, fname, fname, fname)).fetchone()
    data["spc"] = "%s / %s" % (str(records.tot), str(records.ind))
    # sensitivity index per district
    records = self.request.dbsession.execute(
        "SELECT idx.distrito AS cod,lkp.distrito_des AS des, idx.idxdist AS ind FROM %s.lkpdistrito as lkp,%s.idxdist_view as idx WHERE lkp.distrito_cod=idx.distrito ORDER BY ind ASC;"
        % (fname, fname))
    data["sensi"] = {}
    for row in records:
        data["sensi"][row.cod] = {
            "name": row.des,
            "ind": row.ind,
            "ndvi": row.ind
        }
    # p(data)
    # collect hazard / vulnerability map JPGs as data URIs for the report
    data["maps_v"] = []
    data["maps_h"] = []
    imgs = g(
        self.request.registry.settings.get("repository.path", "") +
        "maps/%s/*jpg" % fname)
    for i_path in imgs:
        if "sensitivity" not in i_path:
            with open(i_path, "rb") as image_file:
                encoded_string = base64.b64encode(image_file.read())
            title = ""
            tp, dt, yy, mm = "", "", "", ""
            shp_p = i_path.split("/")[-1]
            if "canton" in shp_p:
                dt = "Cantonal"
            elif "distrito" in shp_p:
                dt = "Distrital"
            if "hazard" in shp_p:
                tp = "Amenaza"
            elif "sensitivity" in shp_p:
                tp = "Sensibilidad"
            elif "vulnerability" in shp_p:
                tp = "Vulnerabilidad"
            # names end in -YYYYMM.jpg: split out year and month
            yy = shp_p.split("-")[-1][:4]
            mm = shp_p.split("-")[-1][:-4][4:]
            months = [
                "", "Enero", "Febrero", "Marzo", "Abril", "Mayo", "Junio",
                "Julio", "Agosto", "Setiembre", "Octubre", "Noviembre",
                "Diciembre"
            ]
            title = "%s %s para %s del %s" % (tp, dt, months[int(mm)], yy)
            val = [
                "data:image/jpg;base64,%s" % encoded_string.decode("utf-8"),
                i_path.split("/")[-1], title
            ]
            if tp == "Amenaza":
                data["maps_h"].append(val)
            if tp == "Vulnerabilidad":
                data["maps_v"].append(val)
    return data
def __init__(self, dataDir='./base', data_range=(1,300)):
    """Load the GGG image dataset into ``self.dataset``.

    Pairs ``.org.`` (input) and ``.cnv.`` (label) images found under the
    hard-coded ``./pics.mozaic`` directory, attaches a per-image tag
    vector from ``./linker_tags.json`` as a 4th channel, and appends
    ``(img, label)`` arrays for each index in ``data_range``.

    NOTE(review): *dataDir* is stored but the images are always read from
    ``./pics.mozaic`` -- confirm whether that is intended.
    """
    from glob import glob as g
    print("load GGG-dataset start")
    print("    from: %s"%dataDir)
    print("    range: [%d, %d)"%(data_range[0], data_range[1]))
    self.IN_CH = 4        # RGB + 1 meta-tag channel
    self.dataDir = dataDir
    self.dataset = []     # list of (img, label) array pairs
    files = g('./pics.mozaic/*')
    orgs = list(filter(lambda x:'.org.' in x, files))
    heads = []
    for org in orgs:
        # head = dot-fields 1..5 of the file name, directory prefix stripped
        heads.append( '.'.join(org.split('.')[1:6]).split('/').pop() )
        print( "filename", '.'.join(org.split('.')[1:6]).split('/').pop() )
    import json
    linker_tags = json.loads(open('./linker_tags.json').read())
    for k, v in linker_tags.items():
        print("linker-key", k)
    for i in range(data_range[0],data_range[1]):
        head = heads[i]
        """ headが入っているのが、jsonのキーにもなる """
        # (translation: the head string also serves as the JSON key)
        print(i, "/", data_range[1] - data_range[0], head)
        #print("head", head)
        tagvec = np.array(linker_tags[head + '.jpg']['vector'])
        #print("tagvec", tagvec)
        """ meta tag vecを可変にする """
        # (translation: make the meta tag vector size flexible)
        #tagvec = np.repeat(tagvec, 256)
        #tagvec = np.resize(tagvec, (286, 286))
        #np.set_printoptions(threshold=np.nan)
        tagvec = np.resize(tagvec, (256,256) )
        #tagvec = np.repeat(tagvec,256)
        """ 確実に見える素性にするために、255倍する """
        # (translation: multiply by 255 so the feature is clearly visible)
        tagvec *= 255
        img_path = list(filter(lambda x: head in x and '.org.' in x, files)).pop()
        lbl_path = list(filter(lambda x: head in x and '.cnv.' in x, files)).pop()
        img = Image.open(img_path)
        label = Image.open(lbl_path).convert('RGB')
        label_org = label
        w,h = img.size
        r = 286/min(w,h)
        # resize images so that min(w, h) == 286
        img = img.resize((int(r*w), int(r*h)), Image.BILINEAR)
        label = label.resize((int(r*w), int(r*h)), Image.NEAREST)
        # scale pixels to roughly [-1, 1), channels-first layout
        img = np.asarray(img).astype("f").transpose(2,0,1)/128.0-1.0
        lbl_ = np.array(label)
        # [0, 12)
        #frombuffer = Image.frombuffer(data=lbl_, size=(img.shape[1], img.shape[2]), mode='RGB')
        """ FIX : このパラメータで、メタ情報領域を生成する """
        # (translation: FIX -- this parameter generates the meta-information region)
        FIX = 1
        red, grn, blu = lbl_[:,:,0], lbl_[:,:,1], lbl_[:,:,2]
        """ ここで、スタンダライゼーションを行う """
        # (translation: standardization would be performed here -- disabled)
        #red = (red - red.mean())/red.std()
        #grn = (grn - grn.mean())/grn.std()
        #blu = (blu - blu.mean())/blu.std()
        #>>> inser = np.array([[11, 12], [21, 22]])
        #>>> zeros = np.zeros(9).reshape( (3,3) )
        #>>> inser
        #array([[11, 12],
        #[21, 22]])
        # >>> zeros
        #array([[ 0., 0., 0.],
        # [ 0., 0., 0.],
        #[ 0., 0., 0.]])
        # >>> zeros[:inser.shape[0], :inser.shape[1]] = inser
        # >>> zeros
        # array([[ 11., 12., 0.],
        # [ 21., 22., 0.],
        # [ 0., 0., 0.]])
        #print(tagvec)
        # preview tensor: RGB channels + tag vector in the 4th channel
        t = np.zeros((self.IN_CH, lbl_.shape[0], lbl_.shape[1])).astype('uint8')
        #print( "red", red.size, red.shape, red)
        t[0, :, :] = red
        t[1, :, :] = grn
        t[2, :, :] = blu
        t[3, :tagvec.shape[0], :tagvec.shape[1]] = tagvec
        #print(t[:,:,3])
        #w, h, _ = lbl_.shape
        #frombuffer = Image.frombuffer(data=t, size=(w, h), mode='RGB')
        #frombuffer.save('test.png')
        label = np.zeros((self.IN_CH, img.shape[1], img.shape[2])).astype("i")
        for j, e in [(0, red), (1, grn), (2, blu), (3, tagvec)]:
            if j == 3:
                # channel 3 only partially covered by the 256x256 tag block
                print("Enter meta execution")
                label[j,:tagvec.shape[0], :tagvec.shape[1]] = tagvec
            else:
                label[j,:] = e
        """
        for j in range(self.IN_CH):
            print("その他の処理です")
            label[j,:] = label_==j
        """
        self.dataset.append((img,label))
        # NOTE(review): overwritten every iteration -- only the last
        # preview image survives
        Image.fromarray(t, mode='RGB').save( 'out/preview/test.png' )
    """
    for i in range(data_range[0],data_range[1]):
        img = Image.open(dataDir+"/cmp_b%04d.jpg"%i)
        label = Image.open(dataDir+"/cmp_b%04d.png"%i)
        w,h = img.size
        r = 286/min(w,h)
        # resize images so that min(w, h) == 286
        img = img.resize((int(r*w), int(r*h)), Image.BILINEAR)
        label = label.resize((int(r*w), int(r*h)), Image.NEAREST)
        img = np.asarray(img).astype("f").transpose(2,0,1)/128.0-1.0
        label_ = np.asarray(label)-1 # [0, 12)
        label = np.zeros((12, img.shape[1], img.shape[2])).astype("i")
        for j in range(12):
            label[j,:] = label_==j
        self.dataset.append((img,label))
    """
    print("load GGG-dataset done")
def feed():
    r"""Render the feed page: the user's recent posts, friends' posts, and
    posts/comments/replies that mention the user.

    Fixes applied (review):
      * ``re.search(pattern.reply)`` -> ``re.search(pattern, reply)`` --
        the original raised AttributeError for every reply file.
      * reply regex group ``(\d)+`` -> ``(\d+)`` so multi-digit post ids
        are captured whole, matching the comment/mention handling.
      * regex literals are raw strings (no behaviour change).
    """
    checkLogin()
    me = whoAmI()
    postList = []
    commentList = []
    replyList = []
    # ---- Section for recent posts --------------------------------------
    recentPostList = []
    recentCommentList = []
    recentReplyList = []
    # only grab posts that are recent enough
    content_path = os.path.join(students_dir, me)
    myPosts = [
        content for content in os.listdir(content_path)
        if re.search(r"^\d+.txt$", content)
    ]
    for post in myPosts:
        filename = os.path.join(content_path, post)
        try:
            with open(filename) as f:
                data = f.read()
        except Exception as e:
            print("checking posts file", e)
        self = re.sub(r'\D', '', post)  # post id = the digits of the name
        time = re.search("time: (.*)", data).group(1)
        t = dt.strptime(time, "%Y-%m-%dT%H:%M:%S+0000")
        lastMonth = "01/10/2017"  # hard-coded "recent" cutoff
        t2 = dt.strptime(lastMonth, "%d/%m/%Y")
        if (t > t2):
            parent = None
            sender = re.search("from: (.*)", data).group(1)
            m = re.search("message: (.*)", data)
            message = "" if m is None else m.group(1)
            message = message.replace("\\n", "<br \>")
            message = addTags(message)
            recentPostList.append((time, parent, self, sender, message))
    # only grab comments that are from recent posts
    myComments = [
        content for content in os.listdir(content_path)
        if re.search(r"^\d+-\d+\.txt", content)
    ]
    parents = [tup[2] for tup in recentPostList]
    if (parents):
        for comment in myComments:
            p = re.search(r'^(\d+)-(\d+)\.txt$', comment)
            parent = p.group(1)
            self = p.group(2)
            if (parent not in parents):
                continue
            filename = os.path.join(content_path, comment)
            try:
                with open(filename) as f:
                    data = f.read()
            except Exception as e:
                print("checking comments file", e)
            sender = re.search("from: (.*)", data).group(1)
            time = re.search("time: (.*)", data).group(1)
            m = re.search("message: (.*)", data)
            message = "" if m is None else m.group(1)
            message = message.replace("\\n", "<br \>")
            message = addTags(message)
            recentCommentList.append((time, parent, self, sender, message))
    # only grab replies that are from comments that are from recent posts
    myReplies = [
        content for content in os.listdir(content_path)
        if re.search(r"^\d+-\d+-\d+\.txt$", content)
    ]
    parents = [tup[2] for tup in recentCommentList]
    if (parents):
        for reply in myReplies:
            # BUGFIX: was re.search('...'.reply) -- strings have no .reply
            r = re.search(r'^\d+-(\d+)-(\d+)\.txt$', reply)
            parent = r.group(1)
            self = r.group(2)
            if (parent not in parents):
                continue
            filename = os.path.join(content_path, reply)
            try:
                with open(filename) as f:
                    data = f.read()
            except Exception as e:
                print("checking comments file", e)
            sender = re.search("from: (.*)", data).group(1)
            time = re.search("time: (.*)", data).group(1)
            m = re.search("message: (.*)", data)
            message = "" if m is None else m.group(1)
            message = message.replace("\\n", "<br \>")
            message = addTags(message)
            recentReplyList.append((time, parent, self, sender, message))
    recentPostList.sort(reverse=True)
    recentCommentList.sort(reverse=False)
    recentReplyList.sort(reverse=False)
    # ---- Section for friend's posts ------------------------------------
    friendsPostList = []
    friendsCommentList = []
    friendsReplyList = []
    # get list of friends
    filename = os.path.join(students_dir, me, "student.txt")
    try:
        with open(filename) as f:
            data = f.read()
    except Exception as e:
        print("Getting friends list", e)
    friendStr = re.search("friends: (.*)", data).group(1)
    friendStr = friendStr.replace("(", '').replace(")", '').replace(",", '')
    friends = friendStr.split(" ")
    # for every single friend
    for friend in friends:
        content_path = os.path.join(students_dir, friend)
        friendPosts = [
            content for content in os.listdir(content_path)
            if re.search(r"^\d+.txt$", content)
        ]
        for post in friendPosts:
            filename = os.path.join(content_path, post)
            try:
                with open(filename) as f:
                    data = f.read()
            except Exception as e:
                print("checking posts file", e)
            self = re.sub(r'\D', '', post)
            self = self + "//" + friend  # unique identifier per friend
            time = re.search("time: (.*)", data).group(1)
            parent = None
            sender = re.search("from: (.*)", data).group(1)
            m = re.search("message: (.*)", data)
            message = "" if m is None else m.group(1)
            message = message.replace("\\n", "<br \>")
            message = addTags(message)
            friendsPostList.append((time, parent, self, sender, message))
        # only grab comments that are from parents
        friendComments = [
            content for content in os.listdir(content_path)
            if re.search(r"^\d+-\d+\.txt", content)
        ]
        parents = [tup[2] for tup in friendsPostList]
        if (parents):
            for comment in friendComments:
                p = re.search(r'^(\d+)-(\d+)\.txt$', comment)
                parent = p.group(1) + "//" + friend
                self = p.group(2) + "//" + friend
                if (parent not in parents):
                    continue
                filename = os.path.join(content_path, comment)
                try:
                    with open(filename) as f:
                        data = f.read()
                except Exception as e:
                    print("checking comments file", e)
                sender = re.search("from: (.*)", data).group(1)
                time = re.search("time: (.*)", data).group(1)
                m = re.search("message: (.*)", data)
                message = "" if m is None else m.group(1)
                message = message.replace("\\n", "<br \>")
                message = addTags(message)
                friendsCommentList.append(
                    (time, parent, self, sender, message))
        # only grab replies that are from comments from friends' posts
        friendReplies = [
            content for content in os.listdir(content_path)
            if re.search(r"^\d+-\d+-\d+\.txt$", content)
        ]
        parents = [tup[2] for tup in friendsCommentList]
        if (parents):
            for reply in friendReplies:
                # BUGFIX: (\d)+ captured only the last digit of the root id
                r = re.search(r'^(\d+)-(\d+)-(\d+)\.txt$', reply)
                root = r.group(1) + "//" + friend
                parent = r.group(2) + "//" + friend
                self = r.group(3) + "//" + friend
                if (parent not in parents):
                    continue
                filename = os.path.join(content_path, reply)
                try:
                    with open(filename) as f:
                        data = f.read()
                except Exception as e:
                    print("checking comments file", e)
                sender = re.search("from: (.*)", data).group(1)
                time = re.search("time: (.*)", data).group(1)
                m = re.search("message: (.*)", data)
                message = "" if m is None else m.group(1)
                message = message.replace("\\n", "<br \>")
                message = addTags(message)
                friendsReplyList.append(
                    (time, parent, self, sender, message, root))
        # end of friend loop
    friendsPostList.sort(reverse=True)
    friendsCommentList.sort(reverse=False)
    friendsReplyList.sort(reverse=False)
    # ---- Section for mentions ------------------------------------------
    mentionPostList = []
    mentionCommentList = []
    mentionReplyList = []
    # get a list of all files which have you tagged in it
    others = [student for student in os.listdir(students_dir) if student != me]
    mention = []
    for student in others:
        paths = os.path.join(students_dir, student)
        for file in os.listdir(paths):
            if (file == "student.txt" or file == "img.jpg"):
                continue
            # check to see if file mentions you
            filepath = os.path.join(paths, file)
            try:
                with open(filepath) as f:
                    data = f.read()
                m = re.search("message: (.*)", data)
                message = "" if m is None else m.group(1)
                if (message.find(me) > -1):
                    mention.append(filepath)
            except Exception as e:
                print("Accessing file", e)
    # populate set with all files (posts/comments/replies) relating to ME:
    # as long as ME is mentioned in it everything must be shown
    toGlob = set()
    for mentionPath in mention:
        mentionPath = re.sub(r"-\d*?-?\d*?\.txt$", '*', mentionPath)
        toGlob.add(mentionPath)
    mention = []
    for item in toGlob:
        for file in g(item):
            mention.append(file)
    # for each file, separate them into lists and store as tuple of info
    for path in mention:
        try:
            with open(path) as f:
                data = f.read()
        except Exception as e:
            print("Content file", e)
        content = re.sub(r".*?\/", '', path)
        person = re.search(r"(z\d+)", path).group(1)
        #print(path, content, person)
        sender = re.search(r"from: (\w+)", data).group(1)  # get zid
        time = re.search(r"time: ([\w:\+\-]+)", data).group(1)  # get Date
        m = re.search("message: (.*)", data)
        message = "" if (m is None) else m.group(1)  # get message
        message = message.replace("\\n", "<br \>")
        message = addTags(message)
        parent = None
        root = None
        self = re.search(r"-?(\d+)\.txt$", content).group(1) + "//" + person
        type = "post"
        # if its a comment then the parent number is left of slash
        comment = re.compile(r'^\d+\-\d+\.txt$')
        if comment.match(content):
            type = "comment"
            parent = re.search(r"^(\d+)\-\d+\.txt$", content).group(1)
            parent = parent + "//" + person
        # if its a reply then the parent number is between two slashes
        reply = re.compile(r'^\d+\-\d+\-\d+.txt$')
        if reply.match(content):
            type = "reply"
            m = re.search(r"(^\d+)\-(\d+)\-\d+.txt$", content)
            root = m.group(1) + "//" + person
            parent = m.group(2) + "//" + person
        element = (time, parent, self, sender, message, root)
        if (type == "post"):
            mentionPostList.append(element)
        elif (type == "comment"):
            mentionCommentList.append(element)
        elif (type == "reply"):
            mentionReplyList.append(element)
        else:
            print("ERROR IN CONTENT")
    mentionPostList.sort(reverse=True)
    mentionCommentList.sort(reverse=True)
    mentionReplyList.sort(reverse=True)
    # one JS group name per 10 friend posts (used by the template)
    num = (len(friendsPostList) // 10) + 1
    jsList = ["\"group" + str(i) + "\"" for i in range(num)]
    return render_template('feed.html',
                           recent=recentPostList,
                           recentComment=recentCommentList,
                           recentReply=recentReplyList,
                           friends=friendsPostList,
                           friendsComment=friendsCommentList,
                           friendsReply=friendsReplyList,
                           mention=mentionPostList,
                           mentionComment=mentionCommentList,
                           mentionReply=mentionReplyList,
                           jsList=jsList)
def results(action=None, search=None):
    """Handle the search form: look up users by name, or full-text search
    posts (together with their comments and replies).

    The search term comes from the POST form, not from the parameters.
    """
    checkLogin()
    results = []
    students = {}
    search = request.form.get('search', '')
    #print("Searched :","\'"+search+"\'")
    search = search.lower()
    if (request.form.get('users')):
        # get dictionary of all users -- student['name'] = zid
        for user in os.listdir(students_dir):
            students[getName(user).lower()] = user
        for key, value in students.items():
            # NOTE(review): the raw user input is used as a regex; an
            # invalid pattern would raise re.error here -- confirm intent.
            if (re.search(search, key)):
                results.append(value)
        return render_template('results.html', search=search,
                               results=results, action="user")
    if (request.form.get('posts')):
        postList = []
        commentList = []
        replyList = []
        found = []
        # return nothing for empty searches
        if (search == ''):
            return render_template('results.html', action="post",
                                   search=search, postList=postList,
                                   commentList=commentList,
                                   replyList=replyList)
        # return list(s) of posts + children with the search words
        # get all students
        for student in os.listdir(students_dir):
            paths = os.path.join(students_dir, student)
            for file in os.listdir(paths):
                if (file == "student.txt" or file == "img.jpg"):
                    continue
                # only top-level posts are scanned for the query
                if (re.search("^\d+\.txt", file) is None):
                    continue
                # check to see if message matches search query
                filepath = os.path.join(paths, file)
                try:
                    with open(filepath) as f:
                        data = f.read()
                    m = re.search("message: (.*)", data)
                    message = "" if m is None else m.group(1)
                    message = message.lower()
                    if (message.find(search) > -1):
                        found.append(filepath)
                except Exception as e:
                    print("Accessing file", e)
        # populate set with all files (posts/comments/replies) relating
        # to the matched posts
        toGlob = set()
        for foundPath in found:
            foundPath = re.sub("\.txt$", '*', foundPath)
            toGlob.add(foundPath)
        found = []
        for item in toGlob:
            for file in g(item):
                found.append(file)
        # for each file, separate them into lists and store as tuple of info
        for path in found:
            try:
                with open(path) as f:
                    data = f.read()
            except Exception as e:
                print("Content file", e)
            content = re.sub(".*?\/", '', path)
            person = re.search("(z\d+)", path).group(1)
            #print(path, content, person)
            sender = re.search("from: (\w+)", data).group(1)  # get zid
            time = re.search("time: ([\w:\+\-]+)", data).group(1)  # get Date
            m = re.search("message: (.*)", data)
            message = "" if (m is None) else m.group(1)  # get message
            message = message.replace("\\n", "<br \>")
            message = addTags(message)
            parent = None
            root = None
            self = re.search("-?(\d+)\.txt$", content).group(1) + "//" + person
            type = "post"
            # if its a comment then the parent number is left of slash
            comment = re.compile('^\d+\-\d+\.txt$')
            if comment.match(content):
                type = "comment"
                parent = re.search("^(\d+)\-\d+\.txt$", content).group(1)
                parent = parent + "//" + person
            # if its a reply then the parent number is between two slashes
            reply = re.compile('^\d+\-\d+\-\d+.txt$')
            if reply.match(content):
                type = "reply"
                m = re.search("(^\d+)\-(\d+)\-\d+.txt$", content)
                root = m.group(1) + "//" + person
                parent = m.group(2) + "//" + person
            element = (time, parent, self, sender, message, root)
            if (type == "post"):
                postList.append(element)
            elif (type == "comment"):
                commentList.append(element)
            elif (type == "reply"):
                replyList.append(element)
            else:
                print("ERROR IN CONTENT")
        postList.sort(reverse=True)
        commentList.sort(reverse=False)
        replyList.sort(reverse=False)
        return render_template('results.html', action="post", search=search,
                               postList=postList, commentList=commentList,
                               replyList=replyList)
def find_files(localtion):
    """Locate the two Excel workbooks inside the *localtion* folder.

    Returns ``[new_workbook, clima_workbook]`` where the first file name
    contains ``_new-<folder name>`` and the second contains ``VCLIMA``.
    Raises IndexError when either workbook is missing.
    """
    candidates = g(localtion + '/*xlsx')
    suffix = "_new-" + localtion.split('/')[-1]
    clima = [path for path in candidates if "VCLIMA" in path][0]
    new = [path for path in candidates if suffix in path][0]
    return [new, clima]
def FoldXrunner(prefix, pdb):
    """Run FoldX on *pdb*: RepairPDB first, then a PositionScan mutation
    for every homologous residue from ``parse_landmarks`` that does not
    already have an ``energies_*`` result file.  (Python 2 code.)
    """
    # get the PDB into a txt file
    cat = Popen('ls %s.pdb > PDB.txt' % (pdb), shell=True)
    cat.wait()
    # write the repair runner script
    repair = '''<TITLE>FOLDX_runscript;
<JOBSTART>#;
<PDBS>#;
<BATCH>PDB.txt;
<COMMANDS>FOLDX_commandfile;
<RepairPDB>#;
<END>#;
<OPTIONS>FOLDX_optionfile;
<Temperature>298;
<R>#;
<pH>7;
<IonStrength>0.050;
<water>-CRYSTAL;
<metal>-CRYSTAL;
<VdWDesign>2;
<OutPDB>true;
<pdb_hydrogens>false;
<END>#;
<JOBEND>#;
<ENDFILE>#;'''
    # NOTE(review): if run_repair_<pdb>.txt already exists, `fname` below
    # is never assigned and the repair Popen would raise NameError.
    if not os.path.isfile('run_repair_%s.txt' % (pdb)):
        fname = 'run_repair_%s.txt' % (pdb)
        fout = open(fname, 'w')
        fout.write(repair)
        fout.close()
    if not os.path.isfile('RepairPDB_%s.pdb' % (pdb)):
        #run repair
        runrep = Popen('FoldX -runfile %s' % (fname), shell=True,
                       stderr=PIPE, stdout=PIPE)
        o, e = runrep.communicate()
        print o, e
    if not os.path.isfile('nPDB.txt'):
        # get the result in a new pdb list file
        np = Popen('ls RepairPDB_%s.pdb > nPDB.txt' % (pdb), shell=True)
        np.wait()
    # get the homologous residues to be mutated
    eq, chain = parse_landmarks(prefix, pdb)
    #check if a partial run have been executed
    enf = g('energies_*')
    ms = []
    if enf:
        #check if all are done
        if len(enf) == len(eq):
            print 'It seems that its been completely done.'
            #continue
        else:
            print 'It seems that it is partially donde... completing.'
            # residues already processed: ids taken from the energy file names
            done = []
            for f in enf:
                res = f[f.find('_') + 1:]
                res = res[:res.find('_')]
                done.append(res)
            # queue only the residues still missing
            for k, v in eq.iteritems():
                if not k in done:
                    m = v + chain + k + 'a,'
                    ms.append(m)
                else:
                    continue
    else:
        # fresh run: queue every residue
        for k, v in eq.iteritems():
            m = v + chain + k + 'a,'
            ms.append(m)
    m = None
    if ms:
        for m in ms:
            # one PositionScan runfile per mutation (trailing comma dropped)
            mutate = '''<TITLE>FOLDX_runscript;
<JOBSTART>#;
<PDBS>#;
<BATCH>nPDB.txt;
<COMMANDS>FOLDX_commandfile;
<PositionScan>#,%s;
<END>#;
<OPTIONS>FOLDX_optionfile;
<Temperature>298;
<R>#;
<pH>7;
<IonStrength>0.050;
<water>-CRYSTAL;
<metal>-CRYSTAL;
<VdWDesign>2;
<OutPDB>false;
<pdb_hydrogens>false;
<complex_with_DNA> true;
<END>#;
<JOBEND>#;
<ENDFILE>#;''' % (m[:-1])
            finame = 'run_mutate%s.txt' % (pdb)
            outf = open(finame, 'w')
            outf.write(mutate)
            outf.close()
            mut = Popen('FoldX -runfile %s' % (finame), shell=True,
                        stdout=PIPE, stderr=PIPE)
            o, e = mut.communicate()
            print o, e
# Fetch missing ACS jref reference files: for each *_raw.fits frame,
# read the reference-file keywords from the primary header and wget any
# file not already present in the local jref directory.  (Python 2 code.)
from acstools import calacs
from glob import glob as g
from os.path import isfile
import subprocess as s

jref = "/raid/coma/jref/"                  # local reference-file cache
ljref = "ftp://ftp.stsci.edu/cdbs/jref/"   # remote source
# header keywords whose values name jref reference files
jref_keys = [
    'BPIXTAB', 'CCDTAB', 'OSCNTAB', 'BIASFILE', 'PCTETAB', 'FLSHFILE',
    'CRREJTAB', 'SHADFILE', 'PCTETAB', 'DRKCFILE', 'DARKFILE', 'PFLTFILE',
    'IDCTAB', 'DFLTFILE', 'LFLTFILE', 'PHOTTAB', 'DGEOFILE', 'MDRIZTAB',
    'CFLTFILE', 'SPOTTAB', 'GRAPHTAB', 'COMPTAB', 'IMPHTTAB', 'D2IMFILE',
    'NPOLFILE', 'SNKCFILE', 'MLINTAB'
]
rawfiles = g("*_raw.fits")
for r in rawfiles:
    hdu = p.open(r)  # NOTE(review): `p` looks like a FITS reader here -- confirm import
    print r
    head = hdu[0].header
    for j in jref_keys:
        f = head[j]
        if f == "N/A":
            print "no file for " + j
        else:
            # header values look like "jref$name.fits" -- keep the name part
            f = f.split("$")[1]
            if not isfile(jref + f):
                print f, " doesnt exist"
                s.call(["wget", ljref + f])
                s.call(["mv", f, jref + f])
            else:
# NOTE(review): the else-branch body continues beyond this excerpt.
a.set_xlim([-lim, lim]) a.set_yscale("log") a.legend() return maxes f, axarr = plt.subplots(2, 2, sharex=True, sharey=True, figsize=(10, 8)) scimaxes = np.array(plotting(0, dss)) newmaxes = np.array(plotting(1, ds)) plt.savefig("slices_offsets", density=300) diff = scimaxes - newmaxes s = np.argmin(abs(diff)) diffs = diff - abs(diff[s]) * (abs(diff[s]) / diff[s]) print diffs flcs = g("*_flc.fits") ''' for f in flcs: j=0 hdu=p.open(f) head=hdu[0].header exptime=head["EXPTIME"] fig,axarr=plt.subplots(2,3,figsize=(10,5)) vmin=10 vmax=40 bins=np.linspace(vmin,vmax,100) xbins=np.linspace(vmin,vmax,10000) maxes=[] for i,a in enumerate(axarr[:,0]): im=a.matshow(hdu[1+3*i].data,vmin=vmin,vmax=vmax) splits=np.array_split(hdu[1+3*i].data,2,axis=1)
def FoldXrunner(prefix, pdb):
    """Repair *pdb* with FoldX, then PositionScan-mutate each homologous
    residue (from ``parse_landmarks``) lacking an ``energies_*`` file.

    Python 2 code; duplicated elsewhere in this file -- consider keeping
    only one copy.
    """
    # get the PDB into a txt file
    cat = Popen('ls %s.pdb > PDB.txt' % (pdb), shell=True)
    cat.wait()
    # write the repair runner script
    repair = '''<TITLE>FOLDX_runscript;
<JOBSTART>#;
<PDBS>#;
<BATCH>PDB.txt;
<COMMANDS>FOLDX_commandfile;
<RepairPDB>#;
<END>#;
<OPTIONS>FOLDX_optionfile;
<Temperature>298;
<R>#;
<pH>7;
<IonStrength>0.050;
<water>-CRYSTAL;
<metal>-CRYSTAL;
<VdWDesign>2;
<OutPDB>true;
<pdb_hydrogens>false;
<END>#;
<JOBEND>#;
<ENDFILE>#;'''
    # NOTE(review): when the runfile already exists `fname` stays unbound,
    # so a later repair attempt would raise NameError.
    if not os.path.isfile('run_repair_%s.txt' % (pdb)):
        fname = 'run_repair_%s.txt' % (pdb)
        fout = open(fname, 'w')
        fout.write(repair)
        fout.close()
    if not os.path.isfile('RepairPDB_%s.pdb' % (pdb)):
        #run repair
        runrep = Popen('FoldX -runfile %s' % (fname), shell=True,
                       stderr=PIPE, stdout=PIPE)
        o, e = runrep.communicate()
        print o, e
    if not os.path.isfile('nPDB.txt'):
        # get the result in a new pdb list file
        np = Popen('ls RepairPDB_%s.pdb > nPDB.txt' % (pdb), shell=True)
        np.wait()
    # get the homologous residues to be mutated
    eq, chain = parse_landmarks(prefix, pdb)
    #check if a partial run have been executed
    enf = g('energies_*')
    ms = []
    if enf:
        #check if all are done
        if len(enf) == len(eq):
            print 'It seems that its been completely done.'
            #continue
        else:
            print 'It seems that it is partially donde... completing.'
            # residue ids already finished, parsed from the file names
            done = []
            for f in enf:
                res = f[f.find('_') + 1:]
                res = res[:res.find('_')]
                done.append(res)
            # queue only residues not yet done
            for k, v in eq.iteritems():
                if not k in done:
                    m = v + chain + k + 'a,'
                    ms.append(m)
                else:
                    continue
    else:
        # fresh run: queue everything
        for k, v in eq.iteritems():
            m = v + chain + k + 'a,'
            ms.append(m)
    m = None
    if ms:
        for m in ms:
            # one PositionScan runfile per mutation (strip trailing comma)
            mutate = '''<TITLE>FOLDX_runscript;
<JOBSTART>#;
<PDBS>#;
<BATCH>nPDB.txt;
<COMMANDS>FOLDX_commandfile;
<PositionScan>#,%s;
<END>#;
<OPTIONS>FOLDX_optionfile;
<Temperature>298;
<R>#;
<pH>7;
<IonStrength>0.050;
<water>-CRYSTAL;
<metal>-CRYSTAL;
<VdWDesign>2;
<OutPDB>false;
<pdb_hydrogens>false;
<complex_with_DNA> true;
<END>#;
<JOBEND>#;
<ENDFILE>#;''' % (m[:-1])
            finame = 'run_mutate%s.txt' % (pdb)
            outf = open(finame, 'w')
            outf.write(mutate)
            outf.close()
            mut = Popen('FoldX -runfile %s' % (finame), shell=True,
                        stdout=PIPE, stderr=PIPE)
            o, e = mut.communicate()
            print o, e
def quit():
    """Close the null device handle, pause so the console stays visible,
    and exit the script.  (Shadows the builtin ``quit`` intentionally.)
    """
    null.close()
    print("Done.\n")
    input("Press Enter to exit. . .")
    exit(0)


# Get modpack zip
while True:
    modpack_name = input(
        "\nWhich pack should be converted?\n(Type 'EUI' to just convert EUI)\n"
    )
    if modpack_name == "EUI":
        # EUI-only mode: skip modpack selection entirely
        eui_only = True
        break
    # `j` is presumably os.path.join -- confirm the alias at file top
    modpack_zips = g(j(vanilla_packs_path, modpack_name + ".*"))
    if len(modpack_zips) > 0:
        modpack_zip = modpack_zips[0]
        break
    print("This file doesn't exist, try again.")

# Remove previous modpack
print("Removing previous modpack leftovers...")
if os.path.isdir(modpack_path):
    shutil.rmtree(modpack_path)
if os.path.isdir(eui_path):
    shutil.rmtree(eui_path)

# Compile EUI with colored unlocked citizens
if not os.path.isfile(modded_eui_zip_path):
    print("Creating colored unlocked Citizens EUI...")
# Modpack converter script state and interactive setup.
vanilla_ui_file_names = []
unit_panel_addon_file_names = []
load_tags = {}

# Change working dir to base path
os.chdir(base_path)

# Get modpack zip
while True:
    modpack_name = input(
        "\nWhich pack should be converted?\n(Type 'EUI' to just convert EUI)\n"
    )
    if modpack_name == "EUI":
        # EUI-only mode: skip modpack selection entirely
        eui_only = True
        break
    # `j` is presumably os.path.join -- confirm the alias at file top
    modpack_zips = g(j(vanilla_packs_path, modpack_name + ".*"))
    if len(modpack_zips) > 0:
        modpack_zip = modpack_zips[0]
        break
    print("This file doesn't exist, try again.")
print("")

# Remove previous modpack
print("Removing previous modpack and EUI leftovers...")
if os.path.isdir(modpack_path):
    shutil.rmtree(modpack_path)
if os.path.isdir(eui_folder_path):
    shutil.rmtree(eui_folder_path)

# Compile EUI with colored unlocked citizens
if not os.path.isfile(modded_eui_zip_path):
# NOTE(review): the body of this conditional continues beyond this excerpt.
def file_exist(fname):
    """Return True when *fname* exists according to a glob lookup.

    Globs *fname* itself and checks the path is among the matches, which
    preserves the original behaviour for names containing glob
    metacharacters (they are expanded, so the literal name must appear in
    the expansion).
    """
    # FIX: return the boolean directly instead of `if ...: return True
    # else: return False`
    return fname in g(fname)
"odktype": "select one" }, "crop_irrigated": { "odktype": "select one" }, "fecha_siembra": { "odktype": "date" }, "rowuuid": { "odktype": "" }, } }, } tree = etree.parse(g("*.xml")[0]) root = tree.getroot() err = [] for k in myTables.keys(): table = root.find(".//table[@name='" + k + "']") if table is not None: t_fields = list(myTables[k]["fields"].keys()) for i in table.iterchildren(): if i.tag == "field": if i.attrib["name"] in t_fields: t_fields.remove(i.attrib["name"]) odktype = myTables[k]["fields"][ i.attrib["name"]]["odktype"] if str(i.attrib["odktype"]) != str(odktype):