def test_two_char_chunks(self):
    """Slash alternatives that are two characters long expand in place."""
    # TODO: which of these is the desired result?
    cases = (
        ('5-HT2A/2B', {'5-HT2A', '5-HT2B'}),     # grep -P "\d[a-zA-Z]\/\d" ./fails.txt
        ('CDKN1A/2A', {'CDKN1A', 'CDKN2A'}),
        ('AdipoR1/R2', {'AdipoR1', 'AdipoR2'}),  # grep -P "\d\/[a-zA-Z]\d" ./successes.txt
    )
    for raw, wanted in cases:
        self.assertEqual(wanted, set(expand.expand(raw)))
def test_complex_chunks(self):
    """Mixed digit/letter suffixes, with and without a hyphen."""
    # TODO: the item actually in the data is Wnt-1/3a
    self.assertEqual({'Wnt1', 'Wnt3a'}, set(expand.expand('Wnt1/3a')))
    # TODO: can we delete the next one? It's not actually in the data.
    self.assertEqual({'Wnt1a', 'Wnt3'}, set(expand.expand('Wnt1a/3')))
    # TODO: here's what we're actually getting for the hyphenated forms:
    for raw, wanted in (('Wnt-1/3a', {'Wnt-1', 'Wnt-3a'}),
                        ('Wnt-1a/3', {'Wnt-1a', 'Wnt-3'})):
        self.assertEqual(set(expand.expand(raw)), wanted)
def a_star_search(dis_map, time_map, start, end):
    """A* search from `start` to `end`.

    `time_map` supplies edge costs (None marks an impassable edge),
    `dis_map[x][end]` is the heuristic, and expand() yields neighbour
    names.  Returns the list of node names from start to end, or []
    when the open set empties without reaching `end`.
    """
    path = []
    open_set = []    # min-heap of Node (ordering comes from Node's comparisons)
    closed_set = []  # NOTE(review): only membership is needed; heappush is overkill here
    heappush(open_set, Node(start))
    while open_set:
        q = heappop(open_set)
        if q.name == end:
            # Reconstruct the route by walking parent links back to start.
            while q is not None:
                path.insert(0, q.name)
                q = q.parent
            return path
        for x in expand(q.name, time_map):
            # None marks a missing/impassable edge in time_map.
            if time_map[q.name][x] is not None:
                g_score = time_map[q.name][x] + q.g_score
                h_score = dis_map[x][end]
                y = Node(x, q, g_score, h_score)
                if y in closed_set:
                    continue
                if y in open_set:
                    if y < open_set[open_set.index(y)]:
                        # NOTE(review): replacing an element in place breaks the
                        # heap invariant; a heapify(open_set) appears to be missing.
                        open_set[open_set.index(y)] = y
                        continue
                    else:
                        continue
                heappush(open_set, y)
        heappush(closed_set, q)
    return path
def __init__(self):
    """Load the YAML API spec and keep JSON renderings of it.

    Assumes self.YAML is a class-level file path -- TODO confirm.
    """
    with open(self.YAML) as f:
        # Round-trip the parsed YAML through JSON once to normalise it.
        # NOTE(review): yaml.load without an explicit Loader is unsafe on
        # untrusted input -- consider yaml.safe_load.
        yaml_file = dumps(yaml.load(f))
    # Spec exactly as found in the file, re-serialised to JSON.
    self.api_origin_spec = dumps(yaml.load(yaml_file))
    # Spec after expand() -- presumably reference resolution; verify against expand().
    self.api_spec = dumps(expand(yaml.load(yaml_file)))
def modified_expand(self, S, depth):
    """Breadth-first expansion from state S, at most `depth` levels deep.

    Marks states visited, records g-scores (self.g) and back-pointers
    (self.cameFrom), and returns the final frontier as a deque.  If the
    goal state '012345678' is dequeued, it is pushed back onto the
    frontier and that frontier is returned immediately.
    """
    self.visited[S] = True
    temp1 = deque([S])   # current frontier
    while depth > 0:
        temp2 = []       # next frontier, collected while draining temp1
        while len(temp1):
            C = temp1.popleft()
            if C == '012345678':
                # Goal reached: keep it on the frontier so the caller sees it.
                temp1.appendleft(C)
                return temp1
            # Keep neighbours that are unvisited or reachable more cheaply.
            temp3 = filter(
                lambda x: not self.visited[x] or self.g[x] > self.g[C] + 1,
                expand(C))
            temp3 = list(temp3)
            for t in temp3:
                self.g[t] = self.g[C] + 1
                self.cameFrom[t] = C
                self.visited[t] = True
            temp2.extend(temp3)
        depth -= 1
        temp1 = deque(temp2)
    return temp1
def get_successors(dis_map, time_map, start, end):
    """Return Node successors of `start`, each created with infinite g/h cost.

    `dis_map` and `end` are accepted for interface symmetry but unused here.
    """
    neighbour_names = expand(start.name, time_map)
    return [Node(name, float('inf'), float('inf'), start)
            for name in neighbour_names]
def main(label_path: str, option_sheet_path=None, question_sheet_path=None):
    """Parse a label file, expand it, and write pretty-printed and JSON copies.

    Args:
        label_path: path of the label file to parse.
        option_sheet_path: optional option sheet, forwarded to parse_option_sheet.
        question_sheet_path: optional question sheet, forwarded to parse_question_sheet.

    Side effects: writes `<name>_pprint<ext>` and `<name>.json` next to label_path.
    """
    ori_dict = parse(label_path)
    option_list = parse_option_sheet(option_sheet_path)
    ques_list = parse_question_sheet(question_sheet_path)
    exp_dict = expand(ori_dict, option_list, ques_list)
    name, ext = os.path.splitext(label_path)
    exp_label_path = name + '_pprint' + ext
    json_path = name + '.json'
    pprint_outfile(exp_dict, exp_label_path)
    # Fix: the original passed open(json_path, 'w') straight into json.dump,
    # leaking the file handle; the context manager guarantees it is closed.
    with open(json_path, 'w') as json_file:
        json.dump(exp_dict, json_file, indent=4)
def a_star_search(dis_map, time_map, start, end):
    """Search wrapper: delegates to searching()/queue_iteration() and picks
    the cheapest of the collected finalPaths (ties broken by last node name).

    Returns [] when start or end is unknown, [end] when start == end.
    """
    path = []
    expandedNode = []  # filled by the helpers with every node they expand
    if end not in dis_map or start not in dis_map:
        return path
    elif start == end:
        path.append(end)
    else:
        finalPaths = []
        # Seed the work queue with the first layer of candidate paths,
        # ordered by (cost, name of last node on the path).
        q = deque(
            sorted(searching(time_map, start, dis_map, end, finalPaths, expandedNode),
                   key=lambda k: (k['Cost'], k['Path'][-1])))
        queue_iteration(q, time_map, dis_map, end, finalPaths, expandedNode)
        if finalPaths:
            path = sorted(finalPaths, key=lambda k: (k['Cost'], k['Path'][-1]))[0]["Path"]
        # Presumably re-invoked for expansion bookkeeping -- TODO confirm intent.
        for each in expandedNode:
            expand(each, dis_map)
    return path
def reconstruction(input_arr):
    """Rebuild an image from its pyramid levels (Laplacian-style collapse).

    Starts from the smallest level, repeatedly expands with ec.expand and
    adds the next level's detail via addition().  Relies on module-level
    globals divide_factor, kernel and k -- assumed defined at file scope.
    Python 2 code (uses xrange).
    """
    input_arr = input_arr*divide_factor
    input_arr = input_arr.tolist()
    reconstruction_arr = []
    # The last (smallest) pyramid level seeds the reconstruction.
    working_image = input_arr[len(input_arr)-1]
    for i in xrange(len(input_arr)-2,-1,-1):
        expanded = ec.expand(working_image,kernel,k)
        # cv2.imshow(str(i),expanded)
        working_image = addition(input_arr[i],expanded)
        reconstruction_arr.append(working_image)
    return reconstruction_arr
def catch_all(path):
    """Flask proxy route in front of an Elasticsearch node.

    POSTs are sent twice on purpose: the first response feeds expand() to
    rewrite the query body, and the rewritten body is re-posted.  First-page
    searches ('from' == '0') are logged with their query text.
    """
    target = 'http://compute-1-34:9200/' + path
    method = request.method
    data = request.data
    if 'from' in request.args and request.args['from'] == '0':
        # Assumes the body is a multi_match query -- raises KeyError otherwise.
        logfile.write("{0}\t{1}\n".format(
            str(datetime.now()), json.loads(data)['query']['multi_match']['query']))
    if method == 'POST':
        content = requests.post(target, data = data, params = request.args).content
        data = expand(data, content)
        content = requests.post(target, data = data, params = request.args).content
        res = make_response(content)
    elif method == 'GET':
        res = make_response(requests.get(target, data = data, params = request.args).content)
    else:
        # Any other verb gets an empty 200 response.
        res = make_response()
    return res
f = open(file) for line in f.readlines(): if len(line) > 0 and line[0] != '#': hs = line.split() if len(hs) > 0 and hs[0].strip(): hosts.append( (hs[0], "", opts.user) ) if len(args) < 1: for host in hosts: print host[0] print "\n%s hosts" % (len(hosts)) sys.exit(0) cmdline = " ".join(args) elif opts.group_names: hosts = [] for group_name in opts.group_names: groups = expand(group_name) for group in groups: if cfg.has_key('group') and cfg['group'].has_key(group): for host in cfg['group'][group].split(','): hosts.append( (host.strip(), "", opts.user) ) elif os.path.exists(_GETHOST.split(' ')[0]): for host in os.popen("%s %s" % (_GETHOST, group)).read().split(): hosts.append( (host, "", opts.user) ) if len(args) < 1: for host in hosts: print host[0] print "\n%s hosts" % (len(hosts)) sys.exit(0) cmdline = " ".join(args) else: if len(args) < 2:
def test_digit_separators(self):
    """Commas (with or without spaces) separate alternatives after a prefix."""
    observed = set(expand.expand('TLR1,2, 5'))
    self.assertEqual(observed, {'TLR2', 'TLR1', 'TLR5'})
] #kernel_multiplier k = 1 / 256.0 divide_factor = 1 current_image = image reduced_arr = [image] while (current_image.shape[0] > 20): current_image = rc.reduce(current_image, kernel, k) reduced_arr.append(current_image) expanded_arr = [] for j in xrange(1, len(reduced_arr)): expanded_image = ec.expand(reduced_arr[j], kernel, k) expanded_arr.append(expanded_image) def difference(G_image1, expanded_image2): rows1, cols1, _ = G_image1.shape rows2, cols2, _ = expanded_image2.shape extra_rows = rows2 - rows1 extra_cols = cols2 - cols1 if (extra_rows > 0 and extra_cols > 0): new_expanded_image2 = np.array( expanded_image2[:-extra_rows, :-extra_cols]) elif (extra_rows > 0 and extra_cols == 0): new_expanded_image2 = np.array(expanded_image2[:-extra_rows, :]) elif (extra_rows == 0 and extra_cols > 0): new_expanded_image2 = np.array(expanded_image2[:, :-extra_cols])
def save_ascii(self,pha=True,bra=True,tra=True,mra=True,hk=True,cmd=True):
    """Extract data and save it to ascii files.

    Each flag selects whether that product's file(s) are written:
      pha -> PHA data; extensions ph0..ph3 hold the 4 priority ranges.
      bra -> Base rates: all sectors for all 4 PHA priorities.
      tra -> Trigger rates: start (fsr) and ssd trigger rates, double (dcr)
             and triple (tcr) coincidences, plus proton/helium rates from
             the auxiliary channel (one rate per sector and epq step).
      mra -> Matrix rates filled by onboard classification of PHA data into
             M/MpQ boxes (NOTE: no idea how good any of these boxes work!).
      hk  -> Housekeeping: 25 entries per epq step; not all identified.
      cmd -> 11 commandable values per 12-minute cycle, almost constant; the
             only relevant one so far is the PAPS level, which changed in
             2000 (so separate calibrations are needed before/after).
    Returns False (writing nothing) if the directory structure is missing.
    """
    if not self.checkdir_structure(pha=pha,bra=bra,tra=tra,mra=mra,hk=hk,cmd=cmd):
        return False
    # --- open the output files and write their two header lines ---
    if pha:
        phaout = []
        for rng in range(4):
            if rng in self.prange:
                phaout.append(open(self.wpath+"/%.4i/pha/%.3i.ph%.1i"%(self.year,self.doy,rng),"w"))
                phaout[-1].write("year\ttime\t\tdoy\t\teoq\ttof\tesd\tdid\t\trng\t\tsec\t\tmas\tmoq\ttwgt\t\tswgt\n")
                phaout[-1].write("[year]\t[secs1970]\t[DayOfYear]\t[step]\t[CH]\t[CH]\t[detector]\t[priority]\t[sector]\t[CH]\t[CH]\t[totalweight]\t[sectorweight]\n")
            else:
                # Placeholder keeps list index == priority range number.
                phaout.append(False)
    if bra:
        braout = open(self.wpath+"/%.4i/bra/%.3i.bra"%(self.year,self.doy),"w")
        braout.write("year\ttime\t\tdoy\t\teoq\trng\tbr0\tbr1\tbr2\tbr3\tbr4\tbr5\tbr6\tsbr7\tbrall\n")
        braout.write("[year]\t[secs1970]\t[DayOfYear]\t[step]\t[range]\t[sec0]\t[sec1]\t[sec2]\t[sec3]\t[sec4]\t[sec5]\t[sec6]\t[sec7]\t[allsecs]\n")
    if tra:
        traout = open(self.wpath+"/%.4i/tra/%.3i.tra"%(self.year,self.doy),"w")
        traout.write("year\ttime\t\tdoy\t\teoq\tsec\tHeaux\tHaux\tssd\ttcr\tdcr\tfsr\tepq\tepq_aux\n")
        traout.write("[year]\t[secs1970]\t[DayOfYear]\t[step]\t[sect]\t[/1.5s]\t[/1.5s]\t[/1.5s]\t[/1.5s]\t[/1.5s]\t[/1.5s]\t[keV/e]\t[keV/e]\n")
    if mra:
        mraout = open(self.wpath+"/%.4i/mra/%.3i.mra"%(self.year,self.doy),"w")
        mraout.write("year\ttime\t\tdoy\t\teoq\tsec\tmr0\tmr1\tmr2\tmr3\tmr4\tmr5\tmr6\tmr7\n")
        mraout.write("[year]\t[secs1970]\t[DayOfYear]\t[step]\t[sect]\t[H+]\t[He2+]\t[He+]\t[O7+]\t[O6+]\t[Fe]\t[Si]\t[Err]\t'Sector 8 means sum of all sectors'\n")
    if hk:
        hkout = open(self.wpath+"/%.4i/hk/%.3i.hk"%(self.year,self.doy),"w")
        hkout.write("year\ttime\t\tdoy\t\teoq\tGV28\tGV20\tGV10\tGV5P\tGV5M\tSPR1\tGTE1\tGTE2\tGPI1\tGPI2\tGI5P\tGI20\tGPAV\tSPR2\tGEPQ\tSPR3\tPAC7\tPTE1\tPTE2\tPV5P\tPI5P\tPV5M\tPI5M\tPMCP\tPGAI\n")
        hkout.write("[year]\t[secs1970]\t[DayOfYear]\t[step]\t[Byte]\t[Byte]\t[Byte]\t[Byte]\t[Byte]\t[Byte]\t[degC]\t[degC]\t[Byte]\t[Byte]\t[Byte]\t[Byte]\t[kV]\t[Byte]\t[V]\t[Byte]\t[Byte]\t[degC]\t[degC]\t[Byte]\t[Byte]\t[Byte]\t[Byte]\t[kV]\t[Byte]\tAll units are just guessed!!!\n")
    if cmd:
        cmdout = open(self.wpath+"/%.4i/cmd/%.3i.cmd"%(self.year,self.doy),"w")
        cmdout.write("year\ttime\t\tdoy\t\tTrigger_mode\tMCP_level\tT_Cal\tE_Cal\tE_Cal\tTAC_slope\tSSD_enables\tDPPS_Level\tPAPS_Level\tHV_enables\n")
        cmdout.write("[year]\t[secs1970]\t[DayOfYear]\t[Byte]\t[Byte]\t[Byte]\t[Byte]\t[Byte]\t[Byte]\t[Byte]\t[Byte]\t[Byte]\t[Byte]\n")
    # --- walk every cycle inside the (t0, t1) window ---
    nxtcycexp = False  # True when the next cycle was already expanded as a "last step"
    for cyc in range(self.TIME.shape[0]):
        # '*' acts as a logical AND on the two boolean comparisons.
        if (self.TIME[cyc][0] > self.t0) * (self.TIME[cyc][0] < self.t1):
            if not nxtcycexp:
                expcyc=SBCYC()
                expand(self.CYC[cyc],self.TIME[cyc],self.QAC[cyc],expcyc)
            cyctime = self.TIME[cyc][0] # All PHAs of one cycle get the starting time of the cycle as time
            cycdoy = time.gmtime(cyctime)
            cycdoy = cycdoy.tm_yday+cycdoy.tm_hour/24.+cycdoy.tm_min/(24.*60.)+cycdoy.tm_sec/(24.*60.*60.)
            if pha:
                self.write_pha(expcyc,cyctime,cycdoy,phaout,laststep = False)
            if bra:
                self.write_bra(expcyc,cyctime,cycdoy,braout,laststep = False)
            if tra:
                self.write_tra(expcyc,cyctime,cycdoy,traout,laststep = False)
            if mra:
                self.write_mra(expcyc,cyctime,cycdoy,mraout,laststep = False)
            if hk:
                self.write_hk(expcyc,cyctime,cycdoy,hkout,laststep = False)
            if cmd:
                self.write_cmd(expcyc,cyctime,cycdoy,cmdout)
            if (cyc+1 < self.TIME.shape[0]):
                # A gap of ~720 s means the next cycle carries this cycle's last step.
                if ((self.TIME[cyc+1][0] - self.TIME[cyc][0])>717) * ((self.TIME[cyc+1][0] - self.TIME[cyc][0])<723) :
                    expcyc=SBCYC()
                    expand(self.CYC[cyc+1],self.TIME[cyc+1],self.QAC[cyc+1],expcyc)
                    nxtcycexp = True
                    if pha:
                        self.write_pha(expcyc,cyctime,cycdoy,phaout,laststep = True)
                    if bra:
                        self.write_bra(expcyc,cyctime,cycdoy,braout,laststep = True)
                    if tra:
                        self.write_tra(expcyc,cyctime,cycdoy,traout,laststep = True)
                    if mra:
                        self.write_mra(expcyc,cyctime,cycdoy,mraout,laststep = True)
                    if hk:
                        self.write_hk(expcyc,cyctime,cycdoy,hkout,laststep = True)
                else:
                    nxtcycexp = False
    # --- close everything that was opened ---
    if pha:
        for f in phaout:
            if f:
                f.close()
    if bra:
        braout.close()
    if tra:
        traout.close()
    if mra:
        mraout.close()
    if hk:
        hkout.close()
    if cmd:
        cmdout.close()
def menu():
    """Interactive console menu for the Letterboxd statistics tool.

    Loops, printing the operations in dict_operations and dispatching on the
    number the user types, until option 4 (reformat) or 6 (quit) ends it.
    Heavy imports are kept inside the branches so startup stays fast.

    Fixes vs. the previous version:
      * the two username prompts for option 5 had been mangled into one
        syntactically invalid statement; restored the two input() calls
        implied by the following affinity(user1, user2) call;
      * the bare `except:` around int() now catches ValueError only, and the
        one around reformat_imdb() catches Exception instead of everything;
      * local `filter` renamed to `choice` so the builtin is not shadowed.
    """
    choice = 5
    while int(choice) != 4:
        print("\nWhat do you want to do?")
        for n in dict_operations:
            print(str(n) + " - " + str(dict_operations[n]))
        choice = input()
        try:
            choice = int(choice)
        except ValueError:
            print("Error")
        if choice == 0:
            # Rebuild the database from the Letterboxd export.
            from join_letterboxd_file import join_letterboxd
            join_letterboxd()
            from lbd import letterboxd
            letterboxd()
            from expand import expand
            expand()
        if choice == 1:
            # All-time statistics.
            from stats_menu import stats_menu
            import pandas as pd
            db = pd.read_csv("output/database.csv", low_memory=False)
            db = pd.DataFrame(db)
            diary = pd.read_csv("input/diary.csv")
            diary = pd.DataFrame(diary)
            # Keep only the year component of each watched date.
            diary = diary['Watched Date'].str.split("-", expand=True)
            diary = (diary.iloc[:, 0])
            watched = pd.read_csv("input/watched.csv")
            watched = pd.DataFrame(watched)
            stats_menu(db, watched, diary)
        if choice == 2:
            # Statistics restricted to films watched in a given year.
            year_filter = input("\nWhat year? ")
            from stats_menu import stats_menu_watching
            import pandas as pd
            diary = pd.read_csv("input/diary.csv")
            diary = pd.DataFrame(diary)
            diary = diary[diary['Watched Date'].str.startswith(year_filter)]
            watched = pd.read_csv("input/watched.csv")
            watched = pd.DataFrame(watched)
            # Join the diary against the watched list to recover URIs.
            db2 = pd.merge(diary, watched, on=['Name', 'Year'])
            watched = db2[['Date_x', 'Name', 'Year', 'Letterboxd URI_y']]
            watched.columns = ['Date', 'Name', 'Year', 'Letterboxd URI']
            db = pd.read_csv("output/database.csv", low_memory=False)
            db = pd.DataFrame(db)
            db = db[db['watched'].notna()]
            db = db[db['watched'].str.startswith(year_filter)]
            stats_menu_watching(db, watched, diary, year_filter)
        if choice == 3:
            # Statistics restricted to films released in a given year.
            year_filter = input("\nWhat year? ")
            from stats_menu import stats_menu_release
            import pandas as pd
            db = pd.read_csv("output/database.csv", low_memory=False)
            db = pd.DataFrame(db)
            db = db[db['year'].notna()]
            db = db[db['year'].str.startswith(year_filter)]
            diary = pd.read_csv("input/diary.csv")
            diary = pd.DataFrame(diary)
            diary = diary[diary['Watched Date'].str.startswith(year_filter)]
            watched = pd.read_csv("input/watched.csv")
            watched = pd.DataFrame(watched)
            stats_menu_release(db, watched, diary)
        if choice == 4:
            try:
                from reformat_in_imdb import reformat_imdb
                reformat_imdb()
            except Exception:
                print("\nError, check that you have built the main DB")
        if choice == 5:
            # Affinity between two Letterboxd users.
            from affinity import affinity
            user1 = input("Enter first username: ")
            user2 = input("Enter second username: ")
            affinity(user1, user2)
        if choice == 6:
            break
def test_simple_chunks(self):
    """A plain slash list splits into its independent symbols."""
    observed = set(expand.expand('WNT1/HCK3/ABC'))
    self.assertEqual(observed, {'WNT1', 'HCK3', 'ABC'})
def test_mixed_chunks(self):
    """A hyphen range nested inside a slash list expands fully."""
    observed = set(expand.expand('WNT1/HCK1-3/ABC'))
    self.assertEqual(observed, {'WNT1', 'HCK1', 'HCK2', 'HCK3', 'ABC'})
def a_star_search(dis_map, time_map, start, end):
    """A* variant driven by hand-maintained dictionaries.

    State per node name:
      tree     -- successor costs recorded at expansion time (f = g + h)
      access   -- g-score of nodes already visited/expanded
      expanded -- tentative g-score of frontier nodes
      detect   -- tentative f-score of frontier nodes (the open list)
      following -- [node, g, h] of the next node to expand
    The route itself is recovered afterwards by get_path(tree, start, end).
    Commented-out debug prints from the original have been removed.
    NOTE(review): the collapsed source is ambiguous in places; the nesting
    below is a best-effort reconstruction -- verify against the original
    repository before relying on it.
    """
    path = []
    tree = {}
    access = {}
    expanded = {}
    detect = {}
    following = [start,0,dis_map[start][end]]
    found = False
    priority_dict = []   # NOTE(review): never used
    agla_no = []         # order in which nodes were expanded ("agla" = next)
    while(found ==False):
        ij = following[0]
        agla_no.append(ij)
        access[following[0]] = following[1]
        # A node being expanded leaves the open list.
        if ij in detect:
            detect.pop(ij,None)
        # f-scores of every reachable neighbour of ij (None = no edge).
        temp_dict = {}
        for p in time_map[ij]:
            if (time_map[ij][p]!= None):
                temp_dict[p] = following[1]+time_map[ij][p]+dis_map[p][end]
        tree[ij] = temp_dict
        for p in temp_dict:
            if p in detect:
                # Neighbour already on the open list: relax if cheaper.
                if (access[ij]+time_map[ij][p]+dis_map[p][end] < detect[p]):
                    detect[p] = access[ij]+time_map[ij][p]+dis_map[p][end]
                    expanded[p] = access[ij]+time_map[ij][p]
                # Re-pick the best open node as the next to expand.
                ik = min(detect, key = detect.get)
                following = [ik, detect[ik]-dis_map[ik][end], dis_map[ik][end]]
            if ((p not in access) and (p not in detect)):
                # Brand-new neighbour.
                if not detect:
                    detect[p] = access[ij]+time_map[ij][p]+dis_map[p][end]
                    expanded[p] = access[ij]+time_map[ij][p]
                ik = min(detect, key = detect.get)
                i_j = access[ij]+time_map[ij][p]+dis_map[p][end]
                if(i_j > detect[ik]):
                    # Current best open node wins.
                    following = [ik, expanded[ik], dis_map[ik][end]]
                    detect[p] = i_j
                    expanded[p] = access[ij]+time_map[ij][p]
                if (i_j < detect[ik]):
                    # The new neighbour wins.
                    following = [p, access[ij]+time_map[ij][p], dis_map[p][end]]
                    detect[p] = i_j
                    expanded[p] = access[ij]+time_map[ij][p]
                if (i_j == detect[ik]):
                    # Tie on f: break it on g.
                    if (access[ij]+time_map[ij][p] < detect[ik] - dis_map[ik][end]):
                        following = [p, access[ij]+time_map[ij][p], dis_map[p][end]]
                    else:
                        following = [ik, expanded[ik], dis_map[ik][end]]
            if (p == end):
                found = True
                break
    path = get_path(tree,start,end)
    # Post-search bookkeeping: compare the last-recorded frontier entries and
    # replay expand() over the expanded set -- presumably for grading/metrics.
    for k,v in expanded.items():
        a0 = v
        a11 = k
    for k,v in detect.items():
        b0 = v
        b11 = k
    if a0 == b0:
        for e in expanded:
            expand(e,dis_map)
    else:
        expanded.pop(a11)
        for e in expanded:
            expand(e,dis_map)
    return path
def expand(PGL, d=0, genome=None):
    """Takes a PyGL object and returns a PyGL object resulting from the
    expand operation.

    Args:
        PGL: the PyGL object to expand.
        d: expansion distance, forwarded to the backend as args['d'].
        genome: optional genome mapping; a fresh empty dict is used when
            omitted.

    Fix: the original declared the mutable default `genome={}`, so a single
    shared dict could be mutated across calls; None is now the sentinel and
    an empty dict is created per call.
    """
    args = {'d': d}
    if genome is None:
        genome = {}
    return _expa.expand(PGL, args, genome)
def test_single_char_chunks(self):
    """Single-letter alternatives after a shared prefix."""
    # TODO: which of these is the desired result?
    for raw, wanted in (('KDM6A/B', {'KDM6A', 'KDM6B'}),
                        ('CDK25B/C', {'CDK25C', 'CDK25B'})):
        self.assertEqual(set(expand.expand(raw)), wanted)
from decoded import * from bittools import * from expand import expand from Scientific.IO import NetCDF phas = [] for doy in range(1, 2): filein = NetCDF.NetCDFFile( "/data/ivar/berger/acedata/2007/2007%.3i.nc" % (doy), "r") cyc = filein.variables['cyc'][:] #cyc=vectorize(uint8)(cyc) cyc2 = cyc.view(uint8) QAC = filein.variables["QAC"][:] time = filein.variables["time"][:] expcyc = SBCYC() expand(cyc2[0], time[0], QAC[0], expcyc) for i in range(1, 2): try: expcyc = SBCYC() expand(cyc2[i], time[i], QAC[i], expcyc) print "Doy,EDB : " + str(doy) + "," + str(i) for step in range(60): for nrpha in range(expcyc.asxNof[step]): phas.append([ step, expcyc.asxtof[step, nrpha], expcyc.abxesd[step, nrpha], expcyc.abxdid[step, nrpha], expcyc.abxrng[step, nrpha], expcyc.abxsec[step, nrpha], expcyc.afxmas[step, nrpha], expcyc.afxmoq[step, nrpha], expcyc.afxwgt[step, expcyc.abxrng[step, nrpha]] ])
def a_star_search(dis_map, time_map, start, end):
    """A* over dict records {'cur', 'his', 'curdis', 'pasttime', 'sum'}.

    `ansdict` is the open list (re-sorted every iteration by f-score then
    node name), `closedlist` maps closed node -> best known f-score.
    getdis()/isend()/expand() are project helpers.  Returns the node list
    from start to end, or [] when the open list empties first.
    """
    path = []
    ansdict = []
    ansdict.append({
        'cur': start,
        'his': [start],
        'curdis': getdis(dis_map, start, end),
        'pasttime': 0,
        'sum': getdis(dis_map, start, end) + 0
    })
    closedlist = {}
    while len(ansdict):
        # Re-sort: stable sorts give (sum, cur) ordering overall.
        ansdict = sorted(ansdict, key=lambda e: e.__getitem__('cur'))
        ansdict = sorted(ansdict, key=lambda e: e.__getitem__('sum'))
        # Pop the node with minimum f-score (isend handles goal preference).
        currnode = isend(ansdict, end)
        ansdict.remove(currnode)
        if currnode['cur'] == end:
            path = currnode['his']
            break
        # Skip nodes already closed with an equal-or-better score.
        if currnode['cur'] in closedlist.keys():
            if currnode['sum'] >= closedlist[currnode['cur']]:
                continue
            else:
                closedlist[currnode['cur']] = currnode['sum']
        else:
            closedlist[currnode['cur']] = currnode['sum']
        temp = expand(currnode['cur'], time_map)
        for item in temp:
            # No cycles along the current history.
            if item in currnode['his']:
                continue
            currnode1 = copy.deepcopy(currnode)
            temphis = currnode1['his']
            temphis.append(item)
            tempsum = currnode['pasttime'] + getdis(
                time_map, currnode['cur'], item) + getdis(dis_map, item, end)
            if item in closedlist.keys() and tempsum >= closedlist[item]:
                continue
            # Linear scan for the item on the open list.
            index = -1
            for i in range(0, len(ansdict)):
                if ansdict[i]['cur'] == item:
                    index = i
            if index != -1:
                # Already open: replace only if this route is cheaper.
                if tempsum < ansdict[index]['sum']:
                    ansdict[index] = {
                        'cur': item,
                        'his': temphis,
                        'curdis': getdis(dis_map, item, end),
                        'pasttime': currnode['pasttime'] + getdis(time_map, currnode['cur'], item),
                        'sum': tempsum
                    }
                else:
                    continue
            else:
                # Not open yet: add a fresh record.
                ansdict.append({
                    'cur': item,
                    'his': temphis,
                    'curdis': getdis(dis_map, item, end),
                    'pasttime': currnode['pasttime'] + getdis(time_map, currnode['cur'], item),
                    'sum': tempsum
                })
    return path
def test_first(self):
    """A number expands into its place values joined by ' + '."""
    result = expand(123)
    self.assertEqual(result, "100 + 20 + 3")
def a_star_search(dis_map, time_map, start, end):
    """A* using a [cost, name] min-heap (minheap is presumably heapq -- confirm).

    dis_map supplies the heuristic, time_map the edge costs; expand() gives
    neighbours and its results are memoised in node_expansion_dict.  Each
    node's best-known route is kept in node_path_dict as a tuple.
    Returns the node-name path, or [] when the search exhausts the map.
    """
    path = []
    # be sure to call the imported function expand to get the next list of nodes
    open_nodes = []            # heap of [f-cost, node name]
    closed_nodes = []          # fully processed nodes
    node_expansion_dict = {}   # memoised expand() results
    node_path_dict = {}        # node -> tuple(path that reached it)
    # check if start or end in the map
    if start not in dis_map or end not in dis_map:
        return path
    # initialization
    node = [dis_map[start][end], start]
    minheap.heappush(open_nodes, node)
    node_path_dict[start] = tuple(path)
    while open_nodes[0][1] != end:
        # choose a node with minimum expected cost
        # (ties break alphabetically because lists compare element-wise)
        node = minheap.heappop(open_nodes)
        path = list(node_path_dict[node[1]])
        path.append(node[1])
        # expand this node to get the next list of nodes (memoised)
        if node[1] in node_expansion_dict:
            next_node_list = node_expansion_dict[node[1]]
        else:
            # if cost of node is not available, retire it and pick another
            if node[1] not in time_map:
                closed_nodes.append(node[1])
                continue
            next_node_list = expand(node[1], time_map)
            node_expansion_dict[node[1]] = next_node_list
        for next_node in next_node_list:
            # only consider nodes that are not closed and have a heuristic
            if next_node not in closed_nodes and next_node in dis_map:
                # g = (parent f - parent h) + edge cost; h from dis_map
                g_cost = (node[0] - dis_map[node[1]][end]) + time_map[node[1]][next_node]
                h_cost = dis_map[next_node][end]
                total_cost = g_cost + h_cost
                for i in range(len(open_nodes)):
                    if next_node == open_nodes[i][1]:
                        # already open: update in place if cheaper
                        if total_cost < open_nodes[i][0]:
                            open_nodes[i][0] = total_cost
                            node_path_dict[next_node] = tuple(path)
                            # restore the heap invariant after the in-place edit
                            minheap.heapify(open_nodes)
                        break
                else:
                    # not on the open list yet: push it
                    minheap.heappush(open_nodes, [total_cost, next_node])
                    if next_node not in node_path_dict:
                        node_path_dict[next_node] = tuple(path)
        # add used node to closed_nodes
        closed_nodes.append(node[1])
        # if there is nowhere to go before reaching end
        if len(open_nodes) == 0:
            return []
    # append the final node to path
    node = minheap.heappop(open_nodes)
    closed_nodes.append(node[1])
    path = list(node_path_dict[node[1]])
    path.append(node[1])
    return path
def parse(inport):
    "Parse a program: read and expand/error-check it."
    # Backwards compatibility: a plain str is wrapped into an InPort first.
    source = InPort(StringIO.StringIO(inport)) if isinstance(inport, str) else inport
    return expand(read(source), toplevel=True)
def save_bin(self,pha=True,bra=True,tra=True,mra=True):
    """Extract the selected data products and dump them as numpy arrays.

    Mirrors save_ascii: walks every cycle inside (t0, t1), expanding each
    with expand()/SBCYC and accumulating rows in self.phal/bral/tral/mral
    via the extr_* helpers, then writes each list with numpy's save().
    NOTE(review): files are opened in text mode "w" although save() writes
    binary -- "wb" is presumably intended (works only on Python 2/POSIX);
    also the pha file is named .npz but written with save(), not savez.
    """
    if pha:
        self.phal = []
    if bra:
        self.bral = []
    if tra:
        self.tral = []
    if mra:
        self.mral =[]
    nxtcycexp = False  # True when the next cycle was already expanded as a "last step"
    for cyc in range(self.TIME.shape[0]):
        # '*' acts as a logical AND on the two comparisons.
        if (self.TIME[cyc][0] > self.t0) * (self.TIME[cyc][0] < self.t1):
            if not nxtcycexp:
                expcyc=SBCYC()
                expand(self.CYC[cyc],self.TIME[cyc],self.QAC[cyc],expcyc)
            cyctime = self.TIME[cyc][0] # All PHAs of one cycle get the starting time of the cycle as time
            cycdoy = time.gmtime(cyctime)
            cycdoy = cycdoy.tm_yday+cycdoy.tm_hour/24.+cycdoy.tm_min/(24.*60.)+cycdoy.tm_sec/(24.*60.*60.)
            if pha:
                self.extr_pha(expcyc,cyctime,cycdoy,laststep = False)
            if bra:
                self.extr_bra(expcyc,cyctime,cycdoy,laststep = False)
            if tra:
                self.extr_tra(expcyc,cyctime,cycdoy,laststep = False)
            if mra:
                self.extr_mra(expcyc,cyctime,cycdoy,laststep = False)
            if (cyc+1 < self.TIME.shape[0]):
                # A gap of ~720 s means the next cycle carries this cycle's last step.
                if ((self.TIME[cyc+1][0] - self.TIME[cyc][0])>717) * ((self.TIME[cyc+1][0] - self.TIME[cyc][0])<723) :
                    expcyc=SBCYC()
                    expand(self.CYC[cyc+1],self.TIME[cyc+1],self.QAC[cyc+1],expcyc)
                    nxtcycexp = True
                    if pha:
                        self.extr_pha(expcyc,cyctime,cycdoy,laststep = True)
                    if bra:
                        self.extr_bra(expcyc,cyctime,cycdoy,laststep = True)
                    if tra:
                        self.extr_tra(expcyc,cyctime,cycdoy,laststep = True)
                    if mra:
                        self.extr_mra(expcyc,cyctime,cycdoy,laststep = True)
                else:
                    nxtcycexp = False
    # --- convert the accumulated row lists to arrays and write them out ---
    if pha:
        self.phal = array(self.phal)
        phaout = open(self.wpath+"/%.4i/%.3i.pha.npz"%(self.year,self.doy),"w")
        save(phaout,self.phal)
        phaout.close()
    if bra:
        self.bral = array(self.bral)
        braout = open(self.wpath+"/%.4i/%.3i.bra.npy"%(self.year,self.doy),"w")
        save(braout,self.bral)
        braout.close()
    if tra:
        self.tral = array(self.tral)
        traout = open(self.wpath+"/%.4i/%.3i.tra.npy"%(self.year,self.doy),"w")
        save(traout,self.tral)
        traout.close()
    if mra:
        self.mral = array(self.mral)
        mraout = open(self.wpath+"/%.4i/%.3i.mra.npy"%(self.year,self.doy),"w")
        save(mraout,self.mral)
        mraout.close()
def test_not_increasing_slash(self):
    """Slash lists need not be in increasing numeric order."""
    for raw, wanted in (('TLR2/1', {'TLR2', 'TLR1'}),
                        ('TLR2/6', {'TLR2', 'TLR6'})):
        self.assertEqual(set(expand.expand(raw)), wanted)
def test_memory_leak(self):
    """Huge numeric ranges must pass through unexpanded (regression guard)."""
    for raw in ('4000-3295', '3VIT19-001490329014'):
        self.assertEqual(set(expand.expand(raw)), {raw})
def a_star_search(dis_map, time_map, start, end):
    """A* built on a project PriorityQueue whose entries are whole paths.

    Each queue entry is (priority, [start, ..., node]); the priority is
    g (accumulated time) + h (dis_map distance to end).  Returns the full
    path list, or [] when the inputs are inconsistent or no path exists.
    Commented-out debug prints from the original have been removed.
    """
    path = []
    # be sure to call the imported function expand to get the next list of nodes
    # Best-known time to reach each node (the g-score table).
    time_taken_for_each_node = {}
    time_taken_for_each_node[start] = 0
    # Initialize an empty visited set.
    visited = set()
    # Seed the queue with the trivial path [start] at priority zero.
    priority_queue = PriorityQueue()
    priority_queue.add_node([start], 0)
    # --- input validation: both endpoints must exist in both maps ---
    if start not in time_map and end not in time_map:
        return path
    if start not in dis_map and end not in dis_map:
        return path
    if start in time_map and end not in dis_map:
        return path
    if end in time_map and start not in dis_map:
        return path
    if dis_map.keys() != time_map.keys():
        return path
    # Keep popping the cheapest path until the queue drains.
    while priority_queue.empty() == False:
        current_node_tuple = priority_queue.pop_node()
        # current_node_tuple[1] is the path; [-1] its last node.
        if current_node_tuple[1][-1] not in visited:
            visited.add(current_node_tuple[1][-1])
            # Goal test on the node just settled.
            if current_node_tuple[1][-1] == end:
                path = current_node_tuple[1]
                return path
            # Otherwise push every neighbour with its f = g + h priority.
            for node in expand(current_node_tuple[1][-1], time_map):
                time_cost = time_map[current_node_tuple[1][-1]][node]
                dist_cost = dis_map[node][end]
                cost_till_now = time_taken_for_each_node[current_node_tuple[1]
                                                         [-1]]
                new_cost = time_cost + cost_till_now
                priority_in_queue = new_cost + dist_cost
                # Record/relax the g-score before enqueueing the longer path.
                if node not in time_taken_for_each_node.keys(
                ) or time_taken_for_each_node[node] > new_cost:
                    time_taken_for_each_node[node] = new_cost
                    temp = current_node_tuple[1].copy()
                    temp.append(node)
                    priority_queue.add_node(temp, priority_in_queue)
    return path  # "Path can't be found Willie"
def test_second(self):
    """Zero place values are omitted from the expansion."""
    result = expand(605030)
    self.assertEqual(result, "600000 + 5000 + 30")
def test_digit_chunks(self):
    """Digit suffixes in slash lists and hyphen ranges."""
    cases = (
        ('WNT9/10', {'WNT9', 'WNT10'}),
        ('WNT10/11', {'WNT10', 'WNT11'}),
        ('WNT9-11', {'WNT9', 'WNT10', 'WNT11'}),
        ('WNT10-11', {'WNT10', 'WNT11'}),
    )
    for raw, wanted in cases:
        self.assertEqual(set(expand.expand(raw)), wanted)
def test_comma_case(self):
    """A trailing comma does not break hyphen-range expansion."""
    observed = set(expand.expand('MEKK1-3,'))
    self.assertEqual(observed, {'MEKK1', 'MEKK2', 'MEKK3'})
def __init__(self, X, Y, draw_graph=False):
    """A*-driven symbolic-regression search fitting an expression to (X, Y).

    Repeatedly pops the lowest-error expression state, expands it with
    expand.expand, scores each child via self.score, and stops on a score
    below self.THRESH, on max depth, or after 100 iterations.
    Python 2 code (print statements).
    NOTE(review): `draw_graph` is unused; `min` and `iter` shadow builtins;
    the final states.put uses `score` from the last scored child rather
    than new_state.score; `logfile` is never closed.
    """
    # PLOT THE BEST ERROR OVER TIME!
    # LEARN EXPANSION 'RULES' based on HOW MUCH THE ERROR IMPROVES
    # Log every (parent, child) expansion guess for later inspection.
    logfile = open("./guesses", 'w')
    self.X = X
    self.Y = Y
    self.THRESH = .01       # error threshold that counts as a solution
    self.MAX_DEPTH = 10;
    self.guesses = 5        # Number of guesses per fit
    # For plotting purposes: best error per expansion step.
    self.best_err = []
    exptree = ExpTree()
    init_consts, init_score = self.score(exptree, exptree.constants)
    init_state = AStar.AStarState(exptree, init_score, 0, init_consts)
    # Minimum-error expression so far, in case we hit max_iter.
    min = init_state
    self.states = PriorityQueue()
    self.states.put((1, init_state))
    iter = 0
    while True:
        if iter > 100:
            # NOTE(review): `child`/`score`/`fit_constants` are leftovers
            # from the previous loop iteration here, not the best state.
            print "Hit max iterations. Best so far: " + str(child.root.collapse())
            print "\tError: " + str(score)
            print "\tFitted constants: " + str(fit_constants)
            break
        iter += 1
        # Choose state to expand: cheapest on the priority queue.
        try:
            state_to_expand = self.states.get(False)[1]
        except:
            print "Could not find any more states to expand"
            break
        expr_to_expand = state_to_expand.exptree
        expr_str = str(expr_to_expand.root.collapse())
        self.best_err.append(state_to_expand.score)
        print "EXPANDING:", expr_str, state_to_expand.fit_consts, state_to_expand.score
        # This is where the expansion happens:
        children = expand.expand(expr_to_expand)
        #children = expand.expand_two_levels(expr_to_expand)
        temp = set()
        for child in children:
            logfile.write(expr_str + '\t' + str(child.root.collapse()))
            logfile.write('\n')
            logfile.flush()
            try:
                fit_constants, score = self.score(child.root.collapse(), child.constants)
                child_depth = state_to_expand.depth + 1
                if score < self.THRESH:
                    self.best_err.append(score)
                    print "Found solution! " + str(child.root.collapse())
                    print "\tError: " + str(score)
                    print "\tFitted constants: " + str(fit_constants)
                    print
                    return
                if child_depth >= self.MAX_DEPTH:
                    print "Hit maximum depth"
                    continue
                new_state = AStar.AStarState(child, score, child_depth, fit_constants)
                temp.add(new_state)
                # Keeping track of the min state:
                if score < min.score:
                    min = new_state
            except:
                # Scoring can fail on malformed candidates; skip them.
                print '\t' + str(child.root.collapse()) + "FAILED"
        for new_state in temp:
            print '\t', new_state.exptree.root.collapse(), "SCORE", new_state.score
            self.states.put((score, new_state))