def load_cache_file_into_mem(self, cache_file_abs_path, params, store_allKeys=False, msg=''):
    """Load a pickled cache file into the Redis-backed Mem store.

    cache_file assumed to be a dict of k:list, with header, dtype, keyPos info.

    Args:
        cache_file_abs_path: absolute path of the pickle file to load.
        params: Mem/Redis connection params; must have engine == "redis".
            Optional "overwrite_memory" forces a reload even if cached.
        store_allKeys: also store the list of loaded keys under "allKeys".
        msg: tag echoed in progress prints.
    """
    if self.my_platform == "mac":
        single_server = True  # not on cluster
    else:
        single_server = False
    assert params["engine"] == "redis"
    Force_Redo = ("overwrite_memory" in params and params["overwrite_memory"])
    # Cross-process lock keyed on the cache file: only one worker loads.
    with AccessRestrictionContext(prefix=cache_file_abs_path,
                                  no_restriction=single_server) as lock:
        lock.Access_Or_Wait_And_Skip("load_cache_file_into_mem")
        if iprint:
            print("load_cache_file_into_mem params", params)
        self.mm = Mem(params)
        tmp = self.mm.get(Valid_Flag_Key)
        tmp2 = self.mm.get(End_Flag_Key)
        if tmp == 1 and tmp2 == 1 and not Force_Redo:
            if iprint:
                print("Redis already cached, skip...")
            return
        print("pickle.load %s ..." % cache_file_abs_path)
        # FIX: close the file handle deterministically (was pickle.load(open(...))).
        with open(cache_file_abs_path, "rb") as fh:
            da = pickle.load(fh)
        if iprint >= 2:
            try:
                print(" - Info - ", da["header"], da["dtype"], da["keyPos"])
            except KeyError:  # FIX: was a bare except; only missing keys expected
                print(" - missing keyPos or header or dtype -")
        # NOTE(review): on a forced redo the flags are left as-is so readers
        # keep seeing the old cache while it is being overwritten — confirm.
        if not Force_Redo:
            self.mm.set(End_Flag_Key, 0)
            self.mm.set(Valid_Flag_Key, 1)
            time.sleep(0.1)  # redis checks expired at freq 10hz.
        allKeys = []
        # Meta entries are popped so the k/v loop below sees only data keys.
        if "header" in da:
            header = da.pop("header")
            self.mm.set("header", header)
        if "dtype" in da:
            dtype = da.pop("dtype")
            self.mm.set("dtype", dtype)
        if "keyPos" in da:
            keyPos = da.pop("keyPos")
            self.mm.set("keyPos", keyPos)
        cnt, thresh = 0, 1024 * 8
        for k, v in da.items():
            self.mm.set(k, v)
            cnt += 1
            if store_allKeys:
                allKeys.append(k)
            if iprint and cnt >= thresh:  # progress print at doubling intervals
                thresh *= 2
                print("mm key cnt %d. %s" % (cnt, msg))
        if store_allKeys:
            self.mm.set("allKeys", allKeys)
        # End flag set last: marks the load as complete for readers.
        self.mm.set(End_Flag_Key, 1)
        if iprint:
            print("loaded cache key num: %d" % cnt)
def invalidate_mem(self):  # Mark as invalid. force reload/re-yield k,v into mem
    """Clear the valid/end flags in Redis so the cache is reloaded next use.

    Reads connection params from self.meta_file_abs_path (pickle, with a
    best-effort json fallback) and zeroes both status flags.
    """
    try:
        # FIX: close the file handle deterministically (was pickle.load(open(...))).
        with open(self.meta_file_abs_path, 'rb') as fh:
            meta = pickle.load(fh)
    except Exception:  # FIX: was a bare except; keep the best-effort fallback
        # TODO not using json
        with open(self.meta_file_abs_path, 'rb') as fh:
            meta = json.load(fh)
    # NOTE(review): pickle.loads(str(...)) requires bytes on Python 3 —
    # this looks Python-2 era; confirm meta["params"] type before relying on it.
    params = pickle.loads(str(meta["params"]))
    assert params["engine"] == "redis"
    self.mm = Mem(params)
    self.mm.set(End_Flag_Key, 0)
    self.mm.set(Valid_Flag_Key, 0)
def activate_mem(self, params):
    """Attach self.mm to an already-populated Redis cache.

    Returns 'ok' when both the valid flag and the end flag read as 1,
    'invalid' when either is missing or zero, and 'unknown-engine' for
    any engine other than redis.
    """
    if params["engine"] != "redis":
        return "unknown-engine"
    if iprint >= 3:
        print("Redis params", params)
    self.mm = Mem(params)
    valid_flag = self.mm.get(Valid_Flag_Key)
    end_flag = self.mm.get(End_Flag_Key)
    cache_ready = not (valid_flag is None or valid_flag == 0
                       or end_flag is None or end_flag == 0)
    if cache_ready:
        return 'ok'
    if iprint >= 2:
        print("[ CM ] Invalid activation")
    return "invalid"
def main(options, arguments):
    """CLI dispatcher: pick a memory device, then run the requested action.

    Args:
        options: parsed option object (device, usemmap, view, check,
            fingerprints, bump attributes).
        arguments: positional args from the parser (unused here; kept
            for the caller's signature).
    """
    # Select the memory accessor.
    # NOTE(review): if usage() does not exit the process, mmemory stays
    # unbound after the else branch — presumably usage() calls sys.exit;
    # confirm.
    if options.device is not None:
        if options.device == '/dev/mem':
            mmemory = Mem()
        elif options.device == '/dev/kmem':
            mmemory = Kmem()
        else:
            usage()
    else:
        mmemory = Kmem()
    if options.usemmap is None:
        options.usemmap = 0
    if options.view is not None:
        if options.view == 'tasks':
            ttasks = GVTasks(mmemory, options.usemmap)
            ttasks.viewTasks()
        elif options.view == 'syscalls':
            mysyscalls = GVSyscalls(mmemory, options.usemmap)
            mysyscalls.viewSyscalls()
        elif options.view == 'networks':
            nnetworks = GVNetworks(mmemory, options.usemmap)
            nnetworks.viewNetworks()
    elif options.check is not None:
        if options.check == 'tasks':
            ttasks = GVTasks(mmemory, options.usemmap)
            ttasks.checkViewTasks()
        elif options.check == 'networks':
            nnetworks = GVNetworks(mmemory, options.usemmap)
            nnetworks.checkViewNetworks()
    elif options.fingerprints is not None:
        ffingerprints = Fingerprints(mmemory)
        if options.fingerprints[1] == 'create':
            ffingerprints.doFingerprints(options.fingerprints[0])
        elif options.fingerprints[1] == 'check':
            ffingerprints.checkFingerprints(options.fingerprints[0])
    elif options.bump is not None:
        mmemory.open("r", options.usemmap)
        # FIX: string.atol was removed in Python 3; int(s, 16) is the
        # equivalent hex-string parse.
        mmemory.dump(int(options.bump[0], 16), int(options.bump[1]),
                     options.bump[2])
        mmemory.close()
    else:
        usage()
def yield_file_into_mem(self, yield_func, yield_args, params, kv_action_type=None, msg=''):
    """Stream (k, v) pairs from yield_func(yield_args) into the Redis-backed Mem.

    Args:
        yield_func: generator callable producing (k, v) pairs.
        yield_args: single argument passed to yield_func; also used (string
            concatenation with the meta file name) as the lock prefix.
        params: Mem/Redis params; must have engine == "redis".  Optional
            "overwrite_memory" forces a reload; "ignore_lock" skips locking.
        kv_action_type: None -> plain set(k, v); 1 -> append v (deduplicated)
            to the list stored under k.
        msg: tag echoed in progress prints.
    """
    if self.my_platform == "mac":
        single_server = True  # not on cluster
    else:
        single_server = False
    assert params["engine"] == "redis"
    Force_Redo = ("overwrite_memory" in params and params["overwrite_memory"])
    ignore_lock = ("ignore_lock" in params and params["ignore_lock"])
    # Cross-process lock so only one worker performs the yield/load.
    with AccessRestrictionContext(
            prefix=yield_args + self.meta_file_name,
            no_restriction=(ignore_lock or single_server)) as lock:
        lock.Access_Or_Wait_And_Skip("yield_file_into_mem")
        self.mm = Mem(params)
        if iprint:
            print("yield_file_into_mem params", params)
            print('mm.prefix', self.mm.prefix)
        # Both flags == 1 means a previous load completed; skip unless forced.
        tmp = self.mm.get(Valid_Flag_Key)
        tmp2 = self.mm.get(End_Flag_Key)
        if tmp == 1 and tmp2 == 1 and not Force_Redo:
            if iprint:
                print("Redis already cached, skip...")
            return
        # NOTE(review): on a forced redo the flags are left untouched while
        # the data is overwritten — confirm this is intentional.
        if not Force_Redo:
            self.mm.set(End_Flag_Key, 0)
            self.mm.set(Valid_Flag_Key, 1)
            time.sleep(0.1)  # presumably lets redis key expiry settle — TODO confirm
        cnt, thresh = 0, 1024 * 8
        for k, v in yield_func(yield_args):
            if kv_action_type is None:
                self.mm.set(k, v)
            elif kv_action_type == 1:  # append to vlist
                # Read-modify-write of the stored list (not atomic).
                tmp = self.mm.get(k)
                if tmp is None:
                    tmp = []
                if v not in tmp:
                    tmp.append(v)
                self.mm.set(k, tmp)
            cnt += 1
            if iprint and cnt >= thresh:  # progress print at doubling intervals
                thresh *= 2
                print("mm key cnt %d. %s" % (cnt, msg))
        # End flag set last: marks the load as complete for readers.
        self.mm.set(End_Flag_Key, 1)
        if iprint:
            print("yielded key num: %d" % cnt)
def set_mem_valid(self, params):
    """Force-mark the Redis cache as valid and fully loaded.

    No-op for any engine other than redis.
    """
    if params["engine"] != "redis":
        return
    self.mm = Mem(params)
    for flag_key in (Valid_Flag_Key, End_Flag_Key):
        self.mm.set(flag_key, 1)
try: dic_mm_geo_addr_mapping = pickle.load( open(Geo_File_Dir + "mm_geo_addr_mapping", "rb")) except: dic_mm_geo_addr_mapping = {} try: dic_mm_addr_bbox_snwe = pickle.load( open(Geo_File_Dir + "mm_addr_bbox_snwe", "rb")) except: dic_mm_addr_bbox_snwe = {} if On_Cluster: mm_geo_addr_mapping = Mem({ "num": 5, "prefix": "geo~map~", "expire": 90 * 86400 }) mm_addr_bbox_snwe = Mem({ "num": 5, "prefix": "bbox~snwe~", "expire": 90 * 86400 }) else: mm_geo_addr_mapping = Mem({ "use_ips": ['localhost'], "prefix": "geo~map~", "expire": 90 * 86400, 'overwrite_servers': True }) mm_addr_bbox_snwe = Mem({
def fix_no_speed(addr, mm_loaded=False):
    """Fill in missing road speeds by borrowing from the best-aligned neighbor edge.

    Reads the "no speed" node-id pairs for region `addr`, and for each pair
    (n1, n2) picks the speed of the adjacent edge whose bearing is closest to
    n1->n2's bearing, iterating to a fixed point.  Fixed speeds are appended
    to the region's nids-to-speed file.

    Args:
        addr: region name used to build file paths and cache names.
        mm_loaded: if True, skip reloading the known-speed file into Redis
            (assumes a previous call already did).
    """
    print("fix_no_speed():", addr, 'mm_loaded', mm_loaded)
    bugNoSpeedFn = mypydir + "/cache/nospeed-%s.txt" % addr
    # Temporary Redis map (n1, n2) -> speed for already-known edges.
    if On_Cluster:
        mm_tmp_nid2spd = Mem({
            "num": 30,
            "prefix": "~fx_n2spd~",
            "expire": 86400 * 30
        })
    else:
        mm_tmp_nid2spd = Mem({
            "use_ips": ['localhost'],
            "prefix": "~fx_n2spd~",
            "expire": 86400 * 30
        })
    aleady_mm_loaded = mm_loaded  # (sic) original spelling kept
    correctSpdFn = DirOSM + os.sep + addr + "/%s-nids-to-speed.txt" % addr
    if not aleady_mm_loaded:
        # Load known speeds "n1,n2,speed" into the temp Redis map.
        cnt = 0
        pthresh = 1
        print('Loading ' + correctSpdFn)
        with open(correctSpdFn, "r") as f:
            for l in f:
                st = l.split(",")
                if len(st) < 3:
                    continue
                mm_tmp_nid2spd.set((int(st[0]), int(st[1])), float(st[2]))
                cnt += 1
                if cnt >= pthresh:  # progress print at doubling intervals
                    print('fix_no_speed load cnt', cnt)
                    pthresh *= 2
        print("correct spd tup len=", cnt)
    # Node geometry and adjacency caches for bearing computations.
    mm_nid2latlng = CacheManager(
        overwrite_prefix=True)  # will use existing config
    mm_nid2neighbor = CacheManager(overwrite_prefix=True)
    mm_nid2latlng.use_cache(
        meta_file_name="osm/cache-%s-nodeid-to-lat-lng.txt" % addr)
    mm_nid2neighbor.use_cache(
        meta_file_name="osm/cache-%s-nodeid-to-neighbor-nid.txt" % addr)
    # n1n2s: (n1, n2) -> fixed speed, or -1 while still unresolved.
    n1n2s = {}
    print('Reading ' + bugNoSpeedFn)
    with open(bugNoSpeedFn, "r") as f:
        for l in f:
            st = l.split(",")
            if len(st) < 2:
                continue
            st = [int(x) for x in st]
            tup = (st[0], st[1])
            if tup not in n1n2s:
                n1n2s[tup] = -1
    print("no_speed nid tup len=", len(n1n2s))
    # Fixed-point loop: repeat until every pair is resolved or no progress.
    lastBugCnt = None
    while True:
        bugcnt = 0  # pairs still unresolved this pass
        for tup in n1n2s.keys():
            if n1n2s[tup] > 0:
                continue  # already fixed
            n1 = tup[0]
            n2 = tup[1]
            hd1 = get_bear_given_nid12(n1, n2, mm_nid2latlng)
            if hd1 is None:
                continue  # no geometry for this edge
            # Pass 1: incoming edges ?->n1; take speed of the best-aligned one.
            nblist = mm_nid2neighbor.get(n1)  # ?->n1->n2
            mindiff = 1e10
            fixed = 0
            if nblist:
                for nbn in nblist:
                    hdn = get_bear_given_nid12(nbn, n1, mm_nid2latlng)
                    if hdn is None:
                        continue
                    angle = min_angle_diff(hd1, hdn)
                    # Prefer a speed fixed earlier in this run; else look up
                    # the known-speed map.
                    if (nbn, n1) in n1n2s and n1n2s[(nbn, n1)] > 0:
                        spdn = n1n2s[(nbn, n1)]
                    else:
                        spdn = mm_tmp_nid2spd.get((nbn, n1))
                    if angle < mindiff and spdn is not None:
                        mindiff = angle
                        n1n2s[tup] = spdn
                        fixed = 1
            if fixed:
                continue
            # Pass 2: outgoing edges n2->?; same best-alignment rule.
            nblist = mm_nid2neighbor.get(n2)  # n1->n2->?
            mindiff = 1e10
            if nblist:
                for nbn in nblist:
                    hdn = get_bear_given_nid12(n2, nbn, mm_nid2latlng)
                    if hdn is None:
                        continue
                    angle = min_angle_diff(hd1, hdn)
                    if (n2, nbn) in n1n2s and n1n2s[(n2, nbn)] > 0:
                        spdn = n1n2s[(n2, nbn)]
                    else:
                        spdn = mm_tmp_nid2spd.get((n2, nbn))
                    if angle < mindiff and spdn is not None:
                        mindiff = angle
                        n1n2s[tup] = spdn
                        fixed = 1
            if fixed == 0:
                bugcnt += 1
        if bugcnt == 0:
            break  # everything resolved
        print("bugcnt", bugcnt)
        # No progress since last pass: give up on the remaining pairs.
        if lastBugCnt is not None:
            if lastBugCnt == bugcnt:
                print("Give up #", bugcnt)
                break
        lastBugCnt = bugcnt
    # Append every resolved pair to the known-speed file.
    with open(correctSpdFn, "a") as f:
        for tup in n1n2s.keys():
            if n1n2s[tup] < 0:
                continue
            print("%d,%d,%.2f" % (tup[0], tup[1], n1n2s[tup]))
            f.write("%d,%d,%.2f\n" % (tup[0], tup[1], n1n2s[tup]))
    print("Give up #", bugcnt)
from a4loadServer import query_sumo_connection, query_sumo_edge, query_sumo_junction from configure.params import Mytools_Rel_Dir, region2serverIP from configure.assemble import * mypydir = os.path.abspath( os.path.dirname(inspect.getfile(inspect.currentframe()))) addpath = mypydir + Mytools_Rel_Dir from mem import Mem config = None traci = None # Redis cache on port 6380 mm_sumo = Mem({ "prefix": "s!", "expire": 864000, "use_ips": [region2serverIP[R['addr']]['host']] }) mm_sumo.set("test", 1) _mm_is_good = 0 if mm_sumo.get('test') == 1: print("Mem good on " + region2serverIP[R['addr']]['host']) _mm_is_good = 1 else: raise Exception("Mem fail ... " + region2serverIP[R['addr']]['host']) def construct_edge(eid, isInternal, turn="", allViaLanes=None,