def load_cache_file_into_mem(self,
                             cache_file_abs_path,
                             params,
                             store_allKeys=False,
                             msg=''):
    '''cache_file assumed to be a dict of k:list, with header, dtype, keyPos info.

    Loads the pickled dict at ``cache_file_abs_path`` into the redis-backed
    ``Mem`` described by ``params`` under a cross-process lock; if another
    process already completed the load (both control flags == 1) and no
    overwrite was requested, this is a no-op.

    :param cache_file_abs_path: path to the pickled key->value dict.
    :param params: Mem()/redis parameters; ``params["overwrite_memory"]``
        forces a reload. ``params["engine"]`` must be "redis".
    :param store_allKeys: if True, also store the full key list under "allKeys".
    :param msg: label appended to progress prints.
    '''
    if self.my_platform == "mac":
        single_server = True  # not on cluster
    else:
        single_server = False
    assert params["engine"] == "redis"
    Force_Redo = ("overwrite_memory" in params and params["overwrite_memory"])
    # Cluster-wide lock keyed by the cache file path: the first process to
    # arrive performs the load, the rest skip (Access_Or_Wait_And_Skip).
    with AccessRestrictionContext(prefix=cache_file_abs_path,
                                  no_restriction=single_server) as lock:
        lock.Access_Or_Wait_And_Skip("load_cache_file_into_mem")
        if iprint:
            print("load_cache_file_into_mem params", params)
        self.mm = Mem(params)
        tmp = self.mm.get(Valid_Flag_Key)
        tmp2 = self.mm.get(End_Flag_Key)
        if tmp == 1 and tmp2 == 1 and not Force_Redo:
            # Some other process already finished loading; nothing to do.
            if iprint:
                print("Redis already cached, skip...")
            return
        print("pickle.load %s ..." % cache_file_abs_path)
        da = pickle.load(open(cache_file_abs_path, "rb"))
        if iprint >= 2:
            try:
                print(" - Info - ", da["header"], da["dtype"], da["keyPos"])
            except:
                print(" - missing keyPos or header or dtype -")
        if not Force_Redo:
            # Mark as "valid but not finished" while keys stream in.
            # NOTE(review): flags are only reset when NOT forcing a redo;
            # on Force_Redo the previous flags are left untouched -- confirm
            # this is intended.
            self.mm.set(End_Flag_Key, 0)
            self.mm.set(Valid_Flag_Key, 1)
            time.sleep(0.1)  # redis checks expired at freq 10hz.
        allKeys = []
        # Pop meta entries so the k/v loop below only sees real data keys.
        if "header" in da:
            header = da.pop("header")
            self.mm.set("header", header)
        if "dtype" in da:
            dtype = da.pop("dtype")
            self.mm.set("dtype", dtype)
        if "keyPos" in da:
            keyPos = da.pop("keyPos")
            self.mm.set("keyPos", keyPos)
        cnt, thresh = 0, 1024 * 8
        for k, v in da.items():
            self.mm.set(k, v)
            cnt += 1
            if store_allKeys:
                allKeys.append(k)
            if iprint and cnt >= thresh:
                thresh *= 2  # log progress with exponential backoff
                print("mm key cnt %d. %s" % (cnt, msg))
        if store_allKeys:
            self.mm.set("allKeys", allKeys)
        self.mm.set(End_Flag_Key, 1)  # signal: load complete
        if iprint:
            print("loaded cache key num: %d" % cnt)
def invalidate_mem(
        self):  # Mark as invalid. force reload/re-yield k,v into mem
    """Zero both redis control flags so the next use_cache() reloads the data.

    Reads the locally cached meta file (pickle first, json fallback) to
    recover the Mem parameters, then clears the End and Valid flags.
    """
    try:
        meta = pickle.load(open(self.meta_file_abs_path, 'rb'))
    except:  # TODO not using json
        meta = json.load(open(self.meta_file_abs_path, 'rb'))
    # NOTE(review): str() around a pickled payload raises/garbles on Python 3
    # when meta["params"] is bytes -- confirm how "params" is stored in meta.
    params = pickle.loads(str(meta["params"]))
    assert params["engine"] == "redis"
    self.mm = Mem(params)
    self.mm.set(End_Flag_Key, 0)
    self.mm.set(Valid_Flag_Key, 0)
def activate_mem(self, params):
    """Bind ``self.mm`` to the redis cache described by *params* and report state.

    Returns ``'ok'`` when both control flags read 1, ``'invalid'`` when either
    flag is missing or 0, and ``'unknown-engine'`` for any non-redis engine.
    """
    if params["engine"] != "redis":
        return "unknown-engine"
    if iprint >= 3:
        print("Redis params", params)
    self.mm = Mem(params)
    valid_flag = self.mm.get(Valid_Flag_Key)
    end_flag = self.mm.get(End_Flag_Key)
    flags_ok = (valid_flag is not None and valid_flag != 0
                and end_flag is not None and end_flag != 0)
    if flags_ok:
        return 'ok'
    if iprint >= 2:
        print("[ CM ] Invalid activation")
    return "invalid"
def main(options, arguments):
    """Dispatch the memory-forensics CLI.

    Selects the memory backend from ``options.device`` (/dev/mem -> Mem,
    /dev/kmem or unset -> Kmem), then runs exactly one action: a view
    (tasks/syscalls/networks), a check (tasks/networks), a fingerprint
    create/check, or a raw dump; otherwise prints usage.

    :param options: parsed option object (optparse-style).
    :param arguments: positional CLI arguments (unused here).
    """
    # Pick the memory source. NOTE: on an unrecognized device usage() is
    # called but mmemory stays unbound, matching the original behavior.
    if options.device is not None:
        if options.device == '/dev/mem':
            mmemory = Mem()
        elif options.device == '/dev/kmem':
            mmemory = Kmem()
        else:
            usage()
    else:
        mmemory = Kmem()

    if options.usemmap is None:
        options.usemmap = 0

    # The actions below are mutually exclusive (if/elif chain).
    if options.view is not None:
        if options.view == 'tasks':
            ttasks = GVTasks(mmemory, options.usemmap)
            ttasks.viewTasks()
        elif options.view == 'syscalls':
            mysyscalls = GVSyscalls(mmemory, options.usemmap)
            mysyscalls.viewSyscalls()
        elif options.view == 'networks':
            nnetworks = GVNetworks(mmemory, options.usemmap)
            nnetworks.viewNetworks()
    elif options.check is not None:
        if options.check == 'tasks':
            ttasks = GVTasks(mmemory, options.usemmap)
            ttasks.checkViewTasks()
        elif options.check == 'networks':
            nnetworks = GVNetworks(mmemory, options.usemmap)
            nnetworks.checkViewNetworks()
    elif options.fingerprints is not None:
        ffingerprints = Fingerprints(mmemory)
        if options.fingerprints[1] == 'create':
            ffingerprints.doFingerprints(options.fingerprints[0])
        elif options.fingerprints[1] == 'check':
            ffingerprints.checkFingerprints(options.fingerprints[0])
    elif options.bump is not None:
        mmemory.open("r", options.usemmap)
        # FIX: string.atol() exists only in Python 2; int(x, 16) parses the
        # hex start address identically and works on both Python 2 and 3.
        mmemory.dump(int(options.bump[0], 16), int(options.bump[1]),
                     options.bump[2])
        mmemory.close()
    else:
        usage()
def yield_file_into_mem(self,
                        yield_func,
                        yield_args,
                        params,
                        kv_action_type=None,
                        msg=''):
    """Stream (k, v) pairs from ``yield_func(yield_args)`` into the redis Mem.

    Same lock/flag protocol as load_cache_file_into_mem(), but data comes
    from a generator instead of a pickled cache file.

    :param yield_func: callable yielding (k, v) pairs given ``yield_args``.
    :param yield_args: passed through to yield_func; also concatenated into
        the lock prefix, so it is assumed to be a string -- TODO confirm.
    :param params: Mem()/redis params; honours "overwrite_memory" and
        "ignore_lock". ``params["engine"]`` must be "redis".
    :param kv_action_type: None -> plain set; 1 -> append v to the list
        stored at k (with membership dedup).
    :param msg: label appended to progress prints.
    """
    if self.my_platform == "mac":
        single_server = True  # not on cluster
    else:
        single_server = False
    assert params["engine"] == "redis"
    Force_Redo = ("overwrite_memory" in params and params["overwrite_memory"])
    ignore_lock = ("ignore_lock" in params and params["ignore_lock"])
    # One loader per (yield_args, meta file) on a cluster; others skip.
    with AccessRestrictionContext(
            prefix=yield_args + self.meta_file_name,
            no_restriction=(ignore_lock or single_server)) as lock:
        lock.Access_Or_Wait_And_Skip("yield_file_into_mem")
        self.mm = Mem(params)
        if iprint:
            print("yield_file_into_mem params", params)
            print('mm.prefix', self.mm.prefix)
        tmp = self.mm.get(Valid_Flag_Key)
        tmp2 = self.mm.get(End_Flag_Key)
        if tmp == 1 and tmp2 == 1 and not Force_Redo:
            # Another process already finished; nothing to do.
            if iprint:
                print("Redis already cached, skip...")
            return
        if not Force_Redo:
            # "valid but not finished" while pairs stream in.
            self.mm.set(End_Flag_Key, 0)
            self.mm.set(Valid_Flag_Key, 1)
            time.sleep(0.1)
        cnt, thresh = 0, 1024 * 8
        for k, v in yield_func(yield_args):
            if kv_action_type is None:
                self.mm.set(k, v)
            elif kv_action_type == 1:  # append to vlist
                tmp = self.mm.get(k)
                if tmp is None:
                    tmp = []
                if v not in tmp:
                    tmp.append(v)
                    self.mm.set(k, tmp)
            cnt += 1
            if iprint and cnt >= thresh:
                thresh *= 2  # log progress with exponential backoff
                print("mm key cnt %d. %s" % (cnt, msg))
        self.mm.set(End_Flag_Key, 1)  # signal: load complete
        if iprint:
            print("yielded key num: %d" % cnt)
def gen_major_header(self, tracer_dump):
    '''Write the tracer major-header "mem" lines to the AVP file.

    Refreshes tracer.MAJOR_HEADER from tracer_dump, folds each field into a
    scratch Mem(2) image keyed by address (existing addresses are
    cross-checked, new ones added), then writes the image sorted by address.
    Side effect: resets tracer.total_size to tracer.major_header_size so the
    sections that follow are offset from the header.
    '''
    self.update_major_header(tracer_dump)
    major_header = Mem(2);
    self.avp_handle.write("\t// START OF Program & Data\n")
    tracer.total_size = tracer.major_header_size
    for key in tracer.MAJOR_HEADER:
        # position appears to be in 32-bit words (x4 -> byte offset as a
        # 4-hex-digit address) -- TODO confirm.
        mem_addr = "%04x" % (tracer.MAJOR_HEADER[key]["position"] * 4)
        if mem_addr in major_header.mem_lines:
            major_header.check_mem_line(mem_addr,
                                        tracer.MAJOR_HEADER[key]["offset"],
                                        tracer.MAJOR_HEADER[key]["size"],
                                        tracer.MAJOR_HEADER[key]["value"],
                                        tracer.MAJOR_HEADER[key]["DOC"])
        else:
            major_header.add_mem_line(mem_addr,
                                      tracer.MAJOR_HEADER[key]["offset"],
                                      tracer.MAJOR_HEADER[key]["size"],
                                      tracer.MAJOR_HEADER[key]["value"],
                                      tracer.MAJOR_HEADER[key]["DOC"])
    # Emit in ascending address order under the object's replay high address.
    for key in sorted(major_header.mem_lines):
        self.avp_handle.write(
            "\tmem\t0x00%s%s\t0x%s\n" %
            (self.__replay_addr_h, key, major_header.mem_lines[key]["value"]))
    del (major_header)
def gen_sub_headers(self, subhdr, replay_addr):
    """Emit the "mem" AVP lines for one sub-header block.

    Skips the bookkeeping entries ("hdr_size"/"offset"), merges every other
    field into a scratch Mem image keyed by address (cross-checking addresses
    already present), then writes the image in ascending address order under
    the caller-supplied replay address.
    """
    image = Mem(2)
    for name in subhdr:
        if eq(name, "hdr_size") or eq(name, "offset"):
            continue  # bookkeeping fields, not memory content
        field = subhdr[name]
        addr = "%04x" % (field["position"] * 4 + tracer.total_size)
        merge = (image.check_mem_line if addr in image.mem_lines
                 else image.add_mem_line)
        merge(addr, field["offset"], field["size"], field["value"],
              field["DOC"])
    for addr in sorted(image.mem_lines):
        self.avp_handle.write(
            "\tmem\t0x00%s%s\t0x%s\n" %
            (replay_addr, addr, image.mem_lines[addr]["value"]))
    del image
def gen_major_header_type2(self, tracer_dump, replay_addr):
    '''Write the type-2 tracer-dump major-header "mem" lines to the AVP file.

    No-op unless tracer_dump["occur"] is truthy.  Otherwise refreshes
    tracer.MAJOR_HEADER_type2 from tracer_dump, folds each field into a
    scratch Mem(2) image, and writes it sorted by address under the
    caller-supplied replay_addr.  Side effect: resets tracer.total_size to
    tracer.major_header_size.
    '''
    if not tracer_dump["occur"]:
        return
    self.update_major_header_type2(tracer_dump)
    major_header = Mem(2);
    self.avp_handle.write("\t// TRACER DUMP TYPE 2\n")
    tracer.total_size = tracer.major_header_size
    for key in tracer.MAJOR_HEADER_type2:
        # position appears to be in 32-bit words (x4 -> byte offset) -- TODO confirm.
        mem_addr = "%04x" % (tracer.MAJOR_HEADER_type2[key]["position"] * 4)
        if mem_addr in major_header.mem_lines:
            major_header.check_mem_line(
                mem_addr, tracer.MAJOR_HEADER_type2[key]["offset"],
                tracer.MAJOR_HEADER_type2[key]["size"],
                tracer.MAJOR_HEADER_type2[key]["value"],
                tracer.MAJOR_HEADER_type2[key]["DOC"])
        else:
            major_header.add_mem_line(
                mem_addr, tracer.MAJOR_HEADER_type2[key]["offset"],
                tracer.MAJOR_HEADER_type2[key]["size"],
                tracer.MAJOR_HEADER_type2[key]["value"],
                tracer.MAJOR_HEADER_type2[key]["DOC"])
    for key in sorted(major_header.mem_lines):
        self.avp_handle.write(
            "\tmem\t0x00%s%s\t0x%s\n" %
            (replay_addr, key, major_header.mem_lines[key]["value"]))
    del (major_header)
def gen_prologs(self, prolog):
    """Write the AVP "mem" lines for a prolog section.

    Entries named "prolog_size"/"offset" are metadata and skipped; every
    other field is folded into a scratch Mem image (addresses already present
    are cross-checked, new ones added) and dumped sorted by address using the
    object's replay high address.
    """
    image = Mem(2)
    for name in prolog:
        if eq(name, "prolog_size") or eq(name, "offset"):
            continue  # metadata, no memory payload
        field = prolog[name]
        addr = "%04x" % (field["position"] * 4 + tracer.total_size)
        if addr in image.mem_lines:
            # (value adjustment via self.adjust_data stays disabled upstream)
            image.check_mem_line(addr, field["offset"], field["size"],
                                 field["value"], field["DOC"])
        else:
            image.add_mem_line(addr, field["offset"], field["size"],
                               field["value"], field["DOC"])
    for addr in sorted(image.mem_lines):
        self.avp_handle.write(
            "\tmem\t0x00%s%s\t0x%s\n" %
            (self.__replay_addr_h, addr, image.mem_lines[addr]["value"]))
    del image
def set_mem_valid(self, params):  # force set to be valid
    """Forcibly mark the redis cache described by *params* as loaded & valid.

    No-op for any engine other than redis.
    """
    if params["engine"] != "redis":
        return
    self.mm = Mem(params)
    for flag in (Valid_Flag_Key, End_Flag_Key):
        self.mm.set(flag, 1)
class CacheManager:
    """Manage named caches backed by a redis ``Mem`` store.

    A cache is described by a small meta file (pickled dict) kept under the
    user's meta dir and mirrored on a central file server.  The cached data
    either comes from a pickled cache file or is produced on the fly by a
    dill-serialized generator function.  Two redis control flags
    (Valid_Flag_Key / End_Flag_Key) track whether the in-memory copy is
    usable.
    """

    def __init__(self, **kwargs):
        self.my_ip = get_my_ip()
        self.my_home_dir = os.path.expanduser("~")  #'/root'
        self.whoami = getpass.getuser()  #'root'
        self.my_platform = get_platform()  # 'mac','ubuntu','centos'
        assert not " " in self.my_ip, "[ CM ] my_ip cannot multi interface!"
        self.my_meta_dir = self.my_home_dir + CACHE_META_DIR
        # run which func if redis return None? params follow the format:
        # func_if_get_none(self.meta, key, *args , **kwargs) and return val (->key)
        self.func_if_get_none = None
        # shorten the prefix, like TinyURL.
        self.overwrite_prefix = kwargs.get("overwrite_prefix", False)
        # for Mem use. Use recent host ranking?
        self.rt_servers = kwargs.get("rt_servers", False)
        # overwrite redis cluster config? need to reload mem !
        self.overwrite_redis_servers = kwargs.get("overwrite_redis_servers",
                                                  False)
        if self.overwrite_redis_servers:
            print('[ CM ] overwrite_redis_servers !!! Reload !')

    def set_meta_file_name(self, fname):
        """Check and set self.meta_file_name, self.meta_file_abs_path.

        :param fname: = self.meta_file_name. str
        """
        assert fname != "", "Please provide meta_file_name (cannot be empty)!"
        assert not fname.endswith(
            os.sep), "set_meta_file_name cannot endswith(os.sep)!"
        assert not ".." in fname, "meta_file_name cannot have '../' !"
        self.meta_file_name = fname
        self.meta_file_abs_path = self.my_meta_dir + os.sep + fname

    def exist_cache(self, meta_file_name):
        """Return True if the meta file for *meta_file_name* exists locally.

        Side effect: sets self.meta_file_name / self.meta_file_abs_path.
        """
        self.set_meta_file_name(meta_file_name)
        return os.path.exists(self.meta_file_abs_path)

    def create_cache(self, **kwargs):
        """Create cache meta and file.

        :param meta_file_name: relative path to meta file. str
        :param overwrite_meta_file: overwrite meta_file? bool
        :param overwrite_cache_file: overwrite cache file? bool
        :param overwrite_memory: overwrite contents in cache memory? bool
        :param gen_cache_file_cmd: linux command for generating cache file on host. str
        :param gen_host_ip: the host ip who generated cache file. str
        :param gen_username: the user who generated cache file. str
        :param cache_file_abs_path: where to store generated cache file? str
        :param store_allKeys: if store all keys as list in 'allKeys'
        :param engine: redis or TODO. str
        :param params: a dict, params for redis. dict
        :param yield_func: if not cache-file, yield on the fly, give yield-in-file as well
        :param yield_args: yield-in-file CUT/+ other args
        """
        self.set_meta_file_name(kwargs.get("meta_file_name", ""))
        overwrite_meta_file = kwargs.get("overwrite_meta_file", False)
        overwrite_cache_file = kwargs.get("overwrite_cache_file", False)
        if not self.overwrite_prefix:
            self.overwrite_prefix = kwargs.get("overwrite_prefix", False)
        if os.path.exists(self.meta_file_abs_path) and not overwrite_meta_file:
            raise Exception(
                "Already exists locally: " + self.meta_file_abs_path +
                ", overwrite_meta_file=True,? or use_cache() instead of create_cache()"
            )
        gen_cache_file_cmd = kwargs.get("gen_cache_file_cmd", "")
        assert gen_cache_file_cmd != "", "gen_cache_file_cmd cannot be empty!"
        gen_host_ip = kwargs.get("gen_host_ip", self.my_ip)
        if gen_host_ip == "127.0.0.1" or gen_host_ip == "localhost":
            gen_host_ip = self.my_ip
        gen_username = kwargs.get("gen_username", self.whoami)
        self.cache_file_abs_path = kwargs.get("cache_file_abs_path", "")
        self.store_allKeys = kwargs.get("store_allKeys", False)
        assert self.cache_file_abs_path != "", "cache_file_abs_path cannot be empty!"
        engine = kwargs.get("engine", "redis")
        params = kwargs.get("params", {})
        params["prefix"] = self.meta_file_name
        if self.overwrite_prefix:
            params[
                "overwrite_prefix"] = True  # ask Mem() to translate pref mapping
        params["engine"] = engine
        params["overwrite_memory"] = kwargs.get("overwrite_memory", False)
        yield_func = kwargs.get("yield_func",
                                None)  # gen k,v on the fly, avoid cache file
        yield_args = kwargs.get("yield_args", None)  # inputs to yield func
        kv_action_type = kwargs.get(
            "kv_action_type", None)  # yield func set key-value append/direct set
        loading_msg = kwargs.get("loading_msg", "")
        # Assemble the meta record that gets persisted locally and POSTed to
        # the central file server.
        meta = {}
        meta["meta_file_name"] = self.meta_file_name
        meta["meta_file_abs_path"] = self.meta_file_abs_path
        meta["gen_cache_file_cmd"] = gen_cache_file_cmd
        meta["gen_host_ip"] = gen_host_ip
        meta["gen_username"] = gen_username
        meta["cache_file_abs_path"] = self.cache_file_abs_path
        meta["engine"] = engine
        meta["params"] = pickle.dumps(params)
        meta["create_time"] = unix2datetime(time.time())
        meta["yield_func"] = dill.dumps(yield_func)  # it is a function in 3.py
        meta["yield_args"] = yield_args
        meta["kv_action_type"] = kv_action_type
        if self.my_platform != "centos" and not overwrite_meta_file:
            # check if already exists on tarekc:
            r = requests.get(url=FILE_SERVER_GET + "cache_meta",
                             params={"meta_file_name": self.meta_file_name})
            data = r.json()
            if not "not-found" in data['status']:
                raise Exception(
                    "Already exists on Centos: " + self.meta_file_name +
                    ", overwrite_meta_file=True,? or use_cache() instead of create_cache()"
                )
        # auto set content type to application/json:
        r = requests.post(FILE_SERVER_POST + "cache_meta",
                          json=meta)  # TODO not using json
        if r.status_code != 200:
            raise Exception("create_cache post fail status_code: %d" %
                            r.status_code)
        if gen_host_ip == self.my_ip or self.my_platform == "centos":
            if yield_func is None or yield_args is None:
                # File-based cache: run the generation command locally.
                if overwrite_cache_file:
                    gen_cache_file_cmd += " overwrite=true"
                if iprint:
                    print("RUN " + gen_cache_file_cmd)
                subprocess.call(gen_cache_file_cmd.split(" "))
            tmpdir = os.sep.join(self.meta_file_abs_path.split(os.sep)[0:-1])
            if not os.path.exists(tmpdir):
                os.makedirs(tmpdir)
            if True:
                pickle.dump(meta, open(self.meta_file_abs_path, 'wb'))
            else:  # TODO, no longer using json
                with open(self.meta_file_abs_path, 'wb') as f:
                    json.dump(meta,
                              codecs.getwriter('utf-8')(f),
                              ensure_ascii=False)
            if iprint:
                print("create_cache(): Saved meta_file " +
                      self.meta_file_abs_path)
        else:
            if iprint:
                print("Gen cmd not on local host: " + gen_cache_file_cmd)
        # NOTE(review): str() around pickled bytes is a Python-3 hazard --
        # confirm "params" round-trips as expected.
        meta["params"] = pickle.loads(str(meta["params"]))
        if self.rt_servers:
            meta["params"]["rt_servers"] = True
        if self.overwrite_redis_servers:
            meta["params"]["overwrite_servers"] = True
        ret = self.activate_mem(meta["params"])
        if ret == "invalid" or params["overwrite_memory"]:
            if yield_func is None or yield_args is None:
                if iprint:
                    print("RUN load_cache_file_into_mem() ...")
                self.load_cache_file_into_mem(meta["cache_file_abs_path"],
                                              meta["params"],
                                              self.store_allKeys,
                                              msg=loading_msg)
            else:
                if iprint:
                    print("RUN yield_func() " + yield_args)
                self.yield_file_into_mem(yield_func,
                                         yield_args,
                                         meta["params"],
                                         kv_action_type=kv_action_type,
                                         msg=loading_msg)
        print(self.activate_mem(meta["params"]))
        self.meta = meta

    def use_cache(self, **kwargs):
        """Use mem according to cache meta.

        :param meta_file_name: relative path to meta file. str
        :param overwrite_meta_by_centos: overwrite meta_file by that on centos? bool
        :param store_allKeys: upon re-do, if store all keys as list in 'allKeys'
        - overwrite_memory: force reload into memory.
        - ignore_invalid_mem: non-block call, ignore not in cache.
        - set_to_be_valid: overwrite flag=1 to regard as valid.
        - ignore_lock: no access lock on load mm func.
        - overwrite_prefix: use tiny short prefix.
        - check_pref_is_short: check if overwrite_prefix is SET.
        """
        self.set_meta_file_name(kwargs.get("meta_file_name", ""))
        overwrite_meta_by_centos = kwargs.get("overwrite_meta_by_centos",
                                              False)
        self.store_allKeys = kwargs.get("store_allKeys", False)
        self.ignore_invalid_mem = kwargs.get("ignore_invalid_mem", False)
        overwrite_memory = kwargs.get("overwrite_memory", False)
        set_to_be_valid = kwargs.get("set_to_be_valid", False)
        ignore_lock = kwargs.get("ignore_lock", False)
        check_pref_is_short = kwargs.get("check_pref_is_short", True)
        if not self.overwrite_prefix:
            self.overwrite_prefix = kwargs.get("overwrite_prefix", False)
        loading_msg = kwargs.get("loading_msg", "")
        params = kwargs.get("params", {})  # if you want to change params
        if "params" in kwargs:
            print("[CM] params update these:", params)
        if not os.path.exists(self.meta_file_abs_path):
            # Meta not cached locally: fetch it from the central file server
            # (unless we ARE the server platform, in which case it must exist).
            if self.my_platform != "centos":
                r = requests.get(
                    url=FILE_SERVER_GET + "cache_meta",
                    params={"meta_file_name": self.meta_file_name})
                meta = r.json(
                )  # TODO not using json, changed syncdir/file-server/app.py: get_file()
                if "not-found" in meta['status']:
                    raise Exception("Not exists? " + self.meta_file_name)
                meta.pop('status')
                tmpdir = os.sep.join(
                    self.meta_file_abs_path.split(os.sep)[0:-1])
                if not os.path.exists(tmpdir):
                    os.makedirs(tmpdir)
                with open(self.meta_file_abs_path,
                          'wb') as f:  # TODO not using json
                    json.dump(meta,
                              codecs.getwriter('utf-8')(f),
                              ensure_ascii=False)
                if iprint:
                    print("Copied from centos: " + self.meta_file_abs_path)
            else:
                raise Exception("Not exists? " + self.meta_file_name)
        else:  # already exists locally:
            if self.my_platform != "centos" and overwrite_meta_by_centos:
                r = requests.get(
                    url=FILE_SERVER_GET + "cache_meta",
                    params={"meta_file_name": self.meta_file_name})
                meta = r.json()
                if not "not-found" in meta['status']:
                    meta.pop('status')
                    with open(self.meta_file_abs_path,
                              'wb') as f:  # TODO not using json
                        json.dump(meta,
                                  codecs.getwriter('utf-8')(f),
                                  ensure_ascii=False)
                    if iprint:
                        print("Copied from centos: " + self.meta_file_abs_path)
        try:
            meta = pickle.load(open(self.meta_file_abs_path, 'rb'))
        except:  # TODO not using json
            meta = json.load(open(self.meta_file_abs_path, 'rb'))
        # NOTE(review): str() around pickled bytes is a Python-3 hazard --
        # confirm "params" round-trips as expected.
        meta["params"] = pickle.loads(str(meta["params"]))
        meta["params"].update(params)
        if self.overwrite_prefix:
            meta["params"]['overwrite_prefix'] = True
        meta["params"]["overwrite_memory"] = overwrite_memory
        meta["params"]["ignore_lock"] = ignore_lock
        if self.rt_servers:
            meta["params"]["rt_servers"] = True
        if self.overwrite_redis_servers:
            meta["params"]["overwrite_servers"] = True
        if set_to_be_valid:
            print("force set to be valid !")
            self.set_mem_valid(meta["params"])
            return
        ret = self.activate_mem(meta["params"])
        if (ret == "invalid"
                and not self.ignore_invalid_mem) or overwrite_memory:
            yield_func = dill.loads(
                meta["yield_func"]) if "yield_func" in meta else None
            yield_args = meta["yield_args"] if "yield_args" in meta else None
            if yield_func is None or yield_args is None:
                if iprint:
                    print("Redo load_cache_file_into_mem() ...")
                self.load_cache_file_into_mem(meta["cache_file_abs_path"],
                                              meta["params"],
                                              self.store_allKeys,
                                              msg=loading_msg)
            else:
                if iprint:
                    print("RUN yield_func() " + yield_args)
                kv_action_type = meta[
                    "kv_action_type"] if "kv_action_type" in meta else None
                self.yield_file_into_mem(yield_func,
                                         yield_args,
                                         meta["params"],
                                         kv_action_type=kv_action_type,
                                         msg=loading_msg)
        print(self.activate_mem(meta["params"]))
        self.meta = meta
        if check_pref_is_short:
            if len(self.mm.prefix) > 4:  # support more prefixes if > more.
                print(self.mm.prefix)
                print(" :prefix too long, not overwrite_prefix?")
                sys.exit(0)

    # You Edit
    def activate_mem(self, params):
        """Bind self.mm to the cache in *params*; return 'ok'/'invalid'/'unknown-engine'."""
        if params["engine"] == "redis":
            if iprint >= 3:
                print("Redis params", params)
            self.mm = Mem(params)
            tmp = self.mm.get(Valid_Flag_Key)
            tmp2 = self.mm.get(End_Flag_Key)
            if (tmp is None or tmp == 0) or (tmp2 is None or tmp2 == 0):
                if iprint >= 2:
                    print("[ CM ] Invalid activation")
                return "invalid"
            return 'ok'
        return "unknown-engine"

    def set_mem_valid(self, params):  # force set to be valid
        """Forcibly mark the redis cache as loaded and valid."""
        if params["engine"] == "redis":
            self.mm = Mem(params)
            self.mm.set(Valid_Flag_Key, 1)
            self.mm.set(End_Flag_Key, 1)

    def load_cache_file_into_mem(self,
                                 cache_file_abs_path,
                                 params,
                                 store_allKeys=False,
                                 msg=''):
        '''cache_file assumed to be a dict of k:list, with header, dtype, keyPos info.

        Loads the pickled dict into the redis Mem under a cross-process lock;
        skips entirely if another process already completed the load and
        overwrite was not requested.
        '''
        if self.my_platform == "mac":
            single_server = True  # not on cluster
        else:
            single_server = False
        assert params["engine"] == "redis"
        Force_Redo = ("overwrite_memory" in params
                      and params["overwrite_memory"])
        # Cluster-wide lock keyed by the cache file path.
        with AccessRestrictionContext(prefix=cache_file_abs_path,
                                      no_restriction=single_server) as lock:
            lock.Access_Or_Wait_And_Skip("load_cache_file_into_mem")
            if iprint:
                print("load_cache_file_into_mem params", params)
            self.mm = Mem(params)
            tmp = self.mm.get(Valid_Flag_Key)
            tmp2 = self.mm.get(End_Flag_Key)
            if tmp == 1 and tmp2 == 1 and not Force_Redo:
                if iprint:
                    print("Redis already cached, skip...")
                return
            print("pickle.load %s ..." % cache_file_abs_path)
            da = pickle.load(open(cache_file_abs_path, "rb"))
            if iprint >= 2:
                try:
                    print(" - Info - ", da["header"], da["dtype"],
                          da["keyPos"])
                except:
                    print(" - missing keyPos or header or dtype -")
            if not Force_Redo:
                # "valid but not finished" while keys stream in.
                self.mm.set(End_Flag_Key, 0)
                self.mm.set(Valid_Flag_Key, 1)
                time.sleep(0.1)  # redis checks expired at freq 10hz.
            allKeys = []
            # Pop meta entries so the loop below only sees data keys.
            if "header" in da:
                header = da.pop("header")
                self.mm.set("header", header)
            if "dtype" in da:
                dtype = da.pop("dtype")
                self.mm.set("dtype", dtype)
            if "keyPos" in da:
                keyPos = da.pop("keyPos")
                self.mm.set("keyPos", keyPos)
            cnt, thresh = 0, 1024 * 8
            for k, v in da.items():
                self.mm.set(k, v)
                cnt += 1
                if store_allKeys:
                    allKeys.append(k)
                if iprint and cnt >= thresh:
                    thresh *= 2  # exponential-backoff progress log
                    print("mm key cnt %d. %s" % (cnt, msg))
            if store_allKeys:
                self.mm.set("allKeys", allKeys)
            self.mm.set(End_Flag_Key, 1)  # signal: load complete
            if iprint:
                print("loaded cache key num: %d" % cnt)

    def yield_file_into_mem(self,
                            yield_func,
                            yield_args,
                            params,
                            kv_action_type=None,
                            msg=''):
        """Stream (k, v) pairs from yield_func(yield_args) into the redis Mem.

        Same lock/flag protocol as load_cache_file_into_mem(); kv_action_type
        1 appends v to the deduplicated list stored at k.
        """
        if self.my_platform == "mac":
            single_server = True  # not on cluster
        else:
            single_server = False
        assert params["engine"] == "redis"
        Force_Redo = ("overwrite_memory" in params
                      and params["overwrite_memory"])
        ignore_lock = ("ignore_lock" in params and params["ignore_lock"])
        with AccessRestrictionContext(
                prefix=yield_args + self.meta_file_name,
                no_restriction=(ignore_lock or single_server)) as lock:
            lock.Access_Or_Wait_And_Skip("yield_file_into_mem")
            self.mm = Mem(params)
            if iprint:
                print("yield_file_into_mem params", params)
                print('mm.prefix', self.mm.prefix)
            tmp = self.mm.get(Valid_Flag_Key)
            tmp2 = self.mm.get(End_Flag_Key)
            if tmp == 1 and tmp2 == 1 and not Force_Redo:
                if iprint:
                    print("Redis already cached, skip...")
                return
            if not Force_Redo:
                self.mm.set(End_Flag_Key, 0)
                self.mm.set(Valid_Flag_Key, 1)
                time.sleep(0.1)
            cnt, thresh = 0, 1024 * 8
            for k, v in yield_func(yield_args):
                if kv_action_type is None:
                    self.mm.set(k, v)
                elif kv_action_type == 1:  # append to vlist
                    tmp = self.mm.get(k)
                    if tmp is None:
                        tmp = []
                    if v not in tmp:
                        tmp.append(v)
                        self.mm.set(k, tmp)
                cnt += 1
                if iprint and cnt >= thresh:
                    thresh *= 2  # exponential-backoff progress log
                    print("mm key cnt %d. %s" % (cnt, msg))
            self.mm.set(End_Flag_Key, 1)  # signal: load complete
            if iprint:
                print("yielded key num: %d" % cnt)

    ''' ---- mm.get: what to do if redis returns null? perhaps low memory and you reload ?'''

    def get(self, key, *args, **kwargs):
        """Fetch *key* from mem; on a miss, optionally recompute via
        self.func_if_get_none and write the result back."""
        val = self.mm.get(key, *args, **kwargs)
        if val is None:
            if self.func_if_get_none is not None:
                val = self.func_if_get_none(self.meta, key, *args, **kwargs)
                if val is not None:
                    self.mm.set(key, val, *args, **kwargs)
        return val

    def set(self, key, val, *args, **kwargs):
        # self.set = self.mm.set set only allowed in load_cache_file_into_mem ?
        """Set *key* only if it is currently absent; return True on write."""
        _val = self.mm.get(key, *args, **kwargs)
        if _val is None:
            print("CacheManager SET !!", key, val)
            self.mm.set(key, val, *args, **kwargs)
            return True
        return False

    def get_id(self, ):
        """Return the bound meta_file_name, or None before set_meta_file_name()."""
        try:
            return self.meta_file_name
        except AttributeError:
            return None

    def invalidate_mem(
            self):  # Mark as invalid. force reload/re-yield k,v into mem
        """Zero both control flags so the next use_cache() reloads the data."""
        try:
            meta = pickle.load(open(self.meta_file_abs_path, 'rb'))
        except:  # TODO not using json
            meta = json.load(open(self.meta_file_abs_path, 'rb'))
        # NOTE(review): str() around pickled bytes is a Python-3 hazard.
        params = pickle.loads(str(meta["params"]))
        assert params["engine"] == "redis"
        self.mm = Mem(params)
        self.mm.set(End_Flag_Key, 0)
        self.mm.set(Valid_Flag_Key, 0)
try: dic_mm_geo_addr_mapping = pickle.load( open(Geo_File_Dir + "mm_geo_addr_mapping", "rb")) except: dic_mm_geo_addr_mapping = {} try: dic_mm_addr_bbox_snwe = pickle.load( open(Geo_File_Dir + "mm_addr_bbox_snwe", "rb")) except: dic_mm_addr_bbox_snwe = {} if On_Cluster: mm_geo_addr_mapping = Mem({ "num": 5, "prefix": "geo~map~", "expire": 90 * 86400 }) mm_addr_bbox_snwe = Mem({ "num": 5, "prefix": "bbox~snwe~", "expire": 90 * 86400 }) else: mm_geo_addr_mapping = Mem({ "use_ips": ['localhost'], "prefix": "geo~map~", "expire": 90 * 86400, 'overwrite_servers': True }) mm_addr_bbox_snwe = Mem({
def fix_no_speed(addr, mm_loaded=False):
    """Infer missing edge speeds for region *addr* and append them to the
    per-region speed file.

    Reads the known (n1, n2) -> speed pairs into a redis Mem (unless
    ``mm_loaded`` says they are already there), reads the list of edges with
    no speed, then repeatedly copies, for each unfixed edge, the speed of the
    best-aligned adjacent edge (smallest bearing difference), until every
    edge is fixed or an iteration makes no progress.

    :param addr: region name used to locate the OSM cache files.
    :param mm_loaded: skip re-loading the known-speed file into redis.
    """
    print("fix_no_speed():", addr, 'mm_loaded', mm_loaded)
    bugNoSpeedFn = mypydir + "/cache/nospeed-%s.txt" % addr
    if On_Cluster:
        mm_tmp_nid2spd = Mem({
            "num": 30,
            "prefix": "~fx_n2spd~",
            "expire": 86400 * 30
        })
    else:
        mm_tmp_nid2spd = Mem({
            "use_ips": ['localhost'],
            "prefix": "~fx_n2spd~",
            "expire": 86400 * 30
        })
    aleady_mm_loaded = mm_loaded
    correctSpdFn = DirOSM + os.sep + addr + "/%s-nids-to-speed.txt" % addr
    if not aleady_mm_loaded:
        # Load known speeds: each line is "n1,n2,speed".
        cnt = 0
        pthresh = 1
        print('Loading ' + correctSpdFn)
        with open(correctSpdFn, "r") as f:
            for l in f:
                st = l.split(",")
                if len(st) < 3:
                    continue
                mm_tmp_nid2spd.set((int(st[0]), int(st[1])), float(st[2]))
                cnt += 1
                if cnt >= pthresh:
                    print('fix_no_speed load cnt', cnt)
                    pthresh *= 2  # exponential-backoff progress log
        print("correct spd tup len=", cnt)
    mm_nid2latlng = CacheManager(
        overwrite_prefix=True)  # will use existing config
    mm_nid2neighbor = CacheManager(overwrite_prefix=True)
    mm_nid2latlng.use_cache(
        meta_file_name="osm/cache-%s-nodeid-to-lat-lng.txt" % addr)
    mm_nid2neighbor.use_cache(
        meta_file_name="osm/cache-%s-nodeid-to-neighbor-nid.txt" % addr)
    # n1n2s maps (n1, n2) -> inferred speed; -1 means "not fixed yet".
    n1n2s = {}
    print('Reading ' + bugNoSpeedFn)
    with open(bugNoSpeedFn, "r") as f:
        for l in f:
            st = l.split(",")
            if len(st) < 2:
                continue
            st = [int(x) for x in st]
            tup = (st[0], st[1])
            if tup not in n1n2s:
                n1n2s[tup] = -1
    print("no_speed nid tup len=", len(n1n2s))
    lastBugCnt = None
    while True:
        # One fix-point iteration; bugcnt counts edges still unfixed after it.
        bugcnt = 0
        for tup in n1n2s.keys():
            if n1n2s[tup] > 0:
                continue  # already fixed
            n1 = tup[0]
            n2 = tup[1]
            hd1 = get_bear_given_nid12(n1, n2, mm_nid2latlng)
            if hd1 is None:
                continue
            nblist = mm_nid2neighbor.get(n1)  # ?->n1->n2
            mindiff = 1e10
            fixed = 0
            if nblist:
                # Try incoming edges at n1: take the speed of the edge whose
                # bearing best matches this one.
                for nbn in nblist:
                    hdn = get_bear_given_nid12(nbn, n1, mm_nid2latlng)
                    if hdn is None:
                        continue
                    angle = min_angle_diff(hd1, hdn)
                    if (nbn, n1) in n1n2s and n1n2s[(nbn, n1)] > 0:
                        spdn = n1n2s[(nbn, n1)]
                    else:
                        spdn = mm_tmp_nid2spd.get((nbn, n1))
                    if angle < mindiff and spdn is not None:
                        mindiff = angle
                        n1n2s[tup] = spdn
                        fixed = 1
            if fixed:
                continue
            nblist = mm_nid2neighbor.get(n2)  # n1->n2->?
            mindiff = 1e10
            if nblist:
                # Fall back to outgoing edges at n2.
                for nbn in nblist:
                    hdn = get_bear_given_nid12(n2, nbn, mm_nid2latlng)
                    if hdn is None:
                        continue
                    angle = min_angle_diff(hd1, hdn)
                    if (n2, nbn) in n1n2s and n1n2s[(n2, nbn)] > 0:
                        spdn = n1n2s[(n2, nbn)]
                    else:
                        spdn = mm_tmp_nid2spd.get((n2, nbn))
                    if angle < mindiff and spdn is not None:
                        mindiff = angle
                        n1n2s[tup] = spdn
                        fixed = 1
            if fixed == 0:
                bugcnt += 1
        if bugcnt == 0:
            break
        print("bugcnt", bugcnt)
        if lastBugCnt is not None:
            if lastBugCnt == bugcnt:
                # No progress since last pass: stop to avoid looping forever.
                print("Give up #", bugcnt)
                break
        lastBugCnt = bugcnt
    # Append everything we managed to fix to the known-speed file.
    with open(correctSpdFn, "a") as f:
        for tup in n1n2s.keys():
            if n1n2s[tup] < 0:
                continue
            print("%d,%d,%.2f" % (tup[0], tup[1], n1n2s[tup]))
            f.write("%d,%d,%.2f\n" % (tup[0], tup[1], n1n2s[tup]))
    print("Give up #", bugcnt)
from a4loadServer import query_sumo_connection, query_sumo_edge, query_sumo_junction from configure.params import Mytools_Rel_Dir, region2serverIP from configure.assemble import * mypydir = os.path.abspath( os.path.dirname(inspect.getfile(inspect.currentframe()))) addpath = mypydir + Mytools_Rel_Dir from mem import Mem config = None traci = None # Redis cache on port 6380 mm_sumo = Mem({ "prefix": "s!", "expire": 864000, "use_ips": [region2serverIP[R['addr']]['host']] }) mm_sumo.set("test", 1) _mm_is_good = 0 if mm_sumo.get('test') == 1: print("Mem good on " + region2serverIP[R['addr']]['host']) _mm_is_good = 1 else: raise Exception("Mem fail ... " + region2serverIP[R['addr']]['host']) def construct_edge(eid, isInternal, turn="", allViaLanes=None,
def run(self):
    """Handler-thread main loop: poll requestQueue and serve fuzzer requests.

    For each TCP request: read a 1-byte mode, then either a path
    (LOCAL_MODE) or a raw input buffer (default mode); run the target via
    Executor with a timeout; reply with a 1-byte status code followed by the
    shared Mem contents; close the connection.  Sleeps briefly when the
    queue is empty.
    """
    if verbosity > 1:
        print("Fuzzer runs handler thread started.")
    while (True):
        request = requestQueue.poll()
        if request != None:
            if verbosity > 1:
                print("Handling request 1 of " +
                      str(requestQueue.request_cnt + 1))
            #InputStream is = request.clientSocket.getInputStream()
            #OutputStream os = request.clientSocket.getOutputStream()
            Mem.clear()
            result = STATUS_CRASH
            appCall = None
            # read the mode (local or default)
            mode = int.from_bytes(request.conn.recv(1), "little")
            print("mode: " + str(mode))
            # LOCAL MODE
            if mode == LOCAL_MODE:
                if verbosity > 1:
                    print("Handling request in LOCAL MODE.")
                # read the length of the path (integer)
                #pathlen = is.recv() | is.recv() << 8 | is.recv() << 16 | is.recv() << 24
                pathlen = int.from_bytes(request.conn.recv(4), "little")
                if verbosity > 2:
                    # NOTE(review): str + int raises TypeError when
                    # verbosity > 2 -- should be str(pathlen).
                    print("Path len = " + pathlen)
                if pathlen < 0:
                    if verbosity > 1:
                        print("Failed to read path length")
                    result = STATUS_COMM_ERROR
                else:
                    # read the path, one byte at a time
                    input = bytearray(pathlen)
                    read = 0
                    while read < pathlen:
                        byte = request.conn.recv(1)
                        # NOTE(review): recv() returns b'' at EOF on Python 3,
                        # so `byte != ""` is always True here -- confirm.
                        if byte != "":
                            input[read] = int.from_bytes(byte, "little")
                            read = read + 1
                        else:
                            if verbosity > 1:
                                print(
                                    "No input available from stream, strangely, breaking."
                                )
                            result = STATUS_COMM_ERROR
                            break
                    path = input.decode()
                    if verbosity > 1:
                        print("Received path: " + path)
                    appCall = ApplicationCall(path, True)
            # DEFAULT MODE
            else:
                if (verbosity > 1):
                    print("Handling request in DEFAULT MODE.")
                # read the size of the input file (integer)
                #filesize = is.recv() | is.recv() << 8 | is.recv() << 16 | is.recv() << 24
                filesize = int.from_bytes(request.conn.recv(4), "little")
                if (verbosity > 2):
                    # NOTE(review): str + int raises TypeError -- str(filesize).
                    print("File size = " + filesize)
                if (filesize < 0):
                    if (verbosity > 1):
                        print("Failed to read file size")
                    result = STATUS_COMM_ERROR
                else:
                    # read the input file, one byte at a time
                    input = bytearray(filesize)
                    read = 0
                    while read < filesize:
                        byte = request.conn.recv(1)
                        if byte != "":
                            input[read] = int.from_bytes(byte, "little")
                            #print(str(input[read]))
                            read = read + 1
                        else:
                            # Short read: pad with zeros instead of failing.
                            if (verbosity > 1):
                                print(
                                    "No input available from stream, strangely"
                                )
                                print("Appending a 0")
                            input[read] = 0
                            read = read + 1
                    appCall = ApplicationCall(input, False)
            # @todo: protect shared thread data
            if (result != STATUS_COMM_ERROR and appCall != None):
                # run app with input
                #ExecutorService executor = Executors.newSingleThreadExecutor()
                #Future < Long > future = executor.submit(appCall)
                executor = Executor(appCall)
                try:
                    if (verbosity > 1):
                        print("Started...")
                    executor.start()
                    if (executor.join(timeout)):
                        if (verbosity > 1):
                            print("Finished!")
                        if executor.crashed:
                            result = STATUS_CRASH
                        else:
                            result = STATUS_SUCCESS
                    else:
                        result = STATUS_TIMEOUT
                        if (verbosity > 1):
                            print("Timeout!")
                except:
                    if (verbosity > 1):
                        print("Something didn't work!")
                        traceback.print_exc()
                    result = STATUS_CRASH
                executor.shutdown()
            if (verbosity > 1):
                print("Result: " + str(result))
            if (verbosity > 2):
                Mem.print()
            # send back status
            request.conn.send(result.to_bytes(1, "little"))
            # send back "shared memory" over TCP
            try:
                request.conn.send(Mem.mem)
            except:
                pass
            #Mem.print()
            # close connection
            request.conn.close()
            #request.clientSocket.shutdownOutput()
            #request.clientSocket.shutdownInput()
            #request.clientSocket.setSoLinger(true, 100000)
            #request.clientSocket.close()
            if (verbosity > 1):
                print("Connection closed.")
        else:
            # if no request, close your eyes for a bit
            time.sleep(0.10)