def test_file_bohmer(file):
    """Split *file* into train/test sets, fit the Bohmer model, score the test
    set and write ROC / precision-recall plots for the run."""
    # Derive every path from the dataset prefix once, up front.
    data_csv = file + "_data.csv"
    labels_csv = file + "_labels.csv"
    train_csv = file + "_train.csv"
    test_csv = file + "_test.csv"
    output_csv = file + "_output_bohmer.csv"
    graph_base = "../Data/Nolle_Graphs/" + file.split("/")[-1]

    split_dataset(data_csv, labels_csv, train_csv, test_csv, 10000)

    train_data = LogFile(train_csv, ",", 0, 1000000, None, "case_id", "name", convert=False)
    train_data.remove_attributes(["label"])
    model = bohmer.train(train_data, 3, 4, 1)

    # Test data reuses the value mapping learned from the training log.
    test_data = LogFile(test_csv, ",", 0, 1000000, None, "case_id", "name",
                        convert=False, values=train_data.values)
    bohmer.test(test_data, output_csv, model, "label", 0)

    plot.plot_single_roc_curve(output_csv, file, save_file=graph_base + "_roc_bohmer.png")
    plot.plot_single_prec_recall_curve(output_csv, file, save_file=graph_base + "_precrec_bohmer.png")
def remote_exec_file(self, script, test_name, result_query, test_detail, log_file):
    """Run *script* on the remote host whose credentials are stored in the
    local Redis DB; return the remote stdout flattened to one line."""
    # Connection details all live in the same local Redis instance.
    db_args = ("localhost", 6379, 0)
    host = Execute.db_get("host", *db_args)
    port = Execute.db_get("port", *db_args)
    user = Execute.db_get("user", *db_args)
    password = Execute.db_get("password", *db_args)
    stdout = Execute.remote_exec_file(
        script,
        host=host,
        port=int(port),
        user=user,
        password=password,
        test_name=test_name,
        result_query=result_query,
        test_detail=test_detail,
        log_file=log_file,
    )
    # Collapse literal "\n" escape sequences so the reply reads as one line.
    return "{0}".format(stdout).replace('\\n', " ")
def remote_key_exec(self, key, test_name, result_query, test_detail, log_file):
    """Execute the stored command *key* on the remote host configured in the
    local Redis DB; return the remote stdout flattened to one line."""
    dbhost = "127.0.0.1"
    dbport = 6379
    dbnum = 0
    host = Execute.db_get("host", dbhost, dbport, dbnum)
    port = Execute.db_get("port", dbhost, dbport, dbnum)
    user = Execute.db_get("user", dbhost, dbport, dbnum)
    password = Execute.db_get("password", dbhost, dbport, dbnum)
    stdout = Execute.remote_exec_key(key, host=host, port=int(port), user=user,
                                     password=password, test_name=test_name,
                                     result_query=result_query,
                                     test_detail=test_detail, log_file=log_file,
                                     db_host=dbhost, db_port=dbport, db=dbnum)
    # BUG FIX: `print stdout` was Python 2 statement syntax — a SyntaxError
    # under Python 3, which the sibling methods (print(...)) target.
    print(stdout)
    data = "{0}".format(stdout)
    d = data.replace('\\n', " ")
    return d
def compare_bpics(path):
    """Train and score both anomaly detectors on each of the five BPIC15 logs,
    writing per-model score files plus comparison ROC / precision-recall plots."""
    for log_nr in range(1, 6):
        # Input Files
        train = path + "BPIC15_train_%i.csv" % (log_nr)
        test = path + "BPIC15_test_%i.csv" % (log_nr)
        # Output files for this log
        output = path + "Output/BPIC15_output_%i.csv" % (log_nr)
        output_edbn = path + "Output/BPIC15_edbn_output_%i.csv" % (log_nr)
        prec_recall = path + "Output/prec_recall_%i.png" % (log_nr)
        roc = path + "Output/roc_%i.png" % (log_nr)

        train_data = LogFile(train, ",", 0, 500000, "Time", "Case",
                             activity_attr="Activity", convert=False)
        train_data.remove_attributes(["Anomaly", "Type", "Time"])
        test_data = LogFile(test, ",", 0, 500000, "Time", "Case",
                            activity_attr="Activity", values=train_data.values,
                            convert=False)

        # Likelihood-graph (Bohmer) model runs on the unconverted logs.
        bohmer_model = bmr.train(train_data)
        bmr.test(test_data, output, bohmer_model, label="Anomaly", normal_val="0")

        # EDBN is trained after integer-encoding both logs.
        train_data.convert2int()
        test_data.convert2int()
        edbn_model = edbn.train(train_data)
        edbn.test(test_data, output_edbn, edbn_model, label="Anomaly", normal_val="0")

        plt.plot_compare_prec_recall_curve([output, output_edbn],
                                           ["Likelihood Graph", "EDBN"],
                                           save_file=prec_recall)
        plt.plot_compare_roc_curve([output, output_edbn],
                                   ["Likelihood Graph", "EDBN"], roc)
def run(self, version, is_all, week_id):
    """
    Run AD Job
    :param version: service version handed to the executor.
    :param is_all: region selector ("all", "jpn", "chn", "usa"; anything else -> Other).
    :param week_id: week identifier for the parameter set.
    :return: job handle string from the service, or None when the HTTP call fails.
    """
    parameter_function = parameter.parameter(week_id, 0)
    region = str(is_all)
    if region == "all":
        run_country = parameter_function.AllCountry()
    elif region == "jpn":
        run_country = parameter_function.JPN()
    elif region == "chn":
        run_country = parameter_function.CHN()
    elif region == "usa":
        run_country = parameter_function.USA()
    else:
        run_country = parameter_function.Other()
    run_job = Execute.execute(version)
    response = run_job.post(run_country)
    print("1************************")
    print(response.status_code)
    # BUG FIX: `status_code is not 200` compared object identity and only
    # worked because CPython caches small ints; compare by value instead.
    if response.status_code != 200:
        return None
    return json.loads(response.text)["string"]
def run_ad(self, job_id):
    """
    Monitor run ad number
    :param job_id: job to query.
    :return: the service's totalAdCount payload, or None on any failure.
    """
    para = parameter.parameter(0, job_id)
    run_parameter = para.go_on_ad_to_order()
    run_job = Execute.execute(self.version)
    response = run_job.post(run_parameter)
    print("1************************")
    print(response.status_code)
    # BUG FIX: was `is not 200` (identity test); compare the value.
    if response.status_code != 200:
        return None
    # The service nests a JSON document inside the "string" field.
    return_object = json.loads(json.loads(response.text)["string"])
    if int(str(return_object["errorCode"])) == 200:
        implementation = return_object["data"]["totalAdCount"]
        # for country in implementation:
        #     print("monitor", country)
        return implementation
    return None
def is_done(self, job_id):
    """
    Monitor AD Job is over
    :param job_id: job to query.
    :return: the service's isDone flag as a string, or None on any failure.
    """
    para = parameter.parameter(0, job_id)
    friday_run = para.is_finish()
    run_job = Execute.execute(self.version)
    response = run_job.post(friday_run)
    print("1************************")
    print(response.status_code)
    # BUG FIX: was `is not 200` (identity test); compare the value.
    if response.status_code != 200:
        return None
    # The service nests a JSON document inside the "string" field.
    return_object = json.loads(json.loads(response.text)["string"])
    if int(str(return_object["errorCode"])) == 200:
        print("monitor",
              str(job_id) + ": " + str(return_object["data"]["isDone"]))
        return str(return_object["data"]["isDone"])
    return None
def checkRequestType(self):
    """Find requested request type and import given request parser.

    Sets self.requestParser and self.inputs["request"]; raises
    MissingParameterValue when no request is given and InvalidParameterValue
    for an unsupported one.
    """
    # Idiom fix: was `if not "request" in ...`.
    if "request" not in self.unparsedInputs:
        raise MissingParameterValue("request")

    # Hoist the lower-cased value instead of recomputing it per branch.
    request = self.unparsedInputs["request"].lower()

    # test, if one of the mandatory WPS operation is called (via request)
    # (mandatory operations see WPS_1-0-0 p.4 sect.6.1)
    if request == self.GET_CAPABILITIES:
        import GetCapabilities
        self.requestParser = GetCapabilities.Get(self.wps)
        self.inputs["request"] = self.GET_CAPABILITIES
    elif request == self.DESCRIBE_PROCESS:
        import DescribeProcess
        self.requestParser = DescribeProcess.Get(self.wps)
        self.inputs["request"] = self.DESCRIBE_PROCESS
    elif request == self.EXECUTE:
        import Execute
        self.requestParser = Execute.Get(self.wps)
        self.inputs["request"] = self.EXECUTE
    else:
        raise InvalidParameterValue("request",
                                    "Unsupported request type '%s'" % self.unparsedInputs["request"])
def getAllTeams():
    """Return the distinct team names from the teams table."""
    con = pymysql.connect(cfg.mysql['host'], cfg.mysql['user'],
                          cfg.mysql['password'], cfg.mysql['db'])
    with con:
        teams = e.execute(con, "select distinct name from teams")
        print(teams)
        con.close()
        return teams
def Exce(self, url, run_country):
    """POST *run_country* to the job service at *url*, retrying every 30s
    until it answers 200; return the service's "string" payload."""
    run_job = Execute.execute(url)
    response = run_job.post(run_country)
    logger.info("1************************")
    logger.info(response.status_code)
    # BUG FIX: was `is not 200` (identity test relying on CPython's
    # small-int cache); compare the value.
    if response.status_code != 200:
        time.sleep(30)
        # BUG FIX: the retry's result was discarded, so every retried call
        # returned None to the caller — propagate it.
        return self.Exce(url, run_country)
    else:
        return json.loads(response.text)["string"]
def __init__(self, memory: Memory, params=dict()):
    """Build the pipeline: per-thread Fetch units feeding a shared Issue
    unit and an Execute unit.

    :param memory: instruction memory shared by all fetch units.
    :param params: optional overrides (NUM_THREAD, NUM_STAGES,
        PREFETCH_POLICY). NOTE: the shared default dict is safe here because
        params is only read, never mutated.
    """
    # dict.get replaces the `x if k in params.keys() else default` chains.
    self.num_threads = int(params.get("NUM_THREAD", NUM_THREADS))
    self.num_stages = int(params.get("NUM_STAGES", NUM_STAGES))
    self.thread_unit = [Thread(tid, params) for tid in range(self.num_threads)]
    # One fetch unit per thread.
    self.fetch_unit = [Fetch(tid, memory, params, self.thread_unit[tid])
                       for tid in range(self.num_threads)]
    self.issue_unit = Issue(params)
    self.execute_unit = Execute(params)
    self.connect()
    # Watchdog timer (the original assigned this twice; once is enough).
    self.timer = DEFAULT_TIMEOUT
    # Prefetch
    self.prefetch_policy = params.get("PREFETCH_POLICY", PREFETCH_POLICY)
    self.tid_prefetch_vld = False
    self.tid_prefetch_ptr = 0
    # Statistics
    self.last_tick = 0
    self.count_flushed_inst = 0
    self.ipc = 0
    self.total_num_of_mem_access = 0
def getPitchingData(playerID):
    """Return all Pitching rows for *playerID*."""
    con = pymysql.connect(cfg.mysql['host'], cfg.mysql['user'],
                          cfg.mysql['password'], cfg.mysql['db'])
    with con:
        # SECURITY FIX: quote/escape the user-supplied id via the connection
        # instead of splicing it raw into the SQL (SQL injection risk).
        filtered = con.escape(playerID)
        res = e.execute(
            con, "select * from Pitching where playerID = " + filtered + ";")
        print(res)
        con.close()
        return res
def getBirthdayBoys():
    """Return players whose birthday is today, preferring those whose final
    game was in 2018; fall back to all players when none match."""
    con = pymysql.connect(cfg.mysql['host'], cfg.mysql['user'],
                          cfg.mysql['password'], cfg.mysql['db'])
    with con:
        # First pass: restrict to players active through 2018.
        res = e.execute(
            con,
            "select concat(nameFirst, ' ' , nameLast) as name, birthYear as year, playerid from people where birthMonth ="
            "MONTH(CURDATE()) and birthDay = DAY(CURDATE()) and finalGame like '%2018%' order by debut - finalGame desc;"
        )
        if res == "[]":
            # Nothing found: widen to every player with a birthday today.
            print("NO INITIAL RESPONSE")
            res = e.execute(
                con,
                "select concat(nameFirst, ' ' , nameLast) as name, birthYear as year, playerid from people where birthMonth ="
                "MONTH(CURDATE()) and birthDay = DAY(CURDATE()) order by debut - finalGame desc;"
            )
        print(res)
        con.close()
        return res
def getPlayerSalaries(playerID):
    """Return per-year salary vs league average for *playerID*."""
    con = pymysql.connect(cfg.mysql['host'], cfg.mysql['user'],
                          cfg.mysql['password'], cfg.mysql['db'])
    with con:
        # SECURITY FIX: escape the user-supplied id via the connection
        # instead of splicing it raw into the SQL (SQL injection risk).
        filtered = con.escape(playerID)
        plSal = e.execute(
            con, "select distinct a.yearID, s.salary, a.salary as 'average' from salaries as s"
            " join averages as a using(yearID) where playerID = " + filtered + ";")
        print(plSal)
        con.close()
        return plSal
def getPlayerTeams(playerID):
    """Return each team *playerID* appeared for, with the season count."""
    connection = pymysql.connect(cfg.mysql['host'], cfg.mysql['user'],
                                 cfg.mysql['password'], cfg.mysql['db'])
    with connection:
        # SECURITY FIX: escape the user-supplied id via the connection
        # instead of splicing it raw into the SQL (SQL injection risk).
        filtered = connection.escape(playerID)
        pl = e.execute(
            connection,
            "select distinct teamID as 'Team', count(distinct yearID, playerID) as 'Years'"
            " from appearances where playerID = " + filtered + " group by teamID;")
        print(pl)
        connection.close()
        return pl
def getAllBatters():
    """Return every player's full name, ordered by last name."""
    con = pymysql.connect(cfg.mysql['host'], cfg.mysql['user'],
                          cfg.mysql['password'], cfg.mysql['db'])
    with con:
        names = e.execute(
            con,
            "SELECT concat(nameFirst, ' ', nameLast) as name FROM people order by nameLast"
        )
        print(names)
        con.close()
        return names
def getPlayerBadges(playerID):
    """Return Hall-of-Fame induction status and awards for *playerID*."""
    connection = pymysql.connect(cfg.mysql['host'], cfg.mysql['user'],
                                 cfg.mysql['password'], cfg.mysql['db'])
    with connection:
        # SECURITY FIX: escape the user-supplied id via the connection
        # instead of splicing it raw into the SQL (SQL injection risk).
        filtered = connection.escape(playerID)
        pl = e.execute(
            connection,
            "select inducted, awardID, a.yearid from halloffame h join awardsplayers a on"
            " h.playerID = a.playerID where h.playerID = " + filtered + " and inducted ='Y' "
            "group by awardID, inducted having inducted = 'Y';")
        print(pl)
        connection.close()
        return pl
def getPlayerUrl(playerID):
    """Return the profile URL stored for *playerID*."""
    con = pymysql.connect(cfg.mysql['host'], cfg.mysql['user'],
                          cfg.mysql['password'], cfg.mysql['db'])
    with con:
        # SECURITY FIX: escape the user-supplied id via the connection
        # instead of splicing it raw into the SQL (SQL injection risk).
        filtered = con.escape(playerID)
        res = e.execute(
            con, "select url from playerUrls where playerID = " + filtered + ";")
        # res = json.dumps(json.loads(res).append({"url": "https://pecb.com/conferences/wp-content/uploads/2017/10/no-profile-picture.jpg"}))
        print(res)
        con.close()
        return res
def payment(self, version, week_id):
    """
    Run AD payment
    :param version: service version handed to the executor.
    :param week_id: week identifier for the parameter set.
    :return: job handle string from the service, or None when the call fails.
    """
    parameter_function = parameter.parameter(week_id, 0)
    run_payment = parameter_function.payment()
    run_job = Execute.execute(version)
    response = run_job.post(run_payment)
    # BUG FIX: was `is not 200` (identity test); compare the value.
    if response.status_code != 200:
        return None
    return json.loads(response.text)["string"]
def search(search):
    """Free-text search across players, teams and parks (max 10 hits each)."""
    connection = pymysql.connect(cfg.mysql['host'], cfg.mysql['user'],
                                 cfg.mysql['password'], cfg.mysql['db'])
    with connection:
        # SECURITY FIX: escape the user-supplied term before splicing it into
        # the LIKE patterns (SQL injection risk). escape_string escapes
        # without adding quotes, so it can sit inside '%...%'.
        # (Also removes the dead `filtered` local the original never used.)
        term = connection.escape_string(search)
        plSal = e.execute(
            connection,
            "(select distinct concat(nameFirst, ' ', nameLast) as v, playerId as k, 'p' as type from people where concat(nameFirst, ' ' , nameLast) like '%" + term + "%' limit 10) union "
            "(select distinct name, teamId, 't' from teams where name like '%" + term + "%' limit 10) "
            "union (select distinct parkname , id, 'f' from parks where parkname like '%" + term + "%' or parkalias like '%" + term + "%' limit 10) ; ")
        print(plSal)
        return plSal
def checkRequestType(self, node):
    """Find requested request type and import given request parser.

    Inspects the root tag of the POSTed XML and installs the matching
    Post parser; raises InvalidParameterValue for unknown operations.
    """
    tag = node.tagName
    # `x in s` replaces the original `s.find(x) > -1` checks.
    if self.GET_CAPABILITIES in tag:
        import GetCapabilities
        self.requestParser = GetCapabilities.Post(self.wps)
        self.inputs["request"] = "getcapabilities"
    elif self.DESCRIBE_PROCESS in tag:
        import DescribeProcess
        self.requestParser = DescribeProcess.Post(self.wps)
        self.inputs["request"] = "describeprocess"
    elif self.EXECUTE in tag:
        import Execute
        self.requestParser = Execute.Post(self.wps)
        self.inputs["request"] = "execute"
    else:
        raise self.wps.Exceptions.InvalidParameterValue("request")
import Lexer
import Parser
import Execute

if __name__ == '__main__':
    # Simple REPL for the IMP language: read a line, tokenize, parse,
    # then interpret the tree against a shared environment.
    lexer = Lexer.TokenLexer()
    parser = Parser.Parser()
    print('IMP Language')
    env = {}
    while True:
        try:
            text = input('IMP Language > ')
        except EOFError:
            # Ctrl-D / end of input ends the session.
            break
        if not text:
            continue  # skip empty lines
        tree = parser.parse(lexer.tokenize(text))
        Execute.Interpret(tree, env)
logger.info(u'Program started')

# Map weekday name -> number (Monday=1 ... Sunday=7).
days = {['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday'][i - 1]: i for i in range(1, 8)}

# TV channels to process; names are Russian because they must match the
# upstream data source — do not translate them.
Channels = [
    '2х2', '5 канал', 'Домашний', 'Звезда', 'Канал Дисней', 'Карусель',
    'Матч ТВ', 'Муз ТВ', 'НТВ', 'Первый', 'Пятница', 'РЕН ТВ', 'Россия 1',
    'Россия 24', 'СТС', 'СТС ЛАВ', 'ТВ Центр', 'ТВ3', 'ТНТ', 'ТНТ4', 'Че', 'Ю'
]

# Run the prepared SQL scripts (was: "Выполним sql запросы").
Execute.executeScriptsFromFile(logger)

# Predict from 3 days back, because rating facts only arrive 3 days later.
start_predict_day = datetime.datetime.today() - datetime.timedelta(days=3)
sample = samples.train_sample(logger, start_predict_day)  # build the sample set

# Split the sample by block date:
train = sample[sample.blockdate < int(
    start_predict_day.strftime("%Y%m%d"))]  # training part
test = sample[sample.blockdate >= int(
    start_predict_day.strftime("%Y%m%d"))]  # test part

# Over all channels except those not yet present in the new Vimb feed.
for ch in [i for i in Channels if i not in ['Канал Дисней', 'Муз ТВ']]:
    # NOTE(review): the loop body continues beyond this excerpt.
# Define NumPixels = 30 PIN_LED = board.D18 # Global const_changeModeDelay = 125 # msec #tempStripe = neopixel.NeoPixel(PIN_LED, NumPixels, 1, False, neopixel.GRB) Config = Conf.Configuration( neopixel.NeoPixel(PIN_LED, NumPixels, brightness=0.2, auto_write=False, pixel_order=neopixel.GRB)) # Modes ColorOffColorOff = Exe.Mode_ColorOffColorOff() ColorFade = Exe.Mode_ColorFade() RainbowFlow = Exe.Mode_RainbowFlow() PackChaser = Exe.Mode_PackChaser() KnightRider = Exe.Mode_KnightRider() Police = Exe.Mode_Police() # millis-lambda def millis(): return int(round(time.time() * 1000)) # Setup if __name__ == "__main__": Config.Stripe.fill(0x000000)
import Execute

# Main entry point: menu loop asking the user which N-Queens instance to run.
if __name__ == '__main__':
    e = Execute.Functions()
    print('N-QUEENS PROBLEM')
    option = 1
    while option != 0:
        print('Choose an option:')
        print('1 - Use instance for eight queens')
        print('2 - Use instance for n queens')
        print('3 - Quit')
        option = int(input())
        if option == 1:
            print('Running problem to eight queens')
            e.controller(1, 8)
        elif option == 2:
            number = int(input('Choose a number of queens\n'))
            print('Running problem to ', number, ' queens')
            e.controller(2, number)
        elif option == 3:
            # Force the while-condition false on the next check.
            print('Exiting')
            option = 0
        else:
            print('Wrong answer. Please, choose a valid option')
def key_cmd(self, key, cmd):
    """Store/run *cmd* under *key* via the local Redis-backed executor and
    return its output unchanged."""
    return Execute.key_cmd(key=key, cmd=cmd, host="localhost", port=6379, db=0)
from tkinter import *
from PIL import ImageTk, Image
import imageio
from Prototype import *
import pyttsx3
import speech_recognition
from Execute import *

a = CONVAI()
b = Execute()


def stream():
    """Push the next video frame into label l1 and reschedule itself;
    close the video when the stream ends or a frame fails."""
    try:
        image = video.get_next_data()
        frame_image = Image.fromarray(image)
        # NOTE(review): Image.ANTIALIAS was removed in Pillow >= 10
        # (Image.LANCZOS is the replacement) — confirm the pinned version.
        frame_image = frame_image.resize((1000, 778), Image.ANTIALIAS)
        frame_image = ImageTk.PhotoImage(frame_image)
        l1.config(image=frame_image)
        l1.image = frame_image  # keep a reference so Tk doesn't GC the image
        l1.after(delay, lambda: stream())
    except Exception:
        # BUG FIX: the bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt; Exception keeps the intended "any error ends
        # the stream" behaviour without trapping interpreter exits.
        video.close()
        return


window = Tk()
window.geometry("2000x1200")
window.title('Conversational AI')
window["bg"] = "darkgreen"
def schedule_exec_file(self, script, test_name, result_query, test_detail, exec_time, log_file):
    """Schedule *script* to run at *exec_time*; return the flattened output."""
    stdout = Execute.schedule_exec_file(
        script=script,
        test_name=test_name,
        result_query=result_query,
        test_detail=test_detail,
        exec_time=exec_time,
        log_file=log_file,
    )
    # Collapse literal "\n" escape sequences into spaces.
    return "{0}".format(stdout).replace('\\n', " ")
def schedule_key_exec(self, key, test_name, result_query, test_detail, exec_time, log_file):
    """Schedule the stored command *key* to run at *exec_time*; return the
    flattened output."""
    stdout = Execute.schedule_exec_key(
        key=key,
        test_name=test_name,
        result_query=result_query,
        test_detail=test_detail,
        exec_time=exec_time,
        log_file=log_file,
        host="0.0.0.0",
        port=6379,
        db=0,
    )
    # Collapse literal "\n" escape sequences into spaces.
    return "{0}".format(stdout).replace('\\n', " ")
def key_exec(self, key, test_name, result_query, test_detail, log_file):
    """Run the stored command *key* immediately; return the flattened output."""
    stdout = Execute.exec_key(
        key=key,
        test_name=test_name,
        result_query=result_query,
        test_detail=test_detail,
        log_file=log_file,
        host="localhost",
        port=6379,
        db=0,
    )
    # Collapse literal "\n" escape sequences into spaces.
    return "{0}".format(stdout).replace('\\n', " ")
def set_user(self, user):
    """Store the remote execution user in the local Redis DB; return the
    flattened reply."""
    reply = Execute.set_user(user, db_host="localhost", port=6379, db=0)
    return "{0}".format(reply).replace('\\n', " ")
def set_password(self, password):
    """Store the remote execution password in the local Redis DB; return the
    flattened reply."""
    reply = Execute.set_user_pass(password, db_host="localhost", port=6379, db=0)
    return "{0}".format(reply).replace('\\n', " ")
def set_port(self):
    """Record the remote port via the local Redis DB; return the flattened
    reply."""
    reply = Execute.set_remote_port(db_host="localhost", db_port="6379", db=0)
    return "{0}".format(reply).replace('\\n', " ")
def tally_count(self, logfile, resultfile, test_pass=True, test_fail=True):
    """Tally pass/fail counts from *logfile* into *resultfile*.

    :param test_pass: include passing tests in the tally.
    :param test_fail: include failing tests in the tally.
    :return: the tally output with literal "\\n" sequences flattened.
    """
    # BUG FIX: the keyword flags were hard-coded to True, silently
    # ignoring the caller's test_pass / test_fail arguments.
    stdout = Execute.tally_counter(logfile, resultfile,
                                   test_pass=test_pass,
                                   test_fail=test_fail)
    data = "{0}".format(stdout)
    d = data.replace('\\n', " ")
    return d
bestLoss = 1.0
bestEpoch = 0

# Optionally restore a previously trained model and its best loss/epoch.
if opts['r'] == 'y':
    try:
        lstm.restore()
        # BUG FIX: the open() handles were never closed; use context managers.
        with open(Constants.modelDir + 'bestLoss.p', 'rb') as f:
            bestLoss = pk.load(f)
        with open(Constants.modelDir + 'bestEpoch.p', 'rb') as f:
            bestEpoch = pk.load(f)
        print("\nMODEL LOADED (Loss: {})".format(bestLoss))
    except Exception:
        print("""
        ERROR: Unable to restore model.
        Does a stored model exist?
        Have you changed the LSTM architecture?
        """)
        sys.exit()

# #####################################################################
#  TRAIN MODEL
# #####################################################################
if opts['t'] == 'y':
    Execute.train(lstm, offlineData, bestEpoch, bestLoss)

# #####################################################################
#  SIMULATE PREDICTIONS
# #####################################################################
if opts['s'] == 'y':
    Execute.simulate(lstm, onlineData, prices, Constants.ticker)
import Execute

# Run the model once and look up users matching a fixed answer vector.
Instance = Execute.Execution()
Accuracy, FinalList = Instance.excute()  # note: `excute` is the API's own spelling
Result = Instance.FindUsers(
    ['n', 'n', 'y', 'n', 'y', 'n', 'n', 'n', 'n', 'p', 'n', 'n', 'B', 'B', 'A'],
    FinalList,
)
print(Result)
def start():
    """Validate the selected image/folder paths, run the chosen search
    algorithm, show the elapsed time and optionally auto-filter the results.

    Reads the GUI state (var1..var5, numberChosen, tree, ...) from module
    globals; all user-facing strings are Chinese and must stay as-is.
    """
    if var1.get() == '' or var2.get() == '':
        # Missing path(s): warn and send the user back to the input tab.
        tkinter.messagebox.askokcancel('啊欧', '用此功能,图片以及文件夹的路径请完整填写')
        tabControl.select(0)  # jump back to the first tab
        pass
    else:  # paths look valid — run the search
        label3.config(text="正在处理……")
        Init_dog()  # reset the image display
        delButton(tree)  # clear the result table
        showPic1()  # preview the query image
        time_start = time.time()  # wall-clock start (seconds since the epoch)
        # Dispatch on the selected algorithm (a threaded variant was tried
        # and abandoned — kept below as a string literal).
        if numberChosen.current() == 0:
            if var5.get() == 1:
                '''
                th1=threading.Thread(target=Execute.startSearch01(var1,var2,var3,tree,x,root))
                th1.start()
                button_img.configure(state='disabled')  #万一时间长,防止按钮连续点击
                th1.join()
                button_img.configure(state='normal')
                '''
                Execute.startSearch01(var1, var2, var3, tree, x, root)  # single-level folder
            else:
                Execute.startSearch0(var1, var2, var3, tree, x, root)  # nested folders
        elif numberChosen.current() == 1:
            Execute.startSearch1(var1, var2, var3, tree, x, root)
        elif numberChosen.current() == 2:
            Execute.startSearch2(var1, var2, var3, tree, x, root)
        elif numberChosen.current() == 3:
            Execute.startSearch3(var1, var2, var3, tree, x, root)
        elif numberChosen.current() == 4:
            Execute.startSearch4(var1, var2, var3, tree, x, root)
        elif numberChosen.current() == 5:
            Execute.startSearch5(var1, var2, var3, tree, x, root)
        elif numberChosen.current() == 6:
            Execute.startSearch6(var1, var2, var3, tree, x, root)
        else:
            # Unknown algorithm index — bail out without timing.
            tkinter.messagebox.askokcancel('啊欧', '算法未找到,也许正在制作中')
            return
        time_end = time.time()  # wall-clock end
        label3.config(text="耗时" + str(round(time_end - time_start, 3)) + '秒')  # show elapsed time
        # Automatic filtering, when requested.
        if (var4_1.get() == 1):
            sieve()
class Pipeline:
    """Multi-threaded processor pipeline model: one Fetch unit per thread
    feeding a shared Issue unit and an Execute unit, driven one tick at a
    time via tick()."""

    def __init__(self, memory: Memory, params=dict()):
        """Build and wire the pipeline units.

        :param memory: instruction memory handed to every Fetch unit.
        :param params: optional overrides (NUM_THREAD, NUM_STAGES,
            PREFETCH_POLICY); module-level defaults are used otherwise.
        """
        self.num_threads = int(params["NUM_THREAD"]) if "NUM_THREAD" in params.keys() else NUM_THREADS
        self.num_stages = int(params["NUM_STAGES"]) if "NUM_STAGES" in params.keys() else NUM_STAGES
        self.thread_unit = [Thread(tid,params) for tid in range(0, self.num_threads)]
        # Create fetch unit (one per thread)
        self.fetch_unit = [Fetch(tid, memory, params, self.thread_unit[tid]) for tid in range(0, self.num_threads)]
        self.issue_unit = Issue(params)
        self.execute_unit = Execute(params)
        self.connect()
        self.timer = DEFAULT_TIMEOUT
        # Prefetch
        self.prefetch_policy = params["PREFETCH_POLICY"] if "PREFETCH_POLICY" in params.keys() else PREFETCH_POLICY
        self.tid_prefetch_vld = False
        self.tid_prefetch_ptr = 0
        # Verbosity
        self.timer = DEFAULT_TIMEOUT  # NOTE(review): duplicate of the assignment above
        # Statistics
        self.last_tick = 0
        self.count_flushed_inst = 0
        self.ipc = 0
        self.total_num_of_mem_access = 0

    # Connect between the class's units (cross-references set after construction)
    def connect(self):
        # Fetch Unit
        for tid in range(0, self.num_threads):
            self.fetch_unit[tid].thread_unit = self.thread_unit[tid]
        # Issue Unit
        # - thread_unit - Checks thread info and dependency
        # - fetch_unit - check the instruction inside the fetch
        # - Execute - pass the instruction to execute unit
        self.issue_unit.thread_unit = self.thread_unit
        self.issue_unit.fetch_unit = self.fetch_unit
        self.issue_unit.execute_unit = self.execute_unit
        # Execute
        # - thread_unit - TBD
        # - issue_unit - update in case of flush
        # - fetch_unit - update in case of flush
        self.execute_unit.thread_unit = self.thread_unit
        self.execute_unit.issue_unit = self.issue_unit
        self.execute_unit.fetch_unit = self.fetch_unit

    # The main function that happens every cycle and responsible on the progress of the pipeline.
    def tick(self, cur_tick):
        """Advance the pipeline one cycle; return False once everything is done."""
        # Checking if all threads and units are finished there execution
        if self.check_done():
            return False
        # Update Execute
        self.execute_unit.tick(cur_tick)
        # Update Issue
        self.issue_unit.tick(cur_tick)
        # Select which thread will prefetch
        self.set_prefetch(cur_tick)
        # Progress Fetch
        for idx in range(0, self.num_threads):
            self.fetch_unit[idx].tick(cur_tick)
        # Update simulation statistics
        self.update_statistics(cur_tick)
        self.trace_tick(cur_tick)
        return True

    # Used as trace of simulator: one formatted line per tick describing every unit
    def trace_tick(self, cur_tick):
        prefetch_id = self.tid_prefetch_ptr if self.tid_prefetch_vld else "x"
        fetch_sts = [str(self.fetch_unit[i].fetchQueue.len()) for i in range(0, self.num_threads)]
        issue_sts = self.issue_unit.get_status()
        execute_sts = self.execute_unit.get_status()
        thread_sts = [" t"+str(i)+": "+str(self.thread_unit[i].ready)+",af-"+str(int(self.fetch_unit[i].branch_taken_in_queue)) for i in range(0, self.num_threads)]
        msg = "{0:<5},{1:^5},{2},{3:^15}, {4:^35} \t, {5}".format(
            cur_tick, prefetch_id, ",".join(fetch_sts), issue_sts, execute_sts, ",".join(thread_sts))
        pprint(msg, "NORM")

    # Check Between all thread, who is legit for fetching
    def set_prefetch(self, cur_tick):
        self.tid_prefetch_vld = False
        req_list = [self.fetch_unit[tid].check_prefetch() for tid in range(self.num_threads)]
        # update based on the policy of prefetch - changes tid_prefetch_ptr if it is needed
        self.update_prefetch_policy()
        self.tid_prefetch_ptr = round_robin(self.tid_prefetch_ptr, req_list, self.num_threads)
        if req_list[self.tid_prefetch_ptr]:
            self.fetch_unit[self.tid_prefetch_ptr].set_prefetch(cur_tick)
            self.tid_prefetch_vld = True

    # --------------- Policies ---------------#
    def update_prefetch_policy(self):
        # "RR" (round-robin) needs no pointer adjustment beyond round_robin() itself.
        if self.prefetch_policy == "RR":
            pass

    # Check if all units are done
    def check_done(self):
        # Check all fetch units are done = last inst + no pending inst + queue is empty
        fetch_done = all([self.fetch_unit[i].fetch_done() for i in range(0, self.num_threads)])
        issue_done = self.issue_unit.issue_empty
        execute_done = self.execute_unit.done()
        # The watchdog timer also ends the run when nothing commits for too long.
        timeout_done = self.timer == 0
        return (fetch_done and issue_done and execute_done) or timeout_done

    # --------------- Statistics ---------------#
    def update_statistics(self, cur_tick):
        # count how many valid instruction committed
        self.timer -= 1
        self.last_tick = cur_tick
        if not self.execute_unit.committed_inst.empty_inst:
            # A commit happened this cycle: reset the watchdog.
            self.timer = DEFAULT_TIMEOUT
        else:
            # NOTE(review): combined with the decrement above, the timer drops
            # by 2 on idle cycles — confirm this is intended.
            self.timer -= 1
        if self.last_tick:  # Avoid division by zero
            self.ipc = float(self.execute_unit.count_committed_inst / self.last_tick)
        self.count_flushed_inst = self.issue_unit.count_flushed_inst + self.execute_unit.count_flushed_inst +\
            sum([self.fetch_unit[idx].flushed_inst_count for idx in range(0, self.num_threads)])
        self.total_num_of_mem_access = sum([self.fetch_unit[idx].num_of_mem_access for idx in range(0, self.num_threads)])

    def report_model(self):
        # Per-unit configuration/summary report.
        for tid_idx in range(0, self.num_threads):
            self.fetch_unit[tid_idx].report_statistics()
        self.issue_unit.report_model()
        self.execute_unit.report_model()
        print("Num Thread={0}, stage={1}".format(
            self.num_threads, self.execute_unit.num_stages))

    def report_statistics(self):
        # Final run summary: committed instructions, IPC, flushes, memory traffic.
        msg = "Inst Committed {0} ipc {1:.3f} flushed {2} mem accesses {3}".format(
            self.execute_unit.count_committed_inst, self.ipc, self.count_flushed_inst, self.total_num_of_mem_access)
        pprint(msg, "NONE")