def test_trend_predictor():
    """Train a trend predictor on the DAX history, validating on GER30/2017 trends."""
    horizon = 35
    train_analyzer = Analyzer(dax_hist, min_trend_h=horizon)
    validation_analyzer = Analyzer(History("GER30", 2017), min_trend_h=horizon)
    # (a variant trained on an ArtificialHistory was previously tried here)
    train_analyzer.build_trend_predictor(7, validation_data=validation_analyzer.trend_list)
def test_max_profit():
    """Print the theoretical maximum profit, trade count, and per-trade average."""
    analyzer = Analyzer(dax_hist, min_trend_h=80, fast=True)
    profit, trade_list = analyzer.get_max_profit(5)
    print(profit)
    print(len(trade_list))
    print(profit / len(trade_list))
def wacot_analyze(args):
    """Run the full contribution/bot-flag analysis pipeline, in fixed order."""
    analyzer = Analyzer.Analyzer()
    pipeline = (
        analyzer.compute_article_contributions,
        analyzer.compute_category_contributions,
        analyzer.compute_bot_flags,
        analyzer.count_article_contributions,
        analyzer.count_category_contributions,
    )
    for step in pipeline:
        step()
def alert_management(Systolic_BP, Diastolic_BP, Heart_Rate, Heart_O2_Level, Body_temp):
    """Evaluate all vital-sign alerts and print a colored warning for each one that fires.

    Prints a red "|Alert|" banner if any check is True, otherwise a green
    all-clear line, followed by one message per triggered alert.

    NOTE(review): the original called every Analyzer check twice (once for the
    banner condition and once per individual message); each check is now
    evaluated exactly once. This assumes the check methods are side-effect
    free — confirm against analysis.Analyzer.
    """
    new_alert = analysis.Analyzer(Systolic_BP, Diastolic_BP, Heart_Rate,
                                  Heart_O2_Level, Body_temp)
    # Convert once instead of per call.
    hr, temp = int(Heart_Rate), int(Body_temp)
    sys_bp, dia_bp = int(Systolic_BP), int(Diastolic_BP)
    o2 = int(Heart_O2_Level)
    # (flag, message) pairs in the original evaluation/print order.
    checks = [
        (new_alert.Shock_Alert(hr, temp), 'Shock_Alert!'),
        (new_alert.Signal_Loss(hr, temp), 'Warning: Signal_Loss!'),
        (new_alert.Oxygen_Supply(o2), 'Please Increase Oxygen_Supply!'),
        (new_alert.Fever(temp), 'Fever!'),
        (new_alert.Hypotension(sys_bp, dia_bp), 'Hypotension'),
        (new_alert.Hypertension(sys_bp, dia_bp), 'Hypertension'),
    ]
    # Keep the original strict `is True` comparison semantics.
    if any(flag is True for flag, _ in checks):
        print('\n\033[1;31;40m|Alert|\033[0m')
    else:
        print(
            '\033[1;32mAll the vital signs were within normal limits.\033[0m\n'
        )
    for flag, message in checks:
        if flag is True:
            print('\033[1;31;40m' + message + '\033[0m')
def formUp(self, vis=False, lim=True):
    """Assemble the particle collection from stored positions/orientations,
    optionally visualize it, then compute shielding tensors via Analyzer.

    Results are stored on self.array, self.z (zeta) and self.gs (shielding).
    """
    self.update = True

    def placed_copy(idx):
        # Rotate about the particle's own centre FIRST — rotating after the
        # translate would spin it about a non-central axis.
        c = self.ptcl.copy()
        c.rotate(self.os[idx, 0], axis='x')
        c.rotate(self.os[idx, 1], axis='y')
        c.rotate(self.os[idx, 2], axis='z')
        c.translate(self.ps[idx])
        return c

    # Seed the collection with particle 0, then merge in the rest.
    collection = placed_copy(0)
    for i in range(1, self.N):
        collection = merge(collection, placed_copy(i))
    self.array = collection
    if vis:
        self.array.visualize()
    # Shielding tensors.
    an = Analyzer(collection, self.eta, limit=lim)
    z = an.zeta()
    gs = an.getShielding()
    self.gs = gs
    self.z = z
def solve(self):
    """Run BFS over the maze and return an Analyzer summarising the run."""
    started = datetime.datetime.now()
    self.bfsUtil()
    elapsed = (datetime.datetime.now() - started).total_seconds()
    return Analyzer(self.matrix, self.pathCount, self.nodeExplored,
                    len(self.queue), elapsed, self.isMazeSolved, 'BFS',
                    self.probability, self.size)
def autocor_trend_mom():
    """Plot the sample autocorrelation of absolute trend momenta (DAX, h=5)."""
    analyzer = Analyzer(dax_hist, min_trend_h=5, realistic=False)
    momenta = np.asarray([abs(t.momentum) for t in analyzer.trend_list])
    Plotter.sm_autocor(momenta)
def __init__(self, events):
    """Initialise with an Analyzer and empty processing/plot/IO placeholders."""
    super().__init__(events)
    self.analyzer = Analyzer()
    # Lazily populated state — all start out unset.
    for attr in ('processing', 'fig', 'ax', 'input_url', 'output_path'):
        setattr(self, attr, None)
def run(replications):
    """Run *replications* simulation replications and aggregate their results.

    Each replication is seeded deterministically: the first uses 42, each
    subsequent one uses a value drawn from the previous replication's stream.

    Bug fix: the original used the Python-2-only ``xrange``, which raises
    NameError on Python 3 (the rest of this file uses Python-3 ``print()``).
    """
    seed = 42
    out_anal = Analyzer.Analyzer(["D1", "D2", "D3", "D4"])
    for i in range(replications):
        random.seed(seed)
        print('EMS! Run %i of %i' % (i + 1, replications))
        out_anal.addData(_runRep(DETAIL))
        # Derive the next replication's seed from the current stream.
        seed = random.random()
    out_anal.run(True)
def main(dic):
    """End-to-end demo pipeline: generate random vitals, run the rule-based
    alert checks, persist the record, run the AI prediction module, and
    display both rule-based and predicted alerts.

    NOTE(review): the ``dic`` parameter is immediately overwritten with
    randomly generated demo data — confirm callers do not expect their
    argument to be used.
    """
    dic = {
        "ID": 1,
        "age": 22,
        "gender": 'Male',
        "heartrate": random.randint(50, 100),
        "Diastolic_BP": random.randint(40, 110),
        "Systolic_BP": random.randint(70, 160),
        "blood_oxygen": random.randint(50, 100),
        "temperature": random.randint(34, 39),
        "time": time.ctime()
    }
    # Input module: wrap the raw vitals in the input API object.
    patient = input_api.input_api(dic["ID"], dic["age"], dic["gender"],
                                  dic["heartrate"], dic["Diastolic_BP"],
                                  dic["Systolic_BP"], dic["blood_oxygen"],
                                  dic["temperature"], dic["time"])
    data1 = patient.return_request(1)
    patient.return_request(2)
    print("Patient Data:")
    print(data1)
    # Analyze module: rule-based alert checks on the current vitals.
    data = Analyzer.Analyzer(dic["Systolic_BP"], dic["Diastolic_BP"],
                             dic["heartrate"], dic["blood_oxygen"],
                             dic["temperature"])
    Signal_Loss = data.Signal_Loss(dic["heartrate"], dic["temperature"])
    Shock_Alert = data.Shock_Alert(dic["heartrate"], dic["temperature"])
    Oxygen_Supply = data.Oxygen_Supply(dic["blood_oxygen"])
    Fever = data.Fever(dic["temperature"])
    Hypotension = data.Hypotension(dic["Systolic_BP"], dic["Diastolic_BP"])
    Hypertension = data.Hypertension(dic["Systolic_BP"], dic["Diastolic_BP"])
    # Database: persist the patient record.
    database = Database_Module.DataBaseModule()
    # print(authenDB.get("admin"))
    # database.auth(authenDB(), authenDB.get("admin"))
    database.insert(1, data1)
    ## AI_module: query history and predict the next vitals / alerts.
    AI = AI_module.AI_module(dic["ID"], data1)
    Blood_oxygen, heartate, Systolic, Diastolic = AI.Query_Data_From_Database()
    heartrate_predict_result, oxygen_predict_result, Diastolic_predict_result, Systolic_predict_result = AI.AI_Module(
        Blood_oxygen, heartate, Systolic, Diastolic)
    Predict_Hypertension_Alert, Predict_Hypotension_Alert, Predict_Shock_Alert, Predict_Oxygen_Alert = AI.Feedback(
        heartrate_predict_result, oxygen_predict_result,
        Diastolic_predict_result, Systolic_predict_result)
    ## Output: show the AI inputs, then the combined rule-based + predicted alerts.
    OutputAlert_module.display_AI_iuput_data(dic["ID"], data1, Blood_oxygen,
                                             heartate, Systolic, Diastolic)
    OutputAlert_module.receive_basic_iuput_data(
        Signal_Loss, Shock_Alert, Oxygen_Supply, Fever, Hypotension, Hypertension,
        Predict_Hypertension_Alert, Predict_Hypotension_Alert,
        Predict_Shock_Alert, Predict_Oxygen_Alert)
def test_trend_forecasting_model(test_years, min_trend_h, model_years=None,
                                 model_hist=None, strict_mode=False, mode="avg"):
    """Build a trend-forecasting model and report intern/extern prediction errors.

    Exactly one of *model_years* / *model_hist* must be provided; year
    arguments may be a single year, or a list/tuple whose first and last
    entries bound the range.

    Bug fix: the original called ``list(x)`` on non-list year arguments,
    which raises TypeError when a single int year is passed; scalars are
    now wrapped in a one-element list (tuples are still converted as before).
    """
    if not model_years and not model_hist:
        raise Exception(
            "You must provide a model history or year for a model history!"
        )

    def _as_year_range(years):
        # Normalise to a list of at most two years (first and last of a range).
        if isinstance(years, (list, tuple)):
            years = list(years)
        else:
            years = [years]  # scalar year — list(years) would raise TypeError
        if len(years) > 2:
            years = [years[0], years[-1]]
        return years

    if model_years:
        model_years = _as_year_range(model_years)
    test_years = _as_year_range(test_years)

    if model_hist:
        anal = Analyzer(model_hist, min_trend_h=min_trend_h, realistic=False)
    else:
        h = History("GER30", *model_years)
        anal = Analyzer(h, min_trend_h=min_trend_h, realistic=False)

    anal.get_intern_trend_prediction_error(p=True, use_strict_mode=strict_mode,
                                           mode=mode)
    test_anal = Analyzer(History("GER30", *test_years),
                         min_trend_h=min_trend_h, realistic=False)
    anal.get_extern_trend_prediction_error(test_anal.trend_list, p=True,
                                           use_strict_mode=strict_mode, mode=mode)
def solve(self):
    """Run DFS over the maze, print diagnostics, and return a summary Analyzer."""
    started = datetime.datetime.now()
    self.dfsUtil()
    elapsed = (datetime.datetime.now() - started).total_seconds()
    print(elapsed)
    print(self.nodeExplored)
    print(self.isMazeSolved)
    return Analyzer(self.matrix, self.pathCount, self.nodeExplored,
                    len(self.stack), elapsed, self.isMazeSolved, 'DFS',
                    self.probability, self.size)
def main():
    """Entry point: load the "ko" stock and hand it to the Analyzer."""
    # Signal that the run has started.
    print("Started")
    # Stock loads the information previously written by the DataGrabber.
    stock = Stock("ko")
    # Analyzer performs the calculations so this module only interprets results.
    processedData = Analyzer(stock)
def __init__(self):
    """Wire up the self-adaptive (MAPE-K) loop around a shared knowledge base."""
    super(main, self).__init__()
    self.logger = logging.getLogger(self.__class__.__name__)
    self.logger.info('initializing Self-Adaptive System')
    # Knowledge base shared by every component; goal model loaded up front.
    knowledge = KnowledgeBase.KnowledgeBase()
    knowledge.loadGoalModel('car-wheel.json')
    # Executor <- Planner <- Analyzer chain, plus a Monitor on the KB.
    executor = Executor.Executor(knowledge)
    planner = Planner.Planner(knowledge, executor)
    analyzer = Analyzer.Analyzer(knowledge, planner)
    monitor = Monitor.Monitor(knowledge)
def solve(self):
    """Run DFS over the maze, printing timestamps, and return a summary Analyzer.

    NOTE(review): unlike the other solve() variants, pathCount is passed as 0
    and the algorithm/probability/size arguments are omitted — confirm this
    shorter Analyzer signature is intended here.
    """
    started = datetime.datetime.now()
    print(started)
    self.dfsUtil()
    finished = datetime.datetime.now()
    print(finished)
    print((finished - started).total_seconds())
    print("********************")
    print(self.nodeExplored)
    return Analyzer(self.matrix, 0, self.nodeExplored, len(self.stack),
                    (finished - started).total_seconds(), self.isMazeSolved)
def test_trend_forecast():
    """Walk the DAX trend list comparing each trend against the previous
    prediction, printing per-trend and average forecast errors.

    Bug fix: the height-error accumulator was added to itself
    (``d_h_sum += d_h_sum``) instead of accumulating ``d_h``, so the
    reported average height error was garbage.
    """
    analyzer = Analyzer(dax_hist, min_trend_h=82)
    plen, ph = 0, 0                       # previous prediction (len, height)
    d_len_sum, d_h_sum = 0, 0             # absolute error accumulators
    delta_len_percentual, delta_h_percentual = 0, 0
    delta_len_percentual_sum, delta_h_percentual_sum = 0, 0
    for trend in analyzer.trend_list:
        print("Len: {} \t\t\t\t\t\t\tHeight: {}".format(
            trend.len, trend.height))
        d_len, d_h = abs(plen - trend.len), abs(ph - trend.height)
        d_len_sum += d_len
        d_h_sum += d_h  # was: d_h_sum += d_h_sum (self-addition bug)
        if not (d_len == 0 and d_h == 0):
            delta_len_percentual, delta_h_percentual = abs(
                d_len / trend.len), abs(d_h / trend.height)
            delta_len_percentual_sum += delta_len_percentual
            delta_h_percentual_sum += delta_h_percentual
            print("Diff len: {:12.2f}\t\t\t\tDiff height: {:12.2f}".format(
                d_len, d_h))
            print(
                "Diff len percentual: {:12.2f} % Diff height percentual: {:12.2f} %"
                .format(delta_len_percentual * 100, delta_h_percentual * 100))
        plen, ph = analyzer.predict_next_trend(trend, similarity_threshold=0.5)
        if not (plen == 0 and ph == 0):
            print(
                "\n Pred len: {:12.2f} Pred height: {:12.2f}"
                .format(plen, ph))
        print()
    n = len(analyzer.trend_list) - 1
    print("Avg d len: {:12.2f} Avg d h: {:12.2f}".format(
        d_len_sum / n, d_h_sum / n))
    print(
        "Avg d len percentual: {:12.2f} % Avg d h percentual: {:12.2f} %".
        format(100 * delta_len_percentual_sum / n,
               100 * delta_h_percentual_sum / n))
def wacot_import(args):
    """Import XML and/or category dumps, then (unless suppressed) run the
    full contribution/bot-flag analysis pipeline."""
    importer = Importer.Importer()
    if args.from_dumps in ('all', 'xml'):
        importer.import_xml()
    if args.from_dumps in ('all', 'cat'):
        importer.import_categories()
    if not args.only_import:
        analyzer = Analyzer.Analyzer()
        for step in (
            analyzer.compute_article_contributions,
            analyzer.compute_category_contributions,
            analyzer.compute_bot_flags,
            analyzer.count_article_contributions,
            analyzer.count_category_contributions,
        ):
            step()
def solve(self):
    """Run A* over the maze, print diagnostics, and return a summary Analyzer.

    NOTE(review): nodeExplored is passed in both the pathCount and
    nodeExplored slots — confirm that is intended.
    """
    started = datetime.datetime.now()
    self.aStarUtil()
    elapsed = (datetime.datetime.now() - started).total_seconds()
    print(elapsed)
    print(self.nodeExplored)
    print(self.isMazeSolved)
    return Analyzer(self.matrix, self.nodeExplored, self.nodeExplored,
                    self.queue.qsize(), elapsed, self.isMazeSolved, 'ASTAR',
                    self.probability, self.size)
def anal_trends():
    """Plot heights, lengths, and scaled momentum of DAX trends on one y-axis."""
    analyzer = Analyzer(dax_hist)
    plotter = Plotter(analyzer)
    trends = analyzer.get_trends(analyzer.hist, min_trend_h=5, realistic=False)
    heights = [abs(t.height) for t in trends]
    lengths = [abs(t.len) for t in trends]
    momentum = [abs(t.height / t.len) * 10 for t in trends]
    plotter.plot_general_same_y(list(range(len(trends))),
                                [heights, lengths, momentum],
                                x_label="Trends",
                                y_labels=["heights", "lens", "momentum"])
def test_trend_forecasting_ability(n=20, min_trend_h=50):
    """Compare the similarity difference between consecutive trends against a
    random baseline; returns (avg_rand_difference, avg_avg_sim_difference)."""
    np.set_printoptions(suppress=True)
    anal = Analyzer(dax_hist, min_trend_h=min_trend_h)
    r = len(anal.trend_list) - 1

    # Baseline: mean absolute difference between pairs of uniform [-1, 1) vectors.
    avg_rand_difference = 0
    for _ in range(r):
        vec_a = (np.random.random(n) * 2) - 1
        vec_b = (np.random.random(n) * 2) - 1
        avg_rand_difference += np.sum(np.absolute(vec_a - vec_b)) / n
    avg_rand_difference /= r
    print("Avg rand difference: ", avg_rand_difference)

    # ########################################
    # Actual: similarity-vector difference between each trend and its successor.
    avg_avg_sim_difference = 0
    for base_trend in anal.trend_list[:-1]:
        base_containers, _base_sum = anal.get_similar_trends(base_trend, n, -1)
        next_containers, _next_sum = anal.get_similar_trends(
            base_trend.next_trend, n, -1)
        base_sims = np.asarray([c.sim for c in base_containers])
        next_sims = np.asarray([c.sim for c in next_containers])
        avg_avg_sim_difference += np.sum(np.absolute(base_sims - next_sims)) / n
    avg_avg_sim_difference /= r
    print("Average sim difference: ", avg_avg_sim_difference)
    print("Average following trend similarity:",
          anal.get_avg_following_trend_similarity())
    return avg_rand_difference, avg_avg_sim_difference
def run_all_algs(stock):
    """Run every Graham screening test in Analyzer against *stock*."""
    import sys
    sys.path.append("/Users/carlsoncheng/PycharmProjects/grahamBot")
    import Analyzer
    graham = Analyzer.Analyzer(stock)
    # All screens, in the original order.
    screens = (
        graham.earn_inc_by_33_percent_test,
        graham.positive_earnings_test,
        graham.twenty_year_div_record_test,
        graham.shareholder_equity_to_total_assets,
        graham.long_term_debt_less_than_net_current_assets,
        graham.curr_ratio_greater_than_2,
        graham.long_term_debt_less_than_2x_shareholder_equity,
        graham.ttm_average_pe_less_than_20,
        graham.price_to_seven_year_earnings_ratio_less_than_25,
        graham.price_to_3_year_earnings_less_than_15,
        graham.pb_ratio_less_than_1_point_5,
        graham.graham_number,
    )
    for screen in screens:
        screen()
def _runRep(status):
    """Run one EMS simulation replication; return the per-column means.

    When *status* is truthy, prints a divider, every incident, and the
    Analyzer's own report.
    """
    # Set up and start the simulation.
    env = simpy.Environment()
    # Emergency-responder resources behind a simple dispatcher.
    ems = Dispatcher.SimpleDispatcher(
        env, makeResponders(env, STATION, NUM_AMB, status), status)
    calls = IncidentGenerator.IncidentGenerator(env, ems, TBA, SIM_DURATION,
                                                END_BUFFER, HOSPITAL, status)
    horizBar = "----------------------------------------------------------------------"
    if status:
        print(horizBar)
    # Run until the generator exhausts itself (no explicit until=).
    env.run()
    if status:
        print(horizBar)
    # Analysis: accumulate each incident's time differences.
    columns = ["D1", "D2", "D3", "D4"]
    analysis = Analyzer.Analyzer(["D1", "D2", "D3", "D4"])
    tempDict = dict()
    for inc in calls.incidentList:
        if status:
            print(inc.toString())
        incTimes = inc.getTimesList()
        analysis.addData(incTimes.difList)
        tempDict.update({
            len(tempDict):
            {col: incTimes.difList[i] for i, col in enumerate(columns)}
        })
    analysis.run(status)
    return [i.mea for i in analysis.stats]
def main():
    """Run the taint analyzer over a single downloaded XBMC plugin.

    Cleanup: two earlier bulk-scan variants (walking AutoDownload folders and
    aggregating VulnerabilityResults files) were kept as no-op triple-quoted
    string blocks; they were dead code and have been removed.
    """
    taint_analyzer = Analyzer.Analyzer()
    # NOTE(review): hard-coded local path — parameterize before wider use.
    taint_analyzer.main("C:\\Users\\anivr\\Desktop\\AutoDownload\\xbmc-mailnotifier-0.2.10\\xbmc-mailnotifier-0.2.10")
def backtest_trend_pred(h=35, mode="avg"):
    """Backtest the "trend pred" strategy: train on an artificial GER30/2017
    history, run it against the real DAX history, then plot the trades."""
    trader_data = Trader.DEFAULT
    training_hist = ArtificialHistory(History("GER30", 2017),
                                      out_len=1000000, min_trend_h=h)
    analyzer = Analyzer(training_hist, min_trend_h=h)
    strategy = strat_dict["trend pred"](analyzer,
                                        pred_mode=mode,
                                        similarity_threshold=0.1,
                                        pred_percentage_buffer=0.9,
                                        pred_abs_buffer=25,
                                        number_of_similar_trends_used=None,
                                        min_trends_used=0)
    backtester = Backtester(strategy, dax_hist, use_balance=True,
                            asset_data=AssetData.GER30, trader_data=trader_data)
    # (deep_test / sl & ts optimisation runs were previously tried here)
    backtester.test()
    # todo spread does not seem to adjust to positions size and is always 5€ per trade
    backtester.plot_trades(crosshair=False,
                           plot_trends=True,
                           min_trend_h=h,
                           bar_amount=10000)
def __init__(self):
    """Set up an empty board, logging placeholders, and a configured Analyzer."""
    # Analysis engine, capped at a standard-deviation limit of 10.
    analyzer = Analyzer.Analyzer()
    analyzer.sd_limit = 10
    self.analyzer = analyzer
    self.board = None
    self._log = None
    self._counter = 1
import Reader
import Analyzer
import help_func

# Toggle for the Aksel data run (was the non-idiomatic `if 1:`).
RUN_AKSEL = True

if RUN_AKSEL:
    # Read data for patient Aksel.
    akselReader = Reader.Reader('.')
    akselReader.readData()
    akselReader.createCGMStructure()
    akselReader.createCarbStructure()
    akselReader.createBolusStructure()
    akselReader.createBasalRateStructure()
    akselAnalyzer = Analyzer.Analyzer('Aksel')
    # Find all in range
    dfCGM = akselReader.dfCGM  # These are the same objects, not copies!
    dfBolus = akselReader.dfBolus
    dfBasal = akselReader.dfBasal
    timeAboveRange_per, timeBelowRange_per, timeInRange_per = akselAnalyzer.calcTimeInRange(
        dfCGM)
    timeAboveTarget_per, timeBelowTarget_per, timeInTarget_per = akselAnalyzer.calcTimeInTarget(
        dfCGM)
    stdCGM = akselAnalyzer.calcStdCGM(dfCGM)
def __init__(self):
    """Attach an Analyzer to this optimizer instance."""
    # print "DAK_Optimizer Powered by DAK"  (legacy Python-2 banner, disabled)
    self.analyseur = Analyzer(self)
} # 100% tir tirCorrect100 = 1 PGS_correct100 = 14.2031 GVP_correct100 = 2.0622 dfCGMTL = pd.DataFrame(data100) fileNameEntries = '' fileNameTreatments = '' timeStrTreatments = 'timestamp' patientName = 'namn' readerTL = Reader.Reader(patientName, fileNameEntries, fileNameTreatments, timeCGMStableMin, 'timestamp', True, dfCGMTL, dfInsulin, numDayNight, booleanWholeDayNight) analyzerTL = Analyzer.Analyzer(patientName, readerTL) #analyzerTL = Analyzer.Analyzer(name, numDayNight, booleanWholeDayNight, dfCGMTL, dfInsulin, timeCGMStableMin); tirTest100 = analyzerTL.calcTimeInXNew(dfCGMTL, analyzerTL.tirLevel, '[]') # [3.9 10] PGSTest100 = analyzerTL.cgmPGS GVPTest100 = analyzerTL.cgmGVP data60 = { 'dateTime': [ dt.datetime(2019, 10, 12, 20, 55, 39), dt.datetime(2019, 10, 12, 19, 50, 38), dt.datetime(2019, 10, 12, 18, 45, 39), dt.datetime(2019, 10, 12, 17, 40, 39), dt.datetime(2019, 10, 12, 16, 35, 38), dt.datetime(2019, 10, 12, 15, 30, 39) ],
dfCGM = reader2.dfCGM dfInsulin = reader2.dfInsulin reader2Simple = Reader.Reader(patientName=name, fileNameEntries=ef, fileNameTreatments=tf2, timeCGMStableMin=timeCGMStableMin, timeStrTreatments='timestamp', runSimple=True, dfCGMIn=dfCGM, dfInsulinIn=dfInsulin, numDayNightIn=1, booleanWholeDayNightIn=True) analyzer2 = Analyzer.Analyzer( name, reader2Simple ) #.numDayNight, reader2.booleanWholeDayNight, reader2.dfCGM, reader2.dfInsulin, timeCGMStableMin); dfInsulinTest = reader2.dfInsulin length_dfInsulin_test = len(dfInsulinTest) length_dfTreatments_test = len(reader2.dfTreatments) tddTest = analyzer2.tdd # readData removes Sensor start due to timestamp is nan length_dfTreatments_correct = 16 length_dfInsulin_correct = 16 tddCorrectBolus = 0.05 + 0.05 + 0.55 + 0.75 + 0.7 + 0.6 rateTemp = np.array([ 0.00, 0.00, 0.95, 0.95, 0.95, 0.95, 0.95, 0.45, 0.45, 0.45, 0.45, 0.45, 0.45, 0.30, 0.30, np.nan
def OnStart(self, event):
    """Main-program start handler: preprocess, analyze, post-process, then
    show the results window."""
    pp.preprocess()
    ana.Analyzer()
    post.CalculatedPostProcess('temp.txt')
    results_frame = Result(parent=None, id=-1)
    results_frame.Show()