def spectrum(self):
    position_to_analysis = int(self.comboBox.currentText())
    model_for_analysis = POSITION_FOR_ANALYSIS.get(position_to_analysis)
    analysis = Analysis(model_for_analysis)
    delta_t = 0.001
    # if self.input_delta_t.get():
    #     delta_t = float(self.input_delta_t.get())
    # else:
    #     delta_t = 0.001
    analysis.set_delta_t(delta_t)
    model = analysis.calculation_fourier_transform()
    n = model.n
    model.display_n = int(n / 2)
    model.x = model.x[:model.display_n]
    model.y = model.y[:model.display_n]
    place_to_show = int(self.comboBox_2.currentText())
    POSITION_FOR_ANALYSIS[place_to_show] = model
    self.main_window.show_graph(model, place_to_show, normalisation=True)
    self.close_window()
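# Added illustration (a NumPy-based sketch, not part of the original source):
# for a real-valued input the FFT is conjugate-symmetric, so only the first
# n/2 bins carry distinct information -- which is why spectrum() above keeps
# only display_n = n / 2 samples of x and y.
import numpy as np

signal = np.sin(2 * np.pi * 5 * np.arange(256) / 256)  # a pure 5-cycle tone, n = 256
magnitudes = np.abs(np.fft.fft(signal))
half = magnitudes[:len(signal) // 2]  # the informative half of the spectrum
print(np.argmax(half))  # prints 5: the tone shows up in bin 5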
def set_option(self):
    """Set options"""
    Analysis.set_option(self)
    if not self.options.variable and not self.options.dry_run:
        error("Please provide me with the variable")
        sys.exit(1)
def click_button_fourier_transform(self, window):
    if self.check_empty_combobox_graph():
        return
    analysis_model = self.get_model()
    if self.input_delta_t.get():
        delta_t = float(self.input_delta_t.get())
    else:
        delta_t = 0.001
    analysis = Analysis(analysis_model)
    analysis.set_delta_t(delta_t)
    model = analysis.calculation_fourier_transform()
    n = model.n
    model.display_n = int(n / 2)
    place_of_graph = self.c2.get()
    self.set_graph(model, place_of_graph)
    # model.normalization()
    # self.graph_array.append(model)
    self.draw_graph(model)
    window.destroy()
def fetch_and_store_data(url):
    # NOTE: the url parameter and work_url are currently unused by the active code path
    work_url = 'https://eu-trade.naeu.playblackdesert.com/Trademarket/GetWorldMarketList'
    # Dev V2 will just focus on sending a proper API request to the website
    # Dev V3 will focus on the group search and saving methods, and making sure everything runs just right
    # Using the API interaction module, this fetches a raw message from the endpoint,
    # then decodes it into a processable data type using the Analysis module

    # Open the reference file used to translate item IDs
    with cm.ContextManager(f"{abspath}/data/mp_reference.json") as file:
        use_file = json.loads(file.read())

    # List of item groups as they are organised on the BDO marketplace
    # TO BE MOVED TO ITS OWN FILE
    group_list = [(1, "Main Weapons"), (5, "Sub Weapons"), (10, "Awakening Weapons"),
                  (15, "Armors"), (20, "Accessories"), (25, "Materials"),
                  (30, "Enhancements/Upgrades"), (35, "Consumables"), (40, "Life Tools"),
                  (45, "Alchemy Stones"), (50, "Crystals"), (65, "Mount"),
                  (70, "Ship"), (75, "Wagon"), (80, "Furniture")]
    sub_lists = [17, 15, 22, 6, 4, 8, 2, 8, 10, 4, 7, 13, 9, 6, 9]

    request_session = requests.Session()
    request_session.headers = use_headers
    with request_session as session:
        for group_index, group in enumerate(group_list):
            sub_range = sub_lists[group_index]
            final_print = []
            for x in range(1, sub_range + 1):
                # rawest_msg = (api_requests.Api_Request.sub_class_request(url=work_url, mainkey=group[0], subkey=1, session=session).content).content
                # Step 1: fetch the info from the marketplace for the currently
                # selected group, using the group key from group_list
                raw_msg = json.loads(api_requests.Api_Request.sub_request(
                    mainKey=group[0], subKey=x, session=session).content)['resultMsg']
                # Step 2: decode the message and convert it to usable data
                decoded_msg = analysis.decode_msg(raw_msg)
                # Match every decoded item to its reference entry in the json file
                # by ID and keep both halves as a (reference, market data) tuple
                for a in use_file:
                    for item in decoded_msg:
                        if item[0] == a[0]:
                            final_print.append((a, item[1:]))

            # THIS SEGMENT IS GOING TO BE MOVED TO SAVE_DATA MODULE
            to_save_json = analysis.reformat_sub_group(final_print)
            to_save_json = json.dumps(to_save_json, indent=4)
            date_today = str(datetime.date.today())
            folder_index = group_index + 1
            with open(f"{abspath}/data/group_{folder_index}/daily/{date_today}.json", "w") as file:
                file.write(to_save_json)

            # Progress bar
            progress_percent = ((group_index + 1) / len(group_list)) * 100
            print(f"Completion: {progress_percent}% - {group[1]}", end='\r')
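# Added aside (not from the original source): the nested ID-matching loop in
# fetch_and_store_data is O(len(use_file) * len(decoded_msg)) per sub-group.
# Assuming item IDs are unique and hashable, a dict index over the reference
# file yields the same (reference, market data) pairs in linear time, in
# decoded-message order; a minimal sketch:
def match_by_id(use_file, decoded_msg):
    # Build a one-shot index: item ID -> reference entry
    reference_by_id = {entry[0]: entry for entry in use_file}
    # Pair each decoded market row with its reference entry, skipping unknown IDs
    return [(reference_by_id[item[0]], item[1:])
            for item in decoded_msg if item[0] in reference_by_id]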
def play_video(info):
    history.add(info)
    analysis = Analysis(info=info)
    video = analysis.get_video()
    logger = logging.getLogger(__name__)
    logger.debug(video)
    subtitle = analysis.get_ass_path()
    Player().play(video, subtitle, info['title'])
def fast_fourier_transform(self):
    position_to_analysis = int(self.comboBox.currentText())
    model_for_analysis = POSITION_FOR_ANALYSIS.get(position_to_analysis)
    analysis = Analysis(model_for_analysis)
    model = analysis.spectrum()
    place_to_show = int(self.comboBox_2.currentText())
    POSITION_FOR_ANALYSIS[place_to_show] = model
    self.main_window.show_graph(model, place_to_show, normalisation=True)
    self.close_window()
def test_get_analysis_lambda_for_reduce(self, mock_analysis_record):
    # set up the input data structure obtained after transformation and aggregation
    input_data_structure = {
        'rule': [
            {'key': False, 'func_name': 'Max', 'input_field': 'traffic'},
            {'key': False, 'func_name': 'Max', 'input_field': 'ip_size'},
            {'key': False, 'func_name': 'Sum', 'input_field': 'ip_size_sum'}
        ],
        'operation_type': 'reduceByKey'
    }
    # set up the structure of the config
    config = TestConfig({
        "historical": {
            "method": "influx",
            "influx_options": {"measurement": "mock"}
        },
        "alert": {"method": "stdout", "option": {}},
        "time_delta": 20,
        "accuracy": 3,
        "rule": {"ip_size": 5, "ip_size_sum": 10, "traffic": 15}
    })
    detection = Analysis(config.content, Mock(), Mock(), input_data_structure)
    lambda_analysis = detection.get_analysis_lambda()
    self.assertIsInstance(
        lambda_analysis, types.LambdaType,
        "Failed. get_analysis_lambda should return a lambda object")
    lambda_analysis((3, 4, 5, 4))
    self.assertTrue(
        mock_analysis_record.called,
        "Failed. analysis_record was not called in the lambda returned by get_analysis_lambda.")
def autocorrelation(self):
    position_to_analysis = int(self.comboBox.currentText())
    model_for_analysis = POSITION_FOR_ANALYSIS.get(position_to_analysis)
    analysis = Analysis(model_for_analysis)
    model = analysis.calculation_autocorrelation()
    place_to_show = int(self.comboBox_2.currentText())
    POSITION_FOR_ANALYSIS[place_to_show] = model
    self.main_window.show_graph(model, place_to_show, normalisation=False)
    self.close_window()
def set_option(self):
    """Set options"""
    Analysis.set_option(self)
    if not self.options.variable:
        error("Please provide me with the variable and type of the measurement!")
        sys.exit(1)
    if self.options.variable not in ("rrate", "rdelay", "qlimit", "bnbw",
                                     "delay", "ackreor", "ackloss"):
        error("I did not recognize the variable you gave me!")
        sys.exit(1)
    if self.options.rotype not in ("reordering", "congestion", "both"):
        error("I did not recognize the type you gave me!")
        sys.exit(1)
def click_button_bpf(self, window):
    if self.check_empty_combobox_graph():
        return
    analysis_model = self.get_model()
    analysis = Analysis(analysis_model)
    model = analysis.spectrum()
    place_of_graph = self.c2.get()
    self.set_graph(model, place_of_graph)
    self.draw_graph(model)
    window.destroy()
def __init__(self):
    Analysis.__init__(self)
    self.parser.set_usage("Usage: %prog [options]\n"
                          "Creates graphs showing throughput, rtt, rtt_min and rtt_max over the "
                          "variable given by the option -V.\n"
                          "For this all flowgrind logs out of the input folder are used "
                          "which have the type given by the parameter -T.")
    self.parser.add_option('-V', '--variable', metavar="Variable",
                           action='store', type='string', dest='variable',
                           help='The variable of the measurement [bnbw|delay].')
    self.parser.add_option('-E', '--plot-error', metavar="PlotError",
                           action='store_true', dest='plot_error',
                           help="Plot error bars [default: %default]")
    self.parser.add_option('-s', '--per-subflow', metavar="Subflows",
                           action='store_true', dest='per_subflow',
                           help='For scenarios with multiple subflows each subflow is '
                                'plotted separately instead of an aggregation of all '
                                'those subflows [default: %default]')
    self.parser.add_option('-f', '--filter', action='append', type='int',
                           dest='scenarios', default=[],
                           help='Filter the scenarios to be used for a graph')
    self.parser.add_option('-d', '--dry-run', action="store_true", dest="dry_run",
                           help="Test the flowlogs only")

    self.plotlabels = dict()
    self.plotlabels["bnbw"] = r"bottleneck bandwidth [$\\si[per=frac,fraction=nice]{\\Mbps}$]"
    self.plotlabels["qlimit"] = r"bottleneck queue length [packets]"
    self.plotlabels["rrate"] = r"reordering rate [$\\si{\\percent}$]"
    self.plotlabels["rdelay"] = r"reordering delay [$\\si{\\milli\\second}$]"
    self.plotlabels["rtos"] = r"RTO retransmissions [$\\#$]"
    self.plotlabels["frs"] = r"fast retransmissions [$\\#$]"
    self.plotlabels["thruput"] = r"throughput [$\\si[per=frac,fraction=nice]{\\Mbps}$]"
    # self.plotlabels["bnbw"] = r"Bottleneck Bandwidth [$\\si{\\Mbps}$]"
    # self.plotlabels["qlimit"] = r"Bottleneck Queue Length [packets]"
    # self.plotlabels["rrate"] = r"Reordering Rate [$\\si{\\percent}$]"
    # self.plotlabels["rdelay"] = r"Reordering Delay [$\\si{\\milli\\second}$]"
    # self.plotlabels["rtos"] = r"RTO Retransmissions [$\\#$]"
    # self.plotlabels["frs"] = r"Fast Retransmissions [$\\#$]"
    # self.plotlabels["thruput"] = r"Throughput [$\\si{\\Mbps}$]"
    self.plotlabels["rtt_max"] = r"Maximal Round Trip Time on Application Layer"
    self.plotlabels["rtt_min"] = r"Minimal Round Trip Time on Application Layer"
    self.plotlabels["rtt_avg"] = r"Average Round Trip Time on Application Layer"
    # self.plotlabels["fairness"] = r"Fairness"
    self.plotlabels["delay"] = r"RTT [$\\si{\\milli\\second}$]"
def task_spectrum(self, window):
    analysis_model = self.get_model()
    analysis = Analysis(analysis_model)
    model = analysis.calculation_fourier_transform()
    place_of_graph = self.combobox_place_graph.get()
    self.set_graph(model, place_of_graph)
    model.normalization()
    # self.graph_array.append(model)
    self.draw_graph(model)
    window.destroy()
def click_button_nested_correlation(self, window):
    if self.check_empty_combobox_graph():
        return
    analysis_model = self.get_model()
    analysis = Analysis(analysis_model)
    model = analysis.calculation_nested_correlation()
    place_of_graph = self.c2.get()
    self.set_graph(model, place_of_graph)
    # self.graph_array.append(model)
    self.draw_graph(model)
    window.destroy()
def get_analysis(self, analyzed_model):
    for i in self.analysis_model_list:
        if i.model == analyzed_model:
            return i
    analysis_model = Analysis(analyzed_model)
    self.analysis_model_list.append(analysis_model)
    return analysis_model
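# Added example (not from the original source): the method above is a linear
# get-or-create scan over analysis_model_list. Assuming models are hashable,
# the same caching pattern can be expressed with a dict; AnalysisCache and
# _by_model are hypothetical names, not taken from the source.
class AnalysisCache:
    def __init__(self):
        self._by_model = {}  # model -> Analysis

    def get_analysis(self, analyzed_model):
        # Create the Analysis lazily on first request, then reuse it
        if analyzed_model not in self._by_model:
            self._by_model[analyzed_model] = Analysis(analyzed_model)
        return self._by_model[analyzed_model]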
def click_button_shift(self, window):
    if self.check_empty_combobox_graph():
        return
    analysis_model = self.get_model()
    analysis = Analysis(analysis_model)
    model = analysis.calculation_fourier_transform()
    place_of_graph = self.c2.get()
    self.set_graph(model, place_of_graph)
    model.normalization()
    # self.graph_array.append(model)
    self.draw_graph(model)
    window.destroy()
def __init__(self):
    Analysis.__init__(self)
    self.parser.set_usage("Usage: %prog [options]\n"
                          "Creates graphs showing throughput, frs and rtos over the "
                          "variable given by the option -V.\n"
                          "For this all flowgrind logs out of the input folder are used "
                          "which have the type given by the parameter -T.")
    self.parser.add_option('-V', '--variable', metavar="Variable",
                           action='store', type='string', dest='variable',
                           help='The variable of the measurement [bnbw|qlimit|rrate|rdelay].')
    self.parser.add_option('-T', '--type', metavar="Type",
                           action='store', type='string', dest='rotype',
                           help='The type of the measurement [reordering|congestion|both].')
    self.parser.add_option('-E', '--plot-error', metavar="PlotError",
                           action='store_true', dest='plot_error',
                           help="Plot error bars")
    self.parser.add_option('-d', '--dry-run', action="store_true", dest="dry_run",
                           help="Test the flowlogs only")
    self.parser.add_option('-F', '--fairness', action="store_true", dest="fairness",
                           help="Plot fairness instead")

    self.plotlabels = dict()
    self.plotlabels["bnbw"] = r"Bottleneck Bandwidth [$\\si{\\Mbps}$]"
    self.plotlabels["qlimit"] = r"Bottleneck Queue Length [packets]"
    self.plotlabels["rrate"] = r"Reordering Rate [$\\si{\\percent}$]"
    self.plotlabels["rdelay"] = r"Reordering Delay [$\\si{\\milli\\second}$]"
    self.plotlabels["rtos"] = r"RTO Retransmissions [$\\#$]"
    self.plotlabels["frs"] = r"Fast Retransmissions [$\\#$]"
    self.plotlabels["thruput"] = r"Throughput [$\\si{\\Mbps}$]"
    self.plotlabels["fairness"] = r"Fairness"
    self.plotlabels["delay"] = r"Round-Trip Time [$\\si{\\milli\\second}$]"
    self.plotlabels["ackreor"] = r"ACK Reordering Rate [$\\si{\\percent}$]"
    self.plotlabels["ackloss"] = r"ACK Loss Rate [$\\si{\\percent}$]"
    self.plotlabels["rtt_avg"] = r"Application Layer RTT [$\\si{\\second}$]"
    self.plotlabels["dsacks"] = r"Spurious Retransmissions [$\\#$]"
def __init__(self):
    # create top-level parser
    description = textwrap.dedent("""\
        Creates graphs showing throughput, fast retransmits and RTOs over
        the variable given. For this all flowgrind logs out of the input
        folder are used which have the type given by the parameter -t.""")
    Analysis.__init__(self, description=description)
    self.parser.add_argument("variable", action="store",
                             choices=["bnbw", "delay", "qlimit", "rrate",
                                      "rdelay", "ackreor", "ackloss"],
                             help="The variable of the measurement")
    self.parser.add_argument("-t", "--type", action="store", dest="rotype",
                             choices=["reordering", "congestion", "both"],
                             help="The type of the measurement")
    self.parser.add_argument("-e", "--plot-error", action="store_true",
                             help="Plot error bars")
    self.parser.add_argument("-d", "--dry-run", action="store_true",
                             dest="dry_run", help="Test the flowlogs only")
    self.parser.add_argument("-f", "--fairness", action="store_true",
                             help="Plot fairness instead")

    # Labels for plots - we use nice LaTeX code
    self.plotlabels = dict()
    self.plotlabels["bnbw"] = r"Bottleneck Bandwidth [$\\si{\\Mbps}$]"
    self.plotlabels["qlimit"] = r"Bottleneck Queue Length [packets]"
    self.plotlabels["rrate"] = r"Reordering Rate [$\\si{\\percent}$]"
    self.plotlabels["rdelay"] = r"Reordering Delay [$\\si{\\milli\\second}$]"
    self.plotlabels["rtos"] = r"RTO Retransmissions [$\\#$]"
    self.plotlabels["frs"] = r"Fast Retransmissions [$\\#$]"
    self.plotlabels["thruput"] = r"Throughput [$\\si{\\Mbps}$]"
    self.plotlabels["fairness"] = r"Fairness"
    self.plotlabels["delay"] = r"Round-Trip Time [$\\si{\\milli\\second}$]"
    self.plotlabels["ackreor"] = r"ACK Reordering Rate [$\\si{\\percent}$]"
    self.plotlabels["ackloss"] = r"ACK Loss Rate [$\\si{\\percent}$]"
    self.plotlabels["rtt_avg"] = r"Application Layer RTT [$\\si{\\second}$]"
    self.plotlabels["dsacks"] = r"Spurious Retransmissions [$\\#$]"
def test__validation(self):
    # set up the input data structure obtained after transformation and aggregation
    input_data_structure = {
        'rule': [
            {'key': True, 'func_name': '', 'input_field': 'src_ip'},
            {'key': True, 'func_name': '', 'input_field': 'src_mac'},
            {'key': False, 'func_name': 'Max', 'input_field': 'ip_size_sum'},
            {'key': False, 'func_name': 'Sum', 'input_field': 'traffic'}
        ],
        'operation_type': 'reduceByKey'
    }
    # set up the structure of the config
    config = TestConfig({
        "historical": {
            "method": "influx",
            "influx_options": {"measurement": "mock"}
        },
        "alert": {"method": "stdout", "option": {}},
        "time_delta": 20,
        "accuracy": 3,
        "rule": {"ip_size": 5, "ip_size_sum": 10, "traffic": 15}
    })
    with self.assertRaises(UnsupportedAnalysisFormat) as context:
        Analysis(config.content, Mock(), Mock(), input_data_structure)
    self.assertTrue(
        "An error in the analysis rule" in context.exception.args[0],
        "Caught an exception, but it differs from the expected test exception")
def version_3(self, list_with_indicators):
    calculate_ratio = []
    for key, value in list_with_indicators.items():
        working_stock = value['stock_change']
        working_price = value['price_change']
        working_volume = value['volume_change']
        # The goal is to find items with the biggest price drop and highest stock gain.
        # Filter the items pre-maths to limit absurdities and favor certain items
        value_filters = analysis.filter_condition(value)
        if value_filters:
            # print(value['price'][0])
            try:
                appendable_value = (value['name'],
                                    working_volume / (2 * working_price * working_stock))
                calculate_ratio.append(appendable_value)
            except ZeroDivisionError as err:
                print(err, value['name'])

    # used by working_list to look names up again later
    reference_dict = {}
    working_list = []
    for item in calculate_ratio:
        reference_dict[item[1]] = item
        working_list.append(item[1])

    loop_filter = 0
    working_list = [a for a in working_list if a < loop_filter]
    working_list.sort()

    # All change is in %, between 0-100
    # Formula: ratio(price_change, stock_change) * volume_change * price
    # (the price factor is there to favor more expensive items)
    # the final print() prints out the top 10 best purchases
    top_10 = [reference_dict[a] for a in working_list[:10]]
    print(top_10)
    return top_10
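# Added worked example (not from the original source) of the ratio computed in
# version_3: a price drop (negative price_change) paired with a stock gain
# makes the ratio negative, which is exactly what the `a < loop_filter`
# (i.e. a < 0) test selects before sorting.
price_change, stock_change, volume_change = -10, 20, 50
ratio = volume_change / (2 * price_change * stock_change)
print(ratio)  # -0.125; more negative ratios sort to the front of the top-10 list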
def init_dependencies():
    config_obj = Config()
    util_obj = Util()
    draw_obj = Draw(util_obj)
    connections_obj = Connections(util_obj)
    generator_obj = Generator(util_obj)
    data_obj = Data(generator_obj, util_obj)
    features_obj = Features(config_obj, util_obj)
    classify_obj = Classification(config_obj, features_obj, util_obj)
    independent_obj = Independent(config_obj, classify_obj, util_obj)
    comments_obj = Comments(config_obj, util_obj)
    pred_builder_obj = PredicateBuilder(config_obj, comments_obj,
                                        generator_obj, util_obj)
    psl_obj = PSL(config_obj, connections_obj, draw_obj, pred_builder_obj,
                  util_obj)
    tuffy_obj = Tuffy(config_obj, pred_builder_obj, util_obj)
    mrf_obj = MRF(config_obj, connections_obj, draw_obj, generator_obj,
                  util_obj)
    relational_obj = Relational(connections_obj, config_obj, psl_obj,
                                tuffy_obj, mrf_obj, util_obj)
    label_obj = Label(config_obj, generator_obj, util_obj)
    purity_obj = Purity(config_obj, generator_obj, util_obj)
    evaluate_obj = Evaluation(config_obj, generator_obj, connections_obj,
                              util_obj)
    interpret_obj = Interpretability(config_obj, connections_obj,
                                     generator_obj, pred_builder_obj, util_obj)
    analysis_obj = Analysis(config_obj, label_obj, purity_obj, evaluate_obj,
                            interpret_obj, util_obj)
    app_obj = App(config_obj, data_obj, independent_obj, relational_obj,
                  analysis_obj, util_obj)
    return config_obj, app_obj, util_obj
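# Added usage sketch (App.run() is a hypothetical entry point, not confirmed
# by the original source): a wiring function like this is typically invoked
# once at program start and the returned objects handed to the application.
if __name__ == '__main__':
    config_obj, app_obj, util_obj = init_dependencies()
    app_obj.run()  # hypothetical entry point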
def click_button_anti_trend_anti_spike(self, window):
    if self.check_empty_combobox_graph():
        return
    analysis_model = self.get_model()
    analysis = Analysis(analysis_model)
    model = analysis.calculation_anti_trend()
    analysis_2 = Analysis(model)
    model.s_without_spikes_min = -300
    model.s_without_spikes_max = 500
    model_2 = analysis_2.calculation_anti_spike_exam()
    place_of_graph = self.c2.get()
    self.set_graph(model_2, place_of_graph)
    self.append_graph_to_list_and_combobox(model_2)
    # model.normalization()
    # self.graph_array.append(model)
    self.draw_graph(model_2)
    window.destroy()
def set_option(self):
    """Set options"""
    Analysis.set_option(self)
def run_analysis():
    with app.app_context():
        Analysis.tick(db)
        db.session.commit()
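# Added sketch (the APScheduler wiring is an assumption, not part of the
# original source): a background scheduler is one common way a periodic
# tick-and-commit job like run_analysis gets invoked.
from apscheduler.schedulers.background import BackgroundScheduler

scheduler = BackgroundScheduler()
scheduler.add_job(run_analysis, 'interval', seconds=20)  # interval is illustrative
scheduler.start()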
def __init__(self):
    Analysis.__init__(self)
def run_analysis(self):
    self.analysis = Analysis(data=self.data)
from backtesting import Backtest, Strategy
from strategy.ktn_channel import KTNChannel
from strategy.buy_and_hold import BuyAndHold
from service.calendar import Calendar
from service.factor import Factor
from modules.portfolio import Portfolio
from analysis.analysis import Analysis

server_ip = "http://140.115.87.197:8090/"
factor_list = [
    'GVI', 'EPS', 'MOM', 'PE', 'EV_EBITDA', 'EV_S', 'FC_P', 'CROIC',
    'FC_OI', 'FC_LTD'
]
# 0: equal_weight; 1: equal_risk(ATR); 2: equal_risk(SD)
weight_setting = [0, 1, 2]
n_season = [0, 1]
group = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
position = [5, 10, 15, 30, 90, 150]
start_equity = 10000000
start_date = '2010-01-01'
end_date = '2017-12-31'

analysis = Analysis(start_equity, start_date, end_date)
analysis.analysis_factor_performance(factor_list[1])
# analysis.anslysis_portfolio()
# analysis.rank_portfolio_return()
# analysis.plot_net_profit_years()
def test_get_analysis_lambda_for_reducebykey(self, mock_analysis_record, mock_rdd):
    def mock_foreachPartition(test_lambda):
        test_data = [(1, 2, 3)]
        return list(test_lambda(test_data))

    mock_rdd.foreachPartition.side_effect = mock_foreachPartition
    # set up the input data structure obtained after transformation and aggregation
    input_data_structure = {
        'rule': [
            {'key': True, 'func_name': '', 'input_field': 'src_mac'},
            {'key': False, 'func_name': 'Max', 'input_field': 'traffic'},
            {'key': False, 'func_name': 'Max', 'input_field': 'ip_size'},
            {'key': False, 'func_name': 'Sum', 'input_field': 'ip_size_sum'}
        ],
        'operation_type': 'reduceByKey'
    }
    # set up the structure of the config
    config = TestConfig({
        "historical": {
            "method": "influx",
            "influx_options": {"measurement": "mock"}
        },
        "alert": {"method": "stdout", "option": {}},
        "time_delta": 20,
        "accuracy": 3,
        "rule": {"ip_size": 5, "ip_size_sum": 10, "traffic": 15}
    })
    mock_historical_data_repository = MagicMock()
    mock_alert_sender = MagicMock()
    detection = Analysis(config.content, mock_historical_data_repository,
                         mock_alert_sender, input_data_structure)
    lambda_analysis = detection.get_analysis_lambda()
    self.assertIsInstance(
        lambda_analysis, types.LambdaType,
        "Failed. get_analysis_lambda should return a lambda object")
    lambda_analysis(mock_rdd)
    self.assertTrue(
        mock_rdd.foreachPartition.called,
        "Failed. foreachPartition was not called in the lambda returned by get_analysis_lambda.")
    self.assertTrue(
        mock_analysis_record.called,
        "Failed. analysis_record was not called in the lambda returned by get_analysis_lambda.")
    self.assertEqual(
        mock_analysis_record.call_args[0],
        ((2, 3), {'ip_size': 1, 'traffic': 0, 'ip_size_sum': 2}, 20, 3,
         config.content["rule"], mock_alert_sender,
         mock_historical_data_repository, "mock", ['src_mac'], 1),
        "Failed. The function analysis_record was called with invalid arguments")
def apply_options(self):
    """Set options"""
    Analysis.apply_options(self)
# -*- coding: utf-8 -*-
"""
Created on Thu May  7 14:18:50 2020

@author: Mark
"""
from analysis.analysis import Analysis

#%%
A = Analysis()
A.fig1(figsize=(6, 3), dpi=100)
A.fig2(scatterer='He', dpi=100, n_iter=10, inelastic_prob=1)
A.fig3()
A.fig4(dpi=100, multiplot=0, scatterer='He', inelastic_xsect=0.0038)

filename = r'C:\Users\Mark\ownCloud\Muelheim Group\Projects\Gas phase background\python code\gasscattering\data\He\Au4f in vacuum - EX320.txt'
A.fig5(dpi=100, multiplot=0, scatterer='He', filename=filename)
A.exportExcel("test")

filename = r'C:\Users\Mark\ownCloud\Muelheim Group\Projects\Gas phase background\python code\gasscattering\data\He\ARM22\Ag3d vacuum EX340 ARM22.TXT'
A.fig6(dpi=100, multiplot=0, P=4, inelastic_xsect=0.005, scatterer='He', filename=filename)
A.fig2(dpi=100, n_events=4, scatterer='N2', filename=filename)
def test__parse_config(self):
    # set up the input data structure obtained after transformation and aggregation
    input_data_structure = {
        'rule': [
            {'key': True, 'func_name': '', 'input_field': 'src_ip'},
            {'key': True, 'func_name': '', 'input_field': 'src_mac'},
            {'key': False, 'func_name': 'Max', 'input_field': 'ip_size'},
            {'key': False, 'func_name': 'Max', 'input_field': 'ip_size_sum'},
            {'key': False, 'func_name': 'Sum', 'input_field': 'traffic'}
        ],
        'operation_type': 'reduceByKey'
    }
    # set up the structure of the config
    config = TestConfig({
        "historical": {
            "method": "influx",
            "influx_options": {"measurement": "mock"}
        },
        "alert": {"method": "stdout", "option": {}},
        "time_delta": 20,
        "accuracy": 3,
        "rule": {"ip_size": 5, "ip_size_sum": 10, "traffic": 15}
    })
    detection = Analysis(config.content, Mock(), Mock(), input_data_structure)
    self.assertEqual(
        detection._time_delta, config.content["time_delta"],
        "Error: Field _time_delta of the Analysis object does not equal time_delta from config")
    self.assertEqual(
        detection._accuracy, config.content["accuracy"],
        "Error: Field _accuracy of the Analysis object does not equal accuracy from config")
    self.assertEqual(
        detection._rule, config.content["rule"],
        "Error: Field _rule of the Analysis object does not equal rule from config")
from analysis.analysis import Analysis
from utils.general import General

general = General()
analysis = Analysis()

factor_list = [
    # ['FCF_P'],
    ['EV_EBITDA'],
    # ['P_B'],
    # ['P_S'],
    # ['MOM'],
    # ['EPS'],
    # ['ROIC'],
    # ['FCF_OI'],
    # ['EV_EBITDA', 'ROIC'],
    # ['P_B', 'EPS'],
    # ['FCF_P', 'MOM'],
    # ['FCF_OI', 'P_S'],
]


# Equity curve line chart
def plot_equity_curve():
    request = {
        'factor_list': general.factor_list_to_string(factor_list),
        'strategy_list': [1],
        'window_list': [0],
        'method_list': [5015, 5010, 5005, 5020, 5025],
        # 'group_list': [1, 2, 3, 4, 5],
        'group_list': [1],
import pandas as pd
import numpy as np
from ini.ini import *
from enviorment.enviorment import Environment
from grach.grach import Grach
from data_serise.data_serise import Daily_Data_Serise
from analysis.analysis import Analysis
# from lstm.lstm import Lstm

if __name__ == '__main__':
    env = Environment()
    dds = Daily_Data_Serise()
    dds.read_local_file('index.h5')
    env.add_data(dds, Index_Data)

    # grach = Grach(env)
    # grach.run_grach(is_local=True)

    # lstm = Lstm(env)
    # lstm.build_net()
    # lstm.data_process()
    # lstm.run()

    analysis = Analysis(env)
    analysis.analysis()
def __init__(self):
    Analysis.__init__(self)
    self.parser.set_defaults(indir="./", outdir="./rtt")
    self.nodepairs = dict()