def test_config(self):
    """The config token is parsed from several scenario-path formats."""
    cases = [
        ('5', u'simple2/5--2016-09-23--100'),
        ('', '0.21/2016-06-30'),
        ('', '0.15.3/nocache--2016-06-17--10@30'),
        ('10aI', '0.22/10aI--2016-11-04--100@50'),
    ]
    for expected, path in cases:
        self.assertEqual(expected, scenario.Scenario(path).config)
def validate_scenario_list(scenario_params_list, experiment_path):
    """Instantiate every scenario without running it to check if every
    scenario is correctly specified. This prevents scenario initialization
    errors during the experiment"""
    logger.debug("Starting to validate scenarios")
    total = len(scenario_params_list)
    for scenario_id, scenario_params in enumerate(scenario_params_list):
        logger.debug(f"Validation scenario {scenario_id + 1}/{total}")
        # TODO: we should not create scenario folder at this point
        candidate = scenario.Scenario(scenario_params, experiment_path,
                                      is_dry_run=True)
        candidate.instantiate_scenario_partners()
        # Pick the split routine matching the declared split type; unknown
        # types fall through without splitting (same as the original chain).
        split_by_type = {
            'basic': candidate.split_data,
            'advanced': candidate.split_data_advanced,
            'fully_specified': candidate.split_data_fully_specified,
        }
        splitter = split_by_type.get(candidate.samples_split_type)
        if splitter is not None:
            splitter(is_logging_enabled=False)
    logger.debug("All scenario have been validated")
def __init__(self, wzrd, db):
    '''(Wizard, Database) -> None

    Initial class creation:
    - Creates supporting Scenario class with initial session parameters
    - Gets parameters and players from wizard.
    - Makes self as the players session.
    '''
    self.id = wzrd.id
    self.db = db
    self.players = wzrd.players
    self.rules = rules.Rules(self)
    self.map = []
    # Scenario keeps back-references to this session and the database.
    self.scenario = scenario.Scenario(wzrd.conditions['scenario'], self, self.db)
    self.phase = 'allocation'  # Allocation/Day/Evening/Night
    self.log = {}  # Game events that will be show to player
    self.round = 1
    self.points = {}
    # First player in the wizard's list opens the game and takes the first turn.
    self.first_turn = self.players[0]
    self.player_turn = self.players[0]
    self.turn_start_time = time.time()
    # Register this session on every player before the scenario setup runs.
    for player in self.players:
        player.set_session(self)
    self.scenario.initial_setup()
    return None
def run_steps(steps: Iterable[scenario.Step]):
    """Build a module from *steps*, (re)import it, and execute its main().

    Returns a pair ``(error_type, details)``:
    - ``(None, None)`` on success;
    - ``('ElementAmbiguousError'|'ElementNotFoundError', (step, action, record))``
      when a pywinauto lookup failed, locating the failing path record;
    - ``('OtherError', traceback_string)`` for any other exception.
    """
    sc = scenario.Scenario()
    sc.steps = steps
    module_file = 'hohohho.py'
    build(sc, module_file, debug=True)
    try:
        module_name = module_file.replace('.py', '')
        # Reload when the generated module was imported before, so the
        # freshly built file is actually executed.
        if module_name in sys.modules:
            module = importlib.reload(sys.modules[module_name])
        else:
            module = importlib.import_module(module_name)
        module.main()
    except (ElementAmbiguousError, ElementNotFoundError) as exc:
        exc_type = ('ElementAmbiguousError'
                    if isinstance(exc, ElementAmbiguousError)
                    else 'ElementNotFoundError')
        # Map the failing path id back to the (step, action, record) triple.
        for step in steps:
            for action in step.actions:
                if isinstance(action, scenario.ItemAction):
                    for record in action.item_path.path:
                        if record.id == exc.path_id:
                            return exc_type, (step, action, record)
        # Was ``assert False`` — stripped under ``python -O``; raise instead
        # so an unmatched path_id is always reported.
        raise AssertionError(
            'path_id {} not found in steps'.format(exc.path_id))
    except Exception:
        # Was a bare ``except:`` which also swallowed KeyboardInterrupt and
        # SystemExit; catch only genuine errors here.
        return 'OtherError', traceback.format_exc()
    return None, None
def run_exp(scenario, remove_small, remove_timeout, or_level, auc_bound,
            background_size, binarize, exclude_sites, current_sites, _rnd):
    """Run one simulated open-world experiment and return a metrics dict.

    NOTE(review): the ``scenario`` parameter (a scenario path/name) shadows
    the ``scenario`` module, hence the ``scenario_module`` alias used below.
    ``None`` values for the removal/level flags keep the current ``config``
    defaults.
    """
    # Override module-level config only where explicit values were given.
    config.OR_LEVEL = config.OR_LEVEL if or_level is None else or_level
    config.REMOVE_SMALL = (config.REMOVE_SMALL
                           if remove_small is None else remove_small)
    config.REMOVE_TIMEOUT = (config.REMOVE_TIMEOUT
                             if remove_timeout is None else remove_timeout)
    scenario_obj = scenario_module.Scenario(scenario,
                                            exclude_sites=exclude_sites)
    (tpr, fpr, auroc, C, gamma, acc, y, yp, yd) = analyse.simulated_open_world(
        scenario_obj, auc_bound=auc_bound, binary=binarize,
        bg_size=background_size, current_sites=current_sites)
    # Persist raw truth/prediction vectors as experiment artifacts.
    _add_as_artifact(y.tolist(), "y_true")
    _add_as_artifact(yp.tolist(), "y_prediction")
    _add_as_artifact(yd, "y_domains")
    return {
        'C': C,
        'gamma': gamma,
        'sites': scenario_obj.get_traces().keys(),
        'score': acc,
        'type': "accuracy",
        # todo: remove duplicates (or_level...), after checking that they match
        'outlier_removal': scenario_obj.trace_args,
        'size_increase': scenario_obj.size_increase(),
        'time_increase': scenario_obj.time_increase(),
        'fpr': fpr,
        'tpr': tpr,
        'auroc': auroc
    }
def test_clear_list(self):
    """clear_list() empties the scenario, both when fresh and after inserts."""
    sc = scenario.Scenario()
    sc.clear_list()
    self.assertEqual(0, len(sc.data_list()))
    self.insertItems(sc, 1)
    self.assertEqual(1, len(sc.data_list()))
    sc.clear_list()
    self.assertEqual(0, len(sc.data_list()))
def add_scenario(self, name=None):
    """Create a Scenario tab in the notebook, register it by name, return it."""
    self.cpt_scenario += 1
    if not name:
        # default name: "scenario <counter>"
        name = 'scenario {}'.format(self.cpt_scenario)
    created = scenario.Scenario(self, name)
    self.scenario_notebook.add(created, text=name, compound=tk.TOP)
    self.dict_scenario[name] = created
    return created
def test_binarized_fake(self):
    """binarized() keeps the background class and merges all foreground traces."""
    c_list = [counter._test(x) for x in [1, 2, 2, 2, 2, 3, 4]]
    bg_mock = {'background': c_list[:], 'a': c_list[:], 'b': c_list[:]}
    s = scenario.Scenario('asdf/2015-12-12--3@7')
    s.traces = bg_mock
    res = s.binarized().get_traces()
    # assertEquals is a deprecated alias (removed in Python 3.12);
    # use assertEqual.
    self.assertEqual(res['background'], c_list)
    self.assertEqual(len(res['foreground']), 2 * len(c_list))
def test_open_world_config(self):
    """Setting _open_world_config yields a 'background' class in the traces."""
    s = scenario.Scenario("disabled/2016-05-12--10@40")
    s._open_world_config = {
        'binary': False,
        'exclude_sites': [],
        'background_size': None
    }
    # assertIn reports the actual keys on failure, unlike
    # assertTrue("background" in ...); membership on the dict itself
    # is equivalent to testing .keys().
    self.assertIn("background", s.get_traces())
def test__binarized_fake_vs_fit(self):
    """Features of a binarized scenario match counter.to_features_cumul
    with binarized labels."""
    traces = [counter._test(x) for x in [1, 2, 2, 2, 2, 3, 4]]
    mock = {'background': traces[:], 'a': traces[:], 'b': traces[:]}
    sc = scenario.Scenario('asdf/2015-12-12--3@7')
    sc.traces = mock
    Xa, ya, _ = sc.binarized().get_features_cumul(current_sites=False)
    Xc, yc, _ = counter.to_features_cumul(mock)
    yc = list(mymetrics.binarized(yc, transform_to=1))
    self.assertTrue(np.array_equal(ya, yc), "ya:{}\nyc:{}".format(ya, yc))
    self.assertTrue(np.array_equal(Xa, Xc))
def test_median(self):
    """median() is correct for both odd and even scenario sizes."""
    sc = scenario.Scenario()
    for size, expected in ((3, 2), (4, 2.5), (5, 3), (6, 3.5)):
        self.setSize(sc, size)
        self.assertEqual(expected, sc.median())
def total_packets_in_helper(names, trace_dicts=None, sitenum=4, save=True):
    '''plot tpi plots in subplots

    example input:
    names = ['disabled/bridge--2016-07-06', 'wtf-pad/bridge--2016-07-05']
    names = {x.path: x.get_traces() for x in scenario_list}
    '''
    # Load traces lazily when the caller gave only scenario names.
    if not trace_dicts:
        trace_dicts = [scenario.Scenario(name).get_traces() for name in names]
    fig, axes = plt.subplots(len(names), 1, sharex=True, sharey=False)
    plt.suptitle("Number of incoming packets per trace")
    mm = counter.MinMaxer()
    keys = set(trace_dicts[0].keys())
    # sina.com.cn is dropped unconditionally — presumably an outlier site;
    # TODO(review): confirm why it is excluded.
    if 'sina.com.cn' in keys:
        keys.remove('sina.com.cn')
    # Keep only sites present in every scenario, ordered by the global site
    # cache, limited to the first `sitenum`.
    for other_dict in trace_dicts[1:]:
        keys = keys.intersection(other_dict.keys())
    keys = sorted(keys, key=lambda x: sites.cache.index(x))[:sitenum]

    def color(x):
        # Shared color mapping so the same site gets the same color per axis.
        return _color(x, keys)

    for (name, counter_dict, ax) in zip(names, trace_dicts, axes):
        total_packets_in(counter_dict, keys, ax, color=color)
        subset = [counter_dict[x] for x in keys]
        # Track global min/max across all subplots to align the x-axes below.
        # NOTE(review): the nested min(min([...])) relies on scenario.tpi
        # returning an iterable per trace list — confirm in scenario module.
        mm.set_if(min(min([scenario.tpi(v) for v in subset])),
                  max(max([scenario.tpi(v) for v in subset])))
        ax.set_title('{}'.format(scenario.Scenario(name).describe()))
    # Align all subplots to the shared min/max with a 20% margin.
    for ax in axes:
        ax.set_xlim(mm.min * 0.8, mm.max * 1.2)
    fig.text(0, 0.5, "relative histograms with kernel-density-estimation",
             va="center", rotation="vertical")
    fig.tight_layout(rect=[0, 0.03, 1, 0.95])
    if save:
        # File name encodes scenarios and sites; truncated to stay a
        # valid path length.
        plt.savefig(
            str("/tmp/total_packets_in_"
                + '_'.join(names).replace('/', '___')
                + '__' + '_'.join(keys)
                + "__palette_colorblind")[:250] + ".pdf")
    return trace_dicts
def init_widgets(self, mainframe):
    """ Set mainframe and initialize widgets to various places.

    The scenario is chosen from the command-line arguments:
    - 3 args: rootname + dirpath -> build a Scenario and import its XML;
    - 2 args: a scenario file path -> load it;
    - otherwise: create a default 'myscenario' in the default directory.
    """
    self._mainframe = mainframe
    #self._neteditor = mainframe.add_view("Network", Neteditor)
    # mainframe.browse_obj(self._module)
    self.make_menu()
    self.make_toolbar()
    args = mainframe.get_args()
    if len(args) == 3:
        # command line provided rootname and dirpath
        rootname = args[1]
        dirpath = args[2]
        name_scenario = rootname
        self._scenario = scenario.Scenario(
            rootname, workdirpath=dirpath,
            name_scenario=name_scenario,
            logger=self._mainframe.get_logger())
        self._scenario.import_xml()
    elif len(args) == 2:
        # command line provided a scenario file to load
        filepath = args[1]
        self._scenario = scenario.load_scenario(
            filepath, logger=self._mainframe.get_logger())
        #self._scenario = cm.load_obj(filepath)
    else:
        # command line provided nothing
        rootname = 'myscenario'
        # None# this means no directory will be created
        # os.path.join(os.path.expanduser("~"),'sumopy','myscenario')
        dirpath = scenario.DIRPATH_SCENARIO
        name_scenario = 'My Scenario'
        self._scenario = scenario.Scenario(
            rootname, workdirpath=dirpath,
            name_scenario=name_scenario,
            logger=self._mainframe.get_logger())
def test___equal__(self):
    """Scenarios compare equal only when the full path token matches."""
    same_path = "wtf-pad/bridge--2017-09-06"
    self.assertEqual(scenario.Scenario(same_path),
                     scenario.Scenario(same_path))
    base = "0.20/0-ai--2016-06-25"
    for other in ("0.20/20-ai--2016-06-25", "0.20/0-aii--2016-06-25"):
        self.assertNotEqual(scenario.Scenario(base),
                            scenario.Scenario(other))
def loadScenario(scenarioID, level=1):
    """Load a scenario by name (str) or numeric id (int).

    Resolves name<->id via the module-level _scenarios registry, builds the
    Scenario at the given level, prints it, and returns it.
    """
    scenName = ''
    if isinstance(scenarioID, str):
        scenName = scenarioID
        scenarioID = _scenarios[scenarioID]["Id"]
    elif isinstance(scenarioID, int):
        for s in _scenarios:
            if _scenarios[s]["Id"] == scenarioID:
                # BUG FIX: was ``scenName = scen`` which referenced the
                # not-yet-defined result variable and raised NameError;
                # the intent is the matching registry key (the name).
                scenName = s
                break
    scen = scenario.Scenario(scenarioID, name=scenName, level=level)
    print("<Loaded Scenario>\n%s" % scen)
    return scen
def main(argv, with_svm=True, cumul=True):
    '''loads stuff, triggers either open or closed-world eval'''
    if len(argv) == 1:
        # no scenario path given: use the current directory
        argv.append('.')
    # by hand: scenarios = counter.for_scenarios(sys.argv[1:])
    scenarios = [scenario.Scenario(x, smart=True) for x in argv[1:]]
    # A 'background' class marks an open-world scenario.
    if 'background' in scenarios[0].get_traces():
        if len(scenarios) > 1:
            # logging.warn is a deprecated alias of logging.warning
            logging.warning('only first scenario chosen for open world analysis')
        return simulated_open_world(scenarios[0])
    else:
        closed_world({x.path: x.get_traces() for x in scenarios},
                     scenarios[0].path, with_svm=with_svm, cumul=cumul)
# Flat driver script: loads runner/model/config modules named on the command
# line (args[0..3]), builds the scenario from args[4], and times the run.
sys.path.append(modelRepo)
filepath = os.getcwd()
sourcepath = filepath + os.path.sep + "Sources"
# Remove stale 'tmp' files left over from a previous run.
if os.path.isfile('tmp'):
    os.remove('tmp')
if os.path.isfile(sourcepath + os.path.sep + 'tmp'):
    os.remove(sourcepath + os.path.sep + 'tmp')
# NOTE(review): imp.load_source is deprecated (imp removed in Python 3.12);
# importlib.util would be the modern replacement — confirm target version.
runnerlib = imp.load_source(runnerName, filepath + os.path.sep + args[0])
configrunnerlib = imp.load_source(configrunnerName, filepath + os.path.sep + args[1])
configmodellib = imp.load_source(configmodelName, filepath + os.path.sep + args[3])
modellib = imp.load_source(modelName, filepath + os.path.sep + args[2])
# NOTE(review): this rebinds the name 'scenario' from the module to an
# instance — the scenario module is unreachable after this line.
scenario = scenario.Scenario(args[4])
model = modellib.model(modelName, configmodellib)
runner = runnerlib.Runner(model, configrunnerlib)
bos = time.time()  # beginning of simulation
try:
    st = runner.run(scenario)  # return simulation time
except:
    # Best-effort recovery: print the error, fall back to wall-clock time,
    # and continue so data can still be saved.
    # save data
    import sys, traceback
    traceback.print_exc(file=sys.stdout)
    print('error during the run -> save data')
    st = time.time() - bos
def test_size_increase__empty(self):
    """A scenario made only of empty traces reports a -100 size increase."""
    empty_trace = counter._test(0)
    sc = scenario.Scenario('wtf-pad/bridge--2016-07-05')
    sc.traces = {'msn.com': [empty_trace], 'google.com': [empty_trace]}
    self.assertEqual(-100, sc.size_increase())
def on_new_scenario(self):
    """Handler for the 'New' button: swap in a fresh scenario, refresh UI."""
    fresh = scenario.Scenario()
    self.sc = fresh
    self.new_scenario_set()
def __init__(self):
    """Build the Pywinauto test-generator main window and its controls."""
    self.sc = scenario.Scenario()
    self.rec = recorder.Recorder()
    self.is_recording = False
    self.current_step = None
    self.step_counter = 1
    self.action_list_current_insert_index = None
    # Buttons collected here are toggled together when recording stops.
    self.active_on_stop_group = []
    self.main_wnd = tkinter.Tk()
    self.main_wnd.title('Pywinauto test generator')
    box = tkinter.Frame(self.main_wnd)
    box.pack(fill=tkinter.BOTH, expand=1)
    # Scrollable listbox showing the recorded actions.
    frame = tkinter.Frame(box)
    frame.pack(fill=tkinter.X, expand=1)
    scrollbar = tkinter.Scrollbar(frame)
    scrollbar.pack(side=tkinter.RIGHT, fill=tkinter.Y)
    self.action_list = tkinter.Listbox(frame, selectmode=tkinter.SINGLE,
                                       yscrollcommand=scrollbar.set)
    self.action_list.pack(fill=tkinter.X, expand=1)
    scrollbar.config(command=self.action_list.yview)
    # Record toggle plus one button per scenario operation.
    self.record_btn = tkinter.Button(box, text='Start',
                                     command=self.on_start_record)
    self.record_btn.pack()
    btn = tkinter.Button(box, text='New', command=self.on_new_scenario)
    btn.pack()
    self.active_on_stop_group.append(btn)
    btn = tkinter.Button(box, text='Add Step...', command=self.on_add_step)
    btn.pack()
    self.active_on_stop_group.append(btn)
    btn = tkinter.Button(box, text='Delete', command=self.on_del_action)
    btn.pack()
    self.active_on_stop_group.append(btn)
    btn = tkinter.Button(box, text='Run', command=self.on_run_step)
    btn.pack()
    self.active_on_stop_group.append(btn)
    btn = tkinter.Button(box, text='Load...', command=self.on_load_scenario)
    btn.pack()
    self.active_on_stop_group.append(btn)
    btn = tkinter.Button(box, text='Save...', command=self.on_save_scenario)
    btn.pack()
    self.active_on_stop_group.append(btn)
    btn = tkinter.Button(box, text='Tune...', command=self.on_tune_item_path)
    btn.pack()
    self.active_on_stop_group.append(btn)
    # Scrollable text area for status output.
    frame = tkinter.Frame(box)
    frame.pack(fill=tkinter.X, expand=1)
    scrollbar = tkinter.Scrollbar(frame)
    scrollbar.pack(side=tkinter.RIGHT, fill=tkinter.Y)
    self.status_area = tkinter.Text(frame, wrap=tkinter.WORD,
                                    yscrollcommand=scrollbar.set)
    self.status_area.pack(fill=tkinter.X, expand=1)
    scrollbar.config(command=self.status_area.yview)
def test__closest_bg(self):
    """A background scenario is its own closest match when bg is included."""
    bg = scenario.Scenario('disabled/background--2016-08-17--4100@1')
    self.assertEqual(bg, bg._closest('background', include_bg=True))
""" from __future__ import print_function import scenario import contributivity import data_splitting import fl_training import contributivity_measures from timeit import default_timer as timer import numpy as np #%% Create scenarii # Create a default scenario my_default_scenario = scenario.Scenario() # Create a custom scenario and comment the main scenario parameters (see scenario.py for more comments) my_custom_scenario = scenario.Scenario() my_custom_scenario.nodes_count = 3 # Number of nodes in the collaborative ML project simulated my_custom_scenario.amounts_per_node = [ 0.05, 0.15, 0.8 ] # Percentages of the data samples for each node my_custom_scenario.samples_split_option = 'Stratified' # If data are split randomly between nodes or stratified to be distinct (toggle between 'Random' and 'Stratified') my_custom_scenario.testset_option = 'Centralised' # If test data are distributed between nodes or stays a central testset (toggle between 'Centralised' and 'Distributed') my_custom_scenario.x_train = my_custom_scenario.x_train[: 600] # Truncate dataset if needed for quicker debugging/testing my_custom_scenario.y_train = my_custom_scenario.y_train[: 600] # Truncate dataset if needed for quicker debugging/testing my_custom_scenario.x_test = my_custom_scenario.x_test[: 100] # Truncate dataset if needed for quicker debugging/testing
warm_up = 100
high_var = True
# NOTE(review): the demands generated here are immediately overwritten by the
# pickle loads below; only distribution_high/distribution_low from these calls
# remain in use — confirm that is intended.
demands_high, distribution_high = generate_demands(periods + warm_up, True)
demands_low, distribution_low = generate_demands(periods + warm_up, False)
#initially generated demands with generate_demands() and saved them - now, they are loaded using pickle
with open("demands_high.txt", "rb") as f:
    demands_high = pickle.load(f)
with open("demands_low.txt", "rb") as f:
    demands_low = pickle.load(f)
# Retailer parameter ranges (min, max) for the three retailers.
r1, r2, r3 = (15, 60), (25, 65), (25, 65)
settings1 = {"high_c_shortage": True, "L0": 2}
s1 = sc.Scenario("equal retailers test", periods, warm_up, r1, r2, r3, 15, 1, 1,
                 repeat=1, high_var=True, run_me_as=0,
                 demands=demands_high, distribution=distribution_high,
                 settings=settings1)
run_scenario(s1)
def main():
    """Entry point: parse arguments, validate every scenario up front, then
    run each scenario n_repeats times and append results to results.csv.

    Returns 0 on success.
    """
    args = parse_command_line_arguments()
    stream, info_logger_id, info_debug_id = init_logger(args)
    # All prints below are redirected into the logging handlers.
    with contextlib.redirect_stdout(stream):
        logger.debug("Standard output is sent to added handlers.")
        config = get_config_from_file(args)
        scenario_params_list = utils.get_scenario_params_list(
            config["scenario_params_list"])
        experiment_path = config["experiment_path"]
        n_repeats = config["n_repeats"]
        # Fail fast: instantiate every scenario before running anything.
        validate_scenario_list(scenario_params_list, experiment_path)
        for scenario_id, scenario_params in enumerate(scenario_params_list):
            logger.info(f"Scenario {scenario_id+1}/{len(scenario_params_list)}: {scenario_params}")
        # Move log files to experiment folder
        move_log_file_to_experiment_folder(info_logger_id, experiment_path,
                                           constants.INFO_LOGGING_FILE_NAME, "INFO")
        move_log_file_to_experiment_folder(info_debug_id, experiment_path,
                                           constants.DEBUG_LOGGING_FILE_NAME, "DEBUG")
        # GPU config
        init_gpu_config()
        # Close open figures
        plt.close("all")
        # Iterate over repeats of all scenarios experiments
        for i in range(n_repeats):
            logger.info(f"Repeat {i+1}/{n_repeats}")
            for scenario_id, scenario_params in enumerate(scenario_params_list):
                logger.info(f"Scenario {scenario_id + 1}/{len(scenario_params_list)}")
                logger.info("Current params:")
                logger.info(scenario_params)
                current_scenario = scenario.Scenario(
                    scenario_params, experiment_path,
                    scenario_id=scenario_id+1, n_repeat=i+1)
                run_scenario(current_scenario)
                # Write results to CSV file
                df_results = current_scenario.to_dataframe()
                df_results["random_state"] = i
                df_results["scenario_id"] = scenario_id
                # Header only when the file is still empty (tell() == 0),
                # so repeated appends produce one header.
                with open(experiment_path / "results.csv", "a") as f:
                    df_results.to_csv(f, header=f.tell() == 0, index=False)
                logger.info(f"Results saved to {os.path.relpath(experiment_path)}/results.csv")
    return 0
def __init__(self, path_app):
    """Build the NetDim main application window: object property tables,
    menus, the first scenario tab, secondary windows and all icon images."""
    tk.Tk.__init__(self)
    path_parent = abspath(join(path_app, pardir))
    self.path_icon = join(path_parent, 'Icons')
    self.path_workspace = join(path_parent, 'Workspace')
    ## ----- Main app : -----
    self.title('NetDim')
    netdim_icon = tk.PhotoImage(file=join(self.path_icon, 'netdim_icon.gif'))
    self.tk.call('wm', 'iconphoto', self._w, netdim_icon)
    ## Netdim objects
    # Maps object subtype -> object class (node / trunk / route / traffic).
    self.nd_obj = {
        'router': 'node', 'oxc': 'node', 'host': 'node', 'antenna': 'node',
        'regenerator': 'node', 'splitter': 'node', 'cloud': 'node',
        'switch': 'node', 'ethernet': 'trunk', 'wdm': 'trunk',
        'route': 'route', 'traffic': 'traffic'
    }
    self.st_to_type = {
        'router': 'node', 'oxc': 'node', 'host': 'node', 'antenna': 'node',
        'regenerator': 'node', 'splitter': 'node', 'switch': 'node',
        'cloud': 'node', 'ethernet': 'trunk', 'wdm': 'trunk',
        'l2vc': 'l2vc', 'l3vc': 'l3vc', 'static route': 'route',
        'BGP peering': 'route', 'OSPF virtual link': 'route',
        'Label Switched Path': 'route', 'routed traffic': 'traffic',
        'static traffic': 'traffic'
    }
    ## User-defined properties and labels per type of object
    # ordered dicts are needed to have the same menu order
    node_common_properties = ('name', 'x', 'y', 'longitude', 'latitude',
                              'ipaddress', 'subnetmask', 'LB_paths', 'AS')
    # we exclude the AS from node_common_properties. We don't need to
    # import/export the AS of a node, because when the AS itself is imported,
    # we rebuild its logical topology, and that includes
    # rebuilding the nodes AS dict
    node_common_ie_properties = node_common_properties[:-1]
    trunk_common_properties = (
        'name', 'source', 'destination', 'interface', 'distance',
        'costSD', 'costDS', 'capacitySD', 'capacityDS',
        # if there is no failure simulation, the traffic property tells us how
        # much traffic is transiting on the trunk in a 'no failure' situation
        # if there is a link in failure, the traffic that is redirected will
        # also contribute to this 'traffic parameter'.
        'trafficSD', 'trafficDS',
        # unlike the traffic property above, wctraffic is the worst case
        # traffic. It is the traffic that we use for dimensioning purposes, and
        # it considers the maximum traffic that the link must be able to
        # handle, considering all possible failure cases.
        'wctrafficSD', 'wctrafficDS',
        # the trunk which failure results in the worst case traffic
        'wcfailure',
        'flowSD', 'flowDS', 'ipaddressS', 'subnetmaskS', 'interfaceS',
        'ipaddressD', 'subnetmaskD', 'interfaceD', 'sntw', 'AS')
    route_common_properties = ('name', 'subtype', 'source', 'destination')
    vc_common_properties = ('name', 'source', 'destination', 'linkS', 'linkD')
    traffic_common_properties = ('name', 'subtype', 'source', 'destination',
                                 'distance', 'throughput')
    trunk_common_ie_properties = ('name', 'source', 'destination', 'interface',
                                  'distance', 'costSD', 'costDS', 'capacitySD',
                                  'capacityDS', 'ipaddressS', 'subnetmaskS',
                                  'interfaceS', 'ipaddressD', 'subnetmaskD',
                                  'interfaceD')
    route_common_ie_properties = ('name', 'source', 'destination',)
    traffic_common_ie_properties = ('name', 'source', 'destination',
                                    'throughput')
    # Editable properties per object subtype.
    self.object_properties = collections.OrderedDict([
        ('router', node_common_properties + ('default_route', 'bgp_AS')),
        ('oxc', node_common_properties),
        ('host', node_common_properties),
        ('antenna', node_common_properties),
        ('regenerator', node_common_properties),
        ('splitter', node_common_properties),
        ('cloud', node_common_properties),
        ('switch', node_common_properties),
        ('ethernet', trunk_common_properties),
        ('wdm', trunk_common_properties + ('lambda_capacity',)),
        ('l2vc', vc_common_properties),
        ('l3vc', vc_common_properties),
        ('static route', route_common_properties + ('nh_ip', 'dst_sntw', 'ad')),
        ('BGP peering', route_common_properties
         + ('bgp_type', 'ipS', 'ipD', 'weightS', 'weightD')),
        ('OSPF virtual link', route_common_properties + ('nh_tk', 'dst_sntw')),
        ('Label Switched Path', route_common_properties + ('lsp_type', 'path')),
        ('routed traffic', traffic_common_properties),
        ('static traffic', traffic_common_properties),
    ])
    # Label choices per object class (used by the 'Options' menu below).
    self.object_label = collections.OrderedDict([
        ('Node', ('None', 'Name', 'Position', 'Coordinates', 'IPAddress',
                  'LB_paths', 'Default_Route')),
        ('Trunk', ('None', 'Name', 'Type', 'Distance', 'Traffic',
                   'WCTraffic', 'Sntw')),
        ('Interface', ('None', 'Name', 'Cost', 'Capacity', 'Flow',
                       'IPaddress', 'Traffic', 'WCTraffic',)),
        ('L2vc', ('None', 'Name',)),
        ('L3vc', ('None', 'Name',)),
        ('Route', ('None', 'Name', 'Distance', 'Type', 'Path', 'Cost',
                   'Subnet', 'Traffic')),
        ('Traffic', ('None', 'Name', 'Distance', 'Throughput'))
    ])
    # object import export (properties)
    self.object_ie = collections.OrderedDict([
        ('router', node_common_ie_properties + ('default_route', 'bgp_AS')),
        ('oxc', node_common_ie_properties),
        ('host', node_common_ie_properties),
        ('antenna', node_common_ie_properties),
        ('regenerator', node_common_ie_properties),
        ('splitter', node_common_ie_properties),
        ('cloud', node_common_ie_properties),
        ('switch', node_common_ie_properties),
        ('ethernet', trunk_common_ie_properties),
        ('wdm', trunk_common_ie_properties + ('lambda_capacity',)),
        ('static route', route_common_ie_properties
         + ('nh_ip', 'dst_sntw', 'ad')),
        ('BGP peering', route_common_ie_properties
         + ('bgp_type', 'ipS', 'ipD', 'weightS', 'weightD')),
        ('OSPF virtual link', route_common_ie_properties
         + ('nh_tk', 'dst_ip')),
        ('Label Switched Path', route_common_ie_properties
         + ('lsp_type', 'path')),
        ('routed traffic', traffic_common_ie_properties),
        ('static traffic', traffic_common_ie_properties)
    ])
    # ordered dicts are needed to have the same menu order
    # box properties defines which properties are to be displayed in the
    # upper left corner of the canvas when hoverin over an object
    node_box_properties = ('name', 'subtype', 'ipaddress', 'subnetmask',
                           'LB_paths')
    trunk_box_properties = ('name', 'subtype', 'interface', 'source',
                            'destination', 'sntw')
    vc_box_properties = ('name', 'type', 'source', 'destination',
                         'linkS', 'linkD')
    self.box_properties = collections.OrderedDict([
        ('router', node_box_properties + ('default_route', 'bgp_AS')),
        ('oxc', node_box_properties),
        ('host', node_box_properties),
        ('antenna', node_box_properties),
        ('regenerator', node_box_properties),
        ('splitter', node_box_properties),
        ('cloud', node_box_properties),
        ('switch', node_box_properties),
        ('ethernet', trunk_box_properties),
        ('wdm', trunk_box_properties + ('lambda_capacity',)),
        ('l2vc', vc_box_properties),
        ('l3vc', vc_box_properties),
        ('static route', route_common_properties
         + ('nh_ip', 'dst_sntw', 'ad')),
        ('BGP peering', route_common_properties
         + ('bgp_type', 'ipS', 'ipD', 'weightS', 'weightD')),
        ('OSPF virtual link', route_common_properties + ('nh_tk', 'dst_sntw')),
        ('Label Switched Path', route_common_properties + ('lsp_type',)),
        ('routed traffic', traffic_common_properties),
        ('static traffic', traffic_common_properties)
    ])
    # methods for string to object conversions
    convert_node = lambda n: self.cs.ntw.nf(name=n)
    convert_link = lambda l: self.cs.ntw.lf(name=l)
    convert_AS = lambda AS: self.cs.ntw.AS_factory(name=AS)
    # NOTE(review): these converters eval() imported strings — safe only for
    # trusted project files.
    self.convert_nodes_set = lambda ln: set(map(convert_node, eval(ln)))
    convert_nodes_list = lambda ln: list(map(convert_node, eval(ln)))
    self.convert_links_set = lambda ll: set(map(convert_link, eval(ll)))
    convert_links_list = lambda ll: list(map(convert_link, eval(ll)))
    # dict property to conversion methods: used at import
    # the code for AS export
    self.prop_to_type = {
        'name': str, 'protocol': str, 'interface': str, 'ipaddress': str,
        'subnetmask': str, 'LB_paths': int, 'default_route': str,
        'x': float, 'y': float, 'longitude': float, 'latitude': float,
        'distance': float, 'costSD': float, 'costDS': float, 'cost': float,
        'capacitySD': int, 'capacityDS': int, 'traffic': float,
        'trafficSD': float, 'trafficDS': float, 'wctrafficSD': float,
        'wctrafficDS': float, 'wcfailure': str, 'flowSD': float,
        'flowDS': float, 'ipaddressS': str, 'subnetmaskS': str,
        'ipaddressD': str, 'subnetmaskD': str, 'interfaceS': str,
        'interfaceD': str, 'sntw': str, 'throughput': float,
        'lambda_capacity': int, 'source': convert_node,
        'destination': convert_node, 'nh_tk': str, 'nh_ip': str,
        'ipS': str, 'ipD': str, 'bgp_AS': str, 'weightS': int,
        'weightD': int, 'dst_sntw': str, 'ad': int, 'subtype': str,
        'bgp_type': str, 'lsp_type': str,
        'path_constraints': convert_nodes_list,
        'excluded_nodes': self.convert_nodes_set,
        'excluded_trunks': self.convert_links_set,
        'path': convert_links_list, 'subnets': str, 'AS': convert_AS
    }
    # Display names for properties (used in property panels and menus).
    self.prop_to_nice_name = {
        'name': 'Name', 'type': 'Type', 'protocol': 'Protocol',
        'interface': 'Interface', 'ipaddress': 'IP address',
        'subnetmask': 'Subnet mask', 'LB_paths': 'Maximum paths (LB)',
        'default_route': 'Default Route', 'x': 'X coordinate',
        'y': 'Y coordinate', 'longitude': 'Longitude',
        'latitude': 'Latitude', 'distance': 'Distance',
        'linkS': 'Source link', 'linkD': 'Destination link',
        'costSD': 'Cost S -> D', 'costDS': 'Cost D -> S', 'cost': 'Cost',
        'capacitySD': 'Capacity S -> D', 'capacityDS': 'Capacity D -> S',
        'traffic': 'Traffic', 'trafficSD': 'Traffic S -> D',
        'trafficDS': 'Traffic D -> S',
        'wctrafficSD': 'Worst case traffic S -> D',
        'wctrafficDS': 'Worst case traffic D -> S',
        'wcfailure': 'Worst case failure', 'flowSD': 'Flow S -> D',
        'flowDS': 'Flow D -> S', 'ipaddressS': 'IP address (source)',
        'subnetmaskS': 'Subnet mask (source)',
        'ipaddressD': 'IP address (destination)',
        'subnetmaskD': 'Subnet mask (destination)',
        'interfaceS': 'Interface (source)',
        'interfaceD': 'Interface (destination)',
        'weightS': 'Weight (source)', 'weightD': 'Weight (destination)',
        'sntw': 'Subnetwork', 'throughput': 'Throughput',
        'lambda_capacity': 'Lambda capacity', 'source': 'Source',
        'destination': 'Destination', 'nh_tk': 'Next-hop trunk',
        'nh_ip': 'Next-hop IP', 'ipS': 'Source IP',
        'ipD': 'Destination IP', 'bgp_AS': 'BGP AS',
        'dst_sntw': 'Destination subnetwork',
        'ad': 'Administrative distance', 'subtype': 'Type',
        'bgp_type': 'BGP Type', 'lsp_type': 'LSP Type',
        'path_constraints': 'Path constraints',
        'excluded_nodes': 'Excluded nodes',
        'excluded_trunks': 'Excluded trunks', 'path': 'Path',
        'subnets': 'Subnets', 'AS': 'Autonomous system'
    }
    self.name_to_prop = {v: k for k, v in self.prop_to_nice_name.items()}
    colors = ['default', 'red', 'purple']
    ## ----- Menus : -----
    menubar = tk.Menu(self)
    upper_menu = tk.Menu(menubar, tearoff=0)
    upper_menu.add_command(label='Add scenario',
                           command=lambda: self.add_scenario())
    upper_menu.add_command(label='Delete scenario',
                           command=lambda: self.delete_scenario())
    upper_menu.add_command(label='Duplicate scenario',
                           command=lambda: self.duplicate_scenario())
    upper_menu.add_separator()
    upper_menu.add_command(label='Import graph',
                           command=lambda: self.import_graph())
    upper_menu.add_command(label='Export graph',
                           command=lambda: self.export_graph())
    upper_menu.add_separator()
    upper_menu.add_command(label='Exit', command=self.destroy)
    menubar.add_cascade(label='Main', menu=upper_menu)
    menu_drawing = tk.Menu(menubar, tearoff=0)
    menu_drawing.add_command(label='Default drawing parameters',
                             command=lambda: dow.DrawingOptions(self))
    menubar.add_cascade(label='Network drawing', menu=menu_drawing)
    menu_routing = tk.Menu(menubar, tearoff=0)
    menu_routing.add_command(label='Advanced graph',
                             command=lambda: self.advanced_graph.deiconify())
    menu_routing.add_command(
        label='Advanced algorithms',
        command=lambda: self.advanced_graph_options.deiconify())
    menu_routing.add_command(label='Network Tree View',
                             command=lambda: NetworkTreeView(self))
    menu_routing.add_command(label='Wavelength assignment',
                             command=lambda: self.rwa_window.deiconify())
    menubar.add_cascade(label='Network routing', menu=menu_routing)
    # choose which label to display per type of object
    menu_options = tk.Menu(menubar, tearoff=0)
    for obj_type, label_type in self.object_label.items():
        menu_type = tk.Menu(menubar, tearoff=0)
        menu_options.add_cascade(label=obj_type + ' label', menu=menu_type)
        for lbl in label_type:
            # default-argument binding avoids the late-binding closure trap
            cmd = lambda o=obj_type, l=lbl: self.cs.refresh_labels(o, l)
            menu_type.add_command(label=lbl, command=cmd)
    menu_options.add_command(label='Change display',
                             command=lambda: self.cs.change_display())
    # show / hide option per type of objects
    menu_display = tk.Menu(menubar, tearoff=0)
    for index, type in enumerate(self.object_properties):
        new_label = ' '.join(('Hide', type))
        cmd = lambda t=type, i=index: self.cs.show_hide(menu_display, t, i)
        menu_display.add_command(label=new_label, command=cmd)
    menu_options.add_cascade(label='Show/hide object', menu=menu_display)
    menubar.add_cascade(label='Options', menu=menu_options)
    self.config(menu=menubar)
    # scenario notebook
    self.scenario_notebook = ttk.Notebook(self)
    self.scenario_notebook.bind('<ButtonRelease-1>', self.change_cs)
    self.dict_scenario = {}
    # cs for 'current scenario' (the first one, which we create)
    self.cs = scenario.Scenario(self, 'scenario 0')
    self.cpt_scenario = 0
    self.scenario_notebook.add(self.cs, text=self.cs.name, compound=tk.TOP)
    self.dict_scenario['scenario 0'] = self.cs
    # object management windows
    self.dict_obj_mgmt_window = {}
    for obj in self.object_properties:
        self.dict_obj_mgmt_window[obj] = omw.ObjectManagementWindow(self, obj)
    # drawing algorithm and parameters: per project
    self.drawing_algorithm = 'Spring layout'
    self.drawing_params = {
        'Spring layout': collections.OrderedDict(
            [('Coulomb factor', 10000), ('Spring stiffness', 0.5),
             ('Speed factor', 0.35), ('Equilibrium length', 8.)]),
        'F-R layout': collections.OrderedDict([('OPD', 0.), ('limit', True)])
    }
    # advanced graph options
    self.advanced_graph_options = galg.GraphAlgorithmWindow(self)
    # routing and wavelength assignment window
    self.rwa_window = rwaw.RWAWindow(self)
    # graph generation window
    self.advanced_graph = adv_gr.AdvancedGraph(self)
    # create a menu
    self.main_menu = main_menu.MainMenu(self)
    self.main_menu.pack(fill=tk.BOTH, side=tk.LEFT)
    self.scenario_notebook.pack(side=tk.LEFT, fill=tk.BOTH, expand=1)
    # dict of nodes image for node creation
    self.dict_image = collections.defaultdict(dict)
    # Per-subtype icon sizes in pixels (width, height).
    self.node_size_image = {
        'router': (33, 25), 'switch': (54, 36), 'oxc': (35, 32),
        'host': (35, 32), 'regenerator': (64, 50), 'splitter': (64, 50),
        'antenna': (35, 35), 'cloud': (60, 35),
    }
    self.dict_size_image = {
        'general': {
            'netdim': (75, 75), 'motion': (75, 75), 'multi-layer': (75, 75)
        },
        'l_type': {
            'ethernet': (85, 15), 'wdm': (85, 15), 'static route': (85, 15),
            'BGP peering': (85, 15), 'OSPF virtual link': (85, 15),
            'Label Switched Path': (85, 15), 'routed traffic': (85, 15),
            'static traffic': (85, 15)
        },
        'ntw_topo': {
            'ring': (38, 33), 'tree': (35, 21), 'star': (36, 35),
            'full-mesh': (40, 36)
        }
    }
    # Load one icon per (color, node subtype); references are kept in
    # dict_image so Tk does not garbage-collect the images.
    for color in colors:
        for node_type in self.cs.ntw.node_subtype:
            img_path = join(self.path_icon,
                            ''.join((color, '_', node_type, '.gif')))
            img_pil = ImageTk.Image.open(img_path).resize(
                self.node_size_image[node_type])
            img = ImageTk.PhotoImage(img_pil)
            # set the default image for the button of the frame
            if color == 'default':
                self.main_menu.type_to_button[node_type].config(
                    image=img, width=50, height=50)
            self.dict_image[color][node_type] = img
    for category_type, dict_size in self.dict_size_image.items():
        for image_type, image_size in dict_size.items():
            x, y = image_size
            img_path = join(self.path_icon, image_type + '.png')
            img_pil = ImageTk.Image.open(img_path).resize(image_size)
            img = ImageTk.PhotoImage(img_pil)
            self.dict_image[category_type][image_type] = img
            self.main_menu.type_to_button[image_type].config(
                image=img, width=x, height=y + 10)
    # image for a link failure
    img_pil = ImageTk.Image.open(join(self.path_icon, 'failure.png'))\
        .resize((25,25))
    self.img_failure = ImageTk.PhotoImage(img_pil)
def from_mongoentry(entry):
    """Build a ``Result`` from a raw experiment document fetched from MongoDB.

    Walks the (partly optional) document structure with the
    ``_value_or_none`` / ``_value_or_`` helpers, falling back to stored
    artifacts for the large prediction arrays, and returns a populated
    ``Result`` instance.

    NOTE(review): the exact document schema is defined by the experiment
    framework that wrote it — the key paths below are taken at face value.
    """
    # Git commit of the code that produced this run, if recorded.
    git = _value_or_none(entry, 'experiment', 'repositories', 0, 'commit')
    # Classifier hyper-parameters: either stored flat under 'result', or
    # nested inside a serialized estimator ('py/state' paths).
    c = (_value_or_none(entry, 'result', 'C') or
         _value_or_none(entry, 'result', 'clf', 'py/state', 'estimator',
                        'py/state', 'C'))
    gamma = (_value_or_none(entry, 'result', 'gamma') or
             _value_or_none(entry, 'result', 'clf', 'py/state', 'estimator',
                            'py/state', 'gamma'))
    # Number of sites: prefer counting the actual site list, else the
    # configured size. TypeError covers a stored None/non-sequence value.
    try:
        size = len(entry['result']['sites'])
    except (KeyError, TypeError):
        size = _value_or_none(entry, 'config', 'size')
    # NOTE(review): _value_or_none presumably returns None rather than
    # raising, so this KeyError branch may be dead code — confirm against
    # the helper's implementation.
    try:
        type_ = _value_or_none(entry, 'result', 'type')
    except KeyError:
        if entry['status'] == 'COMPLETED':
            type_ = "cumul"
        else:
            raise
    # Open-world runs carry extra evaluation data; a missing key anywhere
    # in this section downgrades the run to closed-world (False).
    try:
        open_world = entry['experiment']['name'] == 'wf_open_world'
        if open_world:  # non-empty dict is True
            open_world = {
                'fpr': _value_or_none(entry, 'result', 'fpr'),
                'tpr': _value_or_none(entry, 'result', 'tpr'),
                'auroc': _value_or_none(entry, 'result', 'auroc'),
                'auc_bound': _value_or_none(entry, 'config', 'auc_bound'),
                'background_size': _value_or_none(
                    entry, 'config', 'background_size'),
                # _value_or_ takes an explicit default ([]) instead of None.
                'exclude_sites': _value_or_(
                    entry, [], 'config', 'exclude_sites'),
                'binary': _value_or_none(entry, 'config', 'binarize'),
                'current_sites': _value_or_none(
                    entry, 'config', 'current_sites')
            }
    except KeyError:
        open_world = False
    size_overhead = _value_or_none(entry, 'result', 'size_increase')
    time_overhead = _value_or_none(entry, 'result', 'time_increase')
    # Parse the scenario name; for runs that never finished, a ValueError
    # from parsing is tolerated and a placeholder string is kept instead.
    try:
        scenario_obj = scenario.Scenario(
            entry['config']['scenario'], open_world=open_world,
            exclude_sites=_value_or_(entry, [], 'config', 'exclude_sites'))
    except ValueError:
        if entry['status'] not in ["COMPLETED", "EXTERNAL"]:
            scenario_obj = "placeholder for scenario {}".format(
                entry['config']['scenario'])
        else:
            raise
    # Ground-truth labels: inline values, raw list, or stored artifact.
    yt = (_value_or_none(entry, 'result', 'y_true', 'values') or
          _value_or_none(entry, 'result', 'y_true') or
          _load_artifact_or_none(entry, "y_true"))
    # Predictions: newer runs store a list of {'values': ...} dicts; the
    # TypeError fallback handles older/flat layouts and artifacts.
    try:
        yp = [x['values'] for x in _value_or_none(
            entry, 'result', 'y_prediction')]
    except TypeError:
        yp = (_value_or_none(entry, 'result', 'y_prediction') or
              _load_artifact_or_none(entry, "y_prediction"))
    yd = (_value_or_none(entry, 'result', 'y_domains') or
          _load_artifact_or_none(entry, "y_domains"))
    return Result(
        scenario_obj, _value_or_none(entry, 'result', 'score'), git,
        _value_or_none(entry, 'stop_time'), type_, size, open_world,
        size_overhead=size_overhead, time_overhead=time_overhead,
        _id=entry['_id'], C=c, gamma=gamma, src=entry, ytrue=yt, ypred=yp,
        ydomains=yd)
        # NOTE(review): tail of a function whose definition starts before
        # this chunk — indentation reconstructed, do not trust it blindly.
        # 'random' here shadows the stdlib module name; consider renaming.
        random.append(demand)
    return random, dist


def name_gen(settings, high_var, extra=""):
    """Stub: always returns None.

    NOTE(review): parameters are currently unused — presumably meant to
    build a scenario name from the settings; confirm intent or remove.
    """
    return


if __name__ == '__main__':
    # Build the list of scenarios to run: three settings blocks, each
    # instantiated with high and low demand variance.
    scenarios = []
    # block: (L0, Li) = (1,3)
    settings4 = {"L0": 1, "Li": 3, "high_c_shortage": True, "h0": 0.05}
    # NOTE(review): the trailing comma in "over est," appears only on the
    # high_var=True labels — possibly intentional tagging, possibly a typo.
    scenarios.append(
        sc.Scenario("over est,", high_var=True, settings=settings4))
    scenarios.append(
        sc.Scenario("over est", high_var=False, settings=settings4))
    settings5 = {"L0": 1, "Li": 3, "high_c_shortage": False, "h0": 0.05}
    scenarios.append(
        sc.Scenario("over est,", high_var=True, settings=settings5))
    scenarios.append(
        sc.Scenario("over est", high_var=False, settings=settings5))
    settings6 = {"L0": 1, "Li": 3, "high_c_shortage": True, "h0": 0.1}
    scenarios.append(
        sc.Scenario("over est,", high_var=True, settings=settings6))
    scenarios.append(
        sc.Scenario("over est", high_var=False, settings=settings6))
def test_empty_data(self):
    """A freshly constructed Scenario exposes an empty data list."""
    fresh = scenario.Scenario()
    self.assertEqual(0, len(fresh.data_list()))
def test_single_item(self):
    """After clearing and adding one item, data_list holds one entry."""
    subject = scenario.Scenario()
    subject.clear_list()
    subject.add_item(123)
    self.assertEqual(1, len(subject.data_list()))
def test_confidence_interval(self):
    """confidence_interval behaves as a plain read/write attribute."""
    subject = scenario.Scenario()
    subject.confidence_interval = 90
    self.assertEqual(subject.confidence_interval, 90)