def run_optimal():
    """Run one monitor-analyze-plan-execute cycle of the agent under the optimal policy."""
    # Observe where the agent currently is.
    state = Monitor.monitor(None, agent)
    # Enumerate the states reachable from the current position.
    neighbours = Analyze.analyze(state)
    # Ask the optimal policy which action it recommends...
    chosen = Planning.optimalPolicy(neighbours, knowledge)
    # ...then pass it through the environment's uncertainty model,
    # which may substitute a different action than the one desired.
    taken = knowledge.action_func(chosen)
    # Apply the (possibly perturbed) action to the environment.
    state_after = Execution.execute(neighbours + [state], taken)
    # Record the resulting state and its value estimate on the agent.
    agent.update(state_after, knowledge.state_value_dict[state_after])
def run(self):
    """Load the dataset chosen in the UI, then run the chosen clustering algorithm on it.

    Reads `self.data_selection` and `self.alg_selection` (UI widgets with a
    `.get()` accessor — assumed Tkinter-style; TODO confirm), loads the matching
    dataset via `load_data`, and hands it to `Analyze.analyze` together with the
    algorithm's cluster function and the SSE scoring function.

    Raises:
        ValueError: if the selected dataset name has no registered loader
            (previously this fell through and crashed later with a NameError
            on the unbound `data` variable).
    """
    # Dataset name -> loader. A dispatch table replaces the if/elif chain and
    # lets an unknown name fail loudly instead of leaving `data` unbound.
    loaders = {
        "Iris": load_data.load_iris,
        "Seeds": load_data.load_seeds,
        "Glass": load_data.load_glass,
        "Banknote": load_data.load_banknote,
        "Customers": load_data.load_cust_data,
    }
    dataset_name = self.data_selection.get()
    if dataset_name not in loaders:
        raise ValueError("Unknown dataset selection: %r" % (dataset_name,))
    if dataset_name == "Iris":
        # Preserved debug trace from the original implementation.
        print("Selecting Iris!")
    data = loaders[dataset_name]()

    # Algorithm name -> (cluster-function builder, per-dataset parameter table).
    builders = {
        "K-Means": (self.build_kMeans_func, kMeans_params),
        "DBSCAN": (self.build_dbscan_func, dbscan_params),
        "Competitive Learning": (self.build_cl_func, cl_params),
        "PSO": (self.build_pso_function, pso_params),
        "ACO": (self.build_aco_func, aco_params),
    }
    score_list = [score_funcs.cluster_sse]
    # Evaluate the widget once instead of once per elif branch.
    alg_name = self.alg_selection.get()
    if alg_name in builders:
        # An unrecognized algorithm is deliberately a no-op, matching the
        # original elif chain's fall-through behavior.
        build_func, params = builders[alg_name]
        Analyze.analyze(data, dataset_name, 10,
                        build_func(*params[dataset_name]), score_list)
if args.analyze is not None: print('run...') json_path = args.analyze[0] # main_directory = os.path.dirname(os.path.realpath(__file__)) output_name = '../results/' + args.analyze[1] f = open(json_path) samples_names = json.loads(f.readline().strip('\n'))['samples'] stats_params_in_json = json.loads(f.readline().strip('\n'))['stats_params'] html = File_Output.HTML(output_name + ".html", samples_names, stats_params_in_json, analyze_params, misc_params) tsv = File_Output.TSV(output_name + ".tsv", samples_names) sites = Analyze.json_to_site(f) my_sites = [] for site in sites: Analyze.analyze(site) if site.kind == '' and len(site.bulk) > 0 and site.bulk['SUM'] >= analyze_params["bulk_dp_interval"][0] and site.bulk['SUM'] <= analyze_params["bulk_dp_interval"][1]: bulk_a1_ratio = float(site.bulk[site.alts['A1']])/site.bulk['SUM'] if bulk_a1_ratio <= (1 - stats_params_in_json["bulk_ref_limit"]): nr_conflicting = 0 nr_c3_conflicting = 0 nr_a1 = 0 for sample in site.samples.values(): if sample.info == 'CONFLICT': nr_conflicting += 1 elif sample.info == 'C3-CONFLICT': nr_c3_conflicting += 1 elif (sample.info == 'HET-C1' or sample.info == 'HET-C2' or sample.info == 'H**O-A1'): nr_a1 += 1 if nr_a1 >= analyze_params["a1_lower_limit"] and nr_conflicting <= analyze_params["conflicting_upper_limit"] and nr_c3_conflicting <= analyze_params["c3_conflicting_upper_limit"]: