def main(exp_per, days, qdegree, graph_obj, error_CT, disease_paramters):
    """Run an agent-based epidemic simulation on graph_obj.

    exp_per           -- probability that each agent starts 'Exposed'
    days              -- number of days to simulate
    qdegree, error_CT -- forwarded to Simulate.simulate_days
    graph_obj         -- graph with .size and .adj_list
    disease_paramters -- 7-tuple of transition probabilities

    Returns (state_history, quarantine_history) from the Simulate object.
    """
    (beta_sy, beta_asy, mu_sy, mu_asy,
     gamma_sy, gamma_asy, delta) = disease_paramters

    # Seed the population: each node is 'Exposed' with probability exp_per,
    # otherwise 'Susceptible'.
    agents = []
    for node in range(graph_obj.size):
        initial_state = 'Exposed' if random.random() < exp_per else 'Susceptible'
        agents.append(Agent.Agent(initial_state, node))

    # Wire agents to their neighbours via the graph's adjacency list.
    for indx, agent in enumerate(agents):
        agent.index = indx
        agent.neighbours.extend(agents[j] for j in graph_obj.adj_list[indx])

    individual_types = ['Susceptible', 'Exposed', 'Asymptomatic',
                        'Symptomatic', 'Recovered']

    def p_infection(p1, p2):
        # Probability of being infected by at least one non-quarantined
        # infectious neighbour (p1 per symptomatic, p2 per asymptomatic contact).
        def p_fn(my_agent, neighbour_agents):
            p_not_inf = 1
            if not my_agent.quarantined:
                for nbr in neighbour_agents:
                    if nbr.quarantined:
                        continue
                    if nbr.state == 'Symptomatic':
                        p_not_inf *= (1 - p1)
                    elif nbr.state == 'Asymptomatic':
                        p_not_inf *= (1 - p2)
            return 1 - p_not_inf
        return p_fn

    def p_standard(p):
        # Constant transition probability, independent of neighbours.
        def p_fn(my_agent, neighbour_agents):
            return p
        return p_fn

    # Default every state->state transition to probability 0, then fill in
    # the transitions the model actually uses.
    transmission_prob = {
        src: {dst: p_standard(0) for dst in individual_types}
        for src in individual_types
    }
    transmission_prob['Susceptible']['Exposed'] = p_infection(beta_sy, beta_asy)
    transmission_prob['Exposed']['Symptomatic'] = p_standard(mu_sy)
    transmission_prob['Exposed']['Asymptomatic'] = p_standard(mu_asy)
    transmission_prob['Symptomatic']['Recovered'] = p_standard(gamma_sy)
    transmission_prob['Asymptomatic']['Recovered'] = p_standard(gamma_asy)
    transmission_prob['Recovered']['Susceptible'] = p_standard(delta)

    sim_obj = Simulate.Simulate(graph_obj, agents, transmission_prob)
    sim_obj.simulate_days(days, qdegree, error_CT)
    return sim_obj.state_history, sim_obj.quarantine_history
def Build_Tabs(self):
    """Instantiate the object behind each notebook tab and register them.

    Every tab class shares the same constructor signature:
    (owner, PyMOL handle, tab button, tab name, tab-variables object, prefs).
    The created tabs are stored as attributes on self and collected in
    self.listTabs / self.listBtnTabs (in display order).
    """
    tab_specs = [
        ('IOFile',   IOFile.IOFile,     IOFile.IOFileVars,     self.Btn_IOFiles),
        ('Config1',  Config1.Config1,   Config1.Config1Vars,   self.Btn_Config1),
        ('Config2',  Config2.Config2,   Config2.Config2Vars,   self.Btn_Config2),
        ('Config3',  Config3.Config3,   Config3.Config3Vars,   self.Btn_Config3),
        ('GAParam',  GAParam.GAParam,   GAParam.GAParamVars,   self.Btn_GAParam),
        ('Simulate', Simulate.Simulate, Simulate.SimulateVars, self.Btn_Simulate),
    ]
    self.listTabs = []
    self.listBtnTabs = []
    for name, tab_cls, vars_cls, button in tab_specs:
        tab = tab_cls(self, self.PyMOL, button, name, vars_cls(), self.Prefs)
        setattr(self, name, tab)  # e.g. self.IOFile, self.Config1, ...
        self.listTabs.append(tab)
        self.listBtnTabs.append(button)
    return
def get_best_move(self, grid, game_lev):
    """Choose a column (0-6) to play on a 6x7 Connect-Four board.

    grid     -- 6x7 of cell objects exposing get_color() (0 = empty, >0 = occupied)
    game_lev -- difficulty level; game_lev - 1 random playouts are scored
                per candidate column
    Returns the winning/best column index, or -1 when no move is possible.
    """
    # Copy the visible board into the internal integer board.
    # (The original first zeroed each cell and immediately overwrote it;
    # the redundant assignment is dropped.)
    for r in range(6):
        for c in range(7):
            self.gg[r][c] = grid[r][c].get_color()

    self.bestcol = -1
    self.mxval = -99999
    total = 0  # number of occupied cells on the board
    for i in range(7):
        self.colcolcol[i] = 0  # pieces stacked in column i
        self.colval[i] = 0     # accumulated playout score for column i

    for r in range(6):
        for c in range(7):
            if self.gg[r][c] > 0:
                self.colcolcol[c] += 1
                total += 1

    # Full board (all 42 cells occupied): no legal move.
    # BUG FIX: the original compared `total == -42`, which can never be true
    # since total only counts upward from 0; the intended check is 42.
    if total == 42:
        return -1

    for c in range(7):
        if self.colcolcol[c] == 6:
            continue  # column already full
        row = 5 - self.colcolcol[c]
        self.gg[row][c] = 2  # tentatively drop our piece
        if CheckWin.four_in_a_row_int_board(row, c, self.gg):
            # Immediate win: return at once (the tentative piece is left on
            # self.gg, matching the original behavior).
            return c
        for _ in range(1, game_lev):  # number of simulations per column
            self.colval[c] += Simulate(self.gg).get_val()
        self.gg[row][c] = 0  # undo the tentative move

    # Pick the playable column with the highest accumulated score.
    for c in range(7):
        if self.colcolcol[c] == 6:
            continue
        if self.colval[c] > self.mxval:
            self.bestcol = c
            self.mxval = self.colval[c]
    return self.bestcol
import json
import tkinter.colorchooser as cch
import tkinter.messagebox
from tkinter import *
from tkinter import filedialog
from tkinter.ttk import Combobox

import Simulate
from Simulate import Sheep
from Simulate import Wolf

# Simulation backend: create the world and place the initial sheep.
simulate = Simulate.Simulate()
simulate.init_sheeps()

# UI state.
auto_step: bool = False  # whether the simulation advances automatically
auto_secs: int = 1000    # auto-step interval; presumably milliseconds — confirm against the scheduler elsewhere in this file
scale = 1                # zoom factor applied when drawing animals

root = Tk()
sheep_color = "#0000ff"
wolf_color = "#ff0000"

# Main drawing surface plus the frames that hold the controls/labels.
mid_frame = Canvas(root, width=500, height=500, background="#00ff00")
bot_frame = Frame(root)
alive_sheeps = Label(bot_frame, text="0", fg="red")
top2_frame = Frame(root)


def on_change_scale(event):
    """Map the slider position (-2..2) to a zoom factor and redraw.

    NOTE(review): `sss` (the scale widget) and `repaint_animals` are
    defined elsewhere in this file, outside the visible chunk.
    """
    dict_ = {-2: 0.4, -1: 0.7, 0: 1, 1: 1.3, 2: 1.6}
    global scale
    scale = dict_[sss.get()]
    repaint_animals()
def one_world(self):
    """Run one complete simulated world and accumulate testing/quarantine stats.

    Builds agents and locations from their input files, steps the simulation
    for config_obj.time_steps days, then folds per-agent quarantine/testing
    outcomes into the running totals on self.

    Returns (end_state, agents_obj, locations_obj).
    """
    time_steps = self.config_obj.time_steps

    # Initialize agents and locations from their input files.
    agents_obj = ReadFile.ReadAgents(self.agents_filename, self.config_obj)
    locations_obj = ReadFile.ReadLocations(self.locations_filename,
                                           self.config_obj)

    sim_obj = Simulate.Simulate(self.config_obj, self.model, self.policy_list,
                                self.event_restriction_fn, agents_obj,
                                locations_obj)
    sim_obj.onStartSimulation()

    for i in range(time_steps):
        # Cycle through the provided file lists; None when the list is empty
        # or absent. (The original used `== []` / `== None` comparisons;
        # truthiness covers both cases idiomatically.)
        if not self.interactionFiles_list:
            interactions_filename = None
        else:
            interactions_filename = self.interactionFiles_list[
                i % len(self.interactionFiles_list)]
        if not self.eventFiles_list:
            events_filename = None
        else:
            events_filename = self.eventFiles_list[
                i % len(self.eventFiles_list)]

        sim_obj.onStartTimeStep(interactions_filename, events_filename, i)
        sim_obj.handleTimeStepForAllAgents()
        sim_obj.endTimeStep()

    end_state, machine_cost = sim_obj.endSimulation()

    total_quarantined_days = 0
    wrongly_quarantined_days = 0
    total_false_positives = 0
    # (Unused local `total_positives` from the original removed; positives
    # are accumulated directly on self.total_positives as before.)

    for policy in self.policy_list:
        if isinstance(policy, Test_Policy):
            self.total_positive_pools += policy.positive_pools

    for agent in agents_obj.agents.values():
        # "Right"/"Wrong" both count as quarantined days; only "Wrong"
        # additionally counts as wrongly quarantined.
        for truth in agent.quarantine_list:
            if truth == "Right":
                total_quarantined_days += 1
            elif truth == "Wrong":
                total_quarantined_days += 1
                wrongly_quarantined_days += 1
        history = agent.get_policy_history("Testing")
        if len(history):
            t_f_p, t_p = get_accumulated_result(agent, history)
            total_false_positives += t_f_p
            self.total_positives += t_p

    self.total_quarantined_days += total_quarantined_days
    self.wrongly_quarantined_days += wrongly_quarantined_days
    self.total_infection += len(agents_obj.agents) - end_state["Susceptible"][-1]
    self.total_machine_cost += machine_cost
    self.total_false_positives += total_false_positives
    return end_state, agents_obj, locations_obj
def normal_spread():
    """Simulate 10 days of a single disease spreading on a 50x50 grid,
    then animate the spread and plot the per-type time series."""

    def p_infection(day, global_state, my_agent, neighbour_agents):
        # Probability of catching the infection from at least one
        # non-quarantined infectious neighbour (fixed per-contact prob 0.5).
        p_inf = 0.5
        prob_escape = 1
        for nbr in neighbour_agents:
            if nbr.individual_type in ['Infected', 'Asymptomatic'] and not nbr.policy_state['quarantined']:
                prob_escape *= (1 - p_inf)
        return 1 - prob_escape

    def p_standard(p):
        # Constant transition probability, independent of neighbours.
        def p_fn(day, global_state, a1, nbrs):
            return p
        return p_fn

    # All possible states and their animation colors (index-aligned).
    individual_types = ['Susceptible', 'Infected', 'Recovered', 'Vaccinated',
                        'Asymptomatic', 'Exposed', 'Asymptomatic Recovered']
    color_list = ['white', 'black', 'red', 'blue', 'blue', 'grey', 'pink']

    # Default every transition to probability 0, then set the real ones.
    transmission_prob = {
        src: {dst: p_standard(0) for dst in individual_types}
        for src in individual_types
    }
    transmission_prob['Susceptible']['Exposed'] = p_infection
    transmission_prob['Exposed']['Infected'] = p_standard(0.5)
    transmission_prob['Exposed']['Asymptomatic'] = p_standard(0.3)
    transmission_prob['Infected']['Recovered'] = p_standard(0.2)
    transmission_prob['Asymptomatic']['Asymptomatic Recovered'] = p_standard(0.2)
    transmission_prob['Recovered']['Susceptible'] = p_standard(0)

    # Seed four Infected cells (state 1 == individual_types[1]) at fixed
    # positions on an otherwise all-Susceptible grid.
    gridtable = np.zeros((50, 50))
    for x, y in ((35, 6), (22, 43), (7, 2), (15, 2)):
        gridtable[x][y] = 1
    grid = Grid.Grid(gridtable, individual_types)

    # policy=Policy.Quarantine_area(grid, individual_types, 4, 0)
    policy = None
    sim_obj = Simulate.Simulate(transmission_prob, individual_types, grid, policy)

    def reward_fn(days, no_infected):
        # Reward to maximise; used by the commented simulate_till_end call.
        return -days

    n_days = 10
    sim_obj.simulate_days(n_days)
    # sim_obj.simulate_till_end(reward_fn)
    sim_obj.grid.animate(False, color_list, 0.3)  # animate n_days of disease spread
    sim_obj.grid.plot_time_series()  # time series of each individual type
def co_pandemic_spread():
    """Simulate 20 days of two co-circulating diseases (an infection and a
    flu) on a 50x50 grid, then animate the spread and plot the time series."""

    def p_infection(day, global_state, my_agent, neighbour_agents):
        # Infection pressure from non-quarantined 'Infected'/'Asymptomatic'
        # neighbours, fixed per-contact probability 0.4.
        p_inf = 0.4
        escape = 1
        for nbr in neighbour_agents:
            if nbr.individual_type in ['Infected', 'Asymptomatic'] and not nbr.policy_state['quarantined']:
                escape *= (1 - p_inf)
        return 1 - escape

    def p_infection_flu(day, global_state, my_agent, neighbour_agents):
        # Flu pressure from non-quarantined 'Flu' neighbours, per-contact 0.2.
        p_inf = 0.2
        escape = 1
        for nbr in neighbour_agents:
            if nbr.individual_type in ['Flu'] and not nbr.policy_state['quarantined']:
                escape *= (1 - p_inf)
        return 1 - escape

    def p_standard(p):
        # Constant transition probability, independent of neighbours.
        def p_fn(day, global_state, a1, nbrs):
            return p
        return p_fn

    # NOTE(review): 'grey' in individual_types looks like a stray entry from
    # the color list, but it is harmless (no transitions use it) and is kept.
    individual_types = ['Susceptible', 'Infected', 'Recovered', 'Flu',
                        'Flu Recovered', 'grey']
    color_list = ['white', 'black', 'red', 'blue', 'yellow', 'grey']

    # Default every transition to probability 0, then set the real ones.
    transmission_prob = {
        src: {dst: p_standard(0) for dst in individual_types}
        for src in individual_types
    }
    transmission_prob['Susceptible']['Infected'] = p_infection
    transmission_prob['Susceptible']['Flu'] = p_infection_flu
    transmission_prob['Flu']['Infected'] = p_infection
    transmission_prob['Flu']['Flu Recovered'] = p_standard(0.1)
    transmission_prob['Flu Recovered']['Infected'] = p_infection
    transmission_prob['Infected']['Recovered'] = p_standard(0.2)
    transmission_prob['Recovered']['Susceptible'] = p_standard(0)

    # Scatter 6 'Infected' (state 1) then 4 'Flu' (state 3) cells at random;
    # the random.randint call order matches the original exactly.
    gridtable = np.zeros((50, 50))
    for _ in range(6):
        x = random.randint(0, 49)
        y = random.randint(0, 49)
        gridtable[x][y] = 1
    for _ in range(4):
        x = random.randint(0, 49)
        y = random.randint(0, 49)
        gridtable[x][y] = 3

    grid = Grid.Grid(gridtable, individual_types)
    # policy=Policy.Vaccinate_block(grid, individual_types,1,0)
    policy = None
    sim_obj = Simulate.Simulate(transmission_prob, individual_types, grid, policy)

    def reward_fn(days, no_infected):
        # Reward to maximise; used by the commented simulate_till_end call.
        return -days

    sim_obj.simulate_days(20)
    # sim_obj.simulate_till_end(reward_fn)
    sim_obj.grid.animate(False, color_list, 0.3)
    sim_obj.grid.plot_time_series()