def experiment_loop(model_type=None, t_lim=99999, num_bots=0, detailed_evo=False):
    '''
    Runs a single model of the given type until convergence (or until t_lim) and saves graph images.

    ARGUMENTS
    model_type: one of "Holme", "Weighted Balance", "Weighted Balance General", "Weighted Balance Bots"
    t_lim (int): maximum number of steps before the run is aborted
    num_bots (int): number of bots (only used for "Weighted Balance Bots")
    detailed_evo (bool): determines whether intermediate graphs are saved, too, or only the converged one
    '''
    np.random.seed(0)
    if model_type == "Holme":
        kw = {"n_vertices": 25, "n_edges": 25}
        A = holme(**kw)
    elif model_type == "Weighted Balance":
        kw = {"n_vertices": 50, "d": 3}
        A = weighted_balance(**kw)
    elif model_type == "Weighted Balance General":
        kw = {"n_vertices": 25, "n_edges": 25, "d": 3, "dist": 1}
        A = weighted_balance_general(**kw)
    elif model_type == "Weighted Balance Bots":
        A = weighted_balance_bots(n_vertices=500, d=3, alpha=0.4, n_edges=499,
                                  initial_graph="barabasi_albert", bot_positions="top",
                                  f=lambda x: min(x, 0), n_bots=num_bots)
    else:
        print("Possible values for model_type are: 'Holme', 'Weighted Balance', 'Weighted Balance General', 'Weighted Balance Bots'")
        return
    # print("using general mode")
    # kw = {"n_vertices": 25, "n_edges": 25, "n_opinions": 0, "d": 3, "phi": 0.5}
    # A = coevolution_model_general(**kw)
    done = False
    A.draw_graph(path=image_folder + str(num_bots) + model_type)
    while not done:
        A.step()
        if A.t % 100 == 0:
            # Finding connected components is way more complex than the model dynamics.
            # Only check for convergence every 100 steps.
            done = A.convergence()
            if detailed_evo:
                if A.t <= 1000 or (A.t % 1000 == 0 and A.t <= 10000) or A.t % 10000 == 0:
                    A.draw_graph(path=image_folder + str(num_bots) + model_type)
        if A.t > t_lim:
            print("Model did not converge")
            done = True
    A.draw_graph(path=image_folder + str(num_bots) + model_type)
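# Example call (a hedged sketch; `image_folder` must already point to an existing
# directory, and the bot count below is purely illustrative):
#
#     experiment_loop(model_type="Weighted Balance Bots", num_bots=10, detailed_evo=True)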
def experiment_loop(kwarg_dict, variying_kwarg, metrics, n=100, model_type=None):
    np.random.seed(0)
    results = {key: [] for key in metrics.keys()}
    for v_kwarg in variying_kwarg[1]:
        kwarg_dict[variying_kwarg[0]] = v_kwarg
        subresults = {key: [] for key in metrics.keys()}
        for i in range(n):
            if model_type == "Holme":
                A = holme(**kwarg_dict)
            elif model_type == "Weighted Balance":
                A = weighted_balance(**kwarg_dict)
            else:
                print("using general mode")
                A = coevolution_model_general(**kwarg_dict)
            done = False
            while not done:
                A.step()
                if A.t % 100 == 0:
                    # Finding connected components is way more complex than the model dynamics.
                    # Only check for convergence every 100 steps.
                    done = A.convergence()
            for key in metrics.keys():
                subresults[key].append(metrics[key](A))
        for key in subresults.keys():
            results[key].append(subresults[key])
    results["variation"] = variying_kwarg
    return results
def experiment_loop(kwarg_dict, variying_kwarg, metrics, n=10, model_type=None, t_lim=t_lim_default, verbose=False):
    '''
    Runs `model_type` with options kwarg_dict for each value of variying_kwarg, n times each,
    then calculates the given metrics on the finished model.

    ARGUMENTS
    kwarg_dict: dict of keyword arguments passed to the model class __init__
        (check the corresponding class in model.py)
    variying_kwarg: tuple of a key and an array of varying values, e.g. ('phi', [0.1, 0.5, 0.7]);
        the model is run for each value
    metrics: dict of functions called on the model object after each completed run;
        determines the output in results
    n: int, number of runs per value of the varying keyword argument
    model_type: possible values are "Holme", "Weighted Balance", "Weighted Balance Bots";
        any other value falls back to the general model
    t_lim: int, maximum number of steps before a run is aborted
    verbose: bool, print progress information

    RETURN
    dict with one list of metric results per metric key, plus a "variation" entry
    holding variying_kwarg
    '''
    timestamp = datetime.now().strftime("%Y-%m-%d %H-%M")
    np.random.seed(0)
    results = {key: [] for key in metrics.keys()}
    for v_kwarg in variying_kwarg[1]:
        if verbose:
            print(str(v_kwarg) + " from " + str(variying_kwarg[1]))
        kwarg_dict[variying_kwarg[0]] = v_kwarg
        subresults = {key: [] for key in metrics.keys()}
        for i in range(n):
            print('iteration {} of {}. '.format(i, n))
            if model_type == "Holme":
                A = holme(**kwarg_dict)
            elif model_type == "Weighted Balance":
                A = weighted_balance(**kwarg_dict)
            elif model_type == "Weighted Balance Bots":
                A = weighted_balance_bots(**kwarg_dict)
            else:
                print("using general mode")
                A = coevolution_model_general(**kwarg_dict)
            done = False
            while not done:
                A.step()
                if A.t % 100 == 0:
                    # Finding connected components is way more complex than the model dynamics.
                    # Only check for convergence every 100 steps.
                    done = A.convergence()
                if A.t > t_lim:
                    print("t>t_lim!")
                    done = True
            for key in metrics.keys():
                subresults[key].append(metrics[key](A))
            # save subresults after every iteration
            # with open("./subresults/run{}.pickle".format(timestamp), "wb") as f:
            #     pickle.dump(subresults, f)
        for key in subresults.keys():
            results[key].append(subresults[key])
    results["variation"] = variying_kwarg
    # A.draw_graph(path=image_folder + "graph")
    return results
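# Example usage (a minimal sketch; the keyword arguments and metric functions below are
# illustrative assumptions and depend on the signatures of the model classes in model.py):
#
#     metrics = {
#         "steps": lambda m: m.t,
#         "converged": lambda m: m.convergence(),
#     }
#     results = experiment_loop(
#         kwarg_dict={"n_vertices": 50, "d": 3},
#         variying_kwarg=("alpha", [0.1, 0.3, 0.5]),
#         metrics=metrics,
#         n=10,
#         model_type="Weighted Balance",
#         verbose=True,
#     )
#     # results["steps"] then holds one sub-list of length n per value of "alpha".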
def WBT_evolution():
    n_vertices = 250
    m = weighted_balance(d=3, n_vertices=n_vertices,
                         f=lambda x: np.sign(x) * np.abs(x)**(1 - 0.4), alpha=0.3)
    k = 1
    # `res` is the module-level result buffer collecting opinion matrices for plotting
    res.add_op_mat(m)
    while not m.convergence():
        # do a plot every 30 rounds
        for j in range(30):
            for i in range(n_vertices):
                # update one node
                m.step()
        # add current state to result buffer to plot later
        res.add_op_mat(m)
        k = k + 1
        print(k)
    res.plotWBT()