def KS(params, eval_func, bins, bmin, h):
    """Kolmogorov-Smirnov statistic between the empirical histogram and a fit.

    params    : parameter vector forwarded to eval_func
    eval_func : callable(params, bin_value) -> fitted value for that bin
    bins      : bin values; must contain bmin
    bmin      : smallest bin value included in the fit (fit starts here)
    h         : empirical histogram aligned with bins

    Returns the maximum absolute difference between the CDF of the fitted
    values and the CDF of the empirical histogram tail h[idx:].

    Raises whatever cdf() raises, after printing diagnostics.  The original
    code used a bare ``except:`` that swallowed the error and then crashed
    with a NameError on ``cdf_h_hat`` below — now we log and re-raise.
    """
    idx = bins.index(bmin)
    # fitted values over the bins covered by the histogram tail
    h_hat = [eval_func(params, b) for b in bins[idx:len(h)]]
    try:
        cdf_h_hat = cdf(h_hat)
    except Exception:
        # Log enough context to reproduce the failure, then propagate.
        traceback.print_exc()
        print("func: %s" % eval_func.__name__)
        print("params: " + str(params))
        print(h_hat)
        raise
    cdf_h = cdf(h[idx:])
    return max(abs(cdf_h[i] - cdf_h_hat[i]) for i in range(len(cdf_h_hat)))
def __init__(self):
    """Build the grid world: shuffle all cells, place agents_per_race agents
    of each race, and wire each new agent to up to 3 earlier agents chosen
    with probability proportional to degree (preferential attachment).
    Leftover cells become the pool of empty locations for unhappy movers."""
    self.agents = {}
    self.locations = [(x, y) for x in range(Grid_Size) for y in range(Grid_Size)]
    shuffle(self.locations)
    per_race = int((Grid_Size ** 2) * (1 - Empty_Space) / Num_of_Races)
    min_friends = 3
    placed = []      # every agent created so far — the friendship candidates
    next_loc = 0     # index of the next shuffled cell to hand out
    for race in range(Num_of_Races):
        for _ in range(per_race):
            agent = Agent(race)
            candidates = list(placed)
            for _ in range(min_friends):
                if not candidates:
                    break  # nobody left to befriend (early agents)
                degs = [degree(a) for a in candidates]
                chosen = candidates[randomFromCDF(cdf(degs))]
                agent.connectTo(chosen)
                candidates.remove(chosen)  # no duplicate friendships
            self.agents[self.locations[next_loc]] = agent
            next_loc += 1
            placed.append(agent)
    # the cells never assigned above are free for unhappy agents to move to
    self.empty_locations = list(self.locations[next_loc:])
    self.steps = 0
    self.unhappy_count = 0
def goodness_of_fit(h, bins, bmin, eval_funcs, params, eps, obj_func=KSmod):
    """Monte-Carlo goodness-of-fit test for the fitted distributions.

    eval_funcs : a list of evaluation functions
    params     : a dictionary of estimated parameter vectors for the
                 functions in eval_funcs
    eps        : precision target; the number of synthetic data sets is
                 int(eps**-2 / 4) + 1

    Returns an OrderedDict mapping each function to the fraction of
    synthetic data sets whose KS statistic exceeds the empirical one —
    i.e. the p-value for that fit.
    """
    num_data_sets = int((eps ** -2) / 4) + 1
    verbose("Generating %d data sets..." % num_data_sets)
    idx = bins.index(bmin)

    # Build each function's fitted distribution (zero below bmin), its CDF,
    # its empirical KS statistic, and a zeroed exceedance counter.
    distributions = {}
    KS_est = {}
    counts = collections.OrderedDict()  # count of synthetic sets with KS_syn > KS_est
    for func in eval_funcs:
        distribution = [0] * idx + [func(params[func], bins[i])
                                    for i in range(idx, len(h))]
        print("%s distribution: " % func.__name__)
        print(distribution)
        distributions[func] = cdf(distribution)
        KS_est[func] = KS(params[func], func, bins, bmin, h)
        counts[func] = 0

    # Draw synthetic data sets, refit, and count how often the synthetic
    # KS statistic beats the empirical one.
    for _ in range(num_data_sets):
        h_syn = generateData(h, bins, bmin, distributions)
        for func in eval_funcs:
            params_syn = estimate(h_syn[func], bins, bmin, func, obj_func=obj_func)
            if KS(params_syn, func, bins, bmin, h_syn[func]) > KS_est[func]:
                counts[func] += 1

    # Normalize counts into p-values.
    for func in eval_funcs:
        counts[func] /= num_data_sets
    return counts  # the p-values for each of the estimated functions
def preferentialHomophilyNetwork(legislators):
    '''
    Returns an adjacency matrix built by preferential attachment with
    homophily.

    Every legislator first gets a self-link.  Then, in random order, each
    legislator scores all others by position similarity weighted by shared
    priority over their 10 highest-priority issues; peers scoring above
    Friend_Threshold become potential friends, and Minimum_Friends of them
    are picked with probability proportional to current degree.

    Fixes: removed dead locals (``i = legislators.index(rep1)`` and
    ``j = legislators.index(rep2)`` were never used — each an O(n) scan per
    iteration), removed the throwaway ``degrees = [1 ...]`` seed list that
    was immediately overwritten inside the friend-selection loop, and
    renamed the sort-key lambda parameter that shadowed an outer name.
    '''
    for k in range(Num_of_Representatives):
        legislators[k].linkTo(legislators[k], 1)  # create self-link
    # sample() over the whole population = a shuffle without changing the original list
    for rep1 in sample(legislators, Num_of_Representatives):
        # rep1's issues, most important first; homophily uses the top 10
        rep1_issues = sorted(State.issues,
                             key=lambda issue: rep1.priorities[issue],
                             reverse=True)
        potential_friends = []
        for rep2 in legislators:  # first find all potential friends with homophily
            if rep1 != rep2:
                link_strength = 0
                sum_pri_diffs = 0
                for issue in rep1_issues[0:10]:
                    similarity = binaryTreeSimilarity(rep1.positions[issue],
                                                      rep2.positions[issue])
                    # weight: how closely the two prioritize this issue
                    pri_diff = 1 - abs(rep1.priorities[issue] - rep2.priorities[issue])
                    link_strength += similarity * pri_diff
                    sum_pri_diffs += pri_diff
                # weighted-average similarity rescaled to [-1, 1]
                # NOTE(review): divides by sum_pri_diffs — zero only if all 10
                # priority differences are exactly 1; confirm that can't occur.
                link_strength = (link_strength / sum_pri_diffs) * 2 - 1
                if link_strength > Friend_Threshold:
                    potential_friends.append(rep2)
        for _ in range(Minimum_Friends):
            # recompute degrees each pick: preferential attachment on the
            # network as it currently stands
            degrees = [len(rep.links.values()) for rep in potential_friends]
            if degrees:
                friend = potential_friends[randomFromCDF(cdf(degrees))]
                rep1.linkTo(friend, 1)
                potential_friends.remove(friend)  # can't friend someone twice
    # NOTE(review): entry [j][i] is legislators[i]'s link value to
    # legislators[j] — confirm this orientation matches the callers.
    matrix = [[legislators[i].getLinkValueTo(legislators[j])
               for i in range(Num_of_Representatives)]
              for j in range(Num_of_Representatives)]
    return matrix
def __init__(self, affiliation, priority_issues=None, default_positions=None):
    """Create a legislator with random priorities and positions.

    affiliation       : stored as-is on the instance
    priority_issues   : optional list of issue indices to seed with extra
                        priority weight (later entries get a larger seed)
    default_positions : optional {issue: position} overrides applied after
                        random position generation

    Fix: the defaults were the mutable literals ``[]`` and ``{}`` — a
    classic Python pitfall (one shared object across all calls).  Replaced
    with ``None`` sentinels; behavior for all existing callers is unchanged.
    Also removed the unused local ``b = Ideology_Bit_Depth`` and the
    commented-out bit-splicing line it served.
    """
    priority_issues = [] if priority_issues is None else priority_issues
    default_positions = {} if default_positions is None else default_positions
    self.priorities = {}
    self.positions = {}
    self.links = {}  # network link strengths to other legislators
    self.affiliation = affiliation
    # generate priorities by preferential attachment: seed the listed
    # issues, then repeatedly reinforce issues in proportion to weight
    priorities = [1 for i in range(Num_of_Issues)]
    for issue in priority_issues:
        priorities[issue] = Priority_Multiplier * (priority_issues.index(issue) + 1)
    for i in range(Num_of_Issues ** 2):
        priorities[randomFromCDF(cdf(priorities))] += 1
    # normalize the priorities to sum = 1
    priorities = pdf(priorities)
    shuffle(priorities)
    # assign priorities and positions
    # NOTE(review): assumes State.issues are ints in range(Num_of_Issues),
    # usable as list indices — confirm against State's definition.
    for issue in State.issues:
        self.priorities[issue] = priorities[issue]
        self.positions[issue] = getrandbits(Solution_Bit_Length)
        if issue in default_positions:
            # an earlier draft spliced the default into only the low
            # Ideology_Bit_Depth bits; current behavior is full replacement
            self.positions[issue] = default_positions[issue]