def community_test(DAG, k, threshold):
    communities = comm.community(DAG, k, threshold)

    # Build 'census': a list where each item is the list of nodes assigned to one community.
    census = []
    for community in communities:
        comm_members = []
        for box in box_list:
            for node in box.nodes():
                if box.node[node]['community'] == community:
                    comm_members.append(node)
        census.append(comm_members)

    success = 0.  # initialise the 'success' score

    # Iterate across each list of community members (the items in 'census').
    for comm_members in census:
        for box in box_list:
            nodes = box.nodes()
            # For a given community, count how many of its nodes come from this box.
            counter = 0.
            for node in nodes:
                if node in comm_members:
                    counter += 1.
            # If over half of the nodes in a given community come from the same box,
            # those nodes are considered to have been successfully placed together,
            # whilst the rest are unsuccessfully placed.
            if len(comm_members) > 0:
                fraction = counter / float(len(comm_members))  # fraction of the community that comes from this single box
                if fraction > 0.5:
                    # Add the fraction of nodes deemed successful from this community.
                    success += counter / (len(box_list) * len(nodes))
    return success
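# Illustrative sketch (added, not part of the original module): the same >50%
# majority rule as community_test, shown on plain lists so the arithmetic is
# visible. 'boxes' and 'detected' are hypothetical stand-ins for 'box_list'
# and 'census'.
def _community_test_example():
    boxes = [[1, 2, 3, 4], [5, 6, 7, 8]]   # the "true" groupings
    detected = [[1, 2, 3, 5], [6, 7]]      # communities returned by a detection run
    success = 0.
    for members in detected:
        for box_nodes in boxes:
            counter = float(sum(1 for node in box_nodes if node in members))
            if members and counter / len(members) > 0.5:
                # over half of this community comes from one box: credit those nodes
                success += counter / (len(boxes) * len(box_nodes))
    return success  # 3/8 + 2/8 = 0.625 for the data above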
def simulate(c=None, T=80):
    if c is None:
        c = community(300, 3, 10)
    Transfer = transfer(c)

    # Data to store c.allIdeas, to then make animation
    dataX = np.zeros((T, c.numberMembers))
    dataY = np.zeros((T, c.numberMembers))
    dataZ = np.zeros((T, c.numberMembers))
    posX = np.zeros((T, c.numberMembers))
    posY = np.zeros((T, c.numberMembers))
    ideaDist = np.zeros((T, c.domainSize * 2 + 1))

    # Iterate the idea transfer throughout the community
    for t in range(0, T):
        np.random.seed()
        dataX[t, :] = c.allIdeas[:, 0]
        dataY[t, :] = c.allIdeas[:, 1]
        dataZ[t, :] = c.allIdeas[:, 2]
        posX[t, :] = c.allPositions[:, 0]
        posY[t, :] = c.allPositions[:, 1]
        ideaDist[t, :] = c.ideaDistribution[0][0]
        #Transfer.deterministicMerge()
        Transfer.probabilisticMerge()

    data = [dataX, dataY, dataZ, posX, posY, ideaDist]
    return data
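# Hedged usage sketch (added): assumes 'community' and 'transfer' are importable
# as in the driver script at the end of this section; the constructor arguments
# (members, ideas, domain size) mirror the default call above.
if __name__ == '__main__':
    c = community(300, 3, 10)
    dataX, dataY, dataZ, posX, posY, ideaDist = simulate(c, T=80)
    print(dataX.shape)   # (80, 300): first idea component of every member, per step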
def main():
    search_conf_file = sys.argv[1]
    original_conf_file = sys.argv[2]
    partition_conf_file = sys.argv[3]
    #partition_conf_file = "partition.json"

    #
    # get original config and score
    #
    original_conf = json.loads(open(original_conf_file, 'r').read())

    #
    # hierarchical type tuning
    #
    partition_tree = json.loads(open(partition_conf_file, 'r').read())
    level = len(partition_tree) - 1
    while level >= 0:
        search_conf = json.loads(open(search_conf_file, 'r').read())
        change_items = search_conf['config']

        # create communities based on the current level of the partition tree
        global community_obj
        community_obj = com.community(partition_tree, change_items, level)
        if __debug__:
            fp = open('ivy.log', 'a')
            print >> fp, "----level:", level
            print >> fp, "community configs:"
            print >> fp, "lens:", len(community_obj.confs)
            print >> fp, community_obj.confs
            fp.close()

        level -= 1
def ETLNAUTIA(communityType):
    data = DataNAUTIA(communityType)
    com.community(communityType)
    camp.camp(data.getBibliography(), data.getEntities())
    ctr.country(data.getBibliography())
    cd.campData(data.getBibliography(), data.getEntities(), data.getLocalLeaders())
    gd.generalData(data.getBibliography())
    se.socioEconomic(data)
    g.government(data.getBibliography())
    fa.fisicoAmbiental(data.getBibliography(), data.getEntities())
    u.urbanism(data.getEntities(), data.getGeneralForm(), data.getPublicSpace())
    inf.infrastructure(data)
    s.services(data)
    sh.shelter(data.getEntities(), data.getShelter(), data.getHouseHold())
    fs.foodSecurity(data)
def simulate(rl, numTrials=1, maxIterations=100, verbose=False, sort=False):
    T = maxIterations
    totalRewards = []  # The rewards we get on each trial
    dataX = np.zeros((T, rl.c.numberMembers))
    dataY = np.zeros((T, rl.c.numberMembers))
    dataZ = np.zeros((T, rl.c.numberMembers))
    posX = np.zeros((T, rl.c.numberMembers))
    posY = np.zeros((T, rl.c.numberMembers))
    ideaDist = np.zeros((T, rl.c.domainSize * 2 + 1))

    for trial in range(numTrials):
        # Form a brand new community
        c = community(rl.c.numberMembers, rl.c.numberIdeas, rl.c.domainSize)
        # Form a brand new agent
        agentMDP = mdp.agentMDP(c)
        agentMDP.tau = rl.tau
        # Update QLearning community to new community
        rl.c = agentMDP.c
        rl.index = agentMDP.index
        Transfer = transfer(c)

        state = agentMDP.startState()
        sequence = [state]
        totalDiscount = 1
        totalReward = 0
        for t in range(maxIterations):
            action = rl.getAction(state)
            transitions = agentMDP.succAndProbReward(state, action)
            if trial == numTrials - 1:
                dataX[t, :] = c.allIdeas[:, 0]
                dataY[t, :] = c.allIdeas[:, 1]
                dataZ[t, :] = c.allIdeas[:, 2]
                posX[t, :] = c.allPositions[:, 0]
                posY[t, :] = c.allPositions[:, 1]
                ideaDist[t, :] = c.ideaDistribution[0][0]
            i = sample([prob for newState, prob, reward in transitions])
            newState, prob, reward = transitions[i]
            for n in range(agentMDP.tau):
                Transfer.probabilisticMerge()
            newState = agentMDP.observeCommunity()
            totalReward += totalDiscount * reward
            totalDiscount *= agentMDP.discount()
            state = newState
        if verbose:
            print("Trial %d (totalReward = %s): %s" % (trial, totalReward, sequence))
        totalRewards.append(totalReward)

    data = [dataX, dataY, dataZ, posX, posY, ideaDist, totalRewards]
    return data
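# Hedged helper sketch (added): 'sample' is not defined in this excerpt. It is
# assumed to draw an index i with probability proportional to probs[i]; a
# minimal implementation under that assumption:
import random

def sample(probs):
    r = random.random() * sum(probs)
    acc = 0.0
    for i, p in enumerate(probs):
        acc += p
        if r < acc:
            return i
    return len(probs) - 1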
def detect(filename, nop=-1, debug=False):
    comdet_tmppath = 'comdet_tmp'

    # Read the input file: each non-empty line is expected to hold a node id
    # and an element id separated by a space.
    fileinput = open(filename, 'r')
    lines = fileinput.readlines()
    fileinput.close()

    p1 = wrapper.Pool()
    for line in lines:
        line = line.strip()
        if not line:
            continue
        split = line.split(' ')
        n1 = p1.get_node(split[0])
        if not n1:
            n1 = p1.add_node(split[0])
        n1.add_elem(split[1])

    # Run the community detection in a clean temporary working directory.
    if os.path.exists(comdet_tmppath):
        shutil.rmtree(comdet_tmppath)
    os.mkdir(comdet_tmppath)
    os.chdir(comdet_tmppath)
    com_t = community.community()
    result = com_t.start(p1, nop, debug)
    os.chdir('..')
    if os.path.exists(comdet_tmppath):
        shutil.rmtree(comdet_tmppath)
    return result
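# Hedged usage sketch (added): 'pairs.txt' is a placeholder file name; the
# assumed format is one "<node> <element>" pair per line, matching the parsing
# in detect() above.
if __name__ == '__main__':
    communities = detect('pairs.txt', nop=-1, debug=False)
    print(communities)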
def main():
    bitcode = sys.argv[1]
    search_conf_file = sys.argv[2]
    original_conf_file = sys.argv[3]
    partition_conf_file = sys.argv[4]
    #partition_conf_file = "partition.json"

    #
    # delete log files if they exist
    #
    try:
        os.remove("log.dd")
    except OSError:
        pass
    if __debug__:
        try:
            os.remove("ivy.log")
        except OSError:
            pass

    #
    # get original config and score
    #
    original_conf = json.loads(open(original_conf_file, 'r').read())
    run_config(original_conf, original_conf, bitcode, 0)
    original_score = utilities.get_dynamic_score()
    if __debug__:
        fp = open('ivy.log', 'a')
        print >> fp, "---------"
        print >> fp, "original score"
        print >> fp, original_score
        fp.close()

    #
    # hierarchical type tuning
    #
    partition_tree = json.loads(open(partition_conf_file, 'r').read())
    curr_conf = None
    curr_score = -1
    level = len(partition_tree) - 1
    while level >= 0:
        if __debug__:
            fp = open('ivy.log', 'a')
            print >> fp, "partition level and config"
            print >> fp, level, partition_tree[level]
            fp.close()

        search_conf = json.loads(open(search_conf_file, 'r').read())
        change_items = search_conf['config']

        # delete from the search space the configs that have already been pruned
        if curr_conf != None:
            reach_bottom_label = 1
            for i in range(len(curr_conf['config'])):
                curr_type = curr_conf['config'][i].values()[0]["type"]
                type_vector = change_items[i].values()[0]["type"]
                index = type_vector.index(curr_type)
                if index != 0:
                    reach_bottom_label = 0
                del type_vector[index + 1:]
                if change_items[i].keys()[0] == "call":
                    del change_items[i].values()[0]["switch"][index + 1:]
            if reach_bottom_label == 1:
                break

        # create communities based on the current level of the partition tree
        global community_obj
        community_obj = com.community(partition_tree, change_items, level)
        if __debug__:
            fp = open('ivy.log', 'a')
            print >> fp, "community configs:"
            print >> fp, community_obj.confs
            fp.close()

        #
        # record the change set
        #
        search_changes = community_obj.confs
        change_set = []
        type_set = []
        switch_set = []
        for i in range(len(search_changes)):
            type_vector = search_changes[i]["type"]
            if isinstance(type_vector, list):
                if len(type_vector) == 1:
                    # only one candidate type left for this item
                    tmp_set = []
                    tmp_type_set = []
                    tmp_switch_set = []
                    tmp_set.append(search_changes[i])
                    tmp_type_set.append(type_vector)
                    if "switch" in community_obj.get_vars(search_changes[i]["name"])[0].keys():
                        tmp_switch_set.append(
                            community_obj.get_vars(search_changes[i]["name"])[0]["switch"])
                    else:
                        tmp_switch_set.append([])
                    to_highest_precision(tmp_set, tmp_type_set, tmp_switch_set)
                else:
                    type_set.append(type_vector)
                    change_set.append(search_changes[i])
                    if (len(community_obj.get_vars(search_changes[i]["name"])) == 1) & \
                            ("switch" in community_obj.get_vars(search_changes[i]["name"])[0].keys()):
                        switch_set.append(
                            community_obj.get_vars(search_changes[i]["name"])[0]["switch"])
                    else:
                        switch_set.append([])

        #
        # search for a valid configuration
        #
        print "Searching for valid configuration using delta-debugging algorithm ..."

        # get current score
        if curr_score == -1:
            curr_score = original_score  ## 0.95
            curr_conf = original_conf

        # keep searching while the type set has not been searched throughout
        while not is_empty(type_set):
            search_config(change_set, type_set, switch_set, search_conf,
                          original_conf, bitcode, original_score)

            # get the score of the modified program
            run_config(search_conf, original_conf, bitcode, 0)
            modified_score = utilities.get_dynamic_score()
            if modified_score <= curr_score:
                curr_conf = search_conf
                curr_score = modified_score
                if __debug__:
                    fp = open('ivy.log', 'a')
                    print >> fp, "---------"
                    print >> fp, "updating new score"
                    print >> fp, curr_score
                    fp.close()

        # print the intermediate, per-level configuration
        utilities.print_config(curr_conf,
                               "dd2_valid_level" + str(level) + "_" + bitcode + ".json")
        fleveline = open("log.dd", "a")
        fleveline.write(
            "----------------------------------------------------------------------\n")
        fleveline.close()

        # check the modified configuration
        '''
        if is_bottom_type(change_set):
            if __debug__:
                fp = open('ivy.log', 'a')
                print >> fp, "stop searching : reach bottom types"
                print >> fp, search_conf
                print >> fp, "change set:"
                print >> fp, change_set
                fp.close()
            break
        '''

        level -= 1

    # print tuning result
    if (curr_score <= original_score) & (curr_conf != None):
        # print the valid configuration file and the diff file
        diff = utilities.print_diff(curr_conf, original_conf, "dd2_diff_" + bitcode + ".json")
        if diff:
            utilities.print_config(curr_conf, "dd2_valid_" + bitcode + ".json")
        print "original_score: ", original_score
        print "modified_score: ", curr_score
        fp = open('time.txt', 'a')
        print >> fp, curr_score, "/", original_score
        fp.close()
        print "Check valid_" + bitcode + ".json for the valid configuration file"
        return
    print "No configuration is found!"
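# Hedged invocation sketch (added): the four positional arguments match the
# sys.argv reads at the top of main(); the script and data file names below
# are placeholders.
#
#   python dd2_tuning.py prog.bc search.json original.json partition.json
#
if __name__ == "__main__":
    main()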
from community import community
from transfer import transfer
import mdp, util, utilRL, copy, time, cloudpickle
import numpy as np

############################# Choose a simulation #############################
from simulateRL import simulate
#from controlSimulate import simulate

startTime = time.time()

######################### Take care of filename (pkl extension added later) ##########################
fn = 'ag_dist_rad_gre'

######################## community parameters ##############################
c = community(100, 4, 10)
c.allRadii = np.random.normal(0.4, 0.1, c.numberMembers)

######################### agent parameters ############################
agentMDP = mdp.agentMDP(community=c, index=0, sampleSize=4)
agentMDP.T = 100
agentMDP.tau = 10

rl = mdp.QLearningAlgorithm(agentMDP.index, agentMDP.tau, agentMDP.c,
                            agentMDP.actions, agentMDP.discount(),
                            mdp.communityFeatureExtractor,
                            explorationProb=0.2, alpha=0.1)
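# Hedged continuation sketch (added): the original script presumably runs the
# imported simulate() and saves the result under fn + '.pkl' (cloudpickle is
# imported above). The exact calls below are assumptions, not the original code.
data = simulate(rl, numTrials=1, maxIterations=agentMDP.T)
with open(fn + '.pkl', 'wb') as f:
    cloudpickle.dump(data, f)
print('elapsed: %.1f s' % (time.time() - startTime))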