def run_rel_comp():
    """Parse CLI arguments for the relative-comparison subcommand and load the network."""
    subcommand = sys.argv[1]
    # argv[2] (the seed slot used by other subcommands) is intentionally skipped here
    data_path = sys.argv[3]
    out_lim = int(sys.argv[4])
    num_region = int(sys.argv[5])
    use_node_hash = sys.argv[6] == 'y'
    input_json = sys.argv[7]
    # NOTE(review): this unpacks 2 return values, while other call sites in this
    # file unpack 4 or 5 from load_network — confirm net_init.load_network's
    # return arity for this subcommand's input format.
    loc, link_delay = net_init.load_network(input_json)
def run_mc():
    """Run the matrix-completion ('mc') experiment, driven entirely by sys.argv."""
    subcommand = sys.argv[1]
    seed = int(sys.argv[2])
    data_path = sys.argv[3]
    out_lim = int(sys.argv[4])
    num_new = int(sys.argv[5])
    input_json = sys.argv[6]
    use_logger = sys.argv[7] == 'y'
    loc, link_delay, role, proc_delay = net_init.load_network(input_json)
    # Every remaining argument is an epoch index at which to record a snapshot.
    record_epochs = [int(arg) for arg in sys.argv[8:]]
    max_epoch = max(record_epochs) + 1
    num_node = len(loc)
    num_msg = out_lim * 3 * 2  # assume numIn = numOut, num include both
    window = 2 * num_msg
    in_lim = num_node
    print("\033[93m" + 'num msg per topo ' + str(num_msg) + "\033[0m")
    print("\033[93m" + 'mat complete window size' + str(window) + "\033[0m")
    start = time.time()
    adv_nodes = list(range(config.num_adv))
    perigee = Experiment(
        link_delay, role, proc_delay, num_node, in_lim, out_lim,
        data_path, adv_nodes, window, 'mc', loc, num_new, use_logger, seed,
    )
    perigee.init_graph_mc()
    perigee.start(max_epoch, record_epochs, num_msg)
def __init__(self, topo, in_lim, out_lim, name, num_keep, num_2hop, num_rand,
             num_epoch, adapts, num_msg, churn_rate):
    """Build experiment state: load the network described by `topo`, create
    per-node selectors/communicators/sparse tables, and prepare output dirs.

    Args:
        topo: path/handle passed to load_network.
        in_lim, out_lim: per-node incoming/outgoing connection limits.
        name: output file path; its directory becomes the experiment outdir.
        num_keep, num_2hop, num_rand: peer-selection quotas (must sum to out_lim).
        num_epoch: number of epochs to run.
        adapts: adaptive-node ids, persisted via write_adapts_node.
        num_msg: messages per topology.
        churn_rate: node churn rate.

    Raises:
        AssertionError: if num_keep + num_2hop + num_rand != out_lim.
    """
    self.in_lim = in_lim
    self.out_lim = out_lim
    self.num_out = out_lim
    self.outdir = os.path.dirname(name)
    self.loc, self.ld, self.roles, self.proc_delay, self.pub_prob = load_network(
        topo)
    self.num_node = len(self.loc)
    self.num_epoch = num_epoch
    self.num_msg = num_msg
    self.churn_rate = churn_rate
    self.selectors = {
        i: Selector(i, num_keep, num_rand, num_msg, self.num_node)
        for i in range(self.num_node)
    }
    self.snapshots = []
    self.directions = ['incoming', 'outgoing', 'bidirect']
    self.nodes = {
        i: Communicator(i, self.proc_delay[i], in_lim, out_lim, [])
        for i in range(self.num_node)
    }
    self.oracle = SimpleOracle(in_lim, out_lim, self.num_node)
    self.out_hist = []
    self.sparse_tables = {i: SparseTable(i) for i in range(self.num_node)}
    self.adapts = adapts
    self.pub_hist = []
    self.dists_hist = defaultdict(list)
    self.dist_file = name
    self.init_graph_conn = os.path.join(self.outdir, 'init.json')
    self.snapshot_dir = os.path.join(self.outdir, 'snapshots')
    self.snapshot_exploit_dir = os.path.join(self.outdir, 'snapshots-exploit')
    self.write_adapts_node(os.path.join(self.outdir, 'adapts'))
    # BUG FIX: the original checked/created snapshot_dir twice (copy-paste) and
    # never created snapshot_exploit_dir; create both output directories here.
    os.makedirs(self.snapshot_dir, exist_ok=True)
    os.makedirs(self.snapshot_exploit_dir, exist_ok=True)
    self.num_keep = num_keep
    self.num_2hop = num_2hop
    self.num_rand = num_rand
    # NOTE: assert is stripped under `python -O`; kept to preserve the
    # original's exception type (AssertionError) for callers.
    assert (num_keep + num_2hop + num_rand == self.out_lim)
def run_mf():
    """Run the matrix-factorization bandit ('mf-bandit') experiment from sys.argv."""
    subcommand = sys.argv[1]
    # argv[2] (the seed slot used by other subcommands) is intentionally skipped here
    data_path = sys.argv[3]
    out_lim = int(sys.argv[4])
    num_region = int(sys.argv[5])
    use_node_hash = sys.argv[6] == 'y'
    input_json = sys.argv[7]
    assert(config.use_abs_time)
    loc, link_delay = net_init.load_network(input_json)
    # Every remaining argument is an epoch index at which to record a snapshot.
    record_epochs = [int(i) for i in sys.argv[8:]]
    max_epoch = max(record_epochs) + 1
    # T > L log N: window must cover num_region * log(num_node) samples
    window = int(config.num_msg * config.window_constant *
                 math.ceil(num_region * math.log(config.num_node)))
    print(window, num_region, config.num_node)
    node_delay = net_init.GenerateInitialDelay(config.num_node)
    node_hash = None
    if config.use_reduce_link:
        print("\033[91m" + 'Use reduced link latency' + "\033[0m")
        # BUG FIX: was `net_ini.reduce_link_latency(...)` (NameError); the
        # module is referenced as `net_init` everywhere else in this file,
        # including two other calls in this function.
        net_init.reduce_link_latency(config.num_node,
                                     int(0.2 * config.num_node), link_delay)
    else:
        print("\033[93m" + 'Not use reduced link latency' + "\033[0m")
    if not use_node_hash:
        print("\033[93m" + 'Not Use asymmetric node hash' + "\033[0m")
        node_hash = None
    else:
        print("\033[91m" + 'Use asymmetric node hash' + "\033[0m")
    if config.use_matrix_completion:
        print("\033[93m" + 'Use matrix completion' + "\033[0m")
        if config.use_abs_time:
            print("\033[93m" + '\tuse absolute time' + "\033[0m")
        else:
            print("\033[93m" + '\tuse relative time' + "\033[0m")
    else:
        print("\033[93m" + 'Use 2hop selections' + "\033[0m")
    print("\033[93m" + 'num region ' + str(num_region) + "\033[0m")
    print("\033[93m" + 'num msg ' + str(config.num_msg) + "\033[0m")
    print("\033[93m" + 'window ' + str(window) + "\033[0m")
    start = time.time()
    adv_nodes = [i for i in range(config.num_adv)]
    perigee = Experiment(
        node_hash, link_delay, node_delay, config.num_node, config.in_lim,
        out_lim, num_region, data_path, adv_nodes, window, 'mf-bandit', loc
    )
    perigee.init_graph()
    perigee.start(max_epoch,
                  record_epochs, config.num_msg)
def __init__(self, topo, num_out, num_in, num_epoch, T, num_topo, stars,
             mc_epochs, mc_lr, mc_exit_loss_diff, num_rand, top_n_peer,
             plt_name, print_log, churn_rate, update_interval):
    """Set up the experiment: network topology, per-star optimizers and
    selectors, per-star log files, output paths, and a worker pool.

    Raises:
        AssertionError: if T is not a multiple of num_topo.
    """
    self.T = T
    self.N = num_out
    self.H_ref = None
    self.loc = None
    self.ld = None
    self.proc_delay = None
    self.roles = None
    self.loc, self.ld, self.roles, self.proc_delay, self.pub_prob = load_network(
        topo)
    # Sanity-print the total publishing probability across all nodes.
    prob_total = sum(self.pub_prob.values())
    print('publishing prob summation', prob_total)
    self.num_out = num_out
    self.num_in = num_in
    self.num_node = len(self.loc)
    self.num_epoch = num_epoch
    self.num_topo = num_topo
    self.num_rand = num_rand
    self.update_interval = update_interval
    self.churn_rate = churn_rate
    assert (T % num_topo == 0)
    self.table_directions = ['incoming', 'outgoing', 'bidirect']
    self.nodes = {
        node_id: Communicator(node_id, self.proc_delay[node_id],
                              num_in, num_out, [])
        for node_id in range(self.num_node)
    }
    self.oracle = SimpleOracle(num_in, num_out, self.num_node)
    self.out_hist = []
    self.pub_hist = []
    self.sparse_tables = {
        node_id: SparseTable(node_id) for node_id in range(self.num_node)
    }
    self.stars = stars
    self.schedulers = {
        star: NodeScheduler(star, self.update_interval, self.num_topo)
        for star in self.stars
    }
    out_dir = os.path.dirname(plt_name)
    # When console logging is enabled each star logs to stdout (None);
    # otherwise each star gets its own log file under the output directory.
    if print_log:
        self.log_files = {star: None for star in self.stars}
    else:
        self.log_files = {
            star: os.path.join(out_dir, 'log' + str(star) + '.txt')
            for star in self.stars
        }
    self.mcos = {
        star: mc_optimizer.McOptimizer(star, mc_epochs, mc_lr,
                                       mc_exit_loss_diff, top_n_peer,
                                       self.log_files[star])
        for star in self.stars
    }
    self.selectors = {
        star: SimpleSelector(star, self.num_node, num_out, num_in, None,
                             num_rand, self.log_files[star])
        for star in self.stars
    }
    self.dist_file = plt_name
    self.init_graph_conn = os.path.join(out_dir, 'init.json')
    self.dists_hist = defaultdict(list)
    self.snapshot_dir = os.path.join(out_dir, 'snapshots')
    self.snapshot_exploit_dir = os.path.join(out_dir, 'snapshots-exploit')
    self.write_adapts_node(os.path.join(out_dir, 'adapts'))
    self.epoch_graph_dir = os.path.join(out_dir, 'graphs')
    # Cap the pool size so very large star sets don't oversubscribe the host.
    self.num_thread = min(64, len(self.stars))
    self.worker_pools = Pool(processes=self.num_thread)