def run(self):
    """------------------------------------------------------------
    High level function that runs the online influence maximization
    algorithm for self.epochs times and reports aggregated regret

    Baseline strategy: the seed set is fixed once, up front, as the
    self.seed_size nodes with the highest out-degree; only the
    oracle benchmark is recomputed every epoch.
    ------------------------------------------------------------"""
    # Static seed set: top self.seed_size nodes by out-degree.
    seed_set = np.argsort(self.outdegs)[::-1][:self.seed_size].tolist()
    for epoch_idx in np.arange(1, self.epochs + 1):
        # Draw this epoch's context (stored in self.context_vector).
        self.get_context()
        # Simulates the chosen seed_set's performance in real world
        # (tried/success counts are unused by this baseline).
        online_spread, tried_cnts, success_cnts = self.simulate_spread(
            seed_set)
        # Oracle run: TIM on the true context-dependent influence
        # probabilities gives the benchmark seed set.
        real_infs = self.context_influences(self.context_vector)
        self.dump_graph(real_infs, ("tim_" + self.graph_file))
        oracle = PyTimGraph(bytes("tim_" + self.graph_file, "ascii"),
                            self.node_cnt, self.edge_cnt, self.seed_size,
                            bytes("IC", "ascii"))
        oracle_set = list(oracle.get_seed_set(self.epsilon))
        oracle = None  # release the TIM wrapper before the next epoch
        oracle_spread, _, _ = self.simulate_spread(oracle_set)
        # Per-epoch regret = oracle spread minus achieved spread.
        self.regret.append(oracle_spread - online_spread)
        self.spread.append(online_spread)
def run(self):
    """------------------------------------------------------------
    High level function that runs the online influence maximization
    algorithm for self.epochs times and reports aggregated regret

    Epsilon-greedy scheme: with probability self.epsilons[t] the
    epoch is an exploration epoch (optimistic influence estimates,
    counters updated via active_update); otherwise it is a pure
    exploitation epoch (posterior-mean estimates, no updates).
    ------------------------------------------------------------"""
    for epoch_idx in np.arange(1, self.epochs+1):
        print(epoch_idx)
        self.get_context()
        context_idx = self.context_classifier(self.context_vector)
        # Bernoulli draw decides exploration vs. exploitation.
        explore_bool = binomial(1, self.epsilons[epoch_idx-1])
        # Exploration Epoch, set influence estimates accordingly:
        # optimistic estimate = Beta posterior mean + Beta std, i.e.
        # (alpha + sqrt(alpha*beta/(alpha+beta+1))) / (alpha + beta).
        # BUGFIX: the previous code computed
        # (alpha/(alpha+beta)) * (alpha + sqrt(...)), multiplying the
        # mean by an extra alpha term, which is dimensionally wrong and
        # can blow the estimate far above 1; cf. the sigma formula used
        # by the Bayesian-UCB variant elsewhere in this file.
        if(explore_bool):
            inf_ests = [(alpha + math.sqrt((alpha*beta)/(alpha+beta+1)))
                        / (alpha+beta)
                        if(alpha+beta > 0) else (0)
                        for alpha, beta in zip(self.alphas, self.betas)]
        # Exploitation Epoch, set influence estimates accordingly
        # (plain Beta posterior mean alpha / (alpha + beta)).
        else:
            inf_ests = [(alpha/(alpha+beta)) if(alpha+beta > 0) else (0)
                        for alpha, beta in zip(self.alphas, self.betas)]
        # Run TIM on the estimates to pick the online seed set.
        self.dump_graph(inf_ests, ("tim_"+self.graph_file))
        timgraph = PyTimGraph(bytes("tim_" + self.graph_file, "ascii"),
                              self.node_cnt, self.edge_cnt, self.seed_size,
                              bytes("IC", "ascii"))
        seed_set = list(timgraph.get_seed_set(self.epsilon))
        timgraph = None  # release the TIM wrapper
        # Simulates the chosen seed_set's performance in real world
        online_spread, tried_cnts, success_cnts = self.simulate_spread(seed_set)
        # Update influence estimates and counters only if at exploration epoch
        if(explore_bool):
            total_cost = self.active_update(tried_cnts, success_cnts,
                                            context_idx)
        else:
            total_cost = 0
        # Oracle run: TIM on the true context-dependent influences.
        real_infs = self.context_influences(self.context_vector)
        self.dump_graph(real_infs, ("tim_"+self.graph_file))
        oracle = PyTimGraph(bytes("tim_" + self.graph_file, "ascii"),
                            self.node_cnt, self.edge_cnt, self.seed_size,
                            bytes("IC", "ascii"))
        oracle_set = list(oracle.get_seed_set(self.epsilon))
        oracle = None
        oracle_spread, _, _ = self.simulate_spread(oracle_set)
        # Exploration cost is charged on top of the oracle spread.
        self.regret.append((oracle_spread + total_cost) - online_spread)
        self.spread.append(online_spread)
        self.update_l2_error(real_infs, inf_ests)
def run(self):
    """------------------------------------------------------------
    High level function that runs the online influence maximization
    algorithm for self.epochs times and reports aggregated regret
    ------------------------------------------------------------"""
    for epoch_idx in np.arange(1, self.epochs + 1):
        print(epoch_idx)
        self.get_context()
        context_idx = self.context_classifier(self.context_vector)
        forced_nodes = self.under_explored_nodes(context_idx, epoch_idx)
        # Exploration phase iff the under-explored nodes alone can
        # fill the entire seed set.
        exploration_phase = len(forced_nodes) == self.seed_size
        if exploration_phase:
            seed_set = forced_nodes
        else:
            # Fill the remaining seed slots by running TIM on the
            # current influence estimates for this context partition.
            self.dump_graph(self.inf_ests[context_idx],
                            ("tim_" + self.graph_file))
            tim = PyTimGraph(bytes("tim_" + self.graph_file, "ascii"),
                             self.node_cnt, self.edge_cnt,
                             (self.seed_size - len(forced_nodes)),
                             bytes("IC", "ascii"))
            seed_set = list(tim.get_seed_set(self.epsilon))
            seed_set.extend(forced_nodes)
            tim = None  # release the TIM wrapper
        # Play the chosen seed set against the real (context-dependent)
        # diffusion process.
        online_spread, tried_cnts, success_cnts = \
            self.simulate_spread(seed_set)
        # Exploration cost is only incurred (and counters only updated)
        # during exploration phases.
        total_cost = (self.active_update(tried_cnts, success_cnts,
                                         context_idx, epoch_idx)
                      if exploration_phase else 0)
        # Benchmark: TIM on the true influence probabilities.
        real_infs = self.context_influences(self.context_vector)
        self.dump_graph(real_infs, ("tim_" + self.graph_file))
        oracle = PyTimGraph(bytes("tim_" + self.graph_file, "ascii"),
                            self.node_cnt, self.edge_cnt, self.seed_size,
                            bytes("IC", "ascii"))
        oracle_set = list(oracle.get_seed_set(self.epsilon))
        oracle = None
        oracle_spread, _, _ = self.simulate_spread(oracle_set)
        # Regret charges the exploration cost on top of the oracle spread.
        self.regret.append((oracle_spread + total_cost) - online_spread)
        self.spread.append(online_spread)
        self.update_l2_error(real_infs, self.inf_ests[context_idx])
def run(self):
    """------------------------------------------------------------
    High level function that runs the online influence maximization
    algorithm for self.epochs times and reports aggregated regret
    ------------------------------------------------------------"""
    for epoch_idx in np.arange(1, self.epochs + 1):
        # Retrieve the parameters and compute inf. estimates:
        # Beta posterior combining local counts with the global prior.
        self.get_context()
        post_a = self.local_alphas + self.global_alpha
        post_b = self.local_betas + self.global_beta
        mean = post_a / (post_a + post_b)
        std = (1 / (post_a + post_b)) * np.sqrt(
            (post_a * post_b) / (post_a + post_b + 1))
        # Optimistic (mean + theta*std) estimate per edge, clipped at 0;
        # edges with no local observations get 0.
        inf_ests = []
        for edge in range(len(self.edges[:, 0])):
            if self.local_alphas[edge] + self.local_betas[edge] > 0:
                inf_ests.append(max(mean[edge] + (self.theta * std[edge]), 0))
            else:
                inf_ests.append(0)
        # Run TIM on the optimistic estimates to pick the seed set.
        self.dump_graph(inf_ests, ("tim_" + self.graph_file))
        timgraph = PyTimGraph(bytes("tim_" + self.graph_file, "ascii"),
                              self.node_cnt, self.edge_cnt, self.seed_size,
                              bytes("IC", "ascii"))
        seed_set = list(timgraph.get_seed_set(self.epsilon))
        timgraph = None  # release the TIM wrapper
        # Play the chosen seed set in the real world.
        influenced_nodes = self.simulate_spread(seed_set)
        online_spread = len(influenced_nodes)
        # Update influence estimates, counters and globals.
        success_cnts, tried_cnts = self.random_update(influenced_nodes,
                                                      seed_set)
        fail_cnts = tried_cnts - success_cnts
        self.update_globals(success_cnts, fail_cnts)
        self.spread.append(online_spread)
        self.exponentiated_gradient()
        # Oracle run: TIM on the true context-dependent influences.
        real_infs = self.context_influences(self.context_vector)
        self.dump_graph(real_infs, ("tim_" + self.graph_file))
        oracle = PyTimGraph(bytes("tim_" + self.graph_file, "ascii"),
                            self.node_cnt, self.edge_cnt, self.seed_size,
                            bytes("IC", "ascii"))
        oracle_set = list(oracle.get_seed_set(self.epsilon))
        oracle = None
        oracle_spread = len(self.simulate_spread(oracle_set))
        self.regret.append(oracle_spread - online_spread)
        self.update_l2_error(real_infs, inf_ests)
def run(self):
    """------------------------------------------------------------
    High level function that runs the online influence maximization
    algorithm for self.epochs times and reports aggregated regret
    ------------------------------------------------------------"""
    for epoch_idx in np.arange(1, self.epochs+1):
        # Progress marker every 100 epochs.
        if((epoch_idx+1) % 100 == 0):
            print(epoch_idx+1)
        # Draw this epoch's context and map it to a context partition.
        self.get_context()
        context_idx = self.context_classifier(self.context_vector)
        under_explored = self.under_explored_nodes(context_idx, epoch_idx)
        # If there are enough under-explored edges, return them
        if(len(under_explored) == self.seed_size):
            seed_set = under_explored
        # Otherwise, run TIM on the current estimates to fill the
        # remaining seed slots
        else:
            self.dump_graph(self.inf_ests[context_idx],
                            ("tim_"+self.graph_file))
            timgraph = PyTimGraph(bytes("tim_" + self.graph_file, "ascii"),
                                  self.node_cnt, self.edge_cnt,
                                  (self.seed_size - len(under_explored)),
                                  bytes("IC", "ascii"))
            tim_set = timgraph.get_seed_set(self.epsilon)
            seed_set = list(tim_set)
            seed_set.extend(under_explored)
            timgraph = None  # release the TIM wrapper
        # Simulates the chosen seed_set's performance in real world
        online_spread, tried_cnts, success_cnts = self.simulate_spread(seed_set)
        # Update influence estimates and counters
        self.counters[context_idx] += tried_cnts
        self.successes[context_idx] += success_cnts
        for edge_idx, cnt in enumerate(self.counters[context_idx]):
            # Empirical success rate per edge; 0 until the first trial.
            self.inf_ests[context_idx][edge_idx] = self.successes[context_idx][edge_idx] / cnt if(cnt > 0) else (0)
        # Oracle run: TIM on the true context-dependent influences.
        real_infs = self.context_influences(self.context_vector)
        self.dump_graph(real_infs, ("tim_"+self.graph_file))
        oracle = PyTimGraph(bytes("tim_" + self.graph_file, "ascii"),
                            self.node_cnt, self.edge_cnt, self.seed_size,
                            bytes("IC", "ascii"))
        oracle_set = list(oracle.get_seed_set(self.epsilon))
        oracle = None
        oracle_spread, _, _ = self.simulate_spread(oracle_set)
        self.regret.append(oracle_spread - online_spread)
        self.spread.append(online_spread)
        self.update_l2_error(real_infs, self.inf_ests[context_idx])
def run(self):
    """------------------------------------------------------------
    High level function that runs the online influence maximization
    algorithm for self.epochs times and reports aggregated regret
    ------------------------------------------------------------"""
    for epoch_idx in np.arange(1, self.epochs+1):
        # Draw this epoch's context and map it to a context partition.
        self.get_context()
        context_idx = self.context_classifier(self.context_vector)
        # Run TIM on the current influence estimates for this context.
        self.dump_graph(self.inf_ests[context_idx], ("tim_"+self.graph_file))
        timgraph = PyTimGraph(bytes("tim_" + self.graph_file, "ascii"),
                              self.node_cnt, self.edge_cnt,
                              (self.seed_size), bytes("IC", "ascii"))
        tim_set = timgraph.get_seed_set(self.epsilon)
        seed_set = list(tim_set)
        timgraph = None  # release the TIM wrapper
        # Simulates the chosen seed_set's performance in real world
        influenced_nodes = self.simulate_spread(seed_set)
        online_spread = len(influenced_nodes)
        # Update influence estimates and counters
        self.random_update(context_idx, influenced_nodes, seed_set)
        # Oracle run: TIM on the true context-dependent influences.
        real_infs = self.context_influences(self.context_vector)
        self.dump_graph(real_infs, ("tim_"+self.graph_file))
        oracle = PyTimGraph(bytes("tim_" + self.graph_file, "ascii"),
                            self.node_cnt, self.edge_cnt, self.seed_size,
                            bytes("IC", "ascii"))
        oracle_set = list(oracle.get_seed_set(self.epsilon))
        oracle = None
        influenced_nodes = self.simulate_spread(oracle_set)
        oracle_spread = len(influenced_nodes)
        self.regret.append(oracle_spread - online_spread)
        self.spread.append(online_spread)
        self.update_l2_error(real_infs, self.inf_ests[context_idx])
def run(self):
    """------------------------------------------------------------
    High level function that runs the online influence maximization
    algorithm for self.epochs times and reports aggregated regret

    Baseline strategy: the seed set is chosen once as the
    self.seed_size highest-out-degree nodes and reused every epoch;
    only the oracle benchmark changes with the context.
    ------------------------------------------------------------"""
    # Static seed set: top self.seed_size nodes by out-degree.
    seed_set = np.argsort(self.outdegs)[::-1][:self.seed_size].tolist()
    for epoch_idx in np.arange(1, self.epochs+1):
        # Draw this epoch's context (stored in self.context_vector).
        self.get_context()
        # Simulates the chosen seed_set's performance in real world
        # (tried/success counts are unused by this baseline).
        online_spread, tried_cnts, success_cnts = self.simulate_spread(seed_set)
        # Oracle run: TIM on the true context-dependent influences.
        real_infs = self.context_influences(self.context_vector)
        self.dump_graph(real_infs, ("tim_"+self.graph_file))
        oracle = PyTimGraph(bytes("tim_" + self.graph_file, "ascii"),
                            self.node_cnt, self.edge_cnt, self.seed_size,
                            bytes("IC", "ascii"))
        oracle_set = list(oracle.get_seed_set(self.epsilon))
        oracle = None  # release the TIM wrapper before the next epoch
        oracle_spread, _, _ = self.simulate_spread(oracle_set)
        # Per-epoch regret = oracle spread minus achieved spread.
        self.regret.append(oracle_spread - online_spread)
        self.spread.append(online_spread)
def run(self):
    """------------------------------------------------------------
    High level function that runs the online influence maximization
    algorithm for self.epochs times and reports aggregated regret
    ------------------------------------------------------------"""
    for epoch_idx in np.arange(1, self.epochs + 1):
        # Progress marker every 100 epochs.
        if (epoch_idx + 1) % 100 == 0:
            print(epoch_idx + 1)
        self.get_context()
        # Thompson sampling: draw each edge's influence estimate from
        # its Beta posterior (local counts shifted by the global prior).
        post_a = self.local_alphas + self.global_alpha
        post_b = self.local_betas + self.global_beta
        inf_ests = [np.random.beta(post_a[edge], post_b[edge])
                    for edge in range(self.edge_cnt)]
        # Run TIM on the sampled estimates to pick the seed set.
        self.dump_graph(inf_ests, ("tim_" + self.graph_file))
        timgraph = PyTimGraph(bytes("tim_" + self.graph_file, "ascii"),
                              self.node_cnt, self.edge_cnt, self.seed_size,
                              bytes("IC", "ascii"))
        seed_set = list(timgraph.get_seed_set(self.epsilon))
        timgraph = None  # release the TIM wrapper
        # Play the seed set in the real world and fold the observed
        # successes/failures back into the local posterior counts.
        online_spread, tried_cnts, success_cnts = \
            self.simulate_spread(seed_set)
        fail_cnts = tried_cnts - success_cnts
        self.local_alphas += success_cnts
        self.local_betas += fail_cnts
        self.update_globals(tried_cnts, success_cnts, fail_cnts)
        self.spread.append(online_spread)
        # Benchmark: TIM on the true context-dependent influences.
        real_infs = self.context_influences(self.context_vector)
        self.dump_graph(real_infs, ("tim_" + self.graph_file))
        oracle = PyTimGraph(bytes("tim_" + self.graph_file, "ascii"),
                            self.node_cnt, self.edge_cnt, self.seed_size,
                            bytes("IC", "ascii"))
        oracle_set = list(oracle.get_seed_set(self.epsilon))
        oracle = None
        oracle_spread, _, _ = self.simulate_spread(oracle_set)
        self.regret.append(oracle_spread - online_spread)
        self.update_l2_error(real_infs, inf_ests)
def run(self):
    """------------------------------------------------------------
    High level function that runs the online influence maximization
    algorithm for self.epochs times and reports aggregated regret

    Epsilon-greedy: each epoch explores with probability
    self.epsilons[t] (optimistic estimates, counters updated via
    active_update) and otherwise exploits (posterior-mean estimates,
    no updates).
    ------------------------------------------------------------"""
    for epoch_idx in np.arange(1, self.epochs + 1):
        print(epoch_idx)
        self.get_context()
        context_idx = self.context_classifier(self.context_vector)
        # Bernoulli draw decides exploration vs. exploitation.
        explore_bool = binomial(1, self.epsilons[epoch_idx - 1])
        # Exploration Epoch, set influence estimates accordingly:
        # optimistic value = Beta posterior mean + Beta std, i.e.
        # (alpha + sqrt(alpha*beta/(alpha+beta+1))) / (alpha + beta).
        # BUGFIX: the previous code computed
        # (alpha/(alpha+beta)) * (alpha + sqrt(...)), which multiplies
        # the mean by an extra alpha term — dimensionally wrong and
        # able to exceed 1 by far; cf. the sigma formula used by the
        # Bayesian-UCB variant elsewhere in this file.
        if (explore_bool):
            inf_ests = [(alpha + math.sqrt(
                (alpha * beta) / (alpha + beta + 1))) / (alpha + beta)
                        if (alpha + beta > 0) else (0)
                        for alpha, beta in zip(self.alphas, self.betas)]
        # Exploitation Epoch, set influence estimates accordingly
        # (plain Beta posterior mean alpha / (alpha + beta)).
        else:
            inf_ests = [(alpha / (alpha + beta))
                        if (alpha + beta > 0) else (0)
                        for alpha, beta in zip(self.alphas, self.betas)]
        # Run TIM on the estimates to pick the online seed set.
        self.dump_graph(inf_ests, ("tim_" + self.graph_file))
        timgraph = PyTimGraph(bytes("tim_" + self.graph_file, "ascii"),
                              self.node_cnt, self.edge_cnt, self.seed_size,
                              bytes("IC", "ascii"))
        seed_set = list(timgraph.get_seed_set(self.epsilon))
        timgraph = None  # release the TIM wrapper
        # Simulates the chosen seed_set's performance in real world
        online_spread, tried_cnts, success_cnts = self.simulate_spread(
            seed_set)
        # Update influence estimates and counters only if at exploration epoch
        if (explore_bool):
            total_cost = self.active_update(tried_cnts, success_cnts,
                                            context_idx)
        else:
            total_cost = 0
        # Oracle run: TIM on the true context-dependent influences.
        real_infs = self.context_influences(self.context_vector)
        self.dump_graph(real_infs, ("tim_" + self.graph_file))
        oracle = PyTimGraph(bytes("tim_" + self.graph_file, "ascii"),
                            self.node_cnt, self.edge_cnt, self.seed_size,
                            bytes("IC", "ascii"))
        oracle_set = list(oracle.get_seed_set(self.epsilon))
        oracle = None
        oracle_spread, _, _ = self.simulate_spread(oracle_set)
        # Exploration cost is charged on top of the oracle spread.
        self.regret.append((oracle_spread + total_cost) - online_spread)
        self.spread.append(online_spread)
        self.update_l2_error(real_infs, inf_ests)
def run(self):
    """------------------------------------------------------------
    High level function that runs the online influence maximization
    algorithm for self.epochs times and reports aggregated regret
    ------------------------------------------------------------"""
    for epoch_idx in np.arange(1, self.epochs + 1):
        # Progress marker every 100 epochs.
        if ((epoch_idx + 1) % 100 == 0):
            print(epoch_idx + 1)
        # Draw this epoch's context and map it to a context partition.
        self.get_context()
        context_idx = self.context_classifier(self.context_vector)
        under_explored = self.under_explored_nodes(context_idx, epoch_idx)
        # If there are enough under-explored edges, return them
        if (len(under_explored) == self.seed_size):
            seed_set = under_explored
        # Otherwise, run TIM on the current estimates to fill the
        # remaining seed slots
        else:
            self.dump_graph(self.inf_ests[context_idx],
                            ("tim_" + self.graph_file))
            timgraph = PyTimGraph(bytes("tim_" + self.graph_file, "ascii"),
                                  self.node_cnt, self.edge_cnt,
                                  (self.seed_size - len(under_explored)),
                                  bytes("IC", "ascii"))
            tim_set = timgraph.get_seed_set(self.epsilon)
            seed_set = list(tim_set)
            seed_set.extend(under_explored)
            timgraph = None  # release the TIM wrapper
        # Simulates the chosen seed_set's performance in real world
        online_spread, tried_cnts, success_cnts = self.simulate_spread(
            seed_set)
        # Update influence estimates and counters
        self.counters[context_idx] += tried_cnts
        self.successes[context_idx] += success_cnts
        for edge_idx, cnt in enumerate(self.counters[context_idx]):
            # Empirical success rate per edge; 0 until the first trial.
            self.inf_ests[context_idx][edge_idx] = self.successes[
                context_idx][edge_idx] / cnt if (cnt > 0) else (0)
        # Oracle run: TIM on the true context-dependent influences.
        real_infs = self.context_influences(self.context_vector)
        self.dump_graph(real_infs, ("tim_" + self.graph_file))
        oracle = PyTimGraph(bytes("tim_" + self.graph_file, "ascii"),
                            self.node_cnt, self.edge_cnt, self.seed_size,
                            bytes("IC", "ascii"))
        oracle_set = list(oracle.get_seed_set(self.epsilon))
        oracle = None
        oracle_spread, _, _ = self.simulate_spread(oracle_set)
        self.regret.append(oracle_spread - online_spread)
        self.spread.append(online_spread)
        self.update_l2_error(real_infs, self.inf_ests[context_idx])
def run(self):
    """------------------------------------------------------------
    High level function that runs the online influence maximization
    algorithm for self.epochs times and reports aggregated regret
    ------------------------------------------------------------"""
    for epoch_idx in np.arange(1, self.epochs + 1):
        print(epoch_idx)
        # Draw this epoch's context and map it to a context partition.
        self.get_context()
        context_idx = self.context_classifier(self.context_vector)
        under_explored = self.under_explored_nodes(context_idx, epoch_idx)
        # If there are enough under-explored edges, return them
        if (len(under_explored) == self.seed_size):
            exploration_phase = True
            seed_set = under_explored
        # Otherwise, run TIM on the current estimates to fill the
        # remaining seed slots
        else:
            exploration_phase = False
            self.dump_graph(self.inf_ests[context_idx],
                            ("tim_" + self.graph_file))
            timgraph = PyTimGraph(bytes("tim_" + self.graph_file, "ascii"),
                                  self.node_cnt, self.edge_cnt,
                                  (self.seed_size - len(under_explored)),
                                  bytes("IC", "ascii"))
            tim_set = timgraph.get_seed_set(self.epsilon)
            seed_set = list(tim_set)
            seed_set.extend(under_explored)
            timgraph = None  # release the TIM wrapper
        # Simulates the chosen seed_set's performance in real world
        online_spread, tried_cnts, success_cnts = self.simulate_spread(
            seed_set)
        # Counters are only updated (and exploration cost only charged)
        # during exploration phases.
        if (exploration_phase):
            total_cost = self.active_update(tried_cnts, success_cnts,
                                            context_idx, epoch_idx)
        else:
            total_cost = 0
        # Oracle run: TIM on the true context-dependent influences.
        real_infs = self.context_influences(self.context_vector)
        self.dump_graph(real_infs, ("tim_" + self.graph_file))
        oracle = PyTimGraph(bytes("tim_" + self.graph_file, "ascii"),
                            self.node_cnt, self.edge_cnt, self.seed_size,
                            bytes("IC", "ascii"))
        oracle_set = list(oracle.get_seed_set(self.epsilon))
        oracle = None
        oracle_spread, _, _ = self.simulate_spread(oracle_set)
        # Regret charges the exploration cost on top of the oracle spread.
        self.regret.append((oracle_spread + total_cost) - online_spread)
        self.spread.append(online_spread)
        self.update_l2_error(real_infs, self.inf_ests[context_idx])
def run(self):
    """------------------------------------------------------------
    High level function that runs the online influence maximization
    algorithm for self.epochs times and reports aggregated regret
    ------------------------------------------------------------"""
    for epoch_idx in np.arange(1, self.epochs + 1):
        # Retrieve the parameters and compute inf. estimates:
        # Beta posterior combining local counts with the global prior.
        self.get_context()
        alphas = self.local_alphas + self.global_alpha
        betas = self.local_betas + self.global_beta
        mus = alphas / (alphas + betas)
        sigmas = (1 / (alphas + betas)) * np.sqrt(
            (alphas * betas) / (alphas + betas + 1))
        # Optimistic (mean + theta*sigma) estimate per edge, floored at
        # 0; edges with no local observations get 0.
        # NOTE(review): the estimate is not capped at 1 — presumably
        # downstream TIM tolerates values > 1; confirm.
        inf_ests = [
            max(mus[idx] + (self.theta * sigmas[idx]), 0)
            if (self.local_alphas[idx] + self.local_betas[idx] > 0) else
            (0) for idx, _ in enumerate(self.edges[:, 0])
        ]
        # Run TIM
        self.dump_graph(inf_ests, ("tim_" + self.graph_file))
        timgraph = PyTimGraph(bytes("tim_" + self.graph_file, "ascii"),
                              self.node_cnt, self.edge_cnt, self.seed_size,
                              bytes("IC", "ascii"))
        tim_set = timgraph.get_seed_set(self.epsilon)
        seed_set = list(tim_set)
        timgraph = None  # release the TIM wrapper
        # Simulates the chosen seed_set's performance in real world
        influenced_nodes = self.simulate_spread(seed_set)
        online_spread = len(influenced_nodes)
        # Update influence estimates, counters and globals
        success_cnts, tried_cnts = self.random_update(influenced_nodes,
                                                      seed_set)
        fail_cnts = tried_cnts - success_cnts
        self.update_globals(success_cnts, fail_cnts)
        self.spread.append(online_spread)
        self.exponentiated_gradient()
        # Oracle run: TIM on the true context-dependent influences.
        real_infs = self.context_influences(self.context_vector)
        self.dump_graph(real_infs, ("tim_" + self.graph_file))
        oracle = PyTimGraph(bytes("tim_" + self.graph_file, "ascii"),
                            self.node_cnt, self.edge_cnt, self.seed_size,
                            bytes("IC", "ascii"))
        oracle_set = list(oracle.get_seed_set(self.epsilon))
        oracle = None
        influenced_nodes = self.simulate_spread(oracle_set)
        oracle_spread = len(influenced_nodes)
        self.regret.append(oracle_spread - online_spread)
        self.update_l2_error(real_infs, inf_ests)
def run(self):
    """------------------------------------------------------------
    High level function that runs the online influence maximization
    algorithm for self.epochs times and reports aggregated regret
    ------------------------------------------------------------"""
    for epoch_idx in np.arange(1, self.epochs + 1):
        # Progress marker every 100 epochs.
        if ((epoch_idx + 1) % 100 == 0):
            print(epoch_idx + 1)
        self.get_context()
        # Thompson sampling: draw each edge's influence estimate from
        # its Beta posterior (local counts shifted by the global prior).
        # NOTE(review): np.random.beta requires both parameters > 0 —
        # presumably the global prior guarantees this; confirm.
        alphas = self.local_alphas + self.global_alpha
        betas = self.local_betas + self.global_beta
        inf_ests = [
            np.random.beta(alphas[idx], betas[idx])
            for idx in range(self.edge_cnt)
        ]
        # Run TIM on the sampled estimates to pick the seed set.
        self.dump_graph(inf_ests, ("tim_" + self.graph_file))
        timgraph = PyTimGraph(bytes("tim_" + self.graph_file, "ascii"),
                              self.node_cnt, self.edge_cnt, self.seed_size,
                              bytes("IC", "ascii"))
        tim_set = timgraph.get_seed_set(self.epsilon)
        seed_set = list(tim_set)
        timgraph = None  # release the TIM wrapper
        # Simulates the chosen seed_set's performance in real world
        online_spread, tried_cnts, success_cnts = self.simulate_spread(
            seed_set)
        # Fold observed successes/failures back into the posteriors.
        fail_cnts = tried_cnts - success_cnts
        self.local_alphas += success_cnts
        self.local_betas += fail_cnts
        self.update_globals(tried_cnts, success_cnts, fail_cnts)
        self.spread.append(online_spread)
        # Oracle run: TIM on the true context-dependent influences.
        real_infs = self.context_influences(self.context_vector)
        self.dump_graph(real_infs, ("tim_" + self.graph_file))
        oracle = PyTimGraph(bytes("tim_" + self.graph_file, "ascii"),
                            self.node_cnt, self.edge_cnt, self.seed_size,
                            bytes("IC", "ascii"))
        oracle_set = list(oracle.get_seed_set(self.epsilon))
        oracle = None
        oracle_spread, _, _ = self.simulate_spread(oracle_set)
        self.regret.append(oracle_spread - online_spread)
        self.update_l2_error(real_infs, inf_ests)