def runAlgorithms(self, algorithms):
    """Run every bandit algorithm for ``self.iterations`` rounds and record rewards.

    The oracle seed set is computed once (``self.TrueP`` is fixed here), then
    each round simulates an IC cascade for the oracle and for every algorithm.

    Input:
        algorithms -- dict mapping algorithm name -> algorithm object; each
            object must provide decide() and updateParameters().
    Side effects: fills self.AlgReward / self.BatchCumlateReward /
    self.result_oracle and persists via resultRecord() / showResult().
    """
    self.tim_ = []
    # Only the names are needed to initialise the bookkeeping dicts.
    for alg_name in algorithms:
        self.AlgReward[alg_name] = []
        self.BatchCumlateReward[alg_name] = []
    self.resultRecord()

    # Seeds are chosen once on self.G with the true propagation probabilities.
    optS = self.oracle(self.G, self.seed_size, self.TrueP)
    for iter_ in range(self.iterations):
        # Fix: simulate on self.G — the oracle selected optS on self.G, but the
        # original passed a bare module-level `G` here.
        optimal_reward, live_nodes, live_edges = runICmodel_n(
            self.G, optS, self.TrueP)
        self.result_oracle.append(optimal_reward)
        print('oracle', optimal_reward)

        for alg_name, alg in algorithms.items():
            S = alg.decide()
            reward, live_nodes, live_edges = runICmodel_n(self.G, S, self.TrueP)
            # Edge-level feedback: the algorithm observes the full live graph.
            alg.updateParameters(S, live_nodes, live_edges, iter_)
            self.AlgReward[alg_name].append(reward)

        self.resultRecord(iter_)
    self.showResult()
def runAlgorithms(self, algorithms):
    """Topic-aware variant of the bandit loop.

    Each round draws fresh true probabilities via ``self.get_TrueP(iter_)``,
    recomputes the oracle seed set for them, and passes the round's topic
    (``self.topic_list[iter_]``) to every algorithm's decide()/updateParameters().

    Input:
        algorithms -- dict mapping name -> algorithm object. Each object
            exposes ``feedback`` ('node' or 'edge') selecting which
            observation is revealed to updateParameters().
    Side effects: fills self.AlgReward / self.BatchCumlateReward /
    self.result_oracle and persists via resultRecord() / showResult().
    """
    self.tim_ = []
    for alg_name in algorithms:
        self.AlgReward[alg_name] = []
        self.BatchCumlateReward[alg_name] = []
    self.resultRecord()

    for iter_ in range(self.iterations):
        # Probabilities change per round, so the oracle must be re-run.
        TrueP = self.get_TrueP(iter_)
        optS = self.oracle(self.G, self.seed_size, TrueP)
        # Fix: simulate on self.G — the original passed a bare global `G`
        # although the seeds were selected on self.G.
        optimal_reward, live_nodes, live_edges = runICmodel_n(
            self.G, optS, TrueP)
        self.result_oracle.append(optimal_reward)
        print('oracle', optimal_reward)

        for alg_name, alg in algorithms.items():
            topic = self.topic_list[iter_]
            S = alg.decide(topic)
            reward, live_nodes, live_edges = runICmodel_n(self.G, S, TrueP)
            if alg.feedback == 'node':
                # Node feedback only reveals which nodes went live.
                alg.updateParameters(S, live_nodes, topic)
            elif alg.feedback == 'edge':
                # Edge feedback additionally reveals the live edges.
                alg.updateParameters(S, live_nodes, live_edges, topic)
            self.AlgReward[alg_name].append(reward)

        self.resultRecord(iter_)
    self.showResult()
def generalGreedy(G, k, p, R=1):
    '''Find an initial seed set S using the general greedy heuristic.

    Repeatedly adds the node with the largest estimated marginal spread,
    estimated by Monte-Carlo simulation of the IC cascade.

    Input:
        G -- networkx Graph object
        k -- number of initial nodes needed
        p -- propagation probability
        R -- number of Random Cascade runs per marginal-gain estimate
             (default 1, matching the original hard-coded behaviour)
    Output:
        S -- initial set of k nodes to propagate
    '''
    S = []  # set of selected nodes
    # Greedily add the node that maximises spread of S + [v].
    for _ in range(k):
        s = PQ()  # priority queue over candidate nodes
        for v in G.nodes():
            if v not in S:
                s.add_task(v, 0)  # initialize spread value
                for _ in range(R):  # run R times Random Cascade
                    # Priorities are negated spread: PQ pops the smallest,
                    # so the best spreader accumulates the most negative value.
                    [priority, count, task] = s.entry_finder[v]
                    s.add_task(
                        v,
                        priority - runICmodel_n(G, S + [v], p)[0] / R,
                    )  # add normalized spread value
        task, priority = s.pop_item()
        S.append(task)
    return S
def runAlgorithms(self):
    """Run all registered algorithms, logging rewards against oracle and
    random-seed baselines each round.

    For every iteration: recompute the oracle seed set and a random seed
    baseline, simulate cascades, then let each algorithm pick seeds, observe
    its feedback, and update. Cumulative rewards and per-algorithm losses are
    tracked in self.BatchCumlateReward / self.Loss.

    Side effects: appends to self.oracle_reward, increments
    self.actual_iters, and (optionally) calls resultRecord()/showResult().
    """
    for alg_name, alg in list(self.algorithms.items()):
        self.AlgReward[alg_name] = []
        self.BatchCumlateReward[alg_name + '_cum'] = []
        self.Loss[alg_name] = []

    for iter_ in range(self.iterations):
        optS = self.oracle(self.G, self.seed_size, self.TrueP)
        rand_S = random_seeds(self.G, self.seed_size, self.TrueP)
        logging.info("\n")
        logging.info("Iter {}/{}".format(iter_ + 1, self.iterations))
        # Fix: simulate on self.G — the original referenced a bare global `G`
        # although seeds were selected on self.G.
        optimal_reward, live_nodes, live_edges = runICmodel_n(
            self.G, optS, self.TrueP)
        rand_S_reward, _, _ = runICmodel_n(self.G, rand_S, self.TrueP)
        self.oracle_reward.append(optimal_reward)

        for alg_name, alg in list(self.algorithms.items()):
            S = alg.decide()
            if alg_name == 'IMFB_MLE':
                # NOTE(review): epsilon is reset to 1 every round and ga == 1,
                # so the epsilon-greedy decay below is dead code — the random
                # exploration branch always fires. Preserved as-is; confirm
                # intended schedule with the author.
                epsilon = 1
                ga = 1
                if random.random() < epsilon:
                    rand_S = random_seeds(alg.G, alg.seed_size, alg.currentP)
                    S = rand_S
                epsilon = epsilon * ga
                # Explore with random seeds under node feedback, update, then
                # exploit: re-select seeds with the oracle and re-simulate.
                reward, live_nodes, live_edges = runICmodel_node_feedback(
                    self.G, S, self.TrueP)
                alg.updateParameters(S, live_nodes, live_edges, iter_)
                S = alg.decide()  # re-select seeds with the oracle
                reward, live_nodes, live_edges = runICmodel_node_feedback(
                    self.G, S, self.TrueP)
            else:
                # IMFB, IMGUCB, and all remaining algorithms shared identical
                # branches in the original — merged into one edge-feedback path.
                reward, live_nodes, live_edges = runICmodel_n(
                    self.G, S, self.TrueP)
                alg.updateParameters(S, live_nodes, live_edges, iter_)

            self.AlgReward[alg_name].append(reward)
            # Running cumulative reward; first round seeds the series.
            self.BatchCumlateReward[alg_name + '_cum'].append(
                self.BatchCumlateReward[alg_name + '_cum'][-1] + reward
                if iter_ > 0 else reward)
            self.Loss[alg_name] = alg.getLoss()
            logging.info("{}: reward:{}, loss:{}".format(
                alg_name, reward, self.Loss[alg_name][-1]))

        logging.info("{}:{}".format('oracle', optimal_reward))
        logging.info("{}:{}".format('random seed baseline', rand_S_reward))
        logging.info('total time: %.2f' % (time.time() - self.tttmmm))
        self.actual_iters += 1
        if self.record:
            self.resultRecord(iter_)
        if self.show:
            self.showResult(iter_)