Example no. 1
    def build_graph(self):
        graph = Graph(self.mode)
        logits = graph.build(self.features)

        transition_params = tf.get_variable("transitions", [Config.model.fc_unit, Config.model.fc_unit])
        viterbi_sequence, _ = tf.contrib.crf.crf_decode(logits, transition_params, self.features['length'])

        self.predictions = viterbi_sequence
        tf.identity(self.predictions, 'prediction')

        if self.mode != tf.estimator.ModeKeys.PREDICT:
            self._build_loss(logits, transition_params)
            if self.mode == tf.estimator.ModeKeys.TRAIN:
                self._build_train_op()
            else:
                seg_precision = tf.placeholder(tf.float32, None, 'seg_p_ph')
                seg_recall = tf.placeholder(tf.float32, None, 'seg_r_ph')
                seg_f1_measure = tf.placeholder(tf.float32, None, 'seg_f1_ph')
                tf.summary.scalar('seg_precision', seg_precision, ['prf'], 'seg_score')
                tf.summary.scalar('seg_recall', seg_recall, ['prf'], 'seg_score')
                tf.summary.scalar('seg_f1_measure', seg_f1_measure, ['prf'], 'seg_score')

                tag_precision = tf.placeholder(tf.float32, None, 'tag_p_ph')
                tag_recall = tf.placeholder(tf.float32, None, 'tag_r_ph')
                tag_f1_measure = tf.placeholder(tf.float32, None, 'tag_f1_ph')
                tf.summary.scalar('tag_precision', tag_precision, ['prf'], 'tag_score')
                tf.summary.scalar('tag_recall', tag_recall, ['prf'], 'tag_score')
                tf.summary.scalar('tag_f1_measure', tag_f1_measure, ['prf'], 'tag_score')
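
A minimal sketch of how tf.contrib.crf.crf_decode is driven, assuming TensorFlow 1.x and a toy tag count in place of the Config values above:

import numpy as np
import tensorflow as tf

num_tags = 5
logits = tf.placeholder(tf.float32, [None, None, num_tags])  # [batch, time, tags]
lengths = tf.placeholder(tf.int32, [None])                   # true sequence lengths
transitions = tf.get_variable('transitions', [num_tags, num_tags])

# Viterbi decoding: best tag id per step, plus the score of the best path
decoded, best_score = tf.contrib.crf.crf_decode(logits, transitions, lengths)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    tags = sess.run(decoded, {logits: np.random.randn(2, 7, num_tags).astype(np.float32),
                              lengths: [7, 4]})
    print(tags.shape)  # (2, 7)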
Example no. 2
    def build_graph(self):
        graph = Graph(self.mode)
        logits = graph.build(self.inputs)
        self.predictions = tf.argmax(logits, 1)

        if self.mode != tf.estimator.ModeKeys.PREDICT:
            self._build_loss(logits)
            self._build_train_op()
            self._build_metric()
Example no. 3
def point4(true_graph: Graph, budget, repetitions, simulations):
    # Copy the original graph and initialise every existing edge to a 0.5 probability estimate
    graph = Graph(copy=true_graph)
    graph.adj_matrix = np.where(true_graph.adj_matrix > 0, 0.5, 0)

    x_list = []
    x2_list = []
    y_list = []
    y2_list = []

    total_error = 0.0

    # Main procedure
    for r in range(repetitions):
        print("Iteration: " + str(r + 1) + "/" + str(repetitions), end="")
        #epsilon = (1 - r / repetitions) ** 2
        seeds = choose_seeds_from_sampling(graph, budget, simulations)
        graph.influence_episode(seeds, true_graph.adj_matrix)

        # np.where returns only the indices of the non-zero entries (currently unused here)
        indices = np.where(graph.adj_matrix > 0)

        error = get_total_error(graph, true_graph)
        total_error += error

        x_list.append(r)
        x2_list.append(r)
        y_list.append(total_error)
        y2_list.append(0)
        print("", end="\r")
    print("", end="")

    plt.plot(x_list,
             y_list,
             label='Bandit Approximation',
             color='tab:blue',
             linestyle='-')
    plt.plot(x2_list,
             y2_list,
             label='Ideal 0 Value',
             color='tab:orange',
             linestyle='--')
    plt.title("Unknown Activation Probabilities - Approximation Error")
    plt.ylabel("Approximation Error")
    plt.xlabel("Time")
    plt.legend()

    plt.show()
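
get_total_error comes from elsewhere in the original source; a hypothetical stand-in consistent with its use above is the mean absolute gap between the estimated and the true activation probabilities:

import numpy as np

def get_total_error(est_graph, true_graph):  # hypothetical helper, not the original
    mask = true_graph.adj_matrix > 0                             # only existing edges
    diff = np.abs(est_graph.adj_matrix - true_graph.adj_matrix)
    return diff[mask].mean() if mask.any() else 0.0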
Example no. 4
    def build_graph(self):
        graph = Graph()
        if self.mode == tf.estimator.ModeKeys.TRAIN:
            logits = graph(self.inputs, self.mode)
            pred = tf.argmax(logits, -1)

            self._build_loss(logits)
            self._build_train_op()
        else:
            inputs = {
                'tree_word_id':
                tf.placeholder(tf.int64, [None, None], name='tree_word_id'),
                'tree_pos_id':
                tf.placeholder(tf.int64, [None, None], name='tree_pos_id'),
                'token_word_id':
                tf.placeholder(tf.int64, [None, None], name='token_word_id'),
                'token_pos_id':
                tf.placeholder(tf.int64, [None, None], name='token_pos_id'),
                'history_action_id':
                tf.placeholder(tf.int64, [None, None],
                               name='history_action_id'),
                'buff_top_id':
                tf.placeholder(tf.int64, [None], name='buff_top_id'),
                'deque_word_id':
                tf.placeholder(tf.int64, [None, None], name='deque_word_id'),
                'deque_pos_id':
                tf.placeholder(tf.int64, [None, None], name='deque_pos_id'),
                'deque_length':
                tf.placeholder(tf.int64, [None], name='deque_length'),
                'children_order':
                tf.placeholder(tf.int64, [None, None, None],
                               name='children_order'),
                'stack_order':
                tf.placeholder(tf.int64, [None, None], name='stack_order'),
                'stack_length':
                tf.placeholder(tf.int64, [None], name='stack_length'),
                'token_length':
                tf.placeholder(tf.int64, [None], name='token_length'),
                'history_action_length':
                tf.placeholder(tf.int64, [None], name='history_action_length')
            }

            logits = graph(inputs, self.mode)
            prob = tf.nn.softmax(logits)
            # during eval the parser doesn't follow the gold transitions, so the loss is meaningless
            self.loss = tf.constant(0)
            uf = tf.placeholder(tf.float32, None, 'uf_ph')
            lf = tf.placeholder(tf.float32, None, 'lf_ph')
            tf.summary.scalar('U-F-score', uf, ['f_score'], 'score')
            tf.summary.scalar('L-F-score', lf, ['f_score'], 'score')

            self.evaluation_hooks = [EvalHook()]
            self.prediction_hooks = [PredHook()]
            self.predictions = {
                'pred_head':
                tf.placeholder(tf.int64, [None, None, None], name='pred_head'),
                'pred_dep':
                tf.placeholder(tf.int64, [None, None, None], name='pred_dep')
            }
Example no. 5
    def __init__(self, graph_file, wavelengths, iterations, lambda_arr, \
                 lambda_dep, method, rndseed):

        self.arrivals = 0
        self.blocks = 0

        self.count = 0
        self.iterations = int(iterations)

        self.interferences = []
        self.distances = []
        self.method = int(method)

        self.lambda_arr = 1 / float(lambda_arr)
        self.lambda_dep = 1 / float(lambda_dep)

        self.queue = []
        self.graph = Graph(file=graph_file, wl=int(wavelengths))

        self.nointerference = False

        self.methods = ('Classic', 'FF', 'FF/LF', 'FF/LF/2', 'MinCost')

        # Initialize the random seed
        seed(rndseed)
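
Note that lambda_arr and lambda_dep store mean times (the reciprocal of the rates), so exponential event times would be drawn along these lines (a sketch using the standard library, with an assumed illustrative value):

import random

mean_interarrival = 2.0                              # illustrative value only
delay = random.expovariate(1.0 / mean_interarrival)  # expovariate takes a rate, not a mean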
Example no. 6
    def _build_graph(self):
        graph = Graph()
        logits, labels = graph(self.inputs, self.targets, self.mode)

        self.predictions = tf.argmax(logits, -1)
        if self.mode != tf.estimator.ModeKeys.PREDICT:
            self._build_loss(logits, labels)
            self._build_train_op()
Example no. 7
    def build_graph(self):
        graph = Graph(self.mode)
        logits, locs, softmax_logits = graph.build(self.inputs)

        softmax_logits_dict = OrderedDict({
            f'softmax_feat{n+1}': softmax_logits[n]
            for n in range(len(softmax_logits))
        })
        locs_dict = OrderedDict(
            {f'locs_feat{n+1}': locs[n]
             for n in range(len(locs))})
        softmax_logits_dict.update(locs_dict)
        self.predictions = softmax_logits_dict

        if self.mode != tf.estimator.ModeKeys.PREDICT:
            self._build_loss(logits, locs)
            self._build_optimizer()
Example no. 8
    def build_graph(self):
        graph = Graph(self.mode)
        outputs = graph.build(self.inputs)
        softmax_w = tf.get_variable('w', [Config.model.vocab_num, Config.model.embedding_size], tf.float32,
                                    slim.xavier_initializer())
        softmax_b = tf.get_variable('b', [Config.model.vocab_num], tf.float32, tf.constant_initializer(0.0))

        if self.mode == tf.estimator.ModeKeys.TRAIN:
            self._build_loss(outputs, softmax_w, softmax_b)
            self._build_train_op()
        else:
            for_logits = tf.tensordot(outputs[0], tf.transpose(softmax_w), [[2], [0]])
            for_logits = tf.nn.bias_add(for_logits, softmax_b)
            for_loss = tf.losses.sparse_softmax_cross_entropy(self.targets['for_labels'], for_logits)
            back_logits = tf.tensordot(outputs[1], tf.transpose(softmax_w), [[2], [0]])
            back_logits = tf.nn.bias_add(back_logits, softmax_b)
            back_loss = tf.losses.sparse_softmax_cross_entropy(self.targets['back_labels'], back_logits)
            self.loss = 0.5 * (for_loss + back_loss)
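
A shape sketch of the projection used above: tensordot contracts the embedding axis of the outputs with softmax_w, mapping [batch, time, embedding_size] to [batch, time, vocab_num] (illustrative numpy with toy sizes):

import numpy as np

outputs0 = np.zeros([4, 10, 128])      # [batch, time, embedding_size]
softmax_w = np.zeros([30000, 128])     # [vocab_num, embedding_size]
logits = np.tensordot(outputs0, softmax_w.T, [[2], [0]])
print(logits.shape)                    # (4, 10, 30000): per-step vocab logits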
Example no. 9
    def build_graph(self):
        graph = Graph()
        if self.mode == tf.estimator.ModeKeys.TRAIN:
            logits = graph(self.inputs, self.mode)
            self._build_loss(logits)
            self._build_train_op()
        else:
            inputs = {
                'tree_word_id':
                tf.placeholder(tf.int64, [None, None], name='tree_word_id'),
                'tree_pos_id':
                tf.placeholder(tf.int64, [None, None], name='tree_pos_id'),
                'buff_word_id':
                tf.placeholder(tf.int64, [None, None], name='buff_word_id'),
                'buff_pos_id':
                tf.placeholder(tf.int64, [None, None], name='buff_pos_id'),
                'history_action_id':
                tf.placeholder(tf.int64, [None, None],
                               name='history_action_id'),
                'comp_head_order':
                tf.placeholder(tf.int64, [None, None], name='comp_head_order'),
                'comp_dep_order':
                tf.placeholder(tf.int64, [None, None], name='comp_dep_order'),
                'comp_rel_id':
                tf.placeholder(tf.int64, [None, None], name='comp_rel_id'),
                'is_leaf':
                tf.placeholder(tf.int64, [None, None], name='is_leaf'),
                'stack_order':
                tf.placeholder(tf.int64, [None, None], name='stack_order'),
                'stack_length':
                tf.placeholder(tf.int64, [None], name='stack_length'),
                'buff_length':
                tf.placeholder(tf.int64, [None], name='buff_length'),
                'history_action_length':
                tf.placeholder(tf.int64, [None], name='history_action_length')
            }

            logits = graph(inputs, self.mode)
            prob = tf.nn.softmax(logits)
            # during eval the parser doesn't follow the gold transitions, so the loss is meaningless
            self.loss = tf.constant(0)
            head_acc = tf.placeholder(tf.float32, None, 'head_ph')
            dep_acc = tf.placeholder(tf.float32, None, 'dep_ph')
            tf.summary.scalar('UAS', head_acc, ['acc'], 'score')
            tf.summary.scalar('LAS', dep_acc, ['acc'], 'score')

            self.evaluation_hooks = [EvalHook()]
            self.prediction_hooks = [PredHook()]
            self.predictions = {
                'pred_head':
                tf.placeholder(tf.int64, [None, None], name='pred_head'),
                'pred_dep':
                tf.placeholder(tf.int64, [None, None], name='pred_dep')
            }
Example no. 10
    def build_graph(self):
        graph = Graph(self.mode)
        arc_logits, label_logits = graph.build(self.inputs)
        tf.identity(arc_logits, 'arc_logits')
        tf.identity(label_logits, 'label_logits')

        self.predictions = {
            'arc_logits': arc_logits,
            'label_logits': label_logits
        }

        if self.mode != tf.estimator.ModeKeys.PREDICT:
            self._build_loss(arc_logits, label_logits)
            if self.mode == tf.estimator.ModeKeys.TRAIN:
                self._build_train_op()
            else:
                arc_acc = tf.placeholder(tf.float32, None, 'arc_ph')
                label_acc = tf.placeholder(tf.float32, None, 'label_ph')
                tf.summary.scalar('UAS', arc_acc, ['acc'], 'score')
                tf.summary.scalar('LAS', label_acc, ['acc'], 'score')
Example no. 11
    def build_graph(self):
        graph = Graph()
        logits = graph(self.inputs, self.mode)

        def hard_constraints():
            ninf = -np.inf
            params = np.zeros([Config.model.class_num, Config.model.class_num])
            with open(os.path.join(Config.data.processed_path, Config.data.label_file)) as f:
                labels = f.read().splitlines()
                for i in range(len(labels)):
                    for j in range(len(labels)):
                        if labels[i][0] == 'B' and labels[j][0] in ['B', 'S', 'O', 'r']:
                            params[i, j] = ninf
                        elif labels[i][0] == 'I' and labels[j][0] in ['B', 'S', 'O', 'r']:
                            params[i, j] = ninf
                        elif labels[i][0] == 'E' and labels[j][0] in ['I', 'E']:
                            params[i, j] = ninf
                        elif labels[i][0] == 'S' and labels[j][0] in ['I', 'E']:
                            params[i, j] = ninf
                        elif labels[i][0] == 'O' and labels[j][0] in ['I', 'E']:
                            params[i, j] = ninf
                        elif labels[i][0] == 'r' and labels[j][0] in ['I', 'E', 'r']:
                            params[i, j] = ninf
                        elif labels[i][0] == 'B' and labels[j][0] in ['I', 'E'] and labels[i][1:] != labels[j][1:]:
                            params[i, j] = ninf
                        elif labels[i][0] == 'I' and labels[j][0] in ['I', 'E'] and labels[i][1:] != labels[j][1:]:
                            params[i, j] = ninf
            return params

        transition_params = hard_constraints()
        transition_params = tf.constant(transition_params, tf.float32)

        viterbi_sequence, _ = tf.contrib.crf.crf_decode(logits, transition_params,
                                                        tf.cast(self.inputs['length'], tf.int32))

        self.predictions = viterbi_sequence
        tf.identity(self.predictions, 'prediction')

        if self.mode != tf.estimator.ModeKeys.PREDICT:
            self._build_loss(logits, transition_params)
            if self.mode == tf.estimator.ModeKeys.TRAIN:
                self._build_train_op()
            else:
                self.evaluation_hooks = [PRFScoreHook()]
                precision = tf.placeholder(tf.float32, None, 'p_ph')
                recall = tf.placeholder(tf.float32, None, 'r_ph')
                f1_measure = tf.placeholder(tf.float32, None, 'f1_ph')
                tf.summary.scalar('precision', precision, ['prf'], 'score')
                tf.summary.scalar('recall', recall, ['prf'], 'score')
                tf.summary.scalar('f1_measure', f1_measure, ['prf'], 'score')
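
A toy illustration of what such a constraint matrix encodes, assuming a small BIES-style label set (-inf entries mark forbidden transitions, e.g. B may only be followed by I/E of the same type):

import numpy as np

labels = ['B-LOC', 'I-LOC', 'E-LOC', 'S-LOC', 'O']
params = np.zeros([len(labels), len(labels)])
for i, prev in enumerate(labels):
    for j, curr in enumerate(labels):
        if prev[0] in 'BI' and (curr[0] not in 'IE' or prev[1:] != curr[1:]):
            params[i, j] = -np.inf   # an open chunk must continue with I/E of the same type
        elif prev[0] in 'ESO' and curr[0] in 'IE':
            params[i, j] = -np.inf   # a closed chunk cannot be continued
print(params)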
Example no. 12
    def make_df(self, doc_path, label):
        net = Graph()
        G, matrix = net.create_graph(doc_path, string_to_list=True)  # the tfidf_reweight.csv files already exist, so start from those
        wt_mean, wt_var = self.cal_edge_weight(matrix)
        edge_num = self.cal_edge_num(matrix)
        com_mean, com_var, core_count, deg_val, clo_val, bet_val = self.cal_net_feature(G)

        """ tf-idf values not included """
        feature_df_one = {'wt_mean': wt_mean,
                          'wt_var': wt_var,
                          'edge_num': edge_num,
                          'com_mean': com_mean,
                          'com_var': com_var,
                          'core_count': core_count,
                          'deg_centrality': deg_val,
                          'clo_centrality': clo_val,
                          'bet_centrality': bet_val,
                          'label': label,
                          'index': doc_path.split('/')[-1][:-4]
                          }

        return feature_df_one
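
cal_edge_weight and the other cal_* helpers also come from elsewhere in the source; a plausible stand-in for the first, given how it is called above:

import numpy as np

def cal_edge_weight(matrix):  # hypothetical helper, not the original
    w = np.asarray(matrix, dtype=float)
    vals = w[w > 0]           # weights of the existing edges
    return vals.mean(), vals.var()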
Example no. 13
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 -- registers the '3d' projection

# data is assumed to be loaded earlier in the original source
xdata = np.asarray(data[0])
ydata = np.asarray(data[1])
zdata = np.asarray(data[2])

c = ('b', 'b', 'r', 'g', 'b', 'r', 'g', 'b', 'r', 'g', 'k')

fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')

ax.scatter(xdata, ydata, zdata, c=c, marker='o', s=400)

plt.show()

# n0, s1, p1 and l1 are vertex objects created earlier in the original source
g = Graph()
g.add_vertex(n0)
g.add_vertex(s1)
g.add_vertex(p1)
g.add_vertex(l1)

g.add_edge(n0, s1)
g.add_edge(s1, p1)
g.add_edge(p1, l1)
g.get_vertices()



Example no. 14
def main(argv):
    # fake_scores = fake_score_generator(death_dataset)
    # death_dataset = add_feature(death_dataset, fake_scores)
    # names = {}
    # names_dict = create_name_dict(names_list)
    # filename = 'death'
    # names_list = get_file_names(filename)

    count_flag = False
    gender_flag = False
    proximity_flag = False
    tf_idf_flag = False
    graph_flag = False
    run_file_flag = False
    ablation_flag = False
    best_subset_flag = False
    graph = None  # only created when a graph feature is requested; avoids a NameError in create_features otherwise

    books_inverted_index = Index()
    # do a fresh indexing and save
    # books_inverted_index.add_all_books()
    # save_index(books_inverted_index)
    # or load from previous indexing

    if len(sys.argv) > 1:

        for arg in sys.argv[1:]:
            if arg == 'index_books':
                # do a fresh indexing and save
                books_inverted_index.add_all_books()

                save_index(books_inverted_index)

            elif arg == 'load_books':
                print(
                    "loading books directly as inverted_index object into the program"
                )
                books_inverted_index = load_index()

            elif arg == 'count_features':
                count_flag = True

            elif arg == 'gender_feature':
                gender_flag = True

            elif arg == 'proximity_feature':
                proximity_flag = True

            elif arg == 'tf_idf':
                tf_idf_flag = True

            elif arg == 'ablation':
                ablation_flag = True

            elif arg == 'best_subset':
                best_subset_flag = True

            elif arg == 'graph_feature':
                graph_flag = True
                graph = Graph()

            elif arg == 'all_features':
                count_flag = True
                gender_flag = True
                proximity_flag = True
                tf_idf_flag = True
                graph_flag = True
                graph = Graph()

            elif arg == 'run_file':
                run_file_flag = True

            elif arg == 'quick':
                books_inverted_index = load_index()
                count_flag = True
                gender_flag = True
                proximity_flag = True
                tf_idf_flag = False
                graph_flag = False
                graph = Graph()

            else:
                sys.exit("Wrong usage!")

    else:
        books_inverted_index = load_index()
        count_flag = True
        gender_flag = True
        proximity_flag = True
        tf_idf_flag = True
        graph_flag = True
        graph = Graph()

    classifier = Classifier()
    classifier.read_separate_train_test_files(evaluate=True)
    # classifier.split_data()

    # reading names for training and test sets
    training_names = classifier.get_names(training=True)
    test_names = classifier.get_names(test=True)

    # creating features for the training set
    features_index, training_features = create_features(
        training_names, books_inverted_index, graph, count_flag, gender_flag,
        proximity_flag, tf_idf_flag, graph_flag)
    # creating features for the test set
    features_index, test_features = create_features(
        test_names, books_inverted_index, graph, count_flag, gender_flag,
        proximity_flag, tf_idf_flag, graph_flag)

    classifier.set_features(training_features, test_features)
    classifier.save_features()

    y_pred_log = classifier.logistic_regression()
    # classifier.svc_polynomial()
    # classifier.svc_guassian_kernel()
    y_pred_svc = classifier.svc_sigmoid()
    y_pred_dt = classifier.decision_tree()
    y_pred_knn = classifier.k_nearest_neighbors()
    y_pred_nb = classifier.naive_base()

    # create the run file out of the knn's results
    if run_file_flag:
        classifier.make_new_run_file(y_pred_dt, 'dt')
        classifier.make_new_run_file(y_pred_log, 'logit')
        classifier.make_new_run_file(y_pred_svc, 'svc')
        classifier.make_new_run_file(y_pred_knn, 'knn')
        classifier.make_new_run_file(y_pred_nb, 'naive')

    # classifier.feature_selection()

    classifier.plot_f1_scores(classifier.method_name,
                              classifier.f_scores,
                              plot_title='Death Prediction',
                              file_name='f1_scores')

    y_pred_list = [y_pred_log, y_pred_svc, y_pred_dt, y_pred_knn, y_pred_nb]

    classifier.plot_with_error_bars('death', y_pred_list,
                                    classifier.method_name, 'Death Prediction',
                                    'death_fscore_error')

    if gender_flag:
        gender_training_features = training_features[2]
        gender_test_features = test_features[2]
        classifier.evaluate_gender_prediction(gender_training_features,
                                              gender_test_features,
                                              print_flag=True)

    if ablation_flag:
        ablation_test(classifier, features_index, training_features,
                      test_features)

    if best_subset_flag:
        best_subset_selection(classifier, training_features, test_features)
Example no. 15
import graphviz
import os


def print_graph(g):
    graph = graphviz.Graph(format='png', strict=True, filename='network')
    for n in g.keys():
        graph.node(n, n)

    for n in g.keys():
        for t, w in g[n]:
            graph.edge(n, t, label=str(w))
    graph.render()        # writes the DOT source ('network') and renders 'network.png'
    os.remove('network')  # drop the intermediate DOT source, keep the PNG
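
A usage sketch, assuming Graphviz is installed: print_graph expects a mapping of node -> list of (neighbour, weight) pairs:

toy = {
    'a': [('b', 1), ('c', 2)],
    'b': [('c', 3)],
    'c': [],
}
print_graph(toy)  # renders network.png in the working directory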


g = Graph()
Edges = [('yahoo.com', 'google.com', 3), ('yahoo.com', 'facebook.com', 2), ('yahoo.com', 'twitter.com', 2),
         ('google.com', 'facebook.com', 5), ('google.com', 'twitter.com', 3), ('google.com', 'instagram.com', 1),
         ('google.com', 'reddit.com', 5), ('LordVoldemodem', 'google.com', 1), ('reddit.com', 'instagram.com', 1),
         ('instagram.com', 'twitter.com', 1), ('twitter.com', 'facebook.com', 1), ('DesktopD', 'DesktopC', 3),
         ('DesktopD', 'Mobile1', 1), ('Mobile1', 'DesktopC', 2), ('DesktopC', 'Modem', 1)]

for edge in Edges:
    a, b, c = edge
    g.add_edge(a, b, c)
print(g)
print(g.dfs('yahoo.com'))
print(g.bfs('twitter.com'))
a, b = g.dijkstra('yahoo.com', 'reddit.com')
print('Shortest distance is ' + str(a))
print('Path: ' + str(b))
print(g.bfs('Mobile1'))
Example no. 16
class Simulator(object):
    def __init__(self, graph_file, wavelengths, iterations, lambda_arr, \
                 lambda_dep, method, rndseed):

        self.arrivals = 0
        self.blocks = 0

        self.count = 0
        self.iterations = int(iterations)

        self.interferences = []
        self.distances = []
        self.method = int(method)

        self.lambda_arr = 1 / float(lambda_arr)
        self.lambda_dep = 1 / float(lambda_dep)

        self.queue = []
        self.graph = Graph(file=graph_file, wl=int(wavelengths))

        self.snapshot = []  # appended to by start_loop when nointerference is set
        self.nointerference = False

        self.methods = ('Classic', 'FF', 'FF/LF', 'FF/LF/2', 'MinCost')

        # Initialize the random seed
        seed(rndseed)

    def allocate(self, src, dst):
        return self.graph.allocate(src, dst, self.method, self.nointerference)

    def enqueue(self, evt):
        heappush(self.queue, evt)

    def start_loop(self):
        count = self.count
        iterations = self.iterations

        while True:
            evt = heappop(self.queue)

            blocked = evt.process()

            if self.nointerference and blocked:
                self.snapshot.append((self.blocks, self.arrivals))
            # print(evt, "BLOCK" if blocked else "")

            count += 1

            if iterations == count:
                break

        self.count = count

    def simple_run(self, num_samples):
        """
        Simply returns the blocking probability from a simple run
        """
        self.nointerference = True
        self.enqueue(Arrival(self))

        count = self.count
        iterations = self.iterations
        stopafter = iterations // num_samples  # integer division so the equality check below can trigger
        current = 0
        snapshot = []

        while True:
            evt = heappop(self.queue)

            blocked = evt.process()

            if current == stopafter:
                current = 0
                snapshot.append((self.blocks, self.arrivals))

            count += 1
            current += 1

            if iterations == count:
                break

        self.count = count
        return snapshot

    def run(self, wantpdf=False):
        self.enqueue(Arrival(self))

        self.start_loop()

        sys.stdout.write("[%8s] %4d rate, %6d blocks, %6d arrivals [%.04f]\r" % (
            self.methods[self.method],
            1.0 / self.lambda_arr, self.blocks, self.arrivals, float(self.blocks) / float(self.arrivals)
        ))

        if wantpdf:
            sys.stdout.write('\n')

        sys.stdout.flush()

        while not wantpdf:
            avg, median, std, min, max, conf = stats(self.interferences)

            if conf >= 0.05:
                self.iterations *= 2

                sys.stdout.write("Doubling iterations %d\r" % self.iterations)
                sys.stdout.flush()
                self.start_loop()
            else:
                sys.stdout.write("[%8s] %4d rate, %.6f interference, %6d blocks, %6d arrivals [%.04f]\r" % (
                    self.methods[self.method],
                    1.0 / self.lambda_arr, avg, self.blocks, self.arrivals, float(self.blocks) / float(self.arrivals)
                ))
                sys.stdout.write('\n')
                sys.stdout.flush()
                return (avg, median, std, min, max, conf)

        # If we are here we are just interested in the pdf
        # So just returns the results

        return (self.distances, self.interferences)
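
stats() is defined elsewhere in the source; a hypothetical stand-in consistent with the unpacking above (mean, median, standard deviation, min, max and a 95% confidence half-width under a normal approximation):

import math
from statistics import mean, median, pstdev

def stats(samples):  # hypothetical helper, not the original
    m = mean(samples)
    s = pstdev(samples)
    half_width = 1.96 * s / math.sqrt(len(samples))
    return m, median(samples), s, min(samples), max(samples), half_width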
Example no. 17
def point7(graphs, prices, conv_rates, n_phases, k, budget, n_experiments, T,
           simulations):
    window_size = 2 * int((np.sqrt(T)))
    # init revenue and n_customers for each graph, experiment and day
    revenue = np.zeros([len(graphs), n_experiments, T])
    n_customers = np.zeros([len(graphs), n_experiments, T])

    phases_lens = np.zeros([len(graphs), n_phases], dtype=int)

    best_graphs_seeds = []
    for g in range(len(graphs)):
        seeds, _ = greedy_algorithm(graphs[g], budget, k)
        best_graphs_seeds.append(seeds)

    for exper in range(n_experiments):
        for g in range(len(graphs)):
            learner = SWTS_Learner(len(prices), prices, window_size, T)
            env = Non_Stationary_Environment(len(prices), conv_rates[g], T)
            # init the estimated graph as in point 4
            graph = Graph(copy=graphs[g])
            graph.adj_matrix = np.where(graphs[g].adj_matrix > 0, 0.5, 0)

            print(
                f'Experiment : {exper+1}/{n_experiments} Graph : {g+1}/{len(graphs)}'
            )
            for t in tqdm(range(T)):
                r = 0
                # every day the sellers make social influence
                seeds = choose_seeds_from_sampling(graph, budget, simulations)
                potential_customers = graph.influence_episode(
                    seeds, graphs[g].adj_matrix)
                best_potential_customers = graph.influence_episode(
                    best_graphs_seeds[g], graphs[g].adj_matrix, sampling=False)
                indices = np.where(graph.adj_matrix > 0)

                curr_phase = int(t / (T / n_phases))
                phases_lens[g][curr_phase] += potential_customers

                # retrieve alpha and beta for each active edge, compute the posterior mean and update the probability
                for i in range(len(indices[0])):
                    x = indices[0][i]
                    y = indices[1][i]
                    alpha = graph.beta_parameters_matrix[x][y].a
                    beta = graph.beta_parameters_matrix[x][y].b
                    mu = alpha / (alpha + beta)
                    graph.adj_matrix[x][y] = mu

                n_customers[g][exper][t] = best_potential_customers

                for _ in range(potential_customers):
                    pulled_arm = learner.pull_arm()
                    reward = env.round(pulled_arm, t)
                    learner.update(pulled_arm, reward, t)
                    r += prices[pulled_arm] * reward

                # revenue of the day
                revenue[g, exper, t] = r

    # average over experiments
    avg_revenue = np.average(revenue, 1)
    avg_customers = np.average(n_customers, 1)

    # compute the true expected revenue
    true_expect_revenue = np.zeros([len(graphs), n_phases, len(prices)])
    for g, conv_rate in enumerate(conv_rates):
        for phase in range(n_phases):
            true_expect_revenue[g][phase] = conv_rate[phase] * prices

    time = range(T)

    for g in range(len(graphs)):
        opt_revenue = []
        actual_revenue = []
        regret = []

        for day in range(T):
            phase_size = T / n_phases
            curr_phase = int(day / phase_size)
            # compute the clairvoyant revenue
            avg_customers_per_graph = np.mean(avg_customers, 1)
            opt = np.max(true_expect_revenue[g]
                         [curr_phase]) * avg_customers_per_graph[g]
            # revenue of the algorithm
            actual = avg_revenue[g][day]
            # compute the instantaneous regret
            regret.append(opt - actual)
            opt_revenue.append(opt)
            actual_revenue.append(actual)
        # plot the instantaneous revenue
        plt.figure(1)
        ax1 = plt.subplot(221)
        ax1.set_title(f'Graph {g}: Instantaneous Revenue')
        plt.plot(time, actual_revenue, label='TS_SW')
        plt.plot(time, opt_revenue, '--', label='clairvoyant')
        plt.ylabel('revenue')
        plt.xlabel('Time Horizon')
        plt.legend(loc="lower right")

        # plot the cumulative revenue
        ax2 = plt.subplot(222)
        ax2.set_title(f'Graph {g}: Cumulative Revenue')
        plt.plot(time, np.cumsum(actual_revenue), label='TS_SW')
        plt.plot(time, np.cumsum(opt_revenue), '--', label='clairvoyant')
        plt.xlabel('Time Horizon')
        plt.ylabel('revenue')
        plt.legend(loc="lower right")

        # plot the cumulative regret
        ax3 = plt.subplot(223)
        ax3.set_title(f'Graph {g}: Cumulative Regret')
        plt.plot(time, np.cumsum(regret), label='TS_SW')
        plt.legend(loc="lower right")
        plt.xlabel('Time Horizon')
        plt.ylabel('regret')
        # plt.savefig(f'results/point5 graph{g+1}')
        plt.show()


points = [2, 3, 4, 5, 6, 7]

for point in points:
    if point == 2:
        graphs = [Graph(100, 0.2), Graph(150, 0.2), Graph(200, 0.1)]
        budget = 4
        scale_factor = 1.001
        num_experiments = 50

        point2(graphs, budget, scale_factor, num_experiments)

    # -----------------------------------------------------------------------------
    if point == 3:
        graphs = [Graph(100, 0.2), Graph(150, 0.2), Graph(200, 0.1)]
        budget = 4
        scale_factor = 1.001
        num_experiments = 50

        point3(graphs, budget, scale_factor, num_experiments)