def weight_setup(self, weighting):
    """
    Calculating the edge weights.

    :param weighting: Type of edge weights. One of "overlap", "unit" or
        "min_norm"; any other value falls back to normalized overlap.
    """
    if weighting == "overlap":
        self.weights = overlap_generator(overlap, self.graph)
    elif weighting == "unit":
        self.weights = overlap_generator(unit, self.graph)
    elif weighting == "min_norm":
        self.weights = overlap_generator(min_norm, self.graph)
    else:
        # Default: normalized neighbourhood overlap.
        self.weights = overlap_generator(normalized_overlap, self.graph)
Exemple #2
0
 def weight_setup(self, weighting):
     """
     Calculating the edge weights.
     :param weighting: Type of edge weights.
     """
     # Map each recognised weighting name to its scoring function;
     # anything unrecognised falls back to normalized overlap.
     scorers = {
         "overlap": overlap,
         "unit": unit,
         "min_norm": min_norm,
     }
     scorer = scorers.get(weighting, normalized_overlap)
     self.weights = overlap_generator(scorer, self.graph)
Exemple #3
0
    def build(self):
        """
        Method to create the computational graph.

        Assembles the loss (walker + gamma * clustering + regularization),
        a polynomially decaying learning rate, and the Adam training op
        inside a dedicated tf.Graph. Finally precomputes the edge weights.
        """
        self.computation_graph = tf.Graph()
        with self.computation_graph.as_default():

            # Sub-models: embedding walker, clustering cost, regularizer.
            self.walker_layer = DeepWalker(self.args, self.vocab_size,
                                           self.degrees)
            self.cluster_layer = Clustering(self.args)
            self.regularizer_layer = Regularization(self.args)

            # gamma scales the clustering term of the loss; fed at run time.
            self.gamma = tf.placeholder("float")
            # Total loss = walker loss + gamma * clustering + regularization.
            self.loss = self.walker_layer()
            self.loss = self.loss + self.gamma * self.cluster_layer(
                self.walker_layer)
            self.loss = self.loss + self.regularizer_layer(self.walker_layer)

            # batch doubles as the optimizer's global_step counter below.
            self.batch = tf.Variable(0)
            self.step = tf.placeholder("float")

            # Learning rate anneals polynomially from the initial to the
            # minimal rate over true_step_size steps.
            self.learning_rate_new = tf.train.polynomial_decay(
                self.args.initial_learning_rate, self.batch,
                self.true_step_size, self.args.minimal_learning_rate,
                self.args.annealing_factor)

            self.train_op = tf.train.AdamOptimizer(
                self.learning_rate_new).minimize(self.loss,
                                                 global_step=self.batch)

            self.init = tf.global_variables_initializer()

        # Edge weights are plain Python data, so computed outside the graph.
        self.weights = overlap_generator(self.args, self.graph)
Exemple #4
0
 def optimize(self):
     """
     Method to run the optimization and halt it when overfitting started.
     The output matrices are all saved when optimization has finished.
     """
     self.best_modularity = 0
     self.stop_index = 0
     with tf.Session(graph=self.computation_graph) as session:
         self.init.run()
         self.logs = log_setup(self.args)
         print("Optimization started.\n")
         self.build_graph()
         # Full-batch inputs: overlap matrix, dense adjacency matrix, and
         # modularity matrix. The feed_dict is constant across iterations.
         feed_dict = {
             self.S_0: overlap_generator(self.G),
             self.B1: np.array(nx.adjacency_matrix(self.G).todense()),
             self.B2: modularity_generator(self.G)
         }
         for i in tqdm(range(self.args.iteration_number)):
             start = time.time()
             H = session.run(self.H, feed_dict=feed_dict)
             # NOTE(review): update_state presumably tracks best_modularity
             # and advances stop_index when modularity stops improving —
             # confirm against its definition.
             current_modularity = self.update_state(H)
             end = time.time()
             # Record per-iteration wall time and modularity score.
             log_updater(self.logs, i, end - start, current_modularity)
             # Early stopping once no improvement for early_stopping rounds.
             if self.stop_index > self.args.early_stopping:
                 break
         # Persist the output matrices once optimization has finished.
         self.initiate_dump(session, feed_dict)
Exemple #5
0
 def __init__(self, args, graph):
     """
     Every model needs the same initialization -- args, graph.
     We delete the sampler object to save memory.
     We also build the computation graph up. 

     :param args: Arguments object with weighting/training settings.
     :param graph: NetworkX graph to be embedded.
     """
     self.args = args
     self.graph = graph
     # Edge-weight dictionaries for the target and regularization terms.
     self.targets = overlap_generator(self.args.target_weighting,
                                      self.graph)
     self.weights = overlap_generator(self.args.regularization_weighting,
                                      self.graph)
     self.nodes = self.graph.nodes()
     self.vocab_size = len(self.nodes)
     # Total optimization steps: each edge appears twice in self.weights
     # (presumably one key per direction — confirm), hence the /2.
     # NOTE(review): true division yields a float here; verify downstream
     # consumers accept a non-integer step count.
     self.true_step_size = ((len(self.weights.keys()) / 2) *
                            args.batch_size * self.args.epochs)
     self.edges = nx.edges(self.graph)
     # Build the TensorFlow computation graph last, after all attributes
     # it reads are in place.
     self.build()