Example #1
0
    def build(self,
              hiddens=None,
              activations=None,
              dropout=0.5,
              lr=0.01,
              weight_decay=5e-4,
              use_bias=False,
              p1=1.,
              p2=1.,
              n_power_iterations=1,
              epsilon=0.03,
              xi=1e-6):
        """Build the underlying GCN model and store VAT hyperparameters.

        Parameters
        ----------
        hiddens: sizes of the hidden layers; defaults to [16].
        activations: activation names per hidden layer; defaults to ['relu'].
        dropout, lr, weight_decay, use_bias: layer/optimizer settings
            forwarded unchanged to ``tfGCN``.
        p1: loss weight (alpha).
        p2: loss weight (beta).
        n_power_iterations: number of power iterations.
        epsilon: norm length for (virtual) adversarial training.
        xi: small constant for the finite-difference approximation.

        Raises
        ------
        NotImplementedError: if ``self.backend`` is not "tensorflow".
        """
        # Avoid mutable default arguments: a shared list default is aliased
        # across calls and would leak mutations between invocations.
        if hiddens is None:
            hiddens = [16]
        if activations is None:
            activations = ['relu']

        if self.backend == "tensorflow":
            with tf.device(self.device):
                self.model = tfGCN(self.graph.num_node_attrs,
                                   self.graph.num_node_classes,
                                   hiddens=hiddens,
                                   activations=activations,
                                   dropout=dropout,
                                   weight_decay=weight_decay,
                                   lr=lr,
                                   use_bias=use_bias)
                # Index of every node, used later for whole-graph lookups.
                self.index_all = tf.range(self.graph.num_nodes,
                                          dtype=self.intx)
        else:
            raise NotImplementedError

        self.p1 = p1  # Alpha
        self.p2 = p2  # Beta
        self.xi = xi  # Small constant for finite difference
        # Norm length for (virtual) adversarial training
        self.epsilon = epsilon
        self.n_power_iterations = n_power_iterations  # Number of power iterations
Example #2
0
    def build(self,
              hiddens=None,
              activations=None,
              dropout=0.5,
              weight_decay=5e-4,
              lr=0.01,
              use_bias=False):
        """Build the GCN model on the backend selected by ``self.backend``.

        Parameters
        ----------
        hiddens: sizes of the hidden layers; defaults to [16].
        activations: activation names per hidden layer; defaults to ['relu'].
        dropout, weight_decay, lr, use_bias: layer/optimizer settings
            forwarded unchanged to the backend model.
        """
        # Avoid mutable default arguments: a shared list default is aliased
        # across calls and would leak mutations between invocations.
        if hiddens is None:
            hiddens = [16]
        if activations is None:
            activations = ['relu']

        if self.backend == "tensorflow":
            with tf.device(self.device):
                self.model = tfGCN(self.graph.num_node_attrs,
                                   self.graph.num_node_classes,
                                   hiddens=hiddens,
                                   activations=activations,
                                   dropout=dropout,
                                   weight_decay=weight_decay,
                                   lr=lr,
                                   use_bias=use_bias)
        else:
            # Non-TensorFlow backend: build the PyTorch model and move it
            # to the configured device.
            self.model = pyGCN(self.graph.num_node_attrs,
                               self.graph.num_node_classes,
                               hiddens=hiddens,
                               activations=activations,
                               dropout=dropout,
                               weight_decay=weight_decay,
                               lr=lr,
                               use_bias=use_bias).to(self.device)
Example #3
0
    def build(self,
              hiddens=None,
              activations=None,
              dropout=0.5,
              lr=0.01,
              weight_decay=5e-4,
              use_bias=False,
              p1=1.,
              p2=1.,
              n_power_iterations=1,
              epsilon=0.03,
              xi=1e-6):
        """Build the GCN model and register VAT hyperparameters in the cache.

        Parameters
        ----------
        hiddens: sizes of the hidden layers; defaults to [16].
        activations: activation names per hidden layer; defaults to ['relu'].
        dropout, lr, weight_decay, use_bias: layer/optimizer settings
            forwarded unchanged to ``tfGCN``.
        p1: loss weight (alpha).
        p2: loss weight (beta).
        n_power_iterations: number of power iterations.
        epsilon: norm length for (virtual) adversarial training.
        xi: small constant for the finite-difference approximation.
        """
        # Avoid mutable default arguments: a shared list default is aliased
        # across calls and would leak mutations between invocations.
        if hiddens is None:
            hiddens = [16]
        if activations is None:
            activations = ['relu']

        with tf.device(self.device):
            self.model = tfGCN(self.graph.num_node_attrs,
                               self.graph.num_node_classes,
                               hiddens=hiddens,
                               activations=activations,
                               dropout=dropout,
                               weight_decay=weight_decay,
                               lr=lr,
                               use_bias=use_bias)
            # Index of every node, cached for whole-graph lookups.
            self.register_cache(
                "index_all", tf.range(self.graph.num_nodes, dtype=self.intx))

        self.register_cache("p1", p1)  # Alpha
        self.register_cache("p2", p2)  # Beta
        self.register_cache("xi", xi)  # Small constant for finite difference
        # Norm length for (virtual) adversarial training
        self.register_cache("epsilon", epsilon)
        self.register_cache("n_power_iterations",
                            n_power_iterations)  # Number of power iterations
Example #4
0
    def build(self,
              hiddens=None,
              activations=None,
              dropout=0.5,
              weight_decay=0.,
              lr=0.01,
              use_bias=False):
        """Build the TensorFlow GCN model on the configured device.

        Parameters
        ----------
        hiddens: sizes of the hidden layers; defaults to [32].
        activations: activation names per hidden layer; defaults to ['relu'].
        dropout, weight_decay, lr, use_bias: layer/optimizer settings
            forwarded unchanged to ``tfGCN``.
        """
        # Avoid mutable default arguments: a shared list default is aliased
        # across calls and would leak mutations between invocations.
        if hiddens is None:
            hiddens = [32]
        if activations is None:
            activations = ['relu']

        with tf.device(self.device):
            self.model = tfGCN(self.graph.num_node_attrs,
                               self.graph.num_node_classes,
                               hiddens=hiddens,
                               activations=activations,
                               dropout=dropout,
                               weight_decay=weight_decay,
                               lr=lr,
                               use_bias=use_bias,
                               # Run eagerly; presumably required by this
                               # model's custom train step — TODO confirm.
                               experimental_run_tf_function=False)