Example #1
    def gcs(self, inputs, stack, iteration):
        """
        Creates a graph convolutional layer with a skip connection.
        :param inputs: list of input Tensors, namely
            - input node features
            - input node features for the skip connection
            - normalized adjacency matrix;
        :param stack: int, current stack (used to retrieve kernels);
        :param iteration: int, current iteration (used to retrieve kernels);
        :return: output node features.
        """
        X = inputs[0]
        X_skip = inputs[1]
        fltr = inputs[2]

        if self.share_weights and iteration >= 1:
            iter_ = 1  # reuse the kernels of iteration 1 when sharing weights
        else:
            iter_ = iteration
        kernel_1, kernel_2, bias = self.kernels[stack][iter_]

        # Convolution
        output = K.dot(X, kernel_1)
        output = ops.filter_dot(fltr, output)

        # Skip connection
        skip = K.dot(X_skip, kernel_2)
        skip = Dropout(self.dropout_rate)(skip)
        output += skip

        if self.use_bias:
            output = K.bias_add(output, bias)
        output = self.gcn_activation(output)
        return output
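Example #1 computes, in dense notation, sigma(A_hat @ X @ W1 + dropout(X_skip @ W2) + b). A minimal NumPy sketch of that computation (the toy shapes and the name A_hat are assumptions for illustration, not part of the layer):

    import numpy as np

    N, F_in, F_out = 4, 3, 2                 # nodes, input and output features
    X = np.random.rand(N, F_in)              # node features
    X_skip = np.random.rand(N, F_in)         # features for the skip connection
    A_hat = np.eye(N)                        # stand-in normalized adjacency
    W1 = np.random.rand(F_in, F_out)
    W2 = np.random.rand(F_in, F_out)

    out = A_hat @ (X @ W1) + X_skip @ W2     # convolution + skip (dropout omitted)
    out = np.maximum(out, 0)                 # e.g. ReLU as gcn_activation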
Example #2
    def call(self, inputs):
        x, a = inputs

        # MLP on the node features, then APPNP-style propagation: each step
        # mixes the propagated features back with the MLP output via alpha
        mlp_out = self.mlp(x)
        z = mlp_out
        for _ in range(self.propagations):
            z = (1 - self.alpha) * ops.filter_dot(a, z) + self.alpha * mlp_out
        output = self.activation(z)

        return output
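The loop in Example #2 iterates z <- (1 - alpha) * A_hat @ z + alpha * h, whose fixed point is the personalized-PageRank solution alpha * (I - (1 - alpha) * A_hat)^(-1) @ h. A toy NumPy check of that equivalence (all values below are assumptions for illustration):

    import numpy as np

    alpha, steps, N = 0.1, 200, 5
    A_hat = np.full((N, N), 1.0 / N)         # stand-in normalized adjacency
    h = np.random.rand(N, 2)                 # stand-in MLP output

    z = h
    for _ in range(steps):
        z = (1 - alpha) * A_hat @ z + alpha * h

    closed = alpha * np.linalg.solve(np.eye(N) - (1 - alpha) * A_hat, h)
    assert np.allclose(z, closed)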
Example #3
    def call(self, inputs):
        x, a = inputs

        # Convolution: linear projection followed by propagation with the
        # pre-normalized adjacency
        output = ops.dot(x, self.kernel)
        output = ops.filter_dot(a, output)

        if self.use_bias:
            output = K.bias_add(output, self.bias)
        output = self.activation(output)

        return output
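Example #3 assumes the adjacency `a` has already been normalized. A minimal NumPy sketch of the usual GCN preprocessing, A_hat = D^(-1/2) (A + I) D^(-1/2) (this preprocessing step is an assumption about typical usage, not part of the layer itself):

    import numpy as np

    A = np.array([[0., 1., 0.],
                  [1., 0., 1.],
                  [0., 1., 0.]])
    A_tilde = A + np.eye(A.shape[0])           # add self-loops
    d = A_tilde.sum(axis=1)
    D_inv_sqrt = np.diag(d ** -0.5)
    A_hat = D_inv_sqrt @ A_tilde @ D_inv_sqrt  # pass this as `a`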
Example #4
    def call(self, inputs):
        x, a = inputs

        # Chebyshev filtering: T_0 = X, T_1 = L X, and
        # T_k = 2 L T_{k-1} - T_{k-2}, each term with its own kernel
        T_0 = x
        output = ops.dot(T_0, self.kernel[0])

        if self.K > 1:
            T_1 = ops.filter_dot(a, x)
            output += ops.dot(T_1, self.kernel[1])

        for k in range(2, self.K):
            T_2 = 2 * ops.filter_dot(a, T_1) - T_0
            output += ops.dot(T_2, self.kernel[k])
            T_0, T_1 = T_1, T_2

        if self.use_bias:
            output = K.bias_add(output, self.bias)
        output = self.activation(output)

        return output
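The recurrence in Example #4 is the Chebyshev three-term recurrence T_k(x) = 2 x T_{k-1}(x) - T_{k-2}(x), applied to the rescaled graph operator. A scalar toy check against the cosine identity T_k(x) = cos(k * arccos(x)) for x in [-1, 1] (an illustration of the math, not of the layer's tensor code):

    import numpy as np

    x, K = 0.3, 5
    T = [1.0, x]                              # T_0(x), T_1(x)
    for k in range(2, K):
        T.append(2 * x * T[-1] - T[-2])

    for k in range(K):
        assert np.isclose(T[k], np.cos(k * np.arccos(x)))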
Example #5
    def call(self, inputs):
        features = inputs[0]
        fltr = inputs[1]

        # Convolution
        output = ops.dot(features, self.kernel)
        output = ops.filter_dot(fltr, output)

        if self.use_bias:
            output = K.bias_add(output, self.bias)
        if self.activation is not None:
            output = self.activation(output)
        return output
Example #6
    def call(self, inputs):
        features = inputs[0]
        laplacian = inputs[1]

        # Convolution
        T_0 = features
        output = ops.dot(T_0, self.kernel[0])

        if self.K > 1:
            T_1 = ops.filter_dot(laplacian, features)
            output += ops.dot(T_1, self.kernel[1])

        for k in range(2, self.K):
            T_2 = 2 * ops.filter_dot(laplacian, T_1) - T_0
            output += ops.dot(T_2, self.kernel[k])
            T_0, T_1 = T_1, T_2

        if self.use_bias:
            output = K.bias_add(output, self.bias)
        if self.activation is not None:
            output = self.activation(output)
        return output
Example #7
    def call(self, inputs):
        x, a = inputs

        # Convolution plus a parallel skip connection on the input features
        output = K.dot(x, self.kernel_1)
        output = ops.filter_dot(a, output)
        skip = K.dot(x, self.kernel_2)
        output += skip

        if self.use_bias:
            output = K.bias_add(output, self.bias)
        if self.activation is not None:
            output = self.activation(output)
        return output
Example #8
    def call(self, inputs):
        features = inputs[0]
        fltr = inputs[1]

        # Compute MLP hidden features
        mlp_out = self.mlp(features)

        # Propagation
        Z = mlp_out
        for k in range(self.propagations):
            Z = (1 - self.alpha) * ops.filter_dot(fltr, Z) + self.alpha * mlp_out

        if self.activation is not None:
            output = self.activation(Z)
        else:
            output = Z
        return output
Example #9
    def call(self, inputs):
        # Note that I is unused, because the layer cannot be used in graph
        # batch mode.
        if len(inputs) == 3:
            X, A, I = inputs
        else:
            X, A = inputs
            I = None

        N = K.shape(A)[-1]
        # Check if the layer is operating in batch mode (X and A have rank 3)
        mode = ops.autodetect_mode(A, X)
        self.reduce_loss = mode in (ops._modes['M'], ops._modes['B'])

        # Get normalized adjacency
        if K.is_sparse(A):
            I_ = tf.sparse.eye(N, dtype=A.dtype)
            A_ = tf.sparse.add(A, I_)
        else:
            I_ = tf.eye(N, dtype=A.dtype)
            A_ = A + I_
        fltr = ops.normalize_A(A_)

        # Node embeddings
        Z = K.dot(X, self.kernel_emb)
        Z = ops.filter_dot(fltr, Z)
        if self.activation is not None:
            Z = self.activation(Z)

        # Compute cluster assignment matrix
        S = K.dot(X, self.kernel_pool)
        S = ops.filter_dot(fltr, S)
        S = activations.softmax(S, axis=-1)  # softmax applied row-wise

        # Link prediction loss
        S_gram = ops.matmul_A_BT(S, S)
        if K.is_sparse(A):
            LP_loss = tf.sparse.add(A, -S_gram)  # A/tf.norm(A) - S_gram/tf.norm(S_gram)
        else:
            LP_loss = A - S_gram
        LP_loss = tf.norm(LP_loss, axis=(-1, -2))
        if self.reduce_loss:
            LP_loss = K.mean(LP_loss)
        self.add_loss(LP_loss)

        # Entropy loss
        entr = tf.negative(tf.reduce_sum(tf.multiply(S, K.log(S + K.epsilon())), axis=-1))
        entr_loss = K.mean(entr, axis=-1)
        if self.reduce_loss:
            entr_loss = K.mean(entr_loss)
        self.add_loss(entr_loss)

        # Pooling
        X_pooled = ops.matmul_AT_B(S, Z)
        A_pooled = ops.matmul_AT_B_A(S, A)

        if K.ndim(A_pooled) == 3:
            self.mixed_mode = True

        output = [X_pooled, A_pooled]

        if I is not None:
            I_mean = tf.math.segment_mean(I, I)
            I_pooled = ops.repeat(I_mean, tf.ones_like(I_mean) * self.k)
            output.append(I_pooled)

        if self.return_mask:
            output.append(S)

        return output
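The two auxiliary losses in Example #9 can be written compactly for a single dense graph. A NumPy sketch with toy S and A (both are assumptions for illustration; the layer computes the same quantities on tensors and registers them with add_loss):

    import numpy as np

    N, n_clusters = 6, 2
    A = (np.random.rand(N, N) > 0.5).astype(float)   # toy adjacency
    S = np.random.rand(N, n_clusters)
    S = S / S.sum(axis=-1, keepdims=True)            # row-wise softmax stand-in

    LP_loss = np.linalg.norm(A - S @ S.T)            # link prediction loss
    entr = -np.sum(S * np.log(S + 1e-7), axis=-1)
    entr_loss = entr.mean()                          # entropy loss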