Code example #1
import pytest
from tensorflow.keras import initializers, regularizers
from stellargraph.layer import GraphSAGE  # import path assumed for a standalone run


def test_graphsage_passing_regularisers():
    # An unrecognised initializer name should be rejected at construction time.
    with pytest.raises(ValueError):
        GraphSAGE(layer_sizes=[4],
                  n_samples=[2],
                  input_dim=2,
                  kernel_initializer="fred")

    # Both a valid identifier string and an initializer instance are accepted.
    GraphSAGE(layer_sizes=[4],
              n_samples=[2],
              input_dim=2,
              kernel_initializer="ones")

    GraphSAGE(
        layer_sizes=[4],
        n_samples=[2],
        input_dim=2,
        kernel_initializer=initializers.ones(),
    )

    # A Keras regularizer instance is accepted for kernel_regularizer.
    GraphSAGE(
        layer_sizes=[4],
        n_samples=[2],
        input_dim=2,
        kernel_regularizer=regularizers.l2(0.01),
    )

    # An unrecognised regularizer name should likewise raise ValueError.
    with pytest.raises(ValueError):
        GraphSAGE(layer_sizes=[4],
                  n_samples=[2],
                  input_dim=2,
                  kernel_regularizer="wilma")
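
The strings passed here ("ones", "fred", "wilma") are standard Keras identifier strings, which Keras resolves with initializers.get() / regularizers.get(). For comparison, a minimal sketch (an illustration, not part of the original test) of the same accept/reject behaviour on a plain Keras Dense layer:

import pytest
from tensorflow.keras import initializers
from tensorflow.keras.layers import Dense

# An unknown identifier string raises ValueError at layer construction time.
with pytest.raises(ValueError):
    Dense(4, kernel_initializer="fred")

# Valid forms: a known identifier string, or an initializer instance.
Dense(4, kernel_initializer="ones")
Dense(4, kernel_initializer=initializers.Ones())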
Code example #2
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.initializers import he_uniform, ones
from tensorflow.keras.activations import relu, softmax


def get_model(input_shape):
    # Baseline fully connected classifier: He-uniform kernel initialisation
    # and all-ones biases on the first layer, softmax over 3 classes.
    model = Sequential([
        Dense(units=64,
              input_shape=input_shape,
              kernel_initializer=he_uniform(),
              bias_initializer=ones(),
              activation=relu),
        Dense(units=128, activation=relu),
        Dense(units=128, activation=relu),
        Dense(units=128, activation=relu),
        Dense(units=128, activation=relu),
        Dense(units=64, activation=relu),
        Dense(units=64, activation=relu),
        Dense(units=64, activation=relu),
        Dense(units=64, activation=relu),
        Dense(units=3, activation=softmax)
    ])
    return model
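
A minimal usage sketch for the baseline model; the input shape, optimizer, and loss below are illustrative assumptions, not from the original:

model = get_model(input_shape=(4,))
model.compile(optimizer="adam",
              loss="sparse_categorical_crossentropy",
              metrics=["accuracy"])
model.summary()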
Code example #3
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, BatchNormalization
from tensorflow.keras.initializers import he_uniform, ones
from tensorflow.keras.activations import relu, softmax
from tensorflow.keras.regularizers import l2


def get_regularized_model(input_shape, dropout_rate, weight_decay):
    # Same architecture as get_model, with L2 weight decay on every Dense
    # kernel plus Dropout and BatchNormalization between some blocks.
    model = Sequential([
        Dense(units=64,
              input_shape=input_shape,
              kernel_initializer=he_uniform(),
              bias_initializer=ones(),
              activation=relu,
              kernel_regularizer=l2(weight_decay)),
        Dense(units=128, activation=relu, kernel_regularizer=l2(weight_decay)),
        Dense(units=128, activation=relu, kernel_regularizer=l2(weight_decay)),
        Dropout(rate=dropout_rate),
        Dense(units=128, activation=relu, kernel_regularizer=l2(weight_decay)),
        Dense(units=128, activation=relu, kernel_regularizer=l2(weight_decay)),
        BatchNormalization(),
        Dense(units=64, activation=relu, kernel_regularizer=l2(weight_decay)),
        Dense(units=64, activation=relu, kernel_regularizer=l2(weight_decay)),
        Dropout(rate=dropout_rate),
        Dense(units=64, activation=relu, kernel_regularizer=l2(weight_decay)),
        Dense(units=64, activation=relu, kernel_regularizer=l2(weight_decay)),
        Dense(units=3, activation=softmax)
    ])
    return model
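
The regularized variant keeps the same architecture but adds L2 weight decay on every Dense kernel, two Dropout layers, and one BatchNormalization layer. A usage sketch with illustrative hyperparameter values (assumptions, not from the original):

model = get_regularized_model(input_shape=(4,),
                              dropout_rate=0.3,
                              weight_decay=1e-3)
model.compile(optimizer="adam",
              loss="sparse_categorical_crossentropy",
              metrics=["accuracy"])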
Code example #4
    # Excerpt: build() method of a multi-head graph attention (GAT) layer.
    def build(self, input_shapes):
        """
        Builds the layer

        Args:
            input_shapes (list of tensor shapes): shapes of the layer's inputs (node features and adjacency matrix)

        """
        feat_shape = input_shapes[0]
        input_dim = int(feat_shape[-1])

        # Variables to support integrated gradients
        self.delta = self.add_weight(name="ig_delta",
                                     shape=(),
                                     trainable=False,
                                     initializer=initializers.ones())
        self.non_exist_edge = self.add_weight(
            name="ig_non_exist_edge",
            shape=(),
            trainable=False,
            initializer=initializers.zeros(),
        )

        # Initialize weights for each attention head
        for head in range(self.attn_heads):
            # Layer kernel
            kernel = self.add_weight(
                shape=(input_dim, self.units),
                initializer=self.kernel_initializer,
                regularizer=self.kernel_regularizer,
                constraint=self.kernel_constraint,
                name="kernel_{}".format(head),
            )
            self.kernels.append(kernel)

            # Layer bias
            if self.use_bias:
                bias = self.add_weight(
                    shape=(self.units, ),
                    initializer=self.bias_initializer,
                    regularizer=self.bias_regularizer,
                    constraint=self.bias_constraint,
                    name="bias_{}".format(head),
                )
                self.biases.append(bias)

            # Attention kernels
            attn_kernel_self = self.add_weight(
                shape=(self.units, 1),
                initializer=self.attn_kernel_initializer,
                regularizer=self.attn_kernel_regularizer,
                constraint=self.attn_kernel_constraint,
                name="attn_kernel_self_{}".format(head),
            )
            attn_kernel_neighs = self.add_weight(
                shape=(self.units, 1),
                initializer=self.attn_kernel_initializer,
                regularizer=self.attn_kernel_regularizer,
                constraint=self.attn_kernel_constraint,
                name="attn_kernel_neigh_{}".format(head),
            )
            self.attn_kernels.append([attn_kernel_self, attn_kernel_neighs])
        self.built = True
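
The build() method above follows the standard Keras custom-layer pattern: every variable is created with self.add_weight(), wiring the user-supplied initializer, regularizer, and constraint into each weight. A stripped-down sketch of the same pattern in a hypothetical layer (not StellarGraph code):

import tensorflow as tf
from tensorflow.keras import initializers, layers

class TinyAttentionHead(layers.Layer):
    """Hypothetical layer illustrating the add_weight pattern used above."""

    def __init__(self, units, **kwargs):
        super().__init__(**kwargs)
        self.units = units

    def build(self, input_shape):
        # Create the kernel with an explicit initializer, as in the excerpt.
        self.kernel = self.add_weight(
            shape=(int(input_shape[-1]), self.units),
            initializer=initializers.GlorotUniform(),
            name="kernel",
        )
        self.built = True

    def call(self, inputs):
        return tf.matmul(inputs, self.kernel)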
Code example #5
# Tail of a plotting helper (its definition is truncated in this excerpt):
# the loop above draws one subplot per layer weight matrix before the
# colorbar and overall title are added.
        plt.axis('off')

    plt.colorbar()
    plt.suptitle('Weight matrices variation')

    plt.show()


import matplotlib.pyplot as plt  # used by the plotting helper above
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.activations import relu, softmax
from tensorflow.keras.initializers import random_uniform, lecun_uniform, ones, zeros

# Three-layer model whose first layer is frozen (trainable=False), used to
# show that frozen weights keep their initial values during training.
model = Sequential([
    Dense(
        units=4,
        input_shape=(4, ),
        activation=relu,
        trainable=False,  # to freeze the layer
        kernel_initializer=random_uniform(),
        bias_initializer=ones()),
    Dense(units=2,
          activation=relu,
          kernel_initializer=lecun_uniform(),
          bias_initializer=zeros()),
    Dense(units=4, activation=softmax)
])

model.summary()

W0_layers = get_weights(model)
b0_layers = get_biases(model)

X_train = np.random.random((100, 4))
y_train = X_train  # identity targets: the network is trained to reproduce its input
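
The helpers get_weights and get_biases are called above but not defined in this excerpt; a plausible minimal sketch (an assumption, not the original implementation):

def get_weights(model):
    # Kernel matrix of every layer, in layer order.
    return [layer.get_weights()[0] for layer in model.layers]

def get_biases(model):
    # Bias vector of every layer, in layer order.
    return [layer.get_weights()[1] for layer in model.layers]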