Example 1
def GCN_check(name, adj, weights, layer_config):
    num_layer = len(layer_config)

    model = Network()
    for i in range(num_layer - 2):
        model.add(Aggregate('A{}'.format(i), adj))
        model.add(
            Linear('W{}'.format(i), layer_config[i], layer_config[i + 1],
                   'xavier').set_W(weights[i]))
        model.add(Tanh('Tanh{}'.format(i)))

    model.add(Aggregate('A{}'.format(num_layer - 2), adj))
    model.add(
        Linear('W{}'.format(num_layer - 2), layer_config[-2], layer_config[-1],
               'xavier').set_W(weights[-1]))

    loss = SoftmaxCrossEntropyLoss(name='loss')
    # loss = EuclideanLoss(name='loss')

    print("Model " + name)
    for layer in model.layer_list:
        print(":\t" + repr(layer))
    print(':\t' + repr(loss))

    print('Forward Computation: ', model.str_forward('X'))
    print('Backward Computation:', model.str_backward('Z-Y'))
    print()
    model.str_update()
    print()

    return model, loss
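For context, each Aggregate/Linear/Tanh triple above implements the usual GCN propagation step, H_next = tanh(A · H · W) (assuming Aggregate multiplies node features by the adjacency and Linear applies the weight matrix). A self-contained NumPy sketch of that single step, with made-up shapes:

import numpy as np

def gcn_layer(a_hat, h, w, activation=np.tanh):
    # a_hat: (n_nodes, n_nodes) adjacency, h: (n_nodes, d_in) features,
    # w: (d_in, d_out) weights -> returns (n_nodes, d_out)
    return activation(a_hat @ h @ w)

a_hat = np.eye(4)                    # toy graph: self-loops only
h = np.random.rand(4, 8)             # 4 nodes, 8 input features
w = np.random.rand(8, 3)             # project down to 3 features
print(gcn_layer(a_hat, h, w).shape)  # (4, 3)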
Example 2
    def __call__(self, q_input, a_input, *args, **kwargs):
        # convolve input feature maps with filters
        q_conv_out = conv2d(input=q_input,
                            filters=self.W,
                            filter_shape=self.filter_shape)
        a_conv_out = conv2d(input=a_input,
                            filters=self.W,
                            filter_shape=self.filter_shape)
        # add the bias term. Since the bias is a vector (1D array), we first
        # reshape it to a tensor of shape (1, n_filters, 1, 1). Each bias will
        # thus be broadcasted across mini-batches and feature map
        # width & height
        if self.non_linear == "tanh":
            q_conv_out_tanh = Tanh(q_conv_out +
                                   self.b.dimshuffle('x', 0, 'x', 'x'))
            a_conv_out_tanh = Tanh(a_conv_out +
                                   self.b.dimshuffle('x', 0, 'x', 'x'))
            q_output = pool.pool_2d(input=q_conv_out_tanh,
                                    ws=self.pool_size,
                                    ignore_border=True)  # max
            a_output = pool.pool_2d(input=a_conv_out_tanh,
                                    ws=self.pool_size,
                                    ignore_border=True)
        elif self.non_linear == "relu":
            q_conv_out_relu = ReLU(q_conv_out +
                                   self.b.dimshuffle('x', 0, 'x', 'x'))
            a_conv_out_relu = ReLU(a_conv_out +
                                   self.b.dimshuffle('x', 0, 'x', 'x'))
            q_output = pool.pool_2d(input=q_conv_out_relu,
                                    ws=self.pool_size,
                                    ignore_border=True)
            a_output = pool.pool_2d(input=a_conv_out_relu,
                                    ws=self.pool_size,
                                    ignore_border=True)
        else:
            q_output = pool.pool_2d(input=q_conv_out,
                                    ws=self.pool_size,
                                    ignore_border=True)
            a_output = pool.pool_2d(input=a_conv_out,
                                    ws=self.pool_size,
                                    ignore_border=True)

        return q_output, a_output
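The dimshuffle('x', 0, 'x', 'x') calls above are exactly the reshape described in the comment: the 1D bias becomes a (1, n_filters, 1, 1) tensor so it broadcasts over mini-batch, height and width. A NumPy equivalent of that broadcast, with hypothetical shapes:

import numpy as np

conv_out = np.zeros((2, 16, 10, 10))     # (batch, n_filters, height, width)
b = np.arange(16, dtype=float)           # one bias per filter
out = conv_out + b.reshape(1, -1, 1, 1)  # broadcast over batch, height, width
print(out.shape)                         # (2, 16, 10, 10)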
Example 3
    def forward(self, x, hidden_state_prev, params):
        assert len(x.shape) == 2
        affine_hidden, affine_input, affine_output, tanh = \
            Affine(), Affine(), Affine(), Tanh()
        hidden_state_raw = affine_hidden(hidden_state_prev, params['h2h'],
                                         params['h2h_b'])
        hidden_state_raw += affine_input(x, params['i2h'], params['i2h_b'])
        hidden_state = tanh(hidden_state_raw)
        logits = affine_output(hidden_state, params['h2o'], params['h2o_b'])
        self.cache = (affine_hidden, affine_input, affine_output, tanh, params)
        return hidden_state, logits
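The tanh object is cached because its output is all the backward pass needs: d tanh(x)/dx = 1 - tanh(x)^2. A minimal stand-alone sketch of such a layer (an assumption about this project's Tanh; the derivative itself is standard):

import numpy as np

class Tanh:
    def __call__(self, x):
        # forward pass; keep the output for the backward pass
        self.out = np.tanh(x)
        return self.out

    def backward(self, grad_out):
        # chain rule: dL/dx = dL/dy * (1 - tanh(x)^2)
        return grad_out * (1.0 - self.out ** 2)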
Example 4
    def test_TwoDifferentModelsShouldHaveDifferentGradients(self):
        x = np.random.rand(5)

        real_model = Seq([
            Linear(5, 3, initialize='ones'),
            Tanh(),
            Linear(3, 5, initialize='ones'),
            Tanh()
        ])
        y = real_model.forward(x)
        real_grad = real_model.backward(np.ones(5))

        num_model = Seq([
            Linear(5, 3, initialize='ones'),
            Relu(),
            Linear(3, 5, initialize='ones'),
            Relu()
        ])
        num_grad = numerical_gradient.calc(num_model.forward, x)
        num_grad = np.sum(num_grad, axis=1)
        self.assertFalse(numerical_gradient.are_similar(real_grad, num_grad))
Example 5
    def test_TwoLinearLayersTanh(self):
        x = np.random.rand(5)

        real_model = Seq([
            Linear(5, 3, initialize='ones'),
            Tanh(),
            Linear(3, 5, initialize='ones'),
            Tanh()
        ])
        y = real_model.forward(x)
        real_grad = real_model.backward(np.ones(5))

        num_model = Seq([
            Linear(5, 3, initialize='ones'),
            Tanh(),
            Linear(3, 5, initialize='ones'),
            Tanh()
        ])
        num_grad = numerical_gradient.calc(num_model.forward, x)

        num_grad = np.sum(num_grad, axis=1)
        self.assertTrue(numerical_gradient.are_similar(real_grad, num_grad))
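numerical_gradient.calc is presumably a finite-difference estimate of the Jacobian; summing it over axis 1 then matches real_model.backward(np.ones(5)), i.e. backpropagating a vector of ones. A self-contained central-difference sketch of the idea (names and eps are illustrative, not the project's actual helper):

import numpy as np

def numerical_grad(f, x, eps=1e-6):
    # grad[i, j] ~= d f(x)[j] / d x[i], estimated by central differences
    out_size = np.atleast_1d(f(x)).size
    grad = np.zeros((x.size, out_size))
    for i in range(x.size):
        x_plus, x_minus = x.copy(), x.copy()
        x_plus[i] += eps
        x_minus[i] -= eps
        grad[i] = (f(x_plus) - f(x_minus)) / (2 * eps)
    return grad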
Example 6
    def test_CheapTanh(self):
        x = np.random.rand(10)

        cheap_tanh = CheapTanh()

        def f1():
            cheap_tanh.forward(x)

        tanh_layer = Tanh()

        def f2():
            tanh_layer.forward(x)

        t1 = timeit.timeit(f1, number=10000)
        t2 = timeit.timeit(f2, number=10000)
        self.assertGreater(t2, t1)
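The test only checks that CheapTanh.forward beats Tanh.forward over 10,000 calls. One common way to make tanh cheap is the piecewise-linear "hard tanh", which skips the exponentials entirely; a sketch of that idea (an assumption, not necessarily what this repo's CheapTanh does):

import numpy as np

class HardTanh:
    def forward(self, x):
        # linear in [-1, 1], clamped outside; no exp() evaluations
        return np.clip(x, -1.0, 1.0)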
Example 7
from train import train, evaluate
from keras.utils import to_categorical
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
import numpy as np

iris = load_iris()
X = iris.data
y = iris.target

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)

y_train_one_hot = to_categorical(y_train).astype(int)
y_test_one_hot = to_categorical(y_test).astype(int)

net = NetWork([
    Linear(input_dim=4, output_dim=5),
    Tanh(),
    Linear(input_dim=5, output_dim=3),
    Dropout(p=0.3),
    Softmax(input_dim=3)
])

svc = SVC()
lr = LogisticRegression()

# Train the SVM classifier
svc.fit(X_train, y_train)

# Train the logistic regression classifier
lr.fit(X_train, y_train)

# Train the neural network
Example 8
    FullyConnected(32,
                   config.NUM_CLASSES,
                   xavier_uniform_init,
                   use_weight_norm=True,
                   use_bias=False)
]

tanh_model = [
    FullyConnected(config.INPUT_DIM,
                   256,
                   xavier_uniform_init,
                   use_weight_norm=True,
                   use_bias=False),
    Dropout(0.4),
    BatchNorm(input_dim=256),
    Tanh(),
    FullyConnected(256,
                   64,
                   xavier_uniform_init,
                   use_weight_norm=True,
                   use_bias=False),
    Dropout(0.4),
    BatchNorm(input_dim=64),
    Tanh(),
    FullyConnected(64,
                   32,
                   xavier_uniform_init,
                   use_weight_norm=True,
                   use_bias=False),
    Dropout(0.1),
    BatchNorm(input_dim=32),