Example #1
    def test_calc_delta(self):
        l1 = SoftMaxLayer()
        n = Sequential([l1])
        x = np.array([15.0, 10.0, 2.0])
        y = n.forward(x)
        self.assertEqual(y.shape, (3, ))
        nll = NegativeLogLikelihoodLoss()
        t = np.array([0.0, 0.0, 1.0])
        self.assertEqual(y.shape, t.shape)
        J1 = nll.loss(y, t)
        self.assertEqual(J1.shape, (3, ))
        assert_almost_equal(J1, [0.0, 0.0, 13.0067176], decimal=5)

        cel = CrossEntropyLoss()
        t = np.array([0.0, 0.0, 1.0])
        J2 = cel.loss(x, t)
        self.assertEqual(J2.shape, (3, ))
        assert_almost_equal(J2, [0.0, 0.0, 13.0067176], decimal=5)

        delta_in = -nll.dJdy_gradient(y, t)
        assert_almost_equal(delta_in, [0.0, 0.0, 445395.349996])
        delta_out1 = n.backward(delta_in)
        assert_almost_equal(delta_out1, [-0.9933049, -0.0066928, 0.9999978],
                            decimal=5)
        #

        delta_out2 = -cel.dJdy_gradient(x, t)
        assert_almost_equal(delta_out2, [-0.9933049, -0.0066928, 0.9999978],
                            decimal=5)
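
A plain-numpy check (independent of the library above) of the numbers asserted here: the negative log-likelihood of the softmax output coincides with cross-entropy applied directly to the logits, and the gradient with respect to the logits is y - t (the test propagates its negation).

import numpy as np

x = np.array([15.0, 10.0, 2.0])
t = np.array([0.0, 0.0, 1.0])

y = np.exp(x) / np.sum(np.exp(x))              # softmax of the logits
print(-t * np.log(y))                          # ~[0, 0, 13.0067176], the NLL values above
print(-t * (x - np.log(np.sum(np.exp(x)))))    # cross-entropy on the raw logits: same values
print(y - t)                                   # gradient w.r.t. the logits; the test propagates its negative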
Example #2
    def test_variable_dict(self):
        xv = np.array([0.5, 0.1, 0.5])
        yv = np.array([0.2, 0.4, 0.5])
        valin = ['x', 'y']
        x = Input(valin, 'x')
        y = Input(valin, 'y')
        xyv = [xv, yv]
        Wxv = np.array([[2.1, 3.1, 2.2], [2.2, 3.2, 4.2], [2.2, 5.2, 4.2]])
        Wyv = np.array([[2.1, 2.1, 2.2], [1.6, 1.2, 6.2], [2.1, 3.1, 2.2]])
        Wx = MWeight(3, 3, weights=Wxv)
        Wy = MWeight(3, 3, weights=Wyv)

        net = ComputationalGraphLayer(Sigmoid(Wx.dot(x)) + Tanh(Wy.dot(y)))
        netDict = Sequential(VariableDictLayer(valin), net)
        out = net.forward(xyv)
        self.assertEqual(out.shape, (3, ))
        assert_almost_equal(out, sigmoid(Wxv.dot(xv)) + np.tanh(Wyv.dot(yv)))
        dJdy = net.backward(np.array([1.0, 1.0, 1.0]))

        self.assertEqual(len(dJdy), 2)
        for ind, grad in enumerate(dJdy):
            self.assertEqual(grad.shape, xyv[ind].shape)
            assert_almost_equal(grad,
                                np.sum(net.numeric_gradient(xyv)[ind], 0))

        auxdict = {'x': 0, 'y': 1}
        out = netDict.forward({'x': xv, 'y': yv})
        self.assertEqual(out.shape, (3, ))
        assert_almost_equal(out, sigmoid(Wxv.dot(xv)) + np.tanh(Wyv.dot(yv)))
        dJdy = netDict.backward(np.array([1.0, 1.0, 1.0]))
        self.assertEqual(len(dJdy), 2)
        for key in dJdy:
            self.assertEqual(dJdy[key].shape, xyv[auxdict[key]].shape)
            assert_almost_equal(
                dJdy[key], np.sum(net.numeric_gradient(xyv)[auxdict[key]], 0))
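
Both forward calls compute the same thing; VariableDictLayer only changes how the two inputs are addressed (by key instead of by position). The reference value the assertions use can be reproduced with plain numpy:

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

xv = np.array([0.5, 0.1, 0.5])
yv = np.array([0.2, 0.4, 0.5])
Wxv = np.array([[2.1, 3.1, 2.2], [2.2, 3.2, 4.2], [2.2, 5.2, 4.2]])
Wyv = np.array([[2.1, 2.1, 2.2], [1.6, 1.2, 6.2], [2.1, 3.1, 2.2]])

# value both net.forward([xv, yv]) and netDict.forward({'x': xv, 'y': yv}) are checked against
print(sigmoid(Wxv.dot(xv)) + np.tanh(Wyv.dot(yv)))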
Example #3
    def build(self, input_shape):
        self.expand_conv = Sequential([
            tf.layers.Conv2D(input_shape[3] * self._expansion_factor,
                             1,
                             use_bias=False,
                             kernel_initializer=self._kernel_initializer,
                             kernel_regularizer=self._kernel_regularizer),
            tf.layers.BatchNormalization(), self._activation,
            tf.layers.Dropout(self._dropout_rate)
        ])

        self.depthwise_conv = Sequential([
            DepthwiseConv2D(3,
                            strides=self._strides,
                            padding='same',
                            use_bias=False,
                            kernel_initializer=self._kernel_initializer,
                            kernel_regularizer=self._kernel_regularizer),
            tf.layers.BatchNormalization(), self._activation,
            tf.layers.Dropout(self._dropout_rate)
        ])

        self.linear_conv = Sequential([
            tf.layers.Conv2D(self._filters,
                             1,
                             use_bias=False,
                             kernel_initializer=self._kernel_initializer,
                             kernel_regularizer=self._kernel_regularizer),
            tf.layers.BatchNormalization(),
            tf.layers.Dropout(self._dropout_rate)
        ])

        super().build(input_shape)
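
The three Sequential stages form the expand / depthwise / linear-projection pattern of an inverted-residual (MobileNetV2-style) block, built here on the deprecated TF 1.x tf.layers API. A rough tf.keras sketch of the same structure, with ReLU standing in for the unspecified self._activation and the initializers/regularizers omitted (an illustration, not the original code):

import tensorflow as tf

def inverted_residual(x, in_channels, filters, expansion_factor, strides, dropout_rate=0.0):
    # 1x1 expansion conv -> BN -> activation -> dropout
    expand = tf.keras.Sequential([
        tf.keras.layers.Conv2D(in_channels * expansion_factor, 1, use_bias=False),
        tf.keras.layers.BatchNormalization(),
        tf.keras.layers.ReLU(),
        tf.keras.layers.Dropout(dropout_rate),
    ])
    # 3x3 depthwise conv -> BN -> activation -> dropout
    depthwise = tf.keras.Sequential([
        tf.keras.layers.DepthwiseConv2D(3, strides=strides, padding='same', use_bias=False),
        tf.keras.layers.BatchNormalization(),
        tf.keras.layers.ReLU(),
        tf.keras.layers.Dropout(dropout_rate),
    ])
    # 1x1 linear projection (no activation) -> BN -> dropout
    linear = tf.keras.Sequential([
        tf.keras.layers.Conv2D(filters, 1, use_bias=False),
        tf.keras.layers.BatchNormalization(),
        tf.keras.layers.Dropout(dropout_rate),
    ])
    return linear(depthwise(expand(x)))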
Example #4
    def test_SigmoidLayer(self):
        l1 = SigmoidLayer()
        n = Sequential([l1])
        y = n.forward(np.array([0]))
        self.assertEqual(y.shape, (1, ))
        assert_array_equal(y, np.array([0.5]))

        d = n.backward(np.array([1]))
        self.assertEqual(d.shape, (1, ))
        assert_array_equal(d, np.array([0.25]))
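
The asserted values are just the sigmoid evaluated at 0 and its derivative y * (1 - y):

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

y = sigmoid(0.0)          # forward: sigmoid(0) = 0.5
d = 1.0 * y * (1.0 - y)   # backward with incoming delta 1: sigmoid'(0) = 0.25
print(y, d)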
Example #5
 def test_calc_loss(self):
     l1 = SoftMaxLayer()
     n = Sequential([l1])
     x = np.array([15.0, 10.0, 2.0])
     y = n.forward(x)
     self.assertEqual(y.shape, (3, ))
     nll = NegativeLogLikelihoodLoss()
     t = np.array([0.0, 0.0, 1.0])
     self.assertEqual(y.shape, t.shape)
     J = nll.loss(y, t)
     self.assertEqual(J.shape, (3, ))
     assert_almost_equal(J, [0.0, 0.0, 13.0067176], decimal=5)
Example #6
    def __init__(self, input_size, output_size):
        self.input_size = input_size
        self.output_size = output_size
        self.n1 = SumGroup(
            MulGroup(
                Sequential(LinearLayer(input_size + output_size, output_size),
                           SigmoidLayer), GenericLayer),
            MulGroup(
                Sequential(LinearLayer(input_size + output_size, output_size),
                           SigmoidLayer),
                Sequential(LinearLayer(input_size + output_size, output_size),
                           TanhLayer)))
        self.n2 = MulGroup(
            Sequential(GenericLayer, TanhLayer),
            Sequential(LinearLayer(input_size + output_size, output_size),
                       SigmoidLayer))

        self.ct = np.zeros(output_size)
        self.ht = np.zeros(output_size)
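
Reading the grouping: n1 sums two element-wise products (a sigmoid gate times the previous cell state, and a sigmoid gate times a tanh candidate) and n2 multiplies a sigmoid gate by the tanh of the cell state, so the wiring appears to be a standard LSTM cell. A plain-numpy sketch of the update this structure computes (the weight names Wf/Wi/Wc/Wo are illustrative; the bias handling in the real LinearLayer differs):

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def lstm_step(xh, c_prev, Wf, Wi, Wc, Wo):
    # xh: concatenated [input, previous hidden]
    f = sigmoid(Wf.dot(xh))          # forget gate    (n1, first MulGroup, left factor)
    i = sigmoid(Wi.dot(xh))          # input gate     (n1, second MulGroup, left factor)
    c_tilde = np.tanh(Wc.dot(xh))    # candidate cell (n1, second MulGroup, right factor)
    o = sigmoid(Wo.dot(xh))          # output gate    (n2, right factor)
    c = f * c_prev + i * c_tilde     # n1: SumGroup of the two MulGroups
    h = o * np.tanh(c)               # n2: MulGroup of tanh(c) and the output gate
    return h, c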
Example #7
    def test_neuron_one_input(self):
        xv = np.array([0.5, 0.1, 0.5])
        x = Input(['x'], 'x')
        Wv = np.array([[0.1, 0.1, 0.2], [0.5, 0.2, 0.2]])
        W = MWeight(3, 2, weights=Wv)
        bv = np.array([0.3, 0.1])
        b = VWeight(2, weights=bv)
        net = ComputationalGraphLayer(Sigmoid(W.dot(x) + b))
        out = net.forward(xv)
        self.assertEqual(out.shape, (2, ))
        check_out = 1.0 / (1.0 + np.exp(-Wv.dot(xv) - bv))
        assert_almost_equal(out, check_out)
        dJdy = net.backward(np.array([1.0, 1.0]))
        self.assertEqual(dJdy.shape, (3, ))
        assert_almost_equal(dJdy, np.sum(net.numeric_gradient(xv), 0))
        assert_almost_equal(dJdy, (check_out * (1 - check_out)).dot(Wv))

        net2 = Sequential(
            LinearLayer(3, 2, weights=np.hstack([Wv, bv.reshape(2, 1)])),
            SigmoidLayer)
        out2 = net2.forward(xv)
        assert_almost_equal(out, out2)
        dJdy2 = net2.backward(np.array([1.0, 1.0]))
        assert_almost_equal(dJdy, dJdy2)
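
The test compares the analytic gradient of sum(sigmoid(Wx + b)) with a numeric one; the same check can be reproduced with plain numpy and central differences:

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

xv = np.array([0.5, 0.1, 0.5])
Wv = np.array([[0.1, 0.1, 0.2], [0.5, 0.2, 0.2]])
bv = np.array([0.3, 0.1])

f = lambda x: sigmoid(Wv.dot(x) + bv)
out = f(xv)

# analytic gradient of sum(f(x)) w.r.t. x, the expression asserted above
analytic = (out * (1.0 - out)).dot(Wv)

# central-difference check of the same quantity
eps = 1e-6
numeric = np.array([
    (f(xv + eps * np.eye(3)[k]).sum() - f(xv - eps * np.eye(3)[k]).sum()) / (2 * eps)
    for k in range(3)
])
print(np.allclose(analytic, numeric))   # True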
Example #8
    def test_LinearLayer(self):
        l1 = LinearLayer(5, 6, 'ones')
        n = Sequential([l1])
        y = n.forward(np.array([2.0, 1.0, 2.0, 3.0, 4.0]))
        self.assertEqual(y.shape, (6, ))
        assert_array_equal(y, np.array([
            13.0,
            13.0,
            13.0,
            13.0,
            13.0,
            13.0,
        ]))

        l2 = LinearLayer(6, 2, 'ones')
        n.add(l2)
        y = n.forward(np.array([2.0, 1.0, 2.0, 3.0, 4.0]))
        self.assertEqual(y.shape, (2, ))
        assert_array_equal(y, np.array([79.0, 79.0]))

        d = n.backward(np.array([2.0, 3.0]))
        self.assertEqual(d.shape, (5, ))
        assert_array_equal(d, np.array([30., 30., 30., 30., 30.]))
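
The asserted 13 / 79 / 30 follow from the 'ones' initialization; assuming it sets every weight and the bias to one (which the asserted numbers imply), a plain-numpy reconstruction:

import numpy as np

x = np.array([2.0, 1.0, 2.0, 3.0, 4.0])

# LinearLayer(5, 6, 'ones'): all weights and the bias equal to 1
W1, b1 = np.ones((6, 5)), np.ones(6)
h = W1.dot(x) + b1              # every entry is sum(x) + 1 = 13

# LinearLayer(6, 2, 'ones')
W2, b2 = np.ones((2, 6)), np.ones(2)
y = W2.dot(h) + b2              # every entry is 6 * 13 + 1 = 79

# backward pass: the delta is propagated through the transposed weights
d = np.array([2.0, 3.0])
dx = W1.T.dot(W2.T.dot(d))      # every entry is 6 * (2 + 3) = 30
print(h, y, dx)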
Example #9
    def test_complex(self):
        xv = np.array([0.5, 0.1])
        hv = np.array([0.2, 0.4, 0.5])
        cv = np.array([1.3, 2.4, 0.2])
        vars2 = ['xh', 'c']
        xsize = 2
        hcsize = 3
        xh = Input(vars2, 'xh')
        c = Input(vars2, 'c')
        Wf = MWeight(xsize + hcsize, hcsize)
        bf = VWeight(hcsize)
        net = Sequential(VariableDictLayer(vars2),
                         ComputationalGraphLayer(Sigmoid(Wf.dot(xh) + bf) * c))
        xhc = {'xh': np.hstack([xv, hv]), 'c': cv}
        out = net.forward(xhc)
        self.assertEqual(out.shape, (3, ))
        assert_almost_equal(
            out,
            sigmoid(Wf.net.W.get().dot(np.hstack([xv, hv])) + bf.net.W.get()) *
            cv)

        vars3 = ['x', 'h', 'c']
        x = Input(vars3, 'x')  #xsize
        h = Input(vars3, 'h')  #hcsize
        c = Input(vars3, 'c')  #hcsize
        Wf = MWeight(xsize + hcsize, hcsize)
        bf = VWeight(hcsize)
        net = Sequential(
            VariableDictLayer(vars3),
            ComputationalGraphLayer(Sigmoid(Wf.dot(Concat([x, h])) + bf) * c))
        print(net)
        xhc = {'x': xv, 'h': hv, 'c': cv}
        out = net.forward(xhc)
        self.assertEqual(out.shape, (3, ))
        assert_almost_equal(
            out,
            sigmoid(Wf.net.W.get().dot(np.hstack([xv, hv])) + bf.net.W.get()) *
            cv)
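
Both formulations (the pre-concatenated 'xh' input and Concat([x, h]) inside the graph) reduce to the same reference expression the test checks against: a sigmoid gate over the concatenated [x, h], applied element-wise to c. With illustrative random weights (in the test they are read back via Wf.net.W.get() / bf.net.W.get()):

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

xv = np.array([0.5, 0.1])
hv = np.array([0.2, 0.4, 0.5])
cv = np.array([1.3, 2.4, 0.2])

Wf = np.random.randn(3, 5)   # illustrative weights, shape (hcsize, xsize + hcsize)
bf = np.random.randn(3)

out = sigmoid(Wf.dot(np.hstack([xv, hv])) + bf) * cv
print(out.shape)             # (3,)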
Example #10
 #     np.array([1.0,1.0,1.0,1.0,1.0])
 # )
 # norm = NormalizationLayer(
 #     np.array([0.0,0.0,0.0,-3.0]),
 #     np.array([5.0,5.0,5.0,3.0]),
 #     np.array([-1.0,-1.0,-1.0,-1.0]),
 #     np.array([1.0,1.0,1.0,1.0])
 # )
 norm = NormalizationLayer(np.array([0.0, 0.0]), np.array([5.0, 5.0]),
                           np.array([0.0, 0.0]), np.array([1.0, 1.0]))
 W1 = utils.SharedWeights('gaussian', 2 + 1, 2)
 W2 = utils.SharedWeights('gaussian', 2 + 1, 3)
 Q = Sequential(
     norm,
     LinearLayer(2, 2, weights=W1),
     TanhLayer,
     LinearLayer(2, 3, weights=W2),
     # TanhLayer
 )
 W3 = utils.SharedWeights('gaussian', 2 + 1, 2)
 W4 = utils.SharedWeights('gaussian', 2 + 1, 3)
 # W3 = utils.SharedWeights(np.array([[10.0,-10.0,0.0],[-10.0,10.0,0.0]]),2+1,2)
 #W2 = utils.SharedWeights('gaussian',2+1,2)
 Q_hat = Sequential(
     norm,
     LinearLayer(2, 2, weights=W3),
     ReluLayer,
     LinearLayer(2, 3, weights=W4),
     # TanhLayer
 )
 #Q, Q_hat, replay_memory_size, minibatch_size = 100, learning_rate = 0.1, gamma = 0.95, policy = 'esp-greedy', epsilon = 0.3
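
Q and Q_hat look like the online and target networks of a DQN-style agent; the commented constructor signature above mentions a replay memory, minibatch size, gamma and an epsilon-greedy policy. A hypothetical sketch (names and fields illustrative, not the library's API) of the TD target such an agent would compute from the frozen Q_hat:

import numpy as np

def td_target(Q_hat, reward, next_state, terminal, gamma=0.95):
    # target = r for terminal transitions, r + gamma * max_a Q_hat(s', a) otherwise
    if terminal:
        return reward
    return reward + gamma * np.max(Q_hat.forward(next_state))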
Example #11
    img1 = ax.imshow(image1, cmap=plt.get_cmap('Greys'))
    ax1 = fig.add_subplot(1, 2, 2)
    img2 = ax1.imshow(image2, cmap=plt.get_cmap('Greys'))
    plt.show()


train = load_mnist_dataset("training", "mnist")

mean_val = [np.zeros(784) for i in range(10)]
tot_val = np.zeros(10)
for x, t in train:
    mean_val[np.argmax(t)] += x
    tot_val[np.argmax(t)] += 1

normalization_net = Sequential(
    NormalizationLayer(0, 255, -1, 1),
    SignLayer,
)

for i in range(10):
    mean_val[i] = mean_val[i] / tot_val[i]
    num = mean_val[i].reshape(28, 28)
    plt.imshow(normalization_net.forward(num), cmap=plt.get_cmap('Greys'))
    # plt.imshow(num))
    plt.show()

hop_net = Hopfield(784)

stored_numbers = [0, 1]  # numbers stored in the network

for i in stored_numbers:
    hop_net.store(normalization_net.forward(mean_val[i]))
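
The patterns fed to hop_net.store are sign-binarized mean digits (values in {-1, +1} after NormalizationLayer and SignLayer). For reference, a minimal sketch of the standard Hebbian storage and recall rule a Hopfield network of this kind typically implements; the internals of the Hopfield class above are not shown, so this is illustrative only:

import numpy as np

class TinyHopfield:
    def __init__(self, n):
        self.W = np.zeros((n, n))

    def store(self, pattern):            # pattern entries in {-1, +1}
        self.W += np.outer(pattern, pattern)
        np.fill_diagonal(self.W, 0.0)    # no self-connections

    def recall(self, x, steps=10):
        for _ in range(steps):
            x = np.sign(self.W.dot(x))
        return x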
Example #12
 def addSequential(self, net):
     if isinstance(self.get(), Sequential):
         self.net = self.get().add(net)
     else:
         self.net = Sequential(self.get(), net)
Example #13
    # p.print_model(2,agent.net,np.array([[0,0],[5.0,5.0]]),[4,5])
    # plt.show()
else:
    # norm = NormalizationLayer(
    #     np.array([0.0,0.0,-10.0,-10.0]),
    #     np.array([5.0,5.0,10.0,10.0]),
    #     np.array([-1.0,-1.0,-1.0,-1.0]),
    #     np.array([1.0,1.0,1.0,1.0])
    # )
    norm = NormalizationLayer(np.array([0.0, 0.0]), np.array([5.0, 5.0]),
                              np.array([-1.0, -1.0]), np.array([1.0, 1.0]))

    n = Sequential(
        norm,
        # LinearLayer(2,5,weights='gaussian'),
        # TanhLayer,
        #AddGaussian(1),
        LinearLayer(2, 4, weights='gaussian'),
        RandomGaussianLayer(1),
        SoftMaxLayer)
    agent = GenericAgent(n, 4, 40, 5.0)
    agent.set_training_options(
        Trainer(),
        NegativeLogLikelihoodLoss(),
        GradientDescentMomentum(
            learning_rate=0.1,
            momentum=0.7)  #GradientDescent(learning_rate=0.2)
    )

start = np.array([3.5, 3.5])
obstacles = [
    # np.array([2.5,2.5,1.0])
Example #14
 def dot(self, operation):
     if isinstance(self.get(), Sequential):
         return Op(self.get().insert(0, operation.get()))
     else:
         return Op(Sequential(operation.get(), self.get()))
Example #15
        [
            {
                "size": 32,
                "output_layer": TanhLayer,
                "weights": W
            },
            {
                "size": 784,
                "output_layer": TanhLayer
            }  #, "weights": W.T()}
        ])
    ae.choose_network([0, 1])
    #ae.choose_network()
    model = Sequential([
        NormalizationLayer(0, 255, -0.1, 0.1),
        ae,
        NormalizationLayer(-1, 1, 0, 255),
    ])

plt.figure(12)
plt.figure(13)

train = [(t / 255.0, t / 255.0) for (t, v) in train[:100]]
# train = [(t,t) for (t,v) in train[:100]]

display = ShowTraining(epochs_num=epochs)

trainer = Trainer(show_training=True, show_function=display.show)

J_list, dJdy_list = trainer.learn(
    model=ae,
Example #16
from layers import Layers
from network import Sequential
from models import VNNetMulticlassClassifier

# 0- Load dataset
dataset = [[2.7810836, 2.550537003, 0], [1.465489372, 2.362125076, 0],
           [3.396561688, 4.400293529, 0], [1.38807019, 1.850220317, 0],
           [3.06407232, 3.005305973, 0], [7.627531214, 2.759262235, 1],
           [5.332441248, 2.088626775, 1], [6.922596716, 1.77106367, 1],
           [8.675418651, -0.242068655, 1], [7.673756466, 3.508563011, 1]]

X = [x[:-1] for x in dataset]
Y = [x[-1] for x in dataset]

# 1- initialise neural network
neural_network = Sequential()

# 2- create network layer
l = Layers()
layer_1 = l.dense_layer(output_dim=2,
                        input_dim=2,
                        init='random',
                        activation='sigmoid')
layer_2 = l.dense_layer(output_dim=2,
                        input_dim=2,
                        init='random',
                        activation='sigmoid')
print(layer_1)

# 3- add layer into neural network
neural_network.add_layer(layer_1)
Example #17
    def build(self, input_shape):
        self.input_conv = Sequential([
            tf.layers.Conv2D(32,
                             3,
                             strides=2,
                             padding='same',
                             use_bias=False,
                             kernel_initializer=self._kernel_initializer,
                             kernel_regularizer=self._kernel_regularizer),
            tf.layers.BatchNormalization(), self._activation,
            tf.layers.Dropout(self._dropout_rate)
        ])

        self.bottleneck_1_1 = Bottleneck(
            16,
            expansion_factor=1,
            strides=1,
            activation=self._activation,
            dropout_rate=self._dropout_rate,
            kernel_initializer=self._kernel_initializer,
            kernel_regularizer=self._kernel_regularizer)

        self.bottleneck_2_1 = Bottleneck(
            24,
            expansion_factor=6,
            strides=2,
            activation=self._activation,
            dropout_rate=self._dropout_rate,
            kernel_initializer=self._kernel_initializer,
            kernel_regularizer=self._kernel_regularizer)
        self.bottleneck_2_2 = Bottleneck(
            24,
            expansion_factor=6,
            strides=1,
            activation=self._activation,
            dropout_rate=self._dropout_rate,
            kernel_initializer=self._kernel_initializer,
            kernel_regularizer=self._kernel_regularizer)

        self.bottleneck_3_1 = Bottleneck(
            32,
            expansion_factor=6,
            strides=2,
            activation=self._activation,
            dropout_rate=self._dropout_rate,
            kernel_initializer=self._kernel_initializer,
            kernel_regularizer=self._kernel_regularizer)
        self.bottleneck_3_2 = Bottleneck(
            32,
            expansion_factor=6,
            strides=1,
            activation=self._activation,
            dropout_rate=self._dropout_rate,
            kernel_initializer=self._kernel_initializer,
            kernel_regularizer=self._kernel_regularizer)
        self.bottleneck_3_3 = Bottleneck(
            32,
            expansion_factor=6,
            strides=1,
            activation=self._activation,
            dropout_rate=self._dropout_rate,
            kernel_initializer=self._kernel_initializer,
            kernel_regularizer=self._kernel_regularizer)

        self.bottleneck_4_1 = Bottleneck(
            64,
            expansion_factor=6,
            strides=2,
            activation=self._activation,
            dropout_rate=self._dropout_rate,
            kernel_initializer=self._kernel_initializer,
            kernel_regularizer=self._kernel_regularizer)
        self.bottleneck_4_2 = Bottleneck(
            64,
            expansion_factor=6,
            strides=1,
            activation=self._activation,
            dropout_rate=self._dropout_rate,
            kernel_initializer=self._kernel_initializer,
            kernel_regularizer=self._kernel_regularizer)
        self.bottleneck_4_3 = Bottleneck(
            64,
            expansion_factor=6,
            strides=1,
            activation=self._activation,
            dropout_rate=self._dropout_rate,
            kernel_initializer=self._kernel_initializer,
            kernel_regularizer=self._kernel_regularizer)
        self.bottleneck_4_4 = Bottleneck(
            64,
            expansion_factor=6,
            strides=1,
            activation=self._activation,
            dropout_rate=self._dropout_rate,
            kernel_initializer=self._kernel_initializer,
            kernel_regularizer=self._kernel_regularizer)

        self.bottleneck_5_1 = Bottleneck(
            96,
            expansion_factor=6,
            strides=1,
            activation=self._activation,
            dropout_rate=self._dropout_rate,
            kernel_initializer=self._kernel_initializer,
            kernel_regularizer=self._kernel_regularizer)
        self.bottleneck_5_2 = Bottleneck(
            96,
            expansion_factor=6,
            strides=1,
            activation=self._activation,
            dropout_rate=self._dropout_rate,
            kernel_initializer=self._kernel_initializer,
            kernel_regularizer=self._kernel_regularizer)
        self.bottleneck_5_3 = Bottleneck(
            96,
            expansion_factor=6,
            strides=1,
            activation=self._activation,
            dropout_rate=self._dropout_rate,
            kernel_initializer=self._kernel_initializer,
            kernel_regularizer=self._kernel_regularizer)

        self.bottleneck_6_1 = Bottleneck(
            160,
            expansion_factor=6,
            strides=2,
            activation=self._activation,
            dropout_rate=self._dropout_rate,
            kernel_initializer=self._kernel_initializer,
            kernel_regularizer=self._kernel_regularizer)
        self.bottleneck_6_2 = Bottleneck(
            160,
            expansion_factor=6,
            strides=1,
            activation=self._activation,
            dropout_rate=self._dropout_rate,
            kernel_initializer=self._kernel_initializer,
            kernel_regularizer=self._kernel_regularizer)
        self.bottleneck_6_3 = Bottleneck(
            160,
            expansion_factor=6,
            strides=1,
            activation=self._activation,
            dropout_rate=self._dropout_rate,
            kernel_initializer=self._kernel_initializer,
            kernel_regularizer=self._kernel_regularizer)

        self.bottleneck_7_1 = Bottleneck(
            320,
            expansion_factor=6,
            strides=1,
            activation=self._activation,
            dropout_rate=self._dropout_rate,
            kernel_initializer=self._kernel_initializer,
            kernel_regularizer=self._kernel_regularizer)

        self.output_conv = Sequential([
            tf.layers.Conv2D(32,
                             1,
                             use_bias=False,
                             kernel_initializer=self._kernel_initializer,
                             kernel_regularizer=self._kernel_regularizer),
            tf.layers.BatchNormalization(), self._activation,
            tf.layers.Dropout(self._dropout_rate)
        ])

        super().build(input_shape)
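
The bottleneck stack above follows the MobileNetV2 schedule. Summarized as (expansion factor, output filters, repeats, stride of the first block in the group); the constant name below is purely descriptive:

# Bottleneck schedule as built above: (expansion_factor, filters, repeats, first stride)
MOBILENET_V2_BOTTLENECKS = [
    (1, 16, 1, 1),
    (6, 24, 2, 2),
    (6, 32, 3, 2),
    (6, 64, 4, 2),
    (6, 96, 3, 1),
    (6, 160, 3, 2),
    (6, 320, 1, 1),
]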
Example #18
        if np.argmax(model.forward(img)) != np.argmax(target):
            err += 1
    print((1.0 - err / float(len(test))) * 100.0)


if load_net:
    print "Load Network"
    model = StoreNetwork.load(name_net)
else:
    print "New Network"
    #Two layer network
    model = Sequential([
        NormalizationLayer(0, 255, -0.1, 0.1),
        LinearLayer(784, 10, weights='norm_random'),
        # TanhLayer,
        # LinearLayer(50, 10, weights='norm_random'),
        # TanhLayer,
        # NormalizationLayer(0,10,0,1),
        # SigmoidLayer()
    ])

# display = ShowTraining(epochs_num = epochs)

trainer = Trainer(show_training=False)  #, show_function = display.show)

J_list, dJdy_list, J_test = trainer.learn(
    model=model,
    train=train,
    test=test,
    # loss = NegativeLogLikelihoodLoss(),
    loss=CrossEntropyLoss(),
Example #19
    #     np.array([1.0,1.0,1.0,1.0,1.0])
    # )
    # norm = NormalizationLayer(
    #     np.array([0.0,0.0,0.0,-3.0]),
    #     np.array([5.0,5.0,5.0,3.0]),
    #     np.array([-1.0,-1.0,-1.0,-1.0]),
    #     np.array([1.0,1.0,1.0,1.0])
    # )
    norm = NormalizationLayer(np.array([0.0, 0.0]), np.array([5.0, 5.0]),
                              np.array([-1.0, -1.0]), np.array([1.0, 1.0]))
    W1 = utils.SharedWeights('gaussian', 2 + 1, 4)
    # W2 = utils.SharedWeights('gaussian',3+1,2)
    n = Sequential(
        norm,
        LinearLayer(2, 4, weights=W1),
        # TanhLayer,
        # #AddGaussian(1),
        # LinearLayer(3,2,weights=W2),
        RandomGaussianLayer(1),
        SoftMaxLayer)
    agent = GenericAgent(n, 4, 25, 0.0)
    agent.set_training_options(
        Trainer(show_training=True),
        NegativeLogLikelihoodLoss(),
        GradientDescentMomentum(
            learning_rate=0.1,
            momentum=0.5)  #GradientDescent(learning_rate=0.2)
    )


def data_gen(t=0):
    cart = Cart()
Example #20
load = 0
if load:
    lstm = GenericLayer.load('lstm.net')
else:
    l = LSTMNet(vocab_size,
                hidden_size,
                Wi=Wi,
                Wf=Wf,
                Wc=Wc,
                Wo=Wo,
                bi=bi,
                bf=bf,
                bc=bc,
                bo=bo)
    lstm = Sequential(
        l,
        LinearLayer(hidden_size, vocab_size),
    )

sm = SoftMaxLayer()

# lstm.on_message('init_nodes',20)
#
# x = to_one_hot_vect(char_to_ix['b'],vocab_size)
# print len(x)
# for i in range(20):
#     print lstm.forward(x,update = True)
#
# print lstm.backward(x)

epochs = 100
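
The commented-out smoke test drives the LSTM with one-hot encoded characters via to_one_hot_vect; a plain-numpy sketch of what that helper presumably does (an illustrative re-implementation, not the library's definition):

import numpy as np

def to_one_hot_vect(index, size):
    v = np.zeros(size)
    v[index] = 1.0
    return v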
Example #21
        np.array(targets[:n * 9 / 10]).astype(np.float))
    test = zip(
        np.array(data[n / 10:]).astype(np.float),
        np.array(targets[n / 10:]).astype(np.float))

    return train, test


train, test = gen_data()

model = Sequential([
    LinearLayer(2, 20, weights='random'),
    TanhLayer(),
    #SigmoidLayer(),
    # HeavisideLayer(),
    # LinearLayer(10, 20, weights='random'),
    # SigmoidLayer(),
    LinearLayer(20, num_classes, weights='random', L1=0.001),
    # ReluLayer(),
    # SigmoidLayer()
    SoftMaxLayer()
])

# model = Sequential([
#     LinearLayer(2, 5, weights='random'),
#     SigmoidLayer(),
#     #LinearLayer(3, 3, weights='random'),
#     #SigmoidLayer(),
#     LinearLayer(5, 4, weights='random'),
#     # Parallel([
#     #     LinearLayer(5, 1, weights='random'),
#     #     LinearLayer(5, 1, weights='random'),
Example #22
from losses import HuberLoss, SquaredLoss
from optimizers import GradientDescent, GradientDescentMomentum, AdaGrad
from network import Sequential
from layers import TanhLayer, LinearLayer, ReluLayer, NormalizationLayer, ComputationalGraphLayer, SelectVariableLayer
from printers import ShowTraining

epochs = 10

norm = NormalizationLayer(np.array([0.0, 0.0]), np.array([5.0, 5.0]),
                          np.array([0.0, 0.0]), np.array([1.0, 1.0]))

W1 = utils.SharedWeights(np.array([[0, 0, 0.0], [0, 0, 0.0]]), 2 + 1, 2)
#W1 = utils.SharedWeights('gaussian',2+1,2)
Q = Sequential(
    norm,
    LinearLayer(2, 2, weights=W1),
    # TanhLayer
)
W2 = utils.SharedWeights(np.array([[10.0, -10.0, 0.0], [-10.0, 10.0, 0.0]]),
                         2 + 1, 2)
#W2 = utils.SharedWeights('gaussian',2+1,2)
Q_hat = Sequential(
    norm,
    LinearLayer(2, 2, weights=W2),
    # TanhLayer
)

# from computationalgraph import Input, HotVect
# x = Input(['x','a'],'x')
# a = Input(['x','a'],'a')
# c=ComputationalGraphLayer(x*HotVect(a))