Example #1
def functionPlot(*args):
    lstm.save('lstm.net')
    # Sample 200 characters from the LSTM, seeding with the character 'c'.
    txt = ''
    x = to_one_hot_vect(char_to_ix['c'], vocab_size)
    for i in range(200):
        y = sm.forward(lstm.forward(x))
        txt += ix_to_char[np.random.choice(range(vocab_size), p=y.ravel())]
        x = to_one_hot_vect(np.argmax(y), vocab_size)
    print(txt)
    display.show(*args)
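Every snippet on this page leans on the to_one_hot_vect helper. Its implementation is not shown here, so the following is only a minimal sketch of what it presumably does, assuming it returns a flat NumPy float vector with a single 1 at the requested index:

import numpy as np

def to_one_hot_vect(index, size):
    # Assumed behaviour: a length-`size` vector of zeros with a 1.0 at `index`.
    vect = np.zeros(size)
    vect[int(index)] = 1.0
    return vect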
Example #2
def gen_data():
    # N clusters:
    # data, targets = datasets.make_classification(
    #     n_samples=n, n_features=2, n_informative=2, n_redundant=0, n_classes=num_classes, class_sep=3.0, n_clusters_per_class=1)

    data, targets = datasets.make_gaussian_quantiles(mean=(0, 0),
                                                     cov=1,
                                                     n_samples=n,
                                                     n_classes=num_classes)

    # Circles:
    # data, targets = datasets.make_circles(
    #     n_samples=n, shuffle=True, noise=0.1, random_state=None, factor=0.1)

    # Moons:
    # data, targets = datasets.make_moons(n_samples=n, shuffle=True, noise=0.05)

    # print data
    # print targets

    targets = [to_one_hot_vect(target, num_classes) for target in targets]

    # Use the first 90% of the samples for training and the held-out last 10% for testing.
    split = n * 9 // 10
    train = zip(
        np.array(data[:split]).astype(float),
        np.array(targets[:split]).astype(float))
    test = zip(
        np.array(data[split:]).astype(float),
        np.array(targets[split:]).astype(float))

    return train, test
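A minimal way to exercise gen_data, assuming sklearn's datasets module is imported and the module-level n and num_classes it reads are set (the values below are made up):

n = 1000
num_classes = 3

train, test = gen_data()
for x, t in train:
    pass  # x is a 2-D sample, t is its one-hot class label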
Example #3
    def reinforcement(self, x, r):
        # Remember the state that was seen and the command that was issued for it.
        self.states_history.append(x)
        self.command_history.append(self.command)
        if len(self.command_history) >= self.memory_size:
            # print np.argmax(self.command_history,axis=1)
            if r != 0:
                # Scale the replayed one-hot commands by the reward and the eligibility trace.
                self.target = r * np.multiply(self.e,
                                              np.array(self.command_history))
                # self.target = r*np.array(self.command_history)
                self.trainer.learn_minibatch(
                    self.model,
                    zip(self.states_history, self.target),
                    self.loss,
                    self.optimiser,
                )

        # Pick the next command greedily from the model's output.
        self.command = utils.to_one_hot_vect(
            np.argmax(self.model.forward(x, True)), self.action_size)
        return self.command
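The reward-scaled target built above is just an element-wise product of the reward, the eligibility trace and the stored one-hot commands. A self-contained NumPy sketch of that computation on toy values (the shapes are assumptions, not taken from the class):

import numpy as np

r = 1.0                                  # scalar reward
e = np.array([0.9, 0.5, 0.1])            # hypothetical per-action eligibility trace
command_history = np.array([[1, 0, 0],   # one-hot commands issued so far
                            [0, 1, 0],
                            [0, 0, 1]])

target = r * np.multiply(e, command_history)  # each command weighted by the trace
print(target)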
Example #4
def load_mnist_dataset(dataset="training", path="."):
    if dataset == "training":
        fname_img = os.path.join(path, 'train-images.idx3-ubyte')
        fname_lbl = os.path.join(path, 'train-labels.idx1-ubyte')
    elif dataset == "testing":
        fname_img = os.path.join(path, 't10k-images.idx3-ubyte')
        fname_lbl = os.path.join(path, 't10k-labels.idx1-ubyte')
    else:
        raise ValueError("dataset must be 'testing' or 'training'")

    # Load everything in some numpy arrays
    with open(fname_lbl, 'rb') as flbl:
        magic, num = struct.unpack(">II", flbl.read(8))
        lbl = numpy.fromfile(flbl, dtype=numpy.int8)

    with open(fname_img, 'rb') as fimg:
        magic, num, rows, cols = struct.unpack(">IIII", fimg.read(16))
        img = numpy.fromfile(fimg, dtype=numpy.uint8).reshape(
            len(lbl), rows * cols).astype(float)

    return zip(img, numpy.array([to_one_hot_vect(v, 10) for v in lbl]))
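A plausible way to call the loader, assuming the four idx files have been downloaded into a local mnist/ directory (that path is an assumption, not part of the example):

training_set = list(load_mnist_dataset("training", path="mnist"))
test_set = list(load_mnist_dataset("testing", path="mnist"))

img, label = training_set[0]  # img: flat 784-element float vector, label: 10-element one-hot vector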
Example #5
def lossFun(inputs, targets, hprev):
    """
    inputs, targets are both lists of integers.
    hprev is an Hx1 array of the initial hidden state.
    Returns the loss, the gradients on the model parameters, and the last hidden state.
    """
    xs, hs, ys, ps = {}, {}, {}, {}
    hs[-1] = np.copy(hprev)
    loss = 0

    # forward pass
    for t in range(len(inputs)):
        xs[t] = np.zeros((vocab_size, 1))  # encode in 1-of-k representation
        xs[t][inputs[t]] = 1
        hs[t] = np.tanh(np.dot(Wxh, xs[t]) + np.dot(Whh, hs[t - 1]) +
                        bh)  # hidden state
        ys[t] = np.dot(
            Why, hs[t]) + by  # unnormalized log probabilities for next chars
        ps[t] = np.exp(ys[t] - np.max(ys[t])) / np.sum(
            np.exp(ys[t] - np.max(ys[t])))  # probabilities for next chars
        loss += -np.log(ps[t][targets[t], 0])  # softmax (cross-entropy loss)

        assert_array_equal(van.window_step, t)
        assert_array_equal(van.state[t - 1], hs[t - 1].T[0])
        assert_array_equal(
            van.statenet[t].forward([xs[t].T[0], hs[t - 1].T[0]]), hs[t].T[0])
        assert_array_equal(
            van.statenet[t].net.elements[0].elements[0].elements[1].W.get(),
            Wxh)
        assert_array_equal(
            van.statenet[t].net.elements[0].elements[1].elements[1].W.get(),
            Whh)
        assert_array_equal(van.statenet[t].net.elements[0].elements[2].W.get(),
                           bh.T[0])

        assert_array_equal(
            vantr.statenet[t].net.elements[0].elements[0].elements[1].W.get(),
            Wxh)
        assert_array_equal(
            vantr.statenet[t].net.elements[0].elements[1].elements[1].W.get(),
            Whh)
        assert_array_equal(
            vantr.statenet[t].net.elements[0].elements[2].W.get(), bh.T[0])
        assert_array_equal(
            vantr.outputnet[t].net.elements[0].elements[1].W.get(), Why)
        assert_array_equal(vantr.outputnet[t].net.elements[1].W.get(), by.T[0])

        #
        # #Neg
        # assert_array_almost_equal(van.outputnet[t].net.elements[0].elements[0].elements[1].W,Why)
        # assert_array_almost_equal(van.outputnet[t].net.elements[0].elements[1].W,by.T[0])
        # assert_array_almost_equal(van.outputnet[t].forward(hs[t].T[0]),ps[t].T[0])
        # assert_array_almost_equal(van.outputnet[t].forward(van.statenet[t].forward([xs[t].T[0],hs[t-1].T[0]])),ps[t].T[0])
        # assert_array_almost_equal(van.forward(xs[t].T[0]),ps[t].T[0])
        #
        # Cross
        assert_array_equal(
            van.outputnet[t].net.elements[0].elements[1].W.get(), Why)
        assert_array_equal(van.outputnet[t].net.elements[1].W.get(), by.T[0])
        assert_array_equal(van.outputnet[t].forward(hs[t].T[0]), ys[t].T[0])
        assert_array_equal(
            van.outputnet[t].forward(van.statenet[t].forward(
                [xs[t].T[0], hs[t - 1].T[0]])), ys[t].T[0])
        assert_array_equal(
            van.outputnet[t].forward(van.statenet[t].forward(
                [xs[t].T[0], van.state[t - 1]])), ys[t].T[0])
        assert_array_equal(van.forward(xs[t].T[0]), ys[t].T[0])
        assert_array_equal(soft.forward(ys[t].T[0]), ps[t].T[0])

    # backward pass: compute gradients going backwards
    dWxh, dWhh, dWhy = np.zeros_like(Wxh), np.zeros_like(Whh), np.zeros_like(
        Why)
    dbh, dby = np.zeros_like(bh), np.zeros_like(by)
    dhnext = np.zeros_like(hs[0])

    for t in reversed(range(len(inputs))):
        dy = np.copy(ps[t])
        # backprop into y; see http://cs231n.github.io/neural-networks-case-study/#grad if confused here
        dy[targets[t]] -= 1
        dWhy += np.dot(dy, hs[t].T)
        dby += dy
        dh = np.dot(Why.T, dy) + dhnext  # backprop into h
        dhraw = (1 - hs[t] * hs[t]) * dh  # backprop through tanh nonlinearity
        dbh += dhraw
        dWxh += np.dot(dhraw, xs[t].T)
        dWhh += np.dot(dhraw, hs[t - 1].T)

        #
        # #Neg
        # van.backward(negLog.dJdy_gradient(ps[t].T[0],to_one_hot_vect(targets[t],vocab_size)),opt)
        # assert_array_almost_equal(van.outputnet[t].net.elements[0].elements[1].x,hs[t].T[0])
        # assert_array_almost_equal(van.outputnet[t].net.elements[0].elements[0].elements[1].dW,dWhy)
        # assert_array_almost_equal(van.outputnet[t].net.elements[0].elements[1].dW,dby.T[0])
        # assert_array_almost_equal(van.outputnet[t].net.elements[0].elements[0].elements[1].W,Why)
        # assert_array_almost_equal(van.outputnet[t].net.elements[0].elements[1].W,by.T[0])
        #
        #Cross
        assert_array_equal(van.outputnet[t].net.elements[0].elements[1].x,
                           hs[t].T[0])
        assert_array_equal(van.outputnet[t].net.forward(hs[t].T[0]),
                           ys[t].T[0])
        assert_array_equal(
            soft.forward(van.outputnet[t].net.forward(hs[t].T[0])), ps[t].T[0])
        assert_array_equal(
            soft.forward(van.outputnet[t].net.forward(hs[t].T[0])) -
            to_one_hot_vect(targets[t], vocab_size), dy.T[0])

        err = cross.dJdy_gradient(ys[t].T[0],
                                  to_one_hot_vect(targets[t], vocab_size))

        assert_array_equal(
            soft.forward(van.outputnet[t].net.forward(hs[t].T[0])) -
            to_one_hot_vect(targets[t], vocab_size), dy.T[0])
        assert_array_equal(
            ps[t].T[0] - to_one_hot_vect(targets[t], vocab_size), dy.T[0])
        assert_array_equal(err, dy.T[0])

        van.backward(err, opt)

        assert_array_equal(
            van.outputnet[t].net.elements[0].elements[1].W.get_dW(), dWhy)
        assert_array_equal(
            van.outputnet[t].net.elements[0].elements[1].W.get(), Why)
        assert_array_equal(van.outputnet[t].net.elements[1].W.get_dW(),
                           dby.T[0])
        assert_array_almost_equal(van.outputnet[t].net.elements[1].W.get(),
                                  by.T[0])
        #

        assert_array_equal(
            van.statenet[t].net.elements[0].elements[0].elements[1].W.get_dW(),
            dWxh)
        assert_array_equal(
            van.statenet[t].net.elements[0].elements[1].elements[1].W.get_dW(),
            dWhh)
        assert_array_equal(
            van.statenet[t].net.elements[0].elements[2].W.get_dW(), dbh.T[0])
        assert_array_equal(
            van.statenet[t].net.elements[0].elements[0].elements[1].W.get(),
            Wxh)
        assert_array_equal(
            van.statenet[t].net.elements[0].elements[1].elements[1].W.get(),
            Whh)
        assert_array_equal(van.statenet[t].net.elements[0].elements[2].W.get(),
                           bh.T[0])
        assert_array_equal(van.dJdh[t], dhnext.T[0])

        dhnext = np.dot(Whh.T, dhraw)

    opt.update_model()
    trainer.learn_window(
        vantr,
        zip(to_hot_vect(inputs, vocab_size), to_hot_vect(targets, vocab_size)),
        crosstr, opttr)

    for dparam in [dWxh, dWhh, dWhy, dbh, dby]:
        np.clip(dparam, -5, 5,
                out=dparam)  # clip to mitigate exploding gradients

    return loss, dWxh, dWhh, dWhy, dbh, dby, hs[len(inputs) - 1]
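The asserts above repeatedly check the identity that the cross-entropy gradient with respect to the logits is the softmax output minus the one-hot target. A tiny standalone verification of that identity (the numbers are illustrative only):

import numpy as np

logits = np.array([2.0, 1.0, 0.1])
target = 1
probs = np.exp(logits - np.max(logits)) / np.sum(np.exp(logits - np.max(logits)))
one_hot = np.zeros_like(probs)
one_hot[target] = 1.0
dy = probs - one_hot  # gradient of softmax cross-entropy w.r.t. the logits
print(dy)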
Example #6
    def test_all(self):
        n, p, epoch = 0, 0, -1
        mWxh, mWhh, mWhy = np.zeros_like(Wxh), np.zeros_like(
            Whh), np.zeros_like(Why)
        mbh, mby = np.zeros_like(bh), np.zeros_like(
            by)  # memory variables for Adagrad
        smooth_loss = -np.log(
            1.0 / vocab_size) * seq_length  # loss at iteration 0
        while n <= 400:
            print(n, p, epoch)
            # prepare inputs (we're sweeping from left to right in steps seq_length long)
            if p + seq_length + 1 > len(data) or n == 0:
                van.clear_memory()
                vantr.clear_memory()
                hprev = np.zeros((hidden_size, 1))  # reset RNN memory
                p = 0  # go from start of data
                epoch += 1
            # print (n,p,epoch)
            inputs = [char_to_ix[ch] for ch in data[p:p + seq_length]]
            targets = [char_to_ix[ch] for ch in data[p + 1:p + seq_length + 1]]
            if epoch == epochs:
                trainer2.learn_throughtime(
                    vantr2,
                    zip(to_hot_vect(inputs_all, vocab_size),
                        to_hot_vect(targets_all, vocab_size)),
                    CrossEntropyLoss(),
                    AdaGrad(learning_rate=learning_rate, clip=5), epochs)
                assert_array_equal(
                    vantr2.statenet[0].net.elements[0].elements[0].elements[1].
                    W.get(), Wxh)
                assert_array_equal(
                    vantr2.statenet[0].net.elements[0].elements[1].elements[1].
                    W.get(), Whh)
                assert_array_equal(
                    vantr2.statenet[0].net.elements[0].elements[2].W.get(),
                    bh.T[0])
                assert_array_equal(
                    vantr2.outputnet[0].net.elements[0].elements[1].W.get(),
                    Why)
                assert_array_equal(vantr2.outputnet[0].net.elements[1].W.get(),
                                   by.T[0])

                txtvan = ''
                x = to_one_hot_vect(inputs[0], vocab_size)
                for i in range(200):
                    y = soft.forward(vantr2.forward(x))
                    txtvan += ix_to_char[np.argmax(y)]  # np.random.choice(range(vocab_size), p=y.ravel())
                    x = to_one_hot_vect(np.argmax(y), vocab_size)
                vantr2.clear_memory()

                sample_ix = sample(hprev, inputs[0], 200)
                txt = ''.join(ix_to_char[ix] for ix in sample_ix)
                print('----\n %s \n %s \n----' % (txt, txtvan))

                epoch = 0

            # sample from the model now and then
            # if n % epochs == 0:
            #   sample_ix = sample(hprev, inputs[0], 200)
            #   txt = ''.join(ix_to_char[ix] for ix in sample_ix)
            #   print '----\n %s \n %s ----' % (txt,txtvan )

            # forward seq_length characters through the net and fetch gradient
            loss, dWxh, dWhh, dWhy, dbh, dby, hprev = lossFun(
                inputs, targets, hprev)

            smooth_loss = smooth_loss * 0.999 + loss * 0.001
            if n % epochs == 0:
                print('iter %d, loss: %f' % (n, smooth_loss))  # print progress
            # print 'iter %d, loss: %f' % (n, smooth_loss) # print progress

            # perform parameter update with Adagrad
            for param, dparam, mem in zip([Wxh, Whh, Why, bh, by],
                                          [dWxh, dWhh, dWhy, dbh, dby],
                                          [mWxh, mWhh, mWhy, mbh, mby]):
                mem += dparam * dparam
                param += -learning_rate * dparam / np.sqrt(
                    mem + 1e-8)  # adagrad update

            p += seq_length  # move data pointer
            n += 1  # iteration counter
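The per-parameter update at the end of the loop is the standard Adagrad rule: accumulate squared gradients and divide the step by their square root. A standalone toy version of the same update (the values are illustrative only):

import numpy as np

learning_rate = 1e-1
param = np.array([0.5, -0.3])
mem = np.zeros_like(param)      # running sum of squared gradients

grad = np.array([0.2, -0.1])    # pretend gradient from one step
mem += grad * grad
param += -learning_rate * grad / np.sqrt(mem + 1e-8)
print(param)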
Example #7
    def forward(self, x, update=False):
        self.x = x
        # Stochastic binary unit: the output is the sign of a noisy linear projection of x.
        self.y = np.sign(self.W.dot(x) + np.random.normal(0, self.sigma))
        # Decaying eligibility trace of input/output correlations.
        self.e = self.delta * self.e + (1 - self.delta) * self.y * self.x
        # Map the sign in {-1, +1} to a one-hot vector of length 2.
        return utils.to_one_hot_vect(self.y / 2.0 + 1.0, 2)
Example #8
    def forward(self, x, update=False):
        # Record the observed state and the previous command, then act greedily on the model output.
        self.states_history.append(x)
        self.command_history.append(self.command)
        self.command = utils.to_one_hot_vect(np.argmax(self.model.forward(x)),
                                             self.action_size)
        return self.command
Example #9
    def forward(self, x, update=False):
        self.x = np.argmax(x)
        return utils.to_one_hot_vect(self.policy(self.x), self.action_size)
Example #10
def functionPlot(*args):
    lstm.save('lstm.net')
    # Sample 200 characters from the LSTM, seeding with the character 'c'.
    txt = ''
    x = to_one_hot_vect(char_to_ix['c'], vocab_size)
    for i in range(200):
        y = sm.forward(lstm.forward(x))
        txt += ix_to_char[np.random.choice(range(vocab_size), p=y.ravel())]
        x = to_one_hot_vect(np.argmax(y), vocab_size)
    print(txt)
    display.show(*args)


trainer = Trainer(show_training=True, show_function=functionPlot)

train = [to_one_hot_vect(char_to_ix[ch], vocab_size) for ch in data[0:-1]]
target = [to_one_hot_vect(char_to_ix[ch], vocab_size) for ch in data[1:]]

J, dJdy = trainer.learn_throughtime(lstm, zip(train, target),
                                    CrossEntropyLoss(), opt, 1, window_size)

# J, dJdy = trainer.learn_window(
#     v,
#     zip(train[:5],target[:5]),
#     NegativeLogLikelihoodLoss(),
#     #CrossEntropyLoss(),
#     AdaGrad(learning_rate=1e-1),
# )
# print J

# J, dJdy = trainer.learn_window(
Example #11
# print v.forward(x)
# print v.backward(x)

epochs = 50

# opt = GradientDescent(learning_rate=0.01),
# opt = GradientDescentMomentum(learning_rate=0.01,momentum=0.5),
opt = AdaGrad(learning_rate=0.1)  #,clip=100.0),

display = ShowTraining(epochs_num=epochs)
# weights_list={'Wx': v.Wxh, 'Whh': v.Whh, 'Why': v.Why, 'by': v.by, 'bh': v.bh}

trainer = Trainer(show_training=True, show_function=display.show)

train = [to_one_hot_vect(char_to_ix[ch], vocab_size) for ch in data[0:-1]]
target = [to_one_hot_vect(char_to_ix[ch], vocab_size) for ch in data[1:]]

# J, dJdy = trainer.learn_window(
#     v,
#     zip(train[:5],target[:5]),
#     NegativeLogLikelihoodLoss(),
#     #CrossEntropyLoss(),
#     AdaGrad(learning_rate=1e-1),
# )
# print J

# J, dJdy = trainer.learn_window(
#     v,
#     zip(train[:5],target[:5]),
#     NegativeLogLikelihoodLoss(),
Example #12
    def forward(self, x, update=False):
        self.x = x
        # Sample an index from the probability vector x and return it as a one-hot vector.
        return utils.to_one_hot_vect(
            np.random.choice(range(x.size), p=x.ravel()), x.size)
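This layer turns a probability vector into a sampled one-hot action, the stochastic counterpart of the argmax-based layers above. A standalone NumPy illustration of the same idea (the probabilities are made up):

import numpy as np

p = np.array([0.1, 0.7, 0.2])                       # assumed action probabilities
idx = np.random.choice(range(p.size), p=p.ravel())  # sample an action index
one_hot = np.zeros(p.size)
one_hot[idx] = 1.0
print(one_hot)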
Example #13
# from computationalgraph import Input, HotVect
# x = Input(['x','a'],'x')
# a = Input(['x','a'],'a')
# c=ComputationalGraphLayer(x*HotVect(a))
# c.forward([[10,10],1])

printer = ShowTraining(epochs_num=epochs, weights_list={'Q': W1})
trainer = Trainer(show_training=True, show_function=printer.show)

data_train = np.random.rand(1000, 2) * 5
train = []
for x in data_train:
    out = Q_hat.forward(x)
    train.append(
        Q_hat.forward(x) * utils.to_one_hot_vect(np.argmax(out), out.size))

data_test = np.random.rand(1000, 2) * 5
test = []
for x in data_test:
    out = Q_hat.forward(x)
    test.append(
        Q_hat.forward(x) * utils.to_one_hot_vect(np.argmax(out), out.size))

J_list, dJdy_list, J_test = trainer.learn(
    model=Q,
    train=zip(data_train, train),
    test=zip(data_test, test),
    # loss = NegativeLogLikelihoodLoss(),
    # loss = CrossEntropyLoss(),
    loss=SquaredLoss(),