Example #1
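A small convolutional network trained with deepx on MNIST-style images: one-hot label encoding, a 90/10 train/test split, a two-conv-layer model, and a minibatch Adam loop. The snippet's missing data-loading preamble is reconstructed below with assumed deepx import paths and a hypothetical load_mnist() helper.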
import numpy as np
import tensorflow as tf
from deepx import T                          # deepx import paths assumed
from deepx.nn import Conv, Flatten, Linear   # deepx import paths assumed

def main():
    # Assumed setup (the top of the original snippet is truncated):
    # X holds MNIST-style images with shape [N, 28, 28, 1] and labels
    # holds integer class labels with shape [N].
    X, labels = load_mnist()  # hypothetical loader, not in the original
    N = X.shape[0]
    idx = np.random.permutation(N)

    # One-hot encode the integer labels.
    y = np.zeros((N, 10))
    for i in range(N):
        y[i, labels[i]] = 1

    split = int(0.9 * N)

    train_idx, test_idx = idx[:split], idx[split:]

    Xtrain, Xtest = X[train_idx], X[test_idx]
    ytrain, ytest = y[train_idx], y[test_idx]

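    # Symbolic inputs: a batch of 28x28 grayscale images and 10-way one-hot labels.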
    X_in = T.placeholder(T.floatx(), [None, 28, 28, 1])
    Y_in = T.placeholder(T.floatx(), [None, 10])

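    # Compose the model with deepx's >> operator: two convolutions, a flatten, and a 10-way linear readout.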
    conv_net = Conv((2, 2, 10)) >> Conv((2, 2, 20)) >> Flatten() >> Linear(10)
    logits = conv_net(X_in)
    predictions = T.argmax(logits, -1)
    loss = T.mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=Y_in))

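    # Adam update op on the softmax cross-entropy loss (TF1-style).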
    train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)

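    # Interactive TensorFlow session provided through the deepx backend.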
    sess = T.interactive_session()

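    # Training loop: run Adam on a random minibatch each iteration, then report held-out error.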
    def train(n_iter, batch_size=20):
        for i in range(n_iter):
            batch_idx = np.random.permutation(Xtrain.shape[0])[:batch_size]
            result = sess.run([loss, train_op], {X_in: Xtrain[batch_idx], Y_in: ytrain[batch_idx]})
            print("Loss:", result[0])

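        # Evaluate error on the held-out 10% split.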
        preds = sess.run(predictions, {X_in: Xtest}).astype(np.int32)
        print("Error:", 1 - (preds == labels[test_idx]).sum() / float(N - split))
    train(1000)

if __name__ == "__main__":
    main()
Example #2
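Exact sampling from a categorical distribution via the Gumbel-max trick: perturb the natural parameters (logits) with Gumbel(0, 1) noise, take the argmax, and one-hot encode the result.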
def _sample(self, num_samples):
    # Gumbel-max trick: adding Gumbel(0, 1) noise to the natural parameters
    # (logits) and taking the argmax yields an exact categorical sample.
    a = self.get_parameters('natural')[Stats.X]
    d = self.shape()[-1]
    gumbel_noise = Gumbel(T.zeros_like(a), T.ones_like(a)).sample(num_samples)
    # a[None] broadcasts the logits across the num_samples axis; the
    # sampled indices are one-hot encoded over the last dimension.
    return T.one_hot(T.argmax(a[None] + gumbel_noise, -1), d)
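
For reference, a minimal standalone sketch of the same Gumbel-max trick in plain NumPy (independent of the library's T backend, Gumbel class, and Stats; the probabilities and draw count below are arbitrary):

import numpy as np

rng = np.random.default_rng(0)
probs = np.array([0.1, 0.2, 0.7])
logits = np.log(probs)

# Gumbel(0, 1) noise via inverse-CDF sampling: g = -log(-log(u)).
u = rng.uniform(size=(100_000, 3))
g = -np.log(-np.log(u))

# argmax over (logits + noise) draws exact categorical samples.
samples = np.argmax(logits + g, axis=-1)
print(np.bincount(samples) / samples.size)  # approx. [0.1, 0.2, 0.7]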