Example no. 1
def mnist():
  X, Y = util.get_mnist()
  X = X.reshape(len(X), 28, 28, 1)
  dim = X.shape[1]
  colors = X.shape[-1]

  # for mnist
  d_sizes = {
    'conv_layers': [(2, 5, 2, False), (64, 5, 2, True)],
    'dense_layers': [(1024, True)],
  }
  g_sizes = {
    'z': 100,
    'projection': 128,
    'bn_after_project': False,
    'conv_layers': [(128, 5, 2, True), (colors, 5, 2, False)],
    'dense_layers': [(1024, True)],
    'output_activation': tf.sigmoid,
  }


  # setup gan
  # note: assume square images, so only need 1 dim
  gan = DCGAN(dim, colors, d_sizes, g_sizes)
  gan.fit(X)
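
A note on the size dictionaries (an inference from how they are used in this listing, not something the code itself states): each 'conv_layers' entry appears to be a tuple of (output feature maps, filter size, stride, apply batch norm), and each 'dense_layers' entry a pair of (hidden units, apply batch norm). Annotated with those assumed fields, the discriminator configuration reads:

  # assumed field layout, inferred from the calls above
  d_sizes = {
    'conv_layers': [
      (2, 5, 2, False),  # 2 feature maps, 5x5 filters, stride 2, no batch norm
      (64, 5, 2, True),  # 64 feature maps, 5x5 filters, stride 2, batch norm
    ],
    'dense_layers': [(1024, True)],  # 1024 hidden units, batch norm
  }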
Example no. 2
def main():
    print('Starting autoencoder')
    X, y = util.get_mnist()

    #X = (X > 0.5).astype(np.float32)

    model = VeAutoencoderSmall(784, [300, 100])
    model.fit(X, epochs=10)

    done = False
    while not done:
        i = np.random.choice(len(X))
        x = X[i]
        im = model.predict_probs([x]).reshape(28, 28)
        plt.subplot(1, 2, 1)
        plt.imshow(x.reshape(28, 28), cmap='gray')
        plt.title('Original')

        plt.subplot(1, 2, 2)
        plt.imshow(im, cmap='gray')
        plt.title('Reconstruction')

        plt.show()

        ans = input('Generate another?')
        if ans and ans[0] in ('n', 'N'):
            done = True
Example no. 4
def test_single_autoencoder():
    Xtrain, Ytrain, Xtest, Ytest = get_mnist()
    Xtrain = Xtrain.astype(np.float32)
    Xtest = Xtest.astype(np.float32)
    _, D = Xtrain.shape
    K = len(set(Ytrain))
    enc = AutoEncoder(D, 2000, K)
    init_op = tf.global_variables_initializer()
    with tf.Session() as session:
        session.run(init_op)
        enc.set_session(session)
        enc.fit(Xtrain, show_fig=True)

        done = False
        while not done:
            i = np.random.choice(len(Xtest))
            x = Xtest[i]
            y = enc.predict([x])

            plt.subplot(1, 2, 1)
            plt.imshow(x.reshape(28, 28), cmap='gray')
            plt.title('Original')

            plt.subplot(1, 2, 2)
            plt.imshow(y.reshape(28, 28), cmap='gray')
            plt.title('Reconstructed')

            plt.show()

            ans = input("Generate another?")
            if ans and ans[0] in ('n', 'N'):
                done = True
Example no. 5
def test_pretraining_dnn():
    Xtrain, Ytrain, Xtest, Ytest = get_mnist()

    dnn = DNN([1000, 750, 500])
    # dnn.fit(Xtrain, Ytrain, Xtest, Ytest, pretrain=False, epochs=3, show_fig=True)  # no pretraining
    # vs
    dnn.fit(Xtrain, Ytrain, Xtest, Ytest, epochs=3, show_fig=True)
Example no. 6
def main():
    X, Y = util.get_mnist()
    # convert X to binary variable
    X = (X > 0.5).astype(np.float32)

    vae = VariationalAutoencoder(784, [200, 100])
    vae.fit(X)
Example no. 7
def main():
    Xtrain, Ytrain, _, _ = get_mnist()

    sample_size = 100
    X = Xtrain[:sample_size]
    Y = Ytrain[:sample_size]

    tsne = TSNE(method='exact')
    Z = tsne.fit_transform(X)
    plt.scatter(Z[:, 0], Z[:, 1], s=100, c=Y, alpha=.5)
Example no. 8
def test_pretraining_dnn():
    Xtrain, Ytrain, Xtest, Ytest = get_mnist()
    Xtrain = Xtrain.astype(np.float32)
    Xtest = Xtest.astype(np.float32)
    _, D = Xtrain.shape
    K = len(set(Ytrain))
    dnn = DNN(D, [1000, 750, 500], K)
    init_op = tf.global_variables_initializer()
    with tf.Session() as session:
        session.run(init_op)
        dnn.set_session(session)
        dnn.fit(Xtrain, Ytrain, Xtest, Ytest, pretrain=True, epochs=2)
Example no. 9
    def fit(self, X, Y):
        # we assume here that the classes are numbered 0..K-1
        # (assumes: import numpy as np; from scipy.stats import multivariate_normal as mvn)
        self.K = len(set(Y))

        self.gaussians = []
        for k in range(self.K):
            Xk = X[Y == k]
            mean = Xk.mean(axis=0)
            covar = np.cov(Xk.T)
            # store one dictionary per class, with mean 'm' and covariance 'c'
            g = {'m': mean, 'c': covar}
            self.gaussians.append(g)

    # draw a sample from a given class y
    def sample_given_y(self, y):
        g = self.gaussians[y]
        return mvn.rvs(mean=g['m'], cov=g['c'])

    # draw a sample from a randomly chosen class
    def sample(self):
        y = np.random.randint(self.K)
        return self.sample_given_y(y)


if __name__ == '__main__':
    # gather MNIST data
    X, Y = util.get_mnist()
    # create an instance of the Bayes classifier
    clf = BayesClassifier()
    # fit the classifier to our data, in this case MNIST
    clf.fit(X, Y)

    for k in range(clf.K):
        # show one sample and the mean image learned

        sample = clf.sample_given_y(k).reshape(28, 28)
        mean = clf.gaussians[k]['m'].reshape(28, 28)

        plt.subplot(1, 2, 1)
        plt.imshow(sample, cmap='gray')
        plt.title('Sample')
        plt.subplot(1, 2, 2)
        plt.imshow(mean, cmap='gray')
        plt.title('Mean')
        plt.show()

    # generate a random sample
    sample = clf.sample().reshape(28, 28)
    plt.imshow(sample, cmap='gray')
    plt.title('Random sample from a random class')
    plt.show()
Example no. 10
def main():
    print('Starting autoencoder')
    X, y = util.get_mnist()

    print(y[0])
    label_reshaped = y.reshape(len(y), 1)
    onehot_encoder = OneHotEncoder(sparse=False)
    onehot_encoded = onehot_encoder.fit_transform(label_reshaped)

    X = (X > 0.5).astype(np.float32)

    data = np.concatenate((X, onehot_encoded), axis=1)
    train_data, test_data = train_test_split(data, test_size=0.1, random_state=40)

    model = VeAutoencoderSmall(x_dim=X.shape[1], y_dim=onehot_encoded.shape[1], hidden_dims=[100, 10])
    model.fit(train_data, epochs=10)

    model.predict(test_data, x_dim=X.shape[1], y_dim=onehot_encoded.shape[1])
Example no. 11
def train(seed=3):
    batch_size=128
    d_z = 20

    np.random.seed(seed)
    
    Xdata, ydata = get_mnist()
    Xtrain = Xdata[0:60000]
    sortinds = np.random.permutation(60000)
    Xtrain = Xtrain[sortinds]
    Xtest = Xdata[60000:70000]

    vae = VariationalAutoEncoder(batch_size=None, d_z=20)

    train_step = tf.train.AdamOptimizer(0.001).minimize(-vae.elbo)
    #train_step = tf.train.AdagradOptimizer(0.01).minimize(-vae.elbo)
    init = tf.global_variables_initializer()

    sess = tf.Session()
    sess.run(init)

    for i_epoch in range(200):
        tstart = time.time()
        bigeps = np.random.randn(len(Xtrain), d_z)
        for start in range(0, Xtrain.shape[0], batch_size):
            end = start + batch_size
            Xt = Xtrain[start:end]
            eeps = bigeps[start:end]

            feed_dict = {vae.batch_X: Xt,
                         vae.batch_eps: eeps}

            sess.run(train_step, feed_dict=feed_dict)

        elapsed = time.time() - tstart
        feed_dict[vae.batch_X] = Xtrain[:len(Xtest)]
        feed_dict[vae.batch_eps] = bigeps[:len(Xtest)]
        (mll,) = sess.run((vae.elbo,), feed_dict=feed_dict)
        print(i_epoch, mll, elapsed)
        
        w = sess.run(vae.params, feed_dict=feed_dict)
        with open("weights_%d.pkl" % i_epoch, 'wb') as f:
            pickle.dump(w, f)
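
Note the objective above: the optimizer minimizes -vae.elbo, which is the same as maximizing the evidence lower bound, and batch_eps carries the externally drawn noise for the reparameterization trick, re-sampled once per epoch as bigeps.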
Example no. 12
def main():
  X, Y = util.get_mnist()
  # convert X to binary variable
  X = (X > 0.5).astype(np.float32)

  vae = VariationalAutoencoder(784, [200, 100])
  vae.fit(X)

  # plot reconstruction
  done = False
  while not done:
    i = np.random.choice(len(X))
    x = X[i]
    im = vae.posterior_predictive_sample([x]).reshape(28, 28)
    plt.subplot(1,2,1)
    plt.imshow(x.reshape(28, 28), cmap='gray')
    plt.title("Original")
    plt.subplot(1,2,2)
    plt.imshow(im, cmap='gray')
    plt.title("Sampled")
    plt.show()

    ans = input("Generate another?")
    if ans and ans[0] in ('n', 'N'):
      done = True

  # plot output from random samples in latent space
  done = False
  while not done:
    im, probs = vae.prior_predictive_sample_with_probs()
    im = im.reshape(28, 28)
    probs = probs.reshape(28, 28)
    plt.subplot(1,2,1)
    plt.imshow(im, cmap='gray')
    plt.title("Prior predictive sample")
    plt.subplot(1,2,2)
    plt.imshow(probs, cmap='gray')
    plt.title("Prior predictive probs")
    plt.show()

    ans = input("Generate another?")
    if ans and ans[0] in ('n', 'N'):
      done = True
Example no. 13
def main():
    X, Y = util.get_mnist()
    # convert X to binary variable
    X = (X > 0.5).astype(np.float32)

    vae = VariationalAutoencoder(784, [200, 100])
    vae.fit(X)

    # plot reconstruction
    done = False
    while not done:
        i = np.random.choice(len(X))
        x = X[i]
        im = vae.posterior_predictive_sample([x]).reshape(28, 28)
        plt.subplot(1, 2, 1)
        plt.imshow(x.reshape(28, 28), cmap='gray')
        plt.title("Original")
        plt.subplot(1, 2, 2)
        plt.imshow(im, cmap='gray')
        plt.title("Sampled")
        plt.show()

        ans = input("Generate another?")
        if ans and ans[0] in ('n', 'N'):
            done = True

    # plot output from random samples in latent space
    done = False
    while not done:
        im, probs = vae.prior_predictive_sample_with_probs()
        im = im.reshape(28, 28)
        probs = probs.reshape(28, 28)
        plt.subplot(1, 2, 1)
        plt.imshow(im, cmap='gray')
        plt.title("Prior predictive sample")
        plt.subplot(1, 2, 2)
        plt.imshow(probs, cmap='gray')
        plt.title("Prior predictive probs")
        plt.show()

        ans = input("Generate another?")
        if ans and ans[0] in ('n', 'N'):
            done = True
Example no. 14
def autoencoder():
    d_z = 2
    d_hidden = 256
    d_x = 28 * 28
    N = 100

    from util import get_mnist
    Xdata, ydata = get_mnist()
    Xbatch = tf.constant(np.float32(Xdata[0:N]))

    z = Gaussian(mean=0, std=1.0, shape=(N,d_z), name="z")
    X = neural_bernoulli(z, d_hidden=d_hidden, d_out=d_x, name="X")

    X.observe(Xbatch)
    q_z = neural_gaussian(X=Xbatch, d_hidden=d_hidden, d_out=d_z, name="q_z")
    z.attach_q(q_z)

    jm = Model(X)
    
    return jm
Example no. 15
def main(data_dir, n_episodes, batch_size, log_every):
    trX, teX, trY, teY = get_mnist(os.path.expanduser(f"~/{data_dir}"))
    trX = torch.Tensor(trX)
    teX = torch.Tensor(teX)
    trY = torch.Tensor(trY).to(torch.long)
    teY = torch.Tensor(teY).to(torch.long)
    n_train_samples = trX.shape[0]

    image_classifier = nn.Sequential(View(-1, 1, 28, 28),
                                     nn.Conv2d(1, 16, 3, padding=1), nn.ReLU(),
                                     nn.MaxPool2d(2),
                                     nn.Conv2d(16, 32, 3, padding=1),
                                     nn.ReLU(), nn.MaxPool2d(2),
                                     View(-1, 7 * 7 * 32),
                                     nn.Linear(7 * 7 * 32, 48), nn.ReLU(),
                                     nn.Dropout(p=0.4), nn.Linear(48, 10))

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(image_classifier.parameters())

    for ep in range(n_episodes):
        sortidxs = np.random.permutation(n_train_samples)
        trX = trX[sortidxs]
        trY = trY[sortidxs]

        for batch_start in range(0, n_train_samples, batch_size):
            batch_end = batch_start + batch_size
            optimizer.zero_grad()
            outputs = image_classifier(trX[batch_start:batch_end])
            loss = criterion(outputs, trY[batch_start:batch_end])
            loss.backward()
            optimizer.step()

        print(f'Episode {ep + 1}')
        if (ep + 1) % log_every == 0:
            eval_outputs = image_classifier(teX)
            eval_loss = criterion(eval_outputs, teY)
            eval_acc = (eval_outputs.argmax(dim=1) == teY).to(
                torch.float32).mean()
            print(f'Loss: {eval_loss}. Accuracy: {eval_acc}')
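
A plausible invocation for reference (the argument values here are illustrative, not taken from the source):

  main(data_dir='mnist_data', n_episodes=10, batch_size=128, log_every=2)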
Example no. 16
def main():
    X, Y = util.get_mnist()
    X = (X > 0.5).astype(np.float32)
    vae = VariationalAutoencoder(784, [200, 100])
    vae.fit(X)

    # display the images
    done = False
    while not done:
        i = np.random.choice(len(X))
        x = X[i]
        im = vae.posterior_predictive_sample([x]).reshape(28, 28)
        plt.subplot(1, 2, 1)
        plt.imshow(x.reshape(28, 28), cmap="gray")
        plt.title("Original image")
        plt.subplot(1, 2, 2)
        plt.imshow(im.reshape(28, 28), cmap="gray")
        plt.title("Sampled image")
        plt.show()

        ans = input("Generate another? [y/n]")
        if ans and ans[0] in ("n", "N"):
            done = True

    done = False
    while not done:
        im, probs = vae.prior_predictive_sample_with_probs()
        im = im.reshape(28, 28)
        probs = probs.reshape(28, 28)
        plt.subplot(1, 2, 1)
        plt.imshow(im, cmap='gray')
        plt.title("Prior predictive sample")
        plt.subplot(1, 2, 2)
        plt.imshow(probs, cmap='gray')
        plt.title("Prior predictive probs")
        plt.show()

        ans = input("Generate another?")
        if ans and ans[0] in ('n', 'N'):
            done = True
Example no. 17
def autoencoder():
    d_z = 2
    d_hidden = 256
    d_x = 28 * 28
    N = 100

    from util import get_mnist
    Xdata, ydata = get_mnist()
    Xbatch = Xdata[0:N]
    
    def init_decoder_params(d_z, d_hidden, d_x):
        # TODO come up with a simpler/more elegant syntax for point weights.
        # maybe just let the decoder initialize and manage its own weights / q distributions? 
        w_decode_h = DeltaQDistribution(init_weights(d_z, d_hidden))
        w_decode_h2 = DeltaQDistribution(init_weights(d_hidden, d_x))
        b_decode_1 = DeltaQDistribution(init_zero_vector(d_hidden))
        b_decode_2 = DeltaQDistribution(init_zero_vector(d_x))
        
        w1 = FlatDistribution(value=np.zeros((d_z, d_hidden), dtype=np.float32), fixed=False, name="w1")
        w1.attach_q(w_decode_h)
        w2 = FlatDistribution(value=np.zeros((d_hidden, d_x), dtype=np.float32), fixed=False, name="w2")
        w2.attach_q(w_decode_h2)
        b1 = FlatDistribution(value=np.zeros((d_hidden,), dtype=np.float32), fixed=False, name="b1")
        b1.attach_q(b_decode_1)
        b2 = FlatDistribution(value=np.zeros((d_x,), dtype=np.float32), fixed=False, name="b2")
        b2.attach_q(b_decode_2)
        
        return w1, w2, b1, b2

    w1, w2, b1, b2 = init_decoder_params(d_z, d_hidden, d_x)
    z = GaussianMatrix(mean=0, std=1.0, output_shape=(N,d_z), name="z")
    X = VAEDecoderBernoulli(z, w1, w2, b1, b2, name="X")
    
    X.observe(Xbatch)
    tfX = tf.constant(Xbatch, dtype=tf.float32)

    q_z = VAEEncoder(tfX, d_hidden, d_z)
    z.attach_q(q_z)
    
    return X
Example no. 18
def mnist():
    X, Y = util.get_mnist()
    X = X.reshape(len(X), 28, 28, 1)
    dim = X.shape[1]
    colors = X.shape[-1]

    d_sizes = {
        'conv_layers': [(2, 5, 2, False), (64, 5, 2, True)],
        'dense_layers': [(1024, True)]
    }

    g_sizes = {
        'z': 100,
        'projection': 128,
        'bn_after_project': False,
        'conv_layers': [(128, 5, 2, True), (colors, 5, 2, False)],
        'dense_layers': [(1024, True)],
        'output_activation': tf.sigmoid
    }

    gan = DCGAN(dim, colors, d_sizes, g_sizes)
    gan.fit(X)
Example no. 19
def main():
    X, Y = util.get_mnist()

    model = Autoencoder(784, 300)
    model.fit(X)

    # plot reconstruction
    done = False
    while not done:
        i = np.random.choice(len(X))
        x = X[i]
        im = model.predict([x]).reshape(28, 28)
        plt.subplot(1, 2, 1)
        plt.imshow(x.reshape(28, 28), cmap='gray')
        plt.title("Original")
        plt.subplot(1, 2, 2)
        plt.imshow(im, cmap='gray')
        plt.title("Reconstruction")
        plt.show()

        ans = input("Generate another?")
        if ans and ans[0] in ('n', 'N'):
            done = True
Example no. 20
def main():
    print('Starting autoencoder')
    X, y = util.get_mnist()

    model = Autoencoder(784, 300)
    model.fit(X)

    done = False
    while not done:
        i = np.random.choice(len(X))
        x = X[i]
        im = model.predict([x]).reshape(28, 28)
        plt.subplot(1, 2, 1)
        plt.imshow(x.reshape(28, 28), cmap='gray')
        plt.title('Original')

        plt.subplot(1, 2, 2)
        plt.imshow(im, cmap='gray')
        plt.title('Reconstruction')

        plt.show()

        ans = input('Generate another?')
        if ans and ans[0] in ('n', 'N'):
            done = True
Example no. 21
def main():
  X, Y = util.get_mnist()

  model = Autoencoder(784, 300)
  model.fit(X)

  # plot reconstruction
  done = False
  while not done:
    i = np.random.choice(len(X))
    x = X[i]
    im = model.predict([x]).reshape(28, 28)
    plt.subplot(1,2,1)
    plt.imshow(x.reshape(28, 28), cmap='gray')
    plt.title("Original")
    plt.subplot(1,2,2)
    plt.imshow(im, cmap='gray')
    plt.title("Reconstruction")
    plt.show()

    ans = input("Generate another?")
    if ans and ans[0] in ('n', 'N'):
      done = True
Example no. 22
def test_single_autoencoder():
    Xtrain, Ytrain, Xtest, Ytest = get_mnist()

    autoencoder = AutoEncoder(300, 0)
    autoencoder.fit(Xtrain, epochs=2, show_fig=True)

    done = False
    while not done:
        i = np.random.choice(len(Xtest))
        x = Xtest[i]
        y = autoencoder.predict([x])
        plt.subplot(1, 2, 1)
        plt.imshow(x.reshape(28, 28), cmap='gray')
        plt.title('Original')

        plt.subplot(1, 2, 2)
        plt.imshow(y.reshape(28, 28), cmap='gray')
        plt.title('Reconstructed')

        plt.show()

        ans = input('Generate another?')
        if ans and ans[0] in ('n', 'N'):
            done = True
Example no. 23
    model = Perceptron()
    t0 = datetime.now()
    model.fit(Xtrain, Ytrain)
    print("Training time:", (datetime.now() - t0))

    t0 = datetime.now()
    print("Train accuracy:", model.score(Xtrain, Ytrain))
    print("Time to compute train accuracy:", (datetime.now() - t0), "Train size:", len(Ytrain))

    t0 = datetime.now()
    print("Test accuracy:", model.score(Xtest, Ytest))
    print("Time to compute test accuracy:", (datetime.now() - t0), "Test size:", len(Ytest))


    # mnist
    X, Y = get_mnist()
    idx = np.logical_or(Y == 0, Y == 1)
    X = X[idx]
    Y = Y[idx]
    Y[Y == 0] = -1
    model = Perceptron()
    t0 = datetime.now()
    model.fit(X, Y, learning_rate=10e-3)
    print "MNIST train accuracy:", model.score(X, Y)


    # xor data
    print ""
    print "XOR results:"
    X, Y = get_simple_xor()
    Y[Y == 0] = -1
Example no. 24
    # 1) the sample
    # 2) which cluster it came from
    # we'll use (2) to obtain the means so we can plot
    # them like we did in the previous script
    # we cheat by looking at "non-public" params in
    # the sklearn source code
    mean = gmm.means_[sample[1]]
    return clamp_sample(sample[0].reshape(28, 28)), mean.reshape(28, 28)

  def sample(self):
    y = np.random.choice(self.K, p=self.p_y)
    return clamp_sample(self.sample_given_y(y))


if __name__ == '__main__':
  X, Y = util.get_mnist()
  clf = BayesClassifier()
  clf.fit(X, Y)

  for k in range(clf.K):
    # show one sample for each class
    # also show the mean image learned

    sample, mean = clf.sample_given_y(k)

    plt.subplot(1, 2, 1)
    plt.imshow(sample, cmap='gray')
    plt.title("Sample")
    plt.subplot(1, 2, 2)
    plt.imshow(mean, cmap='gray')
    plt.title("Mean")
    plt.show()
Example no. 25
def main():
    Xtrain, Ytrain, Xtest, Ytest = get_mnist()
    dae = DeepAutoEncoder([500, 300, 2])
    dae.fit(Xtrain)
    mapping = dae.map2center(Xtrain)
    plt.scatter(mapping[:, 0], mapping[:, 1], c=Ytrain, s=100, alpha=0.5)
    plt.show()
Example no. 26
    t0 = datetime.now()
    model.fit(Xtrain, Ytrain)
    print("Training time:", (datetime.now() - t0))

    t0 = datetime.now()
    print("Train accuracy:", model.score(Xtrain, Ytrain))
    print("Time to compute train accuracy:", (datetime.now() - t0), "Train size:", len(Ytrain))

    t0 = datetime.now()
    print("Test accuracy:", model.score(Xtest, Ytest))
    print("Time to compute test accuracy:", (datetime.now() - t0), "Test size:", len(Ytest))

    # mnist
    X, Y = get_mnist()
    idx = np.logical_or(Y == 0, Y == 1)
    X = X[idx]
    Y = Y[idx]
    Y[Y == 0] = -1
    model = Perceptron()
    t0 = datetime.now()
    model.fit(X, Y, learning_rate=10e-3)
    print "MNIST train accuracy:", model.score(X, Y)

    # xor data
    print ""
    print "XOR results:"
    X, Y = get_simple_xor()
    model.fit(X, Y)
    print "XOR accuracy:", model.score(X, Y)
Example no. 27
from util import get_mnist
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
from gsdr import GSDR

# Get the MNIST data
mnist_data, mnist_target = get_mnist()
print("Data Shape:", mnist_data.shape)
print("Target Shape:", mnist_target.shape)

input_count = mnist_data.shape[1]
hidden_count = 30
forced_latent_count = 10

print("Hidden count:", hidden_count)

gsdr = GSDR(input_count, hidden_count, forced_latent_count)

states = np.eye(hidden_count)

for i in range(mnist_data.shape[0]):
    gsdr.train(mnist_data[i])

    if i % 5000 == 0:
        print("Iteration", i)
        f, ax = plt.subplots(1, hidden_count)

        f.set_size_inches(20, 20)
        
        # Generate one-hot states 0 to hidden_count
Example no. 28
def main():
    Xtrain, Ytrain, Xtest, Ytest = get_mnist()
    dnn = DNN([1000, 750, 500], UnsupervisedModel=RBM)
    dnn.fit(Xtrain, Ytrain, Xtest, Ytest, epochs=3)
Example no. 29
def main():
    X, y = util.get_mnist()
    autoencoder = VeAutoencoder(784, [10, 2])
    autoencoder.fit(X, 100, 64)
Example no. 30
            np.random.shuffle(X)
            for j in range(n_batches):
                batch = X[j * batch_size:(j + 1) * batch_size]
                c = self.train_op(batch)
                # c is the value of the cost function, saved so that we can plot it later
                c /= batch_size
                costs.append(c)
                if j % 100 == 0:
                    print("--> Iter %d, cost = %.3f" % (j, c))
        plt.plot(costs)
        plt.show()


if __name__ == "__main__":
    # Example from the lecture
    X, Y = util.get_mnist()
    model = Autoencoder(28 * 28, 300)
    model.fit(X)
    # Plot reconstruction
    done = False
    # create a new plot
    my_figures = []
    while not done:
        i = np.random.choice(len(X))
        x = X[i]
        im = model.predict([x]).reshape(28, 28)
        x = x.reshape(28, 28)
        plt.subplot(1, 2, 1)
        plt.imshow(x, cmap="gray")
        plt.title("Original")
        plt.subplot(1, 2, 2)
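        # the listing is cut off here; a minimal completion of the loop,
        # assumed from the identical reconstruction loops in the examples above
        plt.imshow(im, cmap="gray")
        plt.title("Reconstruction")
        plt.show()

        ans = input("Generate another?")
        if ans and ans[0] in ('n', 'N'):
            done = True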
Example no. 31
from util import get_mnist, ImageExperiment
from gsdr import GSDRStack
import numpy as np

np.random.seed(123)

# Get the data
data, target = get_mnist()
print("Data Shape:", data.shape)
print("Target Shape:", target.shape)
input_size = (28, 28)
input_count = data.shape[1]

# Create the network
hidden_count = 256
print("Hidden count:", hidden_count)

gsdr = GSDRStack()
gsdr.add(input_count=input_count, hidden_count=hidden_count, sparsity=0.20)
gsdr.add(hidden_count=hidden_count, sparsity=0.15)
gsdr.add(hidden_count=hidden_count, sparsity=0.10)
gsdr.add(hidden_count=hidden_count, sparsity=0.05)

exp = ImageExperiment(gsdr, data, input_size, epochs=1, plot_iters=5000, plot_count=30, learn_rate=0.003)
exp.run()
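
Each add call stacks a further sparse layer on top of the previous one's code; the sparsity argument decreases from 0.20 to 0.05, so deeper layers emit progressively sparser representations.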
Example no. 32
trials = args.trials
initialization = util.get_init_for_activation(activation)


print()
print('/====================\\')
print("| Dataset: MNIST (Std)")
print("| Activation: {}".format(activation))
print("| Learning rate: {}".format(learning_rate))
print("| Batch size: {}".format(batch_size))
print("| Epochs: {}".format(epochs))
print("| Initialization: {}".format(initialization))
print('\\====================/')
print()

X_train, X_test, Y_train, Y_test = util.get_mnist()
scores = []
accs = []
for t in range(trials):
    print("--- Trial {} ---".format(t))
    model = util.get_mnist_model(activation, initialization, learning_rate)

    model.fit(X_train, Y_train,
              batch_size=batch_size, nb_epoch=epochs,
              show_accuracy=True, verbose=1,
              validation_data=(X_test, Y_test))
    score = model.evaluate(X_test, Y_test, show_accuracy=True, verbose=0)
    scores.append(score[0])
    accs.append(score[1])
    util.write_dict_as_csv('{}-mnist-std-{}.csv'.format(activation, learning_rate), {'val_loss': scores, 'val_acc': accs})
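
Note that show_accuracy and nb_epoch mark this as the old Keras 0.x API; in current Keras the equivalents are compiling with metrics=['accuracy'] and passing epochs= to fit.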
Example no. 33
def main():
    Xtrain, Ytrain, Xtest, Ytest = get_mnist()
    dnn = ANN([1000, 750, 500])
    dnn.fit(Xtrain, Ytrain)
Example no. 34
    def __init__(self):
        self.X, self.Y = util.get_mnist()