Example #1
 def test_mnist(self):
     config = agents.tools.AttrDict(default_config())
     # Build the one-hot MNIST model; the positional argument is the batch size.
     model = benchmark_model.build_tf_one_hot_model(config.batch_size)
     # Load MNIST, flatten each image to a 784-dim vector, scale the pixels
     # to [0, 1], and one-hot encode the labels.
     from keras.utils.np_utils import to_categorical
     from keras.datasets.mnist import load_data
     (x_train, y_train), (x_test, y_test) = load_data('/tmp/mnist')
     x_train = x_train.reshape((-1, 784)).astype(np.float64) / 255.
     y_train = to_categorical(y_train, 10).astype(np.float64)
     
     assert x_train.shape == (60000, 784)
     assert y_train.shape == (60000, 10)
     
     # Minimize the model's Frobenius-norm loss with the NMF optimizer.
     optimizer = optimizers.NMFOptimizer()
     train_op = optimizer.minimize(model.frob_norm)
     losses = []
     
     init = tf.global_variables_initializer()
     with self.test_session() as sess:
         sess.run(init)
         pprint(optimizer._layers)
         # Run a couple of training steps on random minibatches.
         for i in range(2):
             x, y = benchmark_model.batch(x_train, y_train, config.batch_size)
             _, new_loss, acc = sess.run([train_op, model.cross_entropy, model.accuracy], feed_dict={
                 model.inputs: x,
                 model.labels: y,
             })
             losses.append(new_loss)
             print('\nloss {}, accuracy {}'.format(new_loss, acc), end='', flush=True)
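benchmark_model.batch is used above but its body is not part of this listing. A minimal sketch, assuming it draws a uniformly random minibatch (the name and call signature are taken from the examples; the body is a guess):

import numpy as np

def batch(x, y, batch_size):
    # Hypothetical sampler: return `batch_size` rows drawn at random from (x, y).
    idx = np.random.randint(0, len(x), size=batch_size)
    return x[idx], y[idx]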
Example #2
 def test_autoencoder(self):
     print()
     config = agents.tools.AttrDict(default_config())
     model = benchmark_model.build_tf_one_hot_model(batch_size=config.batch_size)
     
     optimizer = optimizers.NMFOptimizer(config)
     train_op = optimizer.minimize(model.frob_norm)
     # The autoencoder's reconstruction (decoder output) must match the input shape.
     self.assertEqual(model.inputs.shape, optimizer.decoder.shape)
Example #3
 def test_get_placeholder_ops(self):
     print()
     model = benchmark_model.build_tf_one_hot_model(False)
     # Time how long it takes to recover the placeholders from the loss tensor.
     import time
     start_time = time.time()
     inputs, labels = utility.get_placeholder_ops(model.frob_norm)
     duration = time.time() - start_time
     print('duration', duration)
     self.assertEqual(model.inputs, inputs)
     self.assertEqual(model.labels, labels)
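utility.get_placeholder_ops is not shown in this listing. The test implies it recovers the feed placeholders by inspecting the graph behind a loss tensor; a minimal sketch of that technique in TF1, assuming a simple backwards graph walk (the return order of the placeholders is also an assumption):

import tensorflow as tf

def get_placeholder_ops(tensor):
    # Walk the TF1 graph backwards from `tensor` and collect every
    # placeholder that feeds into it.
    seen, stack, placeholders = set(), [tensor.op], []
    while stack:
        op = stack.pop()
        if op in seen:
            continue
        seen.add(op)
        if op.type == 'Placeholder':
            placeholders.append(op.outputs[0])
        stack.extend(inp.op for inp in op.inputs)
    return placeholders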
Example #4
 def test_factorize(self):
     config = agents.tools.AttrDict(default_config())
     model = benchmark_model.build_tf_one_hot_model(config.batch_size)
     
     optimizer = optimizers.NMFOptimizer(config)
     train_op = optimizer.minimize(model.frob_norm)
     
     init = tf.global_variables_initializer()
     with self.test_session() as sess:
         sess.run(init)
         # Inspect the layers the optimizer discovered in the graph.
         pprint(optimizer._layers)
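Every example builds its configuration via default_config(), which is also not part of this listing. Only the key names can be read off the call sites (batch_size, use_bias, activation, learning_rate, num_mf_iters, num_bp_iters, dataset); the values below are placeholders. A hypothetical sketch in the `return locals()` style that agents.tools.AttrDict is typically paired with:

def default_config():
    # Hypothetical values; only the key names are taken from the call sites.
    batch_size = 128
    use_bias = True
    activation = None
    learning_rate = 0.001
    num_mf_iters = 100
    num_bp_iters = 1000
    dataset = 'mnist'
    return locals()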
Example #5
def main(_):
    # Set up the configuration.
    config = AttrDict(default_config())
    # Build the one-hot MNIST model.
    model = benchmark_model.build_tf_one_hot_model(batch_size=config.batch_size,
                                                   use_bias=config.use_bias,
                                                   activation=config.activation)
    # Load the one-hot MNIST data.
    (x_train, y_train), (x_test, y_test) = benchmark_model.load_one_hot_data(dataset=config.dataset)
    
    # Check that the dataset has the expected shape.
    assert x_train.shape == (60000, 784)
    assert y_train.shape == (60000, 10)
    
    # Minimize the model's Frobenius-norm loss with the NMF optimizer.
    optimizer = NMFOptimizer(config=config)
    train_op = optimizer.minimize(model.frob_norm)
    
    # Minimize the model's cross-entropy loss with the Adam optimizer.
    bp_optimizer = tf.train.AdamOptimizer(config.learning_rate)
    bp_train_op = bp_optimizer.minimize(model.cross_entropy)
    
    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)
        
        # Train the optimizer's internal autoencoder on the inputs alone
        # before running the NMF/Adam comparison.
        for i in range(1000):
            x, y = benchmark_model.batch(x_train, y_train, batch_size=config.batch_size)
            loss, _ = sess.run([optimizer.autoencoder_loss, optimizer.autoencoder_train_op],
                               feed_dict={model.inputs: x})
            print('\rloss {}'.format(loss), end='', flush=True)
        print()
        
        _train_and_test = functools.partial(train_and_test,
                                            sess=sess, model=model,
                                            x_train=x_train, y_train=y_train,
                                            x_test=x_test, y_test=y_test,
                                            batch_size=config.batch_size)

        print('NMF-optimizer')
        # Train with NMF optimizer.
        _train_and_test(train_op, num_iters=config.num_mf_iters)

        print('Adam-optimizer')
        # Train with Adam optimizer.
        _train_and_test(bp_train_op, num_iters=config.num_bp_iters)
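train_and_test itself is not included in this listing; only its call signature is visible in the functools.partial call above. A hypothetical sketch of what such a helper could look like, assuming the model placeholders accept a variable batch dimension for the final test-set evaluation:

def train_and_test(train_op, num_iters, sess, model,
                   x_train, y_train, x_test, y_test, batch_size):
    # Run `train_op` for `num_iters` minibatch steps, then report test
    # accuracy. The body is a guess; only the parameters are from the source.
    for i in range(num_iters):
        x, y = benchmark_model.batch(x_train, y_train, batch_size=batch_size)
        _, loss = sess.run([train_op, model.cross_entropy],
                           feed_dict={model.inputs: x, model.labels: y})
        print('\rloss {}'.format(loss), end='', flush=True)
    accuracy = sess.run(model.accuracy,
                        feed_dict={model.inputs: x_test, model.labels: y_test})
    print('\ntest accuracy {}'.format(accuracy))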
Example #6
def main(_):
    # Set up the configuration.
    config = agents.tools.AttrDict(default_config())
    # Build the one-hot model for CIFAR-10 inputs (32 * 32 * 3 = 3072 features).
    model = benchmark_model.build_tf_one_hot_model(
        3072, use_bias=config.use_bias, activation=config.activation)
    # Load the one-hot CIFAR-10 data.
    (x_train, y_train), (x_test, y_test) = benchmark_model.load_one_hot_data(dataset='cifar10')

    # Check that the dataset has the expected shape.
    assert x_train.shape == (50000, 3072)
    assert y_train.shape == (50000, 10)

    # Minimize the model's Frobenius-norm loss with the NMF optimizer.
    optimizer = NMFOptimizer()
    train_op = optimizer.minimize(model.frob_norm)

    # Minimize the model's cross-entropy loss with the Adam optimizer.
    bp_optimizer = tf.train.AdamOptimizer(config.learning_rate)
    bp_train_op = bp_optimizer.minimize(model.cross_entropy)

    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)
        _train_and_test = functools.partial(train_and_test,
                                            sess=sess,
                                            model=model,
                                            x_train=x_train,
                                            y_train=y_train,
                                            x_test=x_test,
                                            y_test=y_test,
                                            batch_size=config.batch_size)

        print('NMF-optimizer')
        # Train with NMF optimizer.
        _train_and_test(train_op, num_iters=config.num_mf_iters)

        print('Adam-optimizer')
        # Train with Adam optimizer.
        _train_and_test(bp_train_op, num_iters=config.num_bp_iters)
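load_one_hot_data is not shown either; Example #1 performs the same preprocessing inline for MNIST, so a plausible sketch simply generalizes it (the cifar10 branch and the exact return layout are assumptions):

import numpy as np
from keras.utils.np_utils import to_categorical

def load_one_hot_data(dataset='mnist'):
    # Hypothetical loader: flatten the images, scale the pixels to [0, 1],
    # and one-hot encode the labels, mirroring Example #1.
    if dataset == 'mnist':
        from keras.datasets.mnist import load_data
    else:
        from keras.datasets.cifar10 import load_data
    (x_train, y_train), (x_test, y_test) = load_data()
    x_train = x_train.reshape((len(x_train), -1)).astype(np.float64) / 255.
    x_test = x_test.reshape((len(x_test), -1)).astype(np.float64) / 255.
    y_train = to_categorical(y_train, 10).astype(np.float64)
    y_test = to_categorical(y_test, 10).astype(np.float64)
    return (x_train, y_train), (x_test, y_test)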