Exemplo n.º 1
0
def run(method, model_path):
    """Train a two-hidden-layer MNIST classifier with a scipy-based optimizer.

    :param method: scipy optimization method name handed to ScipyTrainer.
    :param model_path: path where the trained parameters are saved.
    """
    # Network: two ReLU hidden layers, then a linear layer squashed by softmax.
    classifier = NeuralClassifier(input_dim=28 * 28)
    layers = [Dense(128, 'relu'), Dense(128, 'relu'), Dense(10, 'linear'), Softmax()]
    classifier.stack(*layers)

    batches = MiniBatches(MnistDataset(), batch_size=100)
    optimizer = ScipyTrainer(classifier, method)

    # Anneal the learning rate between epochs while training.
    optimizer.run(batches, epoch_controllers=[LearningRateAnnealer()])

    classifier.save_params(model_path)
Exemplo n.º 2
0
def run(initializer, model_path):
    """Train a six-hidden-layer MNIST classifier with a given weight initializer.

    :param initializer: initialization scheme applied to every hidden layer.
    :param model_path: path where the trained parameters are saved.
    """
    net = NeuralClassifier(input_dim=28 * 28)
    # Six identically shaped ReLU layers, all using the supplied initializer.
    for _depth in range(6):
        net.stack(Dense(128, 'relu', init=initializer))
    net.stack(Dense(10, 'linear'), Softmax())

    trainer = MomentumTrainer(net)
    batches = MiniBatches(MnistDataset(), batch_size=20)

    # The annealer watches this trainer and decays its learning rate.
    trainer.run(batches, controllers=[LearningRateAnnealer(trainer)])

    net.save_params(model_path)
Exemplo n.º 3
0
    def _initialize_impl(self, X, y=None):
        """Create the underlying MLP and, when targets are given, the trainer.

        :param X: training inputs.
        :param y: training targets; when None only the network is built
                  (prediction-only initialization) and no trainer is created.
        """
        assert not self.is_initialized,\
            "This neural network has already been initialized."
        self._create_specs(X, y)

        self._create_mlp()
        if y is None:
            return

        if self.valid_size > 0.0:
            assert self.valid_set is None, "Can't specify valid_size and valid_set together."
            # sklearn.cross_validation was removed in scikit-learn 0.20;
            # prefer model_selection and fall back for very old versions.
            try:
                from sklearn.model_selection import train_test_split
            except ImportError:  # scikit-learn < 0.18
                from sklearn.cross_validation import train_test_split
            X, X_v, y, y_v = train_test_split(
                                X, y,
                                test_size=self.valid_size,
                                random_state=self.random_state)
            self.valid_set = X_v, y_v
        self.train_set = X, y

        self.trainer = MomentumTrainer(self.mlp)
        # The network itself acts as a controller (early stopping hooks);
        # the annealer only monitors patience here (anneal_times=0).
        self.controllers = [
            self,
            LearningRateAnnealer(self.trainer, patience=self.n_stable, anneal_times=0)]
Exemplo n.º 4
0
    ap.add_argument("--model", default="")
    ap.add_argument("--small", action="store_true")
    args = ap.parse_args()

    vocab, lmdata = load_data(small=args.small, history_len=5, batch_size=64)
    # RNN language model with persistent hidden state across batches;
    # the class-factored output layer keeps the softmax tractable.
    model = NeuralLM(vocab.size)
    model.stack(
        RNN(hidden_size=100,
            output_type="sequence",
            hidden_activation='sigmoid',
            persistent_state=True,
            batch_size=lmdata.size,
            reset_state_for_input=0),
        ClassOutputLayer(output_size=100, class_size=100))

    # Resume from a checkpoint when one is supplied.
    if os.path.exists(args.model):
        model.load_params(args.model)

    trainer = SGDTrainer(
        model, {
            "learning_rate": LearningRateAnnealer.learning_rate(1.2),
            "weight_l2": 1e-7
        })
    annealer = LearningRateAnnealer()

    trainer.run(lmdata, epoch_controllers=[annealer])

    # NOTE(review): saves to default_model (defined elsewhere in this file)
    # even when --model was supplied — confirm this is intended.
    model.save_params(default_model)
Exemplo n.º 5
0
Classify MNIST digits using a very deep thin network.
Plain deep networks are very hard to train, as shown in this case.

But we should notice that if highway layers just learn to pass information forward,
in other words, to just be transparent layers, then they would be meaningless.
"""

import logging, os
logging.basicConfig(level=logging.INFO)

from deepy.dataset import MnistDataset, MiniBatches
from deepy.networks import NeuralClassifier
from deepy.layers import Dense, Softmax
from deepy.trainers import MomentumTrainer, LearningRateAnnealer

model_path = os.path.join(os.path.dirname(__file__), "models", "baseline1.gz")

if __name__ == '__main__':
    # Baseline for the highway-network experiment: a plain 20-layer
    # stack of narrow (71-unit) ReLU layers.
    network = NeuralClassifier(input_dim=28 * 28)
    for _depth in range(20):
        network.stack(Dense(71, 'relu'))
    network.stack(Dense(10, 'linear'), Softmax())

    optimizer = MomentumTrainer(network)
    batches = MiniBatches(MnistDataset(), batch_size=20)

    optimizer.run(batches, controllers=[LearningRateAnnealer()])

    network.save_params(model_path)
Exemplo n.º 6
0
                             "lstm_rnnlm.gz")

if __name__ == '__main__':
    # Command-line options: an optional checkpoint and a small-corpus switch.
    parser = ArgumentParser()
    parser.add_argument("--model", default="")
    parser.add_argument("--small", action="store_true")
    opts = parser.parse_args()

    vocab, lmdata = load_data(small=opts.small, history_len=5, batch_size=64)

    # LSTM language model whose hidden state persists across batches.
    lm = NeuralLM(vocab.size, test_data=None)
    recurrent = LSTM(hidden_size=100,
                     output_type="sequence",
                     persistent_state=True,
                     batch_size=lmdata.size,
                     reset_state_for_input=0)
    lm.stack(recurrent, FullOutputLayer(vocab.size))

    # Resume from a checkpoint when one is supplied.
    if os.path.exists(opts.model):
        lm.load_params(opts.model)

    hyperparams = {
        "learning_rate": LearningRateAnnealer.learning_rate(1.2),
        "weight_l2": 1e-7
    }
    trainer = SGDTrainer(lm, hyperparams)

    trainer.run(lmdata, controllers=[LearningRateAnnealer(trainer)])

    lm.save_params(default_model)
Exemplo n.º 7
0
    model_path = args.model

    # Build (or reload) the glimpse network; std controls the glimpse
    # sampling variance, the flags switch off REINFORCE / enable random glimpses.
    network = get_network(model_path,
                          std=args.variance,
                          disable_reinforce=args.disable_reinforce,
                          random_glimpse=args.random_glimpse)

    # Training hyper-parameters, all taken from the command line or fixed L2 terms.
    trainer_conf = TrainerConfig()
    trainer_conf.learning_rate = LearningRateAnnealer.learning_rate(
        args.learning_rate)
    trainer_conf.weight_l2 = 0.0001
    trainer_conf.hidden_l2 = 0.0001
    trainer_conf.method = args.method

    # FirstGlimpseTrainer is handed the first layer (network.layers[0]) —
    # presumably the glimpse layer it focuses on; confirm against its docs.
    trainer = FirstGlimpseTrainer(network,
                                  network.layers[0],
                                  config=trainer_conf)

    annealer = LearningRateAnnealer(trainer, patience=5)

    timer = Timer()
    for _ in trainer.train(mnist.train_set(), mnist.valid_set(),
                           mnist.test_set()):
        # Stop training once the annealer signals (patience exhausted).
        if annealer.invoke():
            break
    timer.end()

    network.save_params(model_path)

    timer.report()
Exemplo n.º 8
0
if __name__ == '__main__':

    ap = ArgumentParser()
    ap.add_argument("--model", default=os.path.join(os.path.dirname(__file__), "models", "sequence_adding_100_2.gz"))
    args = ap.parse_args()

    # IRNN regressor for the sequence-adding task: consumes a 2-channel
    # sequence (input_tensor=3 -> 3D input) and emits a single number.
    model = NeuralRegressor(input_dim=2, input_tensor=3)
    model.stack(IRNN(hidden_size=100, input_type="sequence",
                     output_type="one"),
                      Dense(1))

    # Resume from a checkpoint when one exists at the given path.
    if os.path.exists(args.model):
        model.load_params(args.model)

    conf = TrainerConfig()
    conf.learning_rate = LearningRateAnnealer.learning_rate(0.01)
    conf.gradient_clipping = 3
    conf.patience = 50
    conf.gradient_tolerance = 5
    conf.avoid_nan = False
    trainer = SGDTrainer(model, conf)

    annealer = LearningRateAnnealer(patience=20)

    # NOTE(review): batch_set is defined elsewhere in this file — confirm.
    trainer.run(batch_set, controllers=[annealer])

    model.save_params(args.model)
    # print() with a single argument behaves identically on Python 2 and 3;
    # the original `print "..."` statements were Python-2-only syntax.
    print("Identity matrix weight:")
    # IRNN initializes its recurrent weights to identity; the diagonal shows
    # how far training has drifted from that.
    print(model.first_layer().W_h.get_value().diagonal())