Example #1
from deepy.utils import shared_scalar


def learning_rate(value=0.01):
    """
    Wrap the learning rate value in a named shared scalar.
    """
    return shared_scalar(value, name="learning_rate")
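
As a point of reference, a minimal sketch of how this wrapper could slot into a trainer configuration; SGDTrainer, the config keys, and model are taken from Examples #3 and #4 below, and the pairing is only an illustration:

# Illustrative only: model and SGDTrainer are set up as in Examples #3 and #4.
trainer = SGDTrainer(model, {
    'learning_rate': learning_rate(0.05),  # the wrapper above, instead of shared_scalar(0.05)
    'gradient_clipping': 3
})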
Example #2
import os

import numpy as np

# Imports follow the deepy package layout shown in the later examples;
# exact module paths may vary between deepy versions.
from deepy.dataset import MnistDataset, MiniBatches
from deepy.networks import NeuralClassifier
from deepy.layers import Dense, Dropout, Maxout, Softmax
from deepy.trainers import MomentumTrainer, ExponentialLearningRateAnnealer
from deepy.utils import UniformInitializer, shared_scalar

L2NORM_LIMIT = 1.9365
EPSILON = 1e-7

# Output path for the trained parameters; the original snippet leaves
# default_model undefined, so this value is only an assumption.
default_model = os.path.join(os.path.dirname(__file__), "models", "mnist_maxout.gz")


def clip_param_norm():
    # Max-norm regularization: rescale each weight matrix so the L2 norm of
    # every column stays within L2NORM_LIMIT.
    for param in model.parameters:
        if param.name.startswith("W"):
            l2_norms = np.sqrt(np.sum(param.get_value() ** 2, axis=0, keepdims=True))
            desired_norms = np.clip(l2_norms, 0, L2NORM_LIMIT)
            scale = (desired_norms + EPSILON) / (l2_norms + EPSILON)
            param.set_value(param.get_value() * scale)


if __name__ == '__main__':
    model = NeuralClassifier(input_dim=28 * 28)
    model.training_callbacks.append(clip_param_norm)
    model.stack(Dropout(0.2),
                Maxout(240, num_pieces=5, init=UniformInitializer(.005)),
                Maxout(240, num_pieces=5, init=UniformInitializer(.005)),
                Dense(10, 'linear', init=UniformInitializer(.005)),
                Softmax())

    trainer = MomentumTrainer(model, {"learning_rate": shared_scalar(0.01),
                                      "momentum": 0.5})

    annealer = ExponentialLearningRateAnnealer(trainer, debug=True)

    mnist = MiniBatches(MnistDataset(), batch_size=100)

    trainer.run(mnist, controllers=[annealer])

    model.save_params(default_model)
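
As a quick sanity check of the max-norm logic in clip_param_norm above, here is a small self-contained NumPy sketch; the 2x2 weight matrix is made up, only the constants come from the example:

import numpy as np

L2NORM_LIMIT = 1.9365
EPSILON = 1e-7

W = np.array([[3.0, 0.5],
              [4.0, 0.5]])  # column L2 norms: 5.0 and ~0.707
l2_norms = np.sqrt(np.sum(W ** 2, axis=0, keepdims=True))
desired_norms = np.clip(l2_norms, 0, L2NORM_LIMIT)
W_clipped = W * (desired_norms + EPSILON) / (l2_norms + EPSILON)
# The over-limit column is scaled down to ~1.9365; the other is left unchanged.
print(np.sqrt(np.sum(W_clipped ** 2, axis=0)))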
Example #3
"""
An auto-encoder for compressing MNIST images.
"""

import logging, os
logging.basicConfig(level=logging.INFO)

from deepy.dataset import MnistDataset, MiniBatches
from deepy.networks import AutoEncoder
from deepy.layers import Dense
from deepy.trainers import SGDTrainer, LearningRateAnnealer
from deepy.utils import shared_scalar

model_path = os.path.join(os.path.dirname(__file__), "models",
                          "mnist_autoencoder.gz")

if __name__ == '__main__':
    model = AutoEncoder(input_dim=28 * 28, rep_dim=30)
    model.stack_encoders(Dense(50, 'tanh'), Dense(30))
    model.stack_decoders(Dense(50, 'tanh'), Dense(28 * 28))

    trainer = SGDTrainer(model, {
        'learning_rate': shared_scalar(0.05),
        'gradient_clipping': 3
    })

    mnist = MiniBatches(MnistDataset(for_autoencoder=True), batch_size=20)

    trainer.run(mnist, controllers=[LearningRateAnnealer(trainer)])

    model.save_params(model_path)
Example #4
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
An auto-encoder for compressing MNIST images.
"""


import logging, os
logging.basicConfig(level=logging.INFO)

from deepy.dataset import MnistDataset, MiniBatches
from deepy.networks import AutoEncoder
from deepy.layers import Dense
from deepy.trainers import SGDTrainer, LearningRateAnnealer
from deepy.utils import shared_scalar

model_path = os.path.join(os.path.dirname(__file__), "models", "mnist_autoencoder.gz")

if __name__ == '__main__':
    model = AutoEncoder(input_dim=28 * 28, rep_dim=30)
    model.stack_encoders(Dense(50, 'tanh'), Dense(30))
    model.stack_decoders(Dense(50, 'tanh'), Dense(28 * 28))

    trainer = SGDTrainer(model, {'learning_rate': shared_scalar(0.05), 'gradient_clipping': 3})

    mnist = MiniBatches(MnistDataset(for_autoencoder=True), batch_size=20)

    trainer.run(mnist, controllers=[LearningRateAnnealer(trainer)])

    model.save_params(model_path)
Example #5
# This variant assumes the same imports, constants (np, L2NORM_LIMIT, EPSILON)
# and default_model path as Example #2.
def clip_param_norm():
    # Max-norm regularization: rescale each weight matrix so the L2 norm of
    # every column stays within L2NORM_LIMIT.
    for param in model.parameters:
        if param.name.startswith("W"):
            l2_norms = np.sqrt(
                np.sum(param.get_value() ** 2, axis=0, keepdims=True))
            desired_norms = np.clip(l2_norms, 0, L2NORM_LIMIT)
            scale = (desired_norms + EPSILON) / (l2_norms + EPSILON)
            param.set_value(param.get_value() * scale)


if __name__ == '__main__':
    model = NeuralClassifier(input_dim=28 * 28)
    model.training_callbacks.append(clip_param_norm)
    model.stack(Dropout(0.2),
                Maxout(240, num_pieces=5, init=UniformInitializer(.005)),
                Maxout(240, num_pieces=5, init=UniformInitializer(.005)),
                Dense(10, 'linear', init=UniformInitializer(.005)), Softmax())

    trainer = MomentumTrainer(model, {
        "learning_rate": shared_scalar(0.01),
        "momentum": 0.5
    })

    annealer = ExponentialLearningRateAnnealer(trainer, debug=True)

    mnist = MiniBatches(MnistDataset(), batch_size=100)

    trainer.run(mnist, controllers=[annealer])

    model.save_params(default_model)