Code example #1
File: util.py  Project: slorr80/deepy
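A helper that builds a six-hidden-layer ReLU MLP classifier for MNIST with a caller-supplied weight initializer, trains it with MomentumTrainer under a LearningRateAnnealer controller, and saves the trained parameters to model_path.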
def run(initializer, model_path):
    model = NeuralClassifier(input_dim=28 * 28)
    for _ in range(6):
        model.stack(Dense(128, 'relu', init=initializer))
    model.stack(Dense(10, 'linear'), Softmax())

    trainer = MomentumTrainer(model)

    annealer = LearningRateAnnealer(trainer)

    mnist = MiniBatches(MnistDataset(), batch_size=20)

    trainer.run(mnist, controllers=[annealer])

    model.save_params(model_path)
Code example #2
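An initialization hook, apparently from a scikit-learn-style wrapper class: it builds the underlying MLP, optionally splits off a validation set from the training data, and wires up a MomentumTrainer together with a LearningRateAnnealer driven by the wrapper's own patience setting.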
    def _initialize_impl(self, X, y=None):
        assert not self.is_initialized,\
            "This neural network has already been initialized."
        self._create_specs(X, y)

        self._create_mlp()
        if y is None:
            return

        if self.valid_size > 0.0:
            assert self.valid_set is None, "Can't specify valid_size and valid_set together."
            # NOTE: sklearn.cross_validation was removed in scikit-learn 0.20;
            # on current versions use sklearn.model_selection.train_test_split.
            X, X_v, y, y_v = sklearn.cross_validation.train_test_split(
                                X, y,
                                test_size=self.valid_size,
                                random_state=self.random_state)
            self.valid_set = X_v, y_v
        self.train_set = X, y
        
        self.trainer = MomentumTrainer(self.mlp)
        self.controllers = [
            self,
            LearningRateAnnealer(self.trainer, patience=self.n_stable, anneal_times=0)]
Code example #3
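A convolutional MNIST classifier: the flat input is reshaped to a 3D tensor, pushed through three convolution layers interleaved with dropout, flattened, and classified with a softmax output; training again uses MomentumTrainer with a LearningRateAnnealer controller.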
default_model = os.path.join(os.path.dirname(__file__), "models",
                             "deep_conv.gz")

if __name__ == '__main__':
    model = NeuralClassifier(input_dim=28 * 28)
    model.stack(  # Reshape to 3D tensor
        Reshape((-1, 28, 28)),
        # Add a new dimension for convolution
        DimShuffle((0, 'x', 1, 2)),
        Convolution((4, 1, 5, 5), activation="relu"),
        Dropout(0.15),
        Convolution((8, 4, 5, 5), activation="relu"),
        Dropout(0.1),
        Convolution((16, 8, 3, 3), activation="relu"),
        Flatten(),
        Dropout(0.1),
        # Dimension information was lost by Flatten, so reveal it to the pipeline
        RevealDimension(16),
        Dense(10, 'linear'),
        Softmax())

    trainer = MomentumTrainer(model)

    annealer = LearningRateAnnealer()

    mnist = MiniBatches(MnistDataset(), batch_size=20)

    trainer.run(mnist, controllers=[annealer])

    model.save_params(default_model)
Code example #4
File: tutorial2.py  Project: zuxfoucault/deepy
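An excerpt from the cost definition of a joint training model: the final cost combines an autoencoder reconstruction cost (scaled by 0.01) with a classification cross-entropy cost, and the error rate and both partial costs are registered as monitors. The main block trains the combined network with MomentumTrainer and L2 weight regularization.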
        decoding_output = self.decoder.output(internal_variable)

        classification_output = self.classifier.output(internal_variable)

        auto_encoder_cost = AutoEncoderCost(decoding_output, x).get()

        classification_cost = CrossEntropyCost(classification_output,
                                               self.target_input).get()

        final_cost = 0.01 * auto_encoder_cost + classification_cost

        error_rate = ErrorRateCost(classification_output,
                                   self.target_input).get()

        self.register_monitors(("err", error_rate),
                               ("encoder_cost", auto_encoder_cost),
                               ("classify_cost", classification_cost))

        return final_cost


if __name__ == '__main__':
    model = BasicNetwork(input_dim=28 * 28, model=MyJointTrainingModel())

    mnist = MiniBatches(MnistDataset(), batch_size=20)

    trainer = MomentumTrainer(model, {'weight_l2': 0.0001})

    trainer.run(mnist, controllers=[LearningRateAnnealer(trainer)])

    model.save_params(model_path)
Code example #5
File: mlp_maxout.py  Project: slorr80/deepy
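A maxout MLP with dropout whose weight matrices are rescaled to a maximum column L2 norm by the clip_param_norm function, registered as a training callback; the trainer uses a shared-variable learning rate with momentum 0.5 and an ExponentialLearningRateAnnealer.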
L2NORM_LIMIT = 1.9365
EPSILON = 1e-7

def clip_param_norm():
    for param in model.parameters:
        if param.name.startswith("W"):
            l2_norms = np.sqrt(np.sum(param.get_value() ** 2, axis=0, keepdims=True))
            desired_norms = np.clip(l2_norms, 0, L2NORM_LIMIT)
            scale = (desired_norms + EPSILON) / (l2_norms + EPSILON)
            param.set_value(param.get_value() * scale)

if __name__ == '__main__':
    model = NeuralClassifier(input_dim=28 * 28)
    model.training_callbacks.append(clip_param_norm)
    model.stack(Dropout(0.2),
                Maxout(240, num_pieces=5, init=UniformInitializer(.005)),
                Maxout(240, num_pieces=5, init=UniformInitializer(.005)),
                Dense(10, 'linear', init=UniformInitializer(.005)),
                Softmax())


    trainer = MomentumTrainer(model, {"learning_rate": shared_scalar(0.01),
                                      "momentum": 0.5})

    annealer = ExponentialLearningRateAnnealer(trainer, debug=True)

    mnist = MiniBatches(MnistDataset(), batch_size=100)

    trainer.run(mnist, controllers=[annealer])

    model.save_params(default_model)
Code example #6
File: mlp.py  Project: slorr80/deepy
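A complete, minimal script: a two-hidden-layer ReLU MLP on MNIST with L2 weight regularization, trained with MomentumTrainer under a LearningRateAnnealer controller and then saved to disk.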
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import logging, os
logging.basicConfig(level=logging.INFO)

from deepy.dataset import MnistDataset, MiniBatches
from deepy.networks import NeuralClassifier
from deepy.layers import Dense, Softmax
from deepy.trainers import MomentumTrainer, LearningRateAnnealer

default_model = os.path.join(os.path.dirname(__file__), "models", "mlp1.gz")

if __name__ == '__main__':
    model = NeuralClassifier(input_dim=28 * 28)
    model.stack(Dense(256, 'relu'), Dense(256, 'relu'), Dense(10, 'linear'),
                Softmax())

    trainer = MomentumTrainer(model, {"weight_l2": 0.001})

    annealer = LearningRateAnnealer(trainer)

    mnist = MiniBatches(MnistDataset(), batch_size=20)

    trainer.run(mnist, controllers=[annealer])

    model.save_params(default_model)
Code example #7
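A variant of the maxout example above written against a newer deepy API: the shared learning rate is created with graph.shared, ExponentialLearningRateAnnealer takes no trainer argument, and the controllers are passed to trainer.run via epoch_controllers.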
def clip_param_norm():
    for param in model.parameters:
        if param.name.startswith("W"):
            l2_norms = np.sqrt(
                np.sum(param.get_value()**2, axis=0, keepdims=True))
            desired_norms = np.clip(l2_norms, 0, L2NORM_LIMIT)
            scale = (desired_norms + EPSILON) / (l2_norms + EPSILON)
            param.set_value(param.get_value() * scale)


if __name__ == '__main__':
    model = NeuralClassifier(input_dim=28 * 28)
    model.training_callbacks.append(clip_param_norm)
    model.stack(Dropout(0.2),
                Maxout(240, num_pieces=5, init=UniformInitializer(.005)),
                Maxout(240, num_pieces=5, init=UniformInitializer(.005)),
                Dense(10, 'linear', init=UniformInitializer(.005)), Softmax())

    trainer = MomentumTrainer(model, {
        "learning_rate": graph.shared(0.01),
        "momentum": 0.5
    })

    annealer = ExponentialLearningRateAnnealer(debug=True)

    mnist = MiniBatches(MnistDataset(), batch_size=100)

    trainer.run(mnist, epoch_controllers=[annealer])

    model.save_params(default_model)
Code example #8
File: tutorial1.py  Project: zuxfoucault/deepy
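A tutorial script for a dropout MLP on MNIST: the initial learning rate is created through LearningRateAnnealer.learning_rate(0.01), and the annealer is passed as a training controller before the model is saved.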
import logging, os
logging.basicConfig(level=logging.INFO)

# MNIST Multi-layer model with dropout.
from deepy.dataset import MnistDataset, MiniBatches
from deepy.networks import NeuralClassifier
from deepy.layers import Dense, Softmax, Dropout
from deepy.trainers import MomentumTrainer, LearningRateAnnealer

model_path = os.path.join(os.path.dirname(__file__), "models", "tutorial1.gz")

if __name__ == '__main__':
    model = NeuralClassifier(input_dim=28*28)
    model.stack(Dense(256, 'relu'),
                Dropout(0.2),
                Dense(256, 'relu'),
                Dropout(0.2),
                Dense(10, 'linear'),
                Softmax())

    mnist = MiniBatches(MnistDataset(), batch_size=20)

    trainer = MomentumTrainer(model, {"learning_rate": LearningRateAnnealer.learning_rate(0.01)})

    annealer = LearningRateAnnealer(trainer)

    trainer.run(mnist, controllers=[annealer])

    model.save_params(model_path)
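None of the excerpts shows how the saved .gz parameter files are used afterwards. A minimal sketch of resuming training from a checkpoint, assuming the network classes expose a load_params counterpart to the save_params calls above (not shown in these excerpts) and reusing the architecture and model path from code example #6:

import os
from deepy.dataset import MnistDataset, MiniBatches
from deepy.networks import NeuralClassifier
from deepy.layers import Dense, Softmax
from deepy.trainers import MomentumTrainer, LearningRateAnnealer

model_path = os.path.join(os.path.dirname(__file__), "models", "mlp1.gz")

if __name__ == '__main__':
    # Rebuild the same architecture that produced the checkpoint (example #6).
    model = NeuralClassifier(input_dim=28 * 28)
    model.stack(Dense(256, 'relu'), Dense(256, 'relu'), Dense(10, 'linear'),
                Softmax())

    # Assumption: load_params is the counterpart of save_params used above.
    model.load_params(model_path)

    # Continue training from the restored parameters.
    trainer = MomentumTrainer(model, {"weight_l2": 0.001})
    mnist = MiniBatches(MnistDataset(), batch_size=20)
    trainer.run(mnist, controllers=[LearningRateAnnealer(trainer)])

    model.save_params(model_path)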