Example #1
from copy import deepcopy

import numpy

from mlp.layers import MLP, Relu, Softmax
from mlp.costs import CECost
from mlp.optimisers import SGDOptimiser
from mlp.schedulers import LearningRateFixed


def base(train_dp, valid_dp, logger, learning_rate):
    # learning_rate = 0.01
    rng = numpy.random.RandomState([2016, 2, 26])

    max_epochs = 1000
    cost = CECost()

    stats = list()

    test_dp = deepcopy(valid_dp)
    train_dp.reset()
    valid_dp.reset()
    test_dp.reset()

    # NETWORK TOPOLOGY:
    model = MLP(cost=cost)
    model.add_layer(Relu(idim=125, odim=125, irange=1.6, rng=rng))
    model.add_layer(Softmax(idim=125, odim=19, rng=rng))


    # define the optimiser: stochastic gradient descent
    # with a fixed learning rate and max_epochs
    lr_scheduler = LearningRateFixed(
        learning_rate=learning_rate, max_epochs=max_epochs)
    optimiser = SGDOptimiser(lr_scheduler=lr_scheduler)

    logger.info('Training started...')
    tr_stats_b, valid_stats_b = optimiser.train(model, train_dp, valid_dp)

    logger.info('Testing the model on test set:')

    tst_cost, tst_accuracy = optimiser.validate(model, test_dp)
    logger.info('ACL test set accuracy is %.2f %%, cost (%s) is %.3f' %
                (tst_accuracy*100., cost.get_name(), tst_cost))
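
# A minimal usage sketch for the helper above (not part of the original
# excerpt): train_dp and valid_dp are assumed to be data providers with
# 125-dimensional inputs and 19 target classes, constructed elsewhere.
import logging

logging.basicConfig()
logger = logging.getLogger()
logger.setLevel(logging.INFO)

# train_dp, valid_dp = ...  # construct the data providers here
base(train_dp, valid_dp, logger, learning_rate=0.01)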
Example #2
        def get_cur_encoder_model(model, cur_layer_id):
            cur_layer = model.layers[cur_layer_id]
            assert isinstance(cur_layer, Linear), (
                "Expected current layer to be Linear or its subclass")

            if cur_layer_id == 0:
                get_inputs = lambda x: (masking_noise(x), x)
            else:
                prev_layers = model.layers[:cur_layer_id]
                prev_mds = MLP_fast(MSECost())
                prev_mds.set_layers(prev_layers)

                def get_inputs_noisy(x):
                    pure = prev_mds.fprop(x)
                    return masking_noise(pure), pure

                get_inputs = get_inputs_noisy

            if cur_layer_id == last_layer:
                assert isinstance(cur_layer, Softmax), (
                    "final layer must be softmax for MNIST digits classification"
                )
                # no decoder is needed: the softmax layer's input dimension already
                # matches the previous layer's output, and the targets are the class labels
                get_targets = lambda inputs, t, pure: t
                cur_model = MLP_fast(CECost())
                cur_model.add_layer(cur_layer)
            else:
                get_targets = lambda inputs, t, pure: pure
                cur_model = MLP_fast(MSECost())
                cur_model.add_layer(cur_layer)
                # add a decoder layer that maps the current layer's output back to
                # its input dimensionality, so the model reconstructs its (clean) input
                cur_model.add_layer(Linear(cur_layer.odim, cur_layer.idim))

            return cur_model, get_inputs, get_targets
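
# Hypothetical usage (not from the original excerpt): the helper above can drive
# greedy layer-wise pretraining, one layer at a time. masking_noise, MLP_fast and
# train_dp come from the surrounding code, which is not part of this listing, and
# the per-batch parameter update is only indicated by a comment.
for cur_layer_id in range(len(model.layers)):
    cur_model, get_inputs, get_targets = get_cur_encoder_model(model, cur_layer_id)
    for x, t in train_dp:  # assumes the provider yields (inputs, targets) batches
        noisy, pure = get_inputs(x)             # corrupt the propagated activations
        targets = get_targets(noisy, t, pure)   # clean activations, or class labels at the top layer
        # ... forward/backward pass and a parameter update on cur_model ...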
Example #3
        def get_cur_discr_model(model, cur_layer_id):
            cur_layer = model.layers[cur_layer_id]
            assert isinstance(cur_layer, Linear), (
                "Expected current layer to be Linear or its subclass")
            get_targets = lambda inputs, t, pure: t
            if cur_layer_id == 0:
                get_inputs = lambda x: (x, None)
            else:
                prev_layers = model.layers[:cur_layer_id]
                prev_mds = MLP_fast(MSECost())
                prev_mds.set_layers(prev_layers)
                get_inputs = lambda x: (prev_mds.fprop(x), None)

            last_layer = cur_layer_id == len(model.layers) - 1
            cur_model = MLP_fast(CECost())
            cur_model.add_layer(cur_layer)
            if last_layer:
                assert isinstance(cur_layer, Softmax), (
                    "final layer must be softmax for MNIST digits classification"
                )
                # no extra layer is needed: the softmax layer's input dimension
                # already matches the previous layer's output
            else:
                # attach a temporary softmax output layer for the 10 MNIST digit classes
                cur_model.add_layer(Softmax(cur_layer.odim, 10))

            return cur_model, get_inputs, get_targets
Example #4
import logging
import numpy
from copy import deepcopy

from mlp.layers import MLP, Linear, Sigmoid, Softmax  # import required layer types
from mlp.layers import *
from mlp.optimisers import SGDOptimiser #import the optimiser

from mlp.costs import CECost #import the cost we want to use for optimisation
from mlp.schedulers import LearningRateFixed

logger = logging.getLogger()
logger.setLevel(logging.INFO)
rng = numpy.random.RandomState([2015, 10, 10])

#some hyper-parameters
nhid = 100
learning_rate = 0.07
max_epochs = 30
cost = CECost()

stats = list()

test_dp = deepcopy(valid_dp)
train_dp.reset()
valid_dp.reset()
test_dp.reset()

#define the model
model = MLP(cost=cost)
#model.add_layer(ComplexLinear(idim=125, odim=125, irange=1.6, rng=rng))
#model.add_layer(Sigmoid(idim=2*125, odim=125, irange=1.6, rng=rng))
model.add_layer(Sigmoid(idim=125, odim=125, irange=1.6, rng=rng))
model.add_layer(Softmax(idim=125, odim=19, rng=rng))
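
# The excerpt ends before the training stage; a sketch of the remaining steps,
# following the same pattern as Example #1, might look like this:
lr_scheduler = LearningRateFixed(learning_rate=learning_rate, max_epochs=max_epochs)
optimiser = SGDOptimiser(lr_scheduler=lr_scheduler)

logger.info('Training started...')
tr_stats, valid_stats = optimiser.train(model, train_dp, valid_dp)

logger.info('Testing the model on the test set:')
tst_cost, tst_accuracy = optimiser.validate(model, test_dp)
logger.info('Test set accuracy is %.2f %%, cost (%s) is %.3f' %
            (tst_accuracy * 100., cost.get_name(), tst_cost))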
Example #5
import logging
import numpy

logger = logging.getLogger()
logger.setLevel(logging.INFO)

from mlp.layers import MLP, Sigmoid, Linear, Softmax, Relu  #import required layer types
from mlp.conv import ConvLinear, ConvRelu, ConvSigmoid
from mlp.maxpooling import ConvMaxPool2D
from mlp.optimisers import SGDOptimiser, Optimiser  #import the optimiser
from mlp.dataset import MNISTDataProvider  #import data provider #Ruslan Burakov - s1569105
from mlp.costs import CECost, MSECost  #import the cost we want to use for optimisation
from mlp.schedulers import LearningRateFixed

rng = numpy.random.RandomState([2015, 10, 10])

# define the model structure, starting with a convolutional ReLU layer
# and cross-entropy cost
cost = CECost()
tsk8_2_model = MLP(cost=cost)
"""
                 num_inp_feat_maps,
                 num_out_feat_maps,
                 image_shape=(28, 28),
                 kernel_shape=(5, 5),
                 stride=(1, 1),
                 irange=0.2,
                 rng=None,
                 conv_fwd=my_conv_fwd,
                 conv_bck=my_conv_bck,
                 conv_grad=my_conv_grad)
"""
tsk8_2_model.add_layer(
    ConvRelu(num_inp_feat_maps=1,
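# The add_layer call above is truncated in this listing. A hypothetical
# completion, based only on the constructor signature quoted in the string
# above (num_out_feat_maps=5 is an assumption, not taken from the original):
#
#     tsk8_2_model.add_layer(
#         ConvRelu(num_inp_feat_maps=1,
#                  num_out_feat_maps=5,
#                  image_shape=(28, 28),
#                  kernel_shape=(5, 5),
#                  stride=(1, 1),
#                  irange=0.2,
#                  rng=rng))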
Example #6
import logging
import numpy
from copy import deepcopy

from mlp.layers import MLP, Linear, Sigmoid, Softmax  # import required layer types
from mlp.layers import *
from mlp.optimisers import SGDOptimiser  #import the optimiser

from mlp.costs import CECost  #import the cost we want to use for optimisation
from mlp.schedulers import LearningRateFixed

logger = logging.getLogger()
logger.setLevel(logging.INFO)
rng = numpy.random.RandomState([2015, 10, 10])

#some hyper-parameters
nhid = 100
learning_rate = 0.07
max_epochs = 30
cost = CECost()

stats = list()

test_dp = deepcopy(valid_dp)
train_dp.reset()
valid_dp.reset()
test_dp.reset()

#define the model
model = MLP(cost=cost)
#model.add_layer(ComplexLinear(idim=125, odim=125, irange=1.6, rng=rng))
#model.add_layer(Sigmoid(idim=2*125, odim=125, irange=1.6, rng=rng))
model.add_layer(Sigmoid(idim=125, odim=125, irange=1.6, rng=rng))
model.add_layer(Softmax(idim=125, odim=19, rng=rng))
Example #7
import logging
import numpy

logger = logging.getLogger()
logger.setLevel(logging.INFO)

from mlp.layers import MLP, Sigmoid, Linear, Softmax  #import required layer types
from mlp.conv import ConvLinear, ConvRelu, ConvSigmoid
from mlp.maxpooling import ConvMaxPool2D
from mlp.optimisers import SGDOptimiser, Optimiser  #import the optimiser
from mlp.dataset import MNISTDataProvider  #import data provider #Ruslan Burakov - s1569105
from mlp.costs import CECost, MSECost  #import the cost we want to use for optimisation
from mlp.schedulers import LearningRateFixed

rng = numpy.random.RandomState([2015, 10, 10])

# define the model structure, starting with a convolutional sigmoid layer
# and cross-entropy cost
tsk8_1_cost = CECost()
tsk8_1_model = MLP(cost=tsk8_1_cost)
"""
                 num_inp_feat_maps,
                 num_out_feat_maps,
                 image_shape=(28, 28),
                 kernel_shape=(5, 5),
                 stride=(1, 1),
                 irange=0.2,
                 rng=None,
                 conv_fwd=my_conv_fwd,
                 conv_bck=my_conv_bck,
                 conv_grad=my_conv_grad)
"""
tsk8_1_model.add_layer(
    ConvSigmoid(num_inp_feat_maps=1,
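# As in Example #5, the call above is truncated. A hypothetical completion
# based only on the constructor signature quoted above (num_out_feat_maps=5
# is an assumption, not taken from the original):
#
#     tsk8_1_model.add_layer(
#         ConvSigmoid(num_inp_feat_maps=1,
#                     num_out_feat_maps=5,
#                     image_shape=(28, 28),
#                     kernel_shape=(5, 5),
#                     stride=(1, 1),
#                     irange=0.2,
#                     rng=rng))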