from copy import deepcopy

import numpy
import pandas

from mlp.layers import MLP, Sigmoid, Softmax, Relu
from mlp.conv import ConvRelu_Opt  # assumed to live alongside ConvRelu in mlp.conv
from mlp.costs import CECost
from mlp.optimisers import SGDOptimiser
from mlp.schedulers import LearningRateFixed


def base(train_dp, valid_dp, logger, learning_rate):
    # learning_rate = 0.01
    rng = numpy.random.RandomState([2016, 2, 26])

    max_epochs = 1000
    cost = CECost()

    stats = list()

    test_dp = deepcopy(valid_dp)
    train_dp.reset()
    valid_dp.reset()
    test_dp.reset()

    # NETWORK TOPOLOGY:
    model = MLP(cost=cost)
    model.add_layer(Relu(idim=125, odim=125, irange=1.6, rng=rng))
    model.add_layer(Softmax(idim=125, odim=19, rng=rng))


    # define the optimiser, here stochastic gradient descent
    # with a fixed learning rate and max_epochs
    lr_scheduler = LearningRateFixed(
        learning_rate=learning_rate, max_epochs=max_epochs)
    optimiser = SGDOptimiser(lr_scheduler=lr_scheduler)

    logger.info('Training started...')
    tr_stats_b, valid_stats_b = optimiser.train(model, train_dp, valid_dp)

    logger.info('Testing the model on test set:')

    tst_cost, tst_accuracy = optimiser.validate(model, test_dp)
    logger.info('ACL test set accuracy is %.2f %%, cost (%s) is %.3f' %
                (tst_accuracy*100., cost.get_name(), tst_cost))
    # repeat the experiment over a range of dates, using each date as the RNG seed
    for dt in pandas.date_range("2015-01-10", "2015-10-10"):

        print "date: " + str(dt)

        train_dp.reset()
        test_dp.reset()
        valid_dp.reset()

        rng = numpy.random.RandomState([dt.year, dt.month, dt.day])

        # define the model structure: a convolutional layer followed by
        # sigmoid and softmax layers, with cross-entropy cost
        cost = CECost()
        model = MLP(cost=cost)
        model.add_layer(ConvRelu_Opt(1, 1, rng=rng, stride=(1, 1)))
        model.add_layer(Sigmoid(idim=122, odim=122, rng=rng))
        model.add_layer(Softmax(idim=122, odim=19, rng=rng))
        # one can stack more layers here

        # print map(lambda x: (x.idim, x.odim), model.layers)
        lr_scheduler = LearningRateFixed(learning_rate=0.01, max_epochs=500)
        optimiser = SGDOptimiser(lr_scheduler=lr_scheduler)

        tr_stats, valid_stats = optimiser.train(model, train_dp, valid_dp)
        tst_cost, tst_accuracy = optimiser.validate(model, test_dp)
        stats.append((tr_stats, valid_stats, (tst_cost, tst_accuracy)))

    return stats
Example no. 3
import logging
import numpy

logger = logging.getLogger()
logger.setLevel(logging.INFO)

from mlp.layers import MLP, Sigmoid, Linear, Softmax  #import required layer types
from mlp.conv import ConvLinear, ConvRelu, ConvSigmoid
from mlp.maxpooling import ConvMaxPool2D
from mlp.optimisers import SGDOptimiser, Optimiser  #import the optimiser
from mlp.dataset import MNISTDataProvider  #import data provider #Ruslan Burakov - s1569105
from mlp.costs import CECost, MSECost  #import the cost we want to use for optimisation
from mlp.schedulers import LearningRateFixed

rng = numpy.random.RandomState([2015, 10, 10])

# define the model structure, here a single convolutional layer
# with cross-entropy cost
tsk8_1_cost = CECost()
tsk8_1_model = MLP(cost=tsk8_1_cost)
"""
                 num_inp_feat_maps,
                 num_out_feat_maps,
                 image_shape=(28, 28),
                 kernel_shape=(5, 5),
                 stride=(1, 1),
                 irange=0.2,
                 rng=None,
                 conv_fwd=my_conv_fwd,
                 conv_bck=my_conv_bck,
                 conv_grad=my_conv_grad)
"""
tsk8_1_model.add_layer(
    ConvSigmoid(num_inp_feat_maps=1,
                num_out_feat_maps=1,
                image_shape=(28, 28),
                rng=rng))
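
# For reference: assuming the usual 'valid' convolution with no padding, the
# defaults documented above (28x28 image, 5x5 kernel, stride 1) give feature
# maps of (28 - 5) / 1 + 1 = 24 pixels per side, so a fully-connected layer
# stacked on one flattened feature map would need idim = 24 * 24 = 576.
# A quick sanity check of that arithmetic:
def conv_output_side(image_side, kernel_side, stride):
    """Side length of a 'valid' (no padding) convolution output."""
    return (image_side - kernel_side) // stride + 1

assert conv_output_side(28, 5, 1) == 24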
Example no. 4
import logging
import numpy

logger = logging.getLogger()
logger.setLevel(logging.INFO)

from mlp.layers import MLP, Sigmoid, Linear, Softmax, Relu  #import required layer types
from mlp.conv import ConvLinear, ConvRelu, ConvSigmoid
from mlp.maxpooling import ConvMaxPool2D
from mlp.optimisers import SGDOptimiser, Optimiser  #import the optimiser
from mlp.dataset import MNISTDataProvider  #import data provider #Ruslan Burakov - s1569105
from mlp.costs import CECost, MSECost  #import the cost we want to use for optimisation
from mlp.schedulers import LearningRateFixed

rng = numpy.random.RandomState([2015, 10, 10])

# define the model structure, here a single convolutional layer
# with cross-entropy cost
cost = CECost()
tsk8_2_model = MLP(cost=cost)
"""
                 num_inp_feat_maps,
                 num_out_feat_maps,
                 image_shape=(28, 28),
                 kernel_shape=(5, 5),
                 stride=(1, 1),
                 irange=0.2,
                 rng=None,
                 conv_fwd=my_conv_fwd,
                 conv_bck=my_conv_bck,
                 conv_grad=my_conv_grad)
"""
tsk8_2_model.add_layer(
    ConvRelu(num_inp_feat_maps=1,
             num_out_feat_maps=5,
             image_shape=(28, 28),
             rng=rng))
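
# The example is cut off above; a sketch, mirroring the first example's
# scheduler/optimiser pattern, of how the model might be finished and trained.
# Assumptions: a 'valid' 5x5 convolution over 28x28 gives five 24x24 feature
# maps (hence idim = 5 * 24 * 24); batch size and epoch count are illustrative;
# the MNISTDataProvider keyword names are assumed from this coursework's API.
tsk8_2_model.add_layer(Softmax(idim=5 * 24 * 24, odim=10, rng=rng))

train_dp = MNISTDataProvider(dset='train', batch_size=100, randomize=True)
valid_dp = MNISTDataProvider(dset='valid', batch_size=100, randomize=False)

lr_scheduler = LearningRateFixed(learning_rate=0.01, max_epochs=30)
optimiser = SGDOptimiser(lr_scheduler=lr_scheduler)
tr_stats, valid_stats = optimiser.train(tsk8_2_model, train_dp, valid_dp)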
Example no. 5

import numpy

from mlp.layers import MLP, Sigmoid, Linear, Softmax, Relu  #import required layer types
from mlp.conv import ConvLinear, ConvRelu, ConvSigmoid
from mlp.maxpooling import ConvMaxPool2D
from mlp.optimisers import SGDOptimiser, Optimiser  #import the optimiser
from mlp.dataset import MNISTDataProvider  #import data provider #Ruslan Burakov - s1569105
from mlp.costs import CECost, MSECost  #import the cost we want to use for optimisation
from mlp.schedulers import LearningRateFixed

rng = numpy.random.RandomState([2015, 10, 10])

# define the model structure, here a single convolutional layer
# with cross-entropy cost
cost = CECost()
tsk8_1_1_model = MLP(cost=cost)
"""
                 num_inp_feat_maps,
                 num_out_feat_maps,
                 image_shape=(28, 28),
                 kernel_shape=(5, 5),
                 stride=(1, 1),
                 irange=0.2,
                 rng=None,
                 conv_fwd=my_conv_fwd,
                 conv_bck=my_conv_bck,
                 conv_grad=my_conv_grad)
"""
tsk8_1_1_model.add_layer(ConvSigmoid(num_inp_feat_maps=1,
                                     num_out_feat_maps=1,
                                     image_shape=(28, 28),
                                     rng=rng))
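
# The example is truncated; one plausible completion, mirroring the earlier
# snippets, flattens the single 24x24 feature map (a 'valid' 5x5 convolution
# over a 28x28 image) into a Softmax output over MNIST's 10 classes:
tsk8_1_1_model.add_layer(Softmax(idim=24 * 24, odim=10, rng=rng))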