import time
import pickle as p
from copy import deepcopy

import numpy

# Module paths below follow the coursework's mlp package (the scheduler and
# optimiser paths are confirmed by the imports in Example #3 further down).
from mlp.costs import CECost
from mlp.layers import MLP, Relu, Sigmoid, Softmax
from mlp.optimisers import SGDOptimiser
from mlp.schedulers import LearningRateFixed
# ConvRelu_Opt used below is a coursework-specific layer; its module is not
# shown in this snippet.


def base(train_dp, valid_dp, logger, learning_rate):
    rng = numpy.random.RandomState([2016, 2, 26])

    max_epochs = 1000
    cost = CECost()

    stats = list()

    test_dp = deepcopy(valid_dp)
    train_dp.reset()
    valid_dp.reset()
    test_dp.reset()

    # NETWORK TOPOLOGY:
    model = MLP(cost=cost)
    model.add_layer(Relu(idim=125, odim=125, irange=1.6, rng=rng))
    model.add_layer(Softmax(idim=125, odim=19, rng=rng))
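    # (125-dimensional inputs through one hidden Relu layer into a 19-way
    # Softmax output; irange sets the uniform weight-initialisation range)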


    # define the optimiser: stochastic gradient descent
    # with a fixed learning rate and max_epochs
    lr_scheduler = LearningRateFixed(
        learning_rate=learning_rate, max_epochs=max_epochs)
    optimiser = SGDOptimiser(lr_scheduler=lr_scheduler)

    logger.info('Training started...')
    tr_stats_b, valid_stats_b = optimiser.train(model, train_dp, valid_dp)

    logger.info('Testing the model on test set:')

    tst_cost, tst_accuracy = optimiser.validate(model, test_dp)
    logger.info('ACL test set accuracy is %.2f %%, cost (%s) is %.3f' %
                (tst_accuracy*100., cost.get_name(), tst_cost))
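
# Usage sketch for base() (hypothetical): the provider/loader names below are
# assumptions about the surrounding coursework code, not part of this example.
#
#   import logging
#   logging.basicConfig(level=logging.INFO)
#   logger = logging.getLogger('base')
#   train_dp, valid_dp = load_data_providers()  # assumed helper
#   base(train_dp, valid_dp, logger, learning_rate=0.01)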
        print "date: " + str(dt)

        train_dp.reset()
        test_dp.reset()
        valid_dp.reset()

        rng = numpy.random.RandomState([dt.year, dt.month, dt.day])

        # define the model structure: a convolutional Relu layer followed by
        # Sigmoid and Softmax layers, trained with cross-entropy cost
        cost = CECost()
        model = MLP(cost=cost)
        model.add_layer(ConvRelu_Opt(1, 1, rng=rng, stride=(1, 1)))
        model.add_layer(Sigmoid(idim=122, odim=122, rng=rng))
        model.add_layer(Softmax(idim=122, odim=19, rng=rng))
        # one can stack more layers here

        # print map(lambda x: (x.idim, x.odim), model.layers)
        lr_scheduler = LearningRateFixed(learning_rate=0.01, max_epochs=500)
        optimiser = SGDOptimiser(lr_scheduler=lr_scheduler)

        tr_stats, valid_stats = optimiser.train(model, train_dp, valid_dp)
        tst_cost, tst_accuracy = optimiser.validate(model, test_dp)
        seeds.append((tr_stats, valid_stats, (tst_cost, tst_accuracy)))

    end = time.time()
    print "scipy.correlate time: " + str(end - start)
    with open('seeds_conv_fft_feat.pkl', 'wb') as f:
        p.dump(seeds, f)
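
# Sketch: reloading the pickled per-seed statistics dumped above; each entry
# is the (tr_stats, valid_stats, (tst_cost, tst_accuracy)) tuple appended in
# the loop.
#
#   with open('seeds_conv_fft_feat.pkl', 'rb') as f:
#       seeds = p.load(f)
#   for tr_stats, valid_stats, (tst_cost, tst_acc) in seeds:
#       print "test accuracy: %.2f%%, cost: %.3f" % (tst_acc * 100., tst_cost)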
Example #3
# L1 regularisation weight sweep
from support import *  # local helper module providing create_one_hid_model and get_models_statistics
from mlp.schedulers import LearningRateFixed
from mlp.optimisers import SGDOptimiser

learning_rate = 0.7
num_epochs = 30
l1_weights = [10, 1, 0.1, 0.01, 0.001]
tsk2_1_jobs = []
for l1_w in l1_weights:
    lr_scheduler = LearningRateFixed(learning_rate, num_epochs)
    tsk2_1_jobs.append({
        "model": create_one_hid_model(),
        "label": "l1_w={}".format(l1_w),
        "optimiser": SGDOptimiser(lr_scheduler, l1_weight=l1_w)
    })

tsk2_1_stats = get_models_statistics(tsk2_1_jobs)
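
# For reference, each job above adds an L1 penalty l1_w * sum(|W|) to the
# training cost, whose gradient contribution is l1_w * sign(W). A minimal
# numpy illustration of that term (not the mlp package's implementation):
import numpy

def l1_grad(W, l1_w):
    # derivative of l1_w * sum(|W|) with respect to W
    return l1_w * numpy.sign(W)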