Ejemplo n.º 1
0
def createNet():
    """Assemble a FeedForwardNetwork, finalize its topology and gradient-check it."""
    network = FeedForwardNetwork()
    wired_modules = add_modules(network)
    add_connections(network, wired_modules)
    # sortModules() must run before the network can be activated.
    network.sortModules()
    gradientCheck(network)
    return network
Ejemplo n.º 2
0
def createNet():
    """Create, wire up and sanity-check a feed-forward network."""
    ffn = FeedForwardNetwork()
    add_connections(ffn, add_modules(ffn))
    ffn.sortModules()  # finalize topology; required before first activation
    gradientCheck(ffn)
    return ffn
Ejemplo n.º 3
0
    def equivalence_recurrent(self, net, _net):
        """Assert the arac network (_net) matches the pybrain network (net)
        on recurrent forward and backward passes, both before and after a
        reset, and finally that _net passes a gradient check."""
        self.sync(net, _net)

        def forward_pass():
            # Feed identical random inputs to both nets; outputs must agree.
            for _ in xrange(self.runs):
                inpt = scipy.random.random(net.indim)
                self.assertArrayNear(net.activate(inpt), _net.activate(inpt))

        def backward_pass():
            # Backpropagate identical random errors; gradients must agree.
            for _ in xrange(self.runs):
                error = scipy.random.random(net.outdim)
                self.assertArrayNear(net.backActivate(error),
                                     _net.backActivate(error))
                if hasattr(_net, '_derivs'):
                    self.assertArrayNear(_net.derivs, net.derivs)

        forward_pass()
        backward_pass()

        # Resetting both nets must clear the arac net's input buffer.
        net.reset()
        _net.reset()
        self.assert_((_net.inputbuffer == 0.).all())

        # Equivalence must still hold after the reset.
        forward_pass()
        backward_pass()

        self.assert_(gradientCheck(_net))
Ejemplo n.º 4
0
    def equivalence_recurrent(self, net, _net):
        """Check pybrain/arac equivalence for a recurrent net: forward and
        backward passes agree, the nets reset cleanly, agreement holds after
        the reset, and the arac net passes a gradient check."""
        self.sync(net, _net)

        for phase in (0, 1):
            # Forward: both nets must produce the same activations.
            for _ in xrange(self.runs):
                stimulus = scipy.random.random(net.indim)
                reference = net.activate(stimulus)
                candidate = _net.activate(stimulus)
                self.assertArrayNear(reference, candidate)

            # Backward: both nets must produce the same back-activations
            # (and matching parameter derivatives, when exposed).
            for _ in xrange(self.runs):
                err = scipy.random.random(net.outdim)
                reference = net.backActivate(err)
                candidate = _net.backActivate(err)
                self.assertArrayNear(reference, candidate)
                if hasattr(_net, '_derivs'):
                    self.assertArrayNear(_net.derivs, net.derivs)

            if phase == 0:
                # Reset between phases; the arac input buffer must be zeroed.
                net.reset()
                _net.reset()
                self.assert_((_net.inputbuffer == 0.).all())

        self.assert_(gradientCheck(_net))
Ejemplo n.º 5
0
    def equivalence_feed_forward(self, net, _net):
        """Assert the arac net (_net) and the pybrain net agree on forward
        and backward passes for random data, then gradient-check _net."""
        self.sync(net, _net)
        for _ in xrange(self.runs):
            stimulus = scipy.random.random(net.indim)
            self.assertArrayNear(net.activate(stimulus),
                                 _net.activate(stimulus))
            err = scipy.random.random(net.outdim)
            self.assertArrayNear(net.backActivate(err),
                                 _net.backActivate(err))
            # Compare parameter derivatives when the arac net exposes them.
            if hasattr(_net, '_derivs'):
                self.assertArrayNear(_net.derivs, net.derivs)

        self.assert_(gradientCheck(_net))
Ejemplo n.º 6
0
    def equivalence_feed_forward(self, net, _net):
        """Run self.runs random forward/backward rounds on both nets and
        require near-equal results; finish with a gradient check on _net."""
        self.sync(net, _net)
        rounds = xrange(self.runs)
        for _ in rounds:
            inpt = scipy.random.random(net.indim)
            expected = net.activate(inpt)
            actual = _net.activate(inpt)
            self.assertArrayNear(expected, actual)
            error = scipy.random.random(net.outdim)
            expected = net.backActivate(error)
            actual = _net.backActivate(error)
            self.assertArrayNear(expected, actual)
            if hasattr(_net, '_derivs'):
                # Derivatives are only present on some arac builds.
                self.assertArrayNear(_net.derivs, net.derivs)

        self.assert_(gradientCheck(_net))
Ejemplo n.º 7
0
    def Train(self, dataset, error_observer, logger, dump_file):
        gradientCheck(self.m_net)

        net_dataset = SequenceClassificationDataSet(4, 2)
        for record in dataset:
            net_dataset.newSequence()

            gl_raises = record.GetGlRises()
            gl_min = record.GetNocturnalMinimum()

            if DayFeatureExpert.IsHypoglycemia(record):
                out_class = [1, 0]
            else:
                out_class = [0, 1]

            for gl_raise in gl_raises:
                net_dataset.addSample([gl_raise[0][0].total_seconds() / (24*3600), gl_raise[0][1] / 300, gl_raise[1][0].total_seconds() / (24*3600), gl_raise[1][1] / 300] , out_class)

        train_dataset, test_dataset = net_dataset.splitWithProportion(0.8)

        trainer = RPropMinusTrainer(self.m_net, dataset=train_dataset, momentum=0.8, learningrate=0.3, lrdecay=0.9, weightdecay=0.01, verbose=True)
        validator = ModuleValidator()

        train_error = []
        test_error = []
        for i in range(0, 80):
            trainer.trainEpochs(1)
            train_error.append(validator.MSE(self.m_net, train_dataset)) # here is validate func, think it may be parametrised by custom core function
            test_error.append(validator.MSE(self.m_net, test_dataset))
            print train_error
            print test_error
            error_observer(train_error, test_error)
            gradientCheck(self.m_net)

        dump_file = open(dump_file, 'wb')
        pickle.dump(self.m_net, dump_file)
Ejemplo n.º 8
0
def run():
    import scipy
    from scipy import linalg

    f = open("modelfitDatabase1.dat", "rb")
    import pickle

    dd = pickle.load(f)
    node = dd.children[13]

    rfs = node.children[0].data["ReversCorrelationRFs"]

    pred_act = numpy.array(node.children[0].data["ReversCorrelationPredictedActivities"])
    pred_val_act = numpy.array(node.children[0].data["ReversCorrelationPredictedValidationActivities"])

    training_set = node.data["training_set"]
    validation_set = node.data["validation_set"]
    training_inputs = node.data["training_inputs"]
    validation_inputs = node.data["validation_inputs"]

    ofs = contrib.modelfit.fit_sigmoids_to_of(numpy.mat(training_set), numpy.mat(pred_act))
    pred_act_t = contrib.modelfit.apply_sigmoid_output_function(numpy.mat(pred_act), ofs)
    pred_val_act_t = contrib.modelfit.apply_sigmoid_output_function(numpy.mat(pred_val_act), ofs)

    (sx, sy) = numpy.shape(rfs[0])
    print sx, sy
    n = FeedForwardNetwork()

    inLayer = LinearLayer(sx * sy)
    hiddenLayer = SigmoidLayer(4)
    outputLayer = SigmoidLayer(1)

    n.addInputModule(inLayer)
    n.addModule(hiddenLayer)
    n.addOutputModule(outputLayer)

    in_to_hidden = RBFConnection(sx, sy, inLayer, hiddenLayer)
    # in_to_hidden = FullConnection(inLayer, hiddenLayer)
    hidden_to_out = FullConnection(hiddenLayer, outputLayer)

    n.addConnection(in_to_hidden)
    n.addConnection(hidden_to_out)
    n.sortModules()
    gradientCheck(n)
    return

    from pybrain.datasets import SupervisedDataSet

    ds = SupervisedDataSet(sx * sy, 1)
    val = SupervisedDataSet(sx * sy, 1)

    for i in xrange(0, len(training_inputs)):
        ds.addSample(training_inputs[i], training_set[i, 0])

    for i in xrange(0, len(validation_inputs)):
        val.addSample(validation_inputs[i], validation_set[i, 0])

    tstdata, trndata = ds.splitWithProportion(0.1)

    from pybrain.supervised.trainers import BackpropTrainer

    trainer = BackpropTrainer(n, trndata, momentum=0.1, verbose=True, learningrate=0.002)

    training_set = numpy.array(numpy.mat(training_set)[:, 0])
    validation_set = numpy.array(numpy.mat(validation_set)[:, 0])
    pred_val_act_t = numpy.array(numpy.mat(pred_val_act_t)[:, 0])

    out = n.activateOnDataset(val)
    (ranks, correct, pred) = contrib.modelfit.performIdentification(validation_set, out)
    print "Correct:", correct, "Mean rank:", numpy.mean(ranks), "MSE", numpy.mean(numpy.power(validation_set - out, 2))

    print "Start training"
    for i in range(50):
        trnresult = percentError(trainer.testOnData(), trndata)
        tstresult = percentError(trainer.testOnData(dataset=tstdata), tstdata)

        print "epoch: %4d" % trainer.totalepochs, "  train error: %5.2f%%" % trnresult, "  test error: %5.2f%%" % tstresult
        trainer.trainEpochs(1)

        out = n.activateOnDataset(val)
        (ranks, correct, pred) = contrib.modelfit.performIdentification(validation_set, out)
        print "Correct:", correct, "Mean rank:", numpy.mean(ranks), "MSE", numpy.mean(
            numpy.power(validation_set - out, 2)
        )

    out = n.activateOnDataset(val)

    print numpy.shape(out)
    print numpy.shape(validation_set)

    (ranks, correct, pred) = contrib.modelfit.performIdentification(validation_set, out)
    print "Correct:", correct, "Mean rank:", numpy.mean(ranks), "MSE", numpy.mean(numpy.power(validation_set - out, 2))

    (ranks, correct, pred) = contrib.modelfit.performIdentification(validation_set, pred_val_act_t)
    print "Correct:", correct, "Mean rank:", numpy.mean(ranks), "MSE", numpy.mean(
        numpy.power(validation_set - pred_val_act_t, 2)
    )

    return n