Example #1
def ae2(data):
    """This one uses an Autoencode layer. This layer is a part of the graph and does not need a specific traget"""

    miniBatchSize = 1

    ls = MS.GradientDescent(lr=0.1)
    cost = MC.MeanSquaredError()

    i = ML.Input(8, name='inp')
    h = ML.Hidden(3,
                  activation=MA.ReLU(),
                  initializations=[MI.SmallUniformWeights(),
                                   MI.ZeroBias()],
                  name="hid")
    o = ML.Autoencode(
        i.name,
        activation=MA.ReLU(),
        initializations=[MI.SmallUniformWeights(),
                         MI.ZeroBias()],
        learningScenario=ls,
        costObject=cost,
        name="out")

    ae = i > h > o
    # ae.init()
    # o.train.printGraph()
    for epoch in xrange(2000):
        # iterate over mini-batches; `start` avoids shadowing the Input layer `i`
        for start in xrange(0, len(data), miniBatchSize):
            ae.train(o, inp=data[start:start + miniBatchSize])

    return ae, o
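A minimal way to drive this function, as a sketch: the one-hot toy data and the propagate call mirror the test shown in Example #6 below.

import numpy

# the classic 8-3-8 toy problem: eight one-hot vectors to reconstruct
data = []
for k in xrange(8):
    row = numpy.zeros(8)
    row[k] = 1
    data.append(row)

ae, out = ae2(data)
# propagate returns a dict; "outputs" holds the reconstructions
reconstructions = ae.propagate(out, inp=data)["outputs"]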
Example #2
def ae1(data):
    '''Uses a Regression layer, which needs an explicit target: here the inputs serve as their own targets, unlike the Autoencode layer above which wires the target implicitly.'''

    miniBatchSize = 2

    ls = MS.GradientDescent(lr=0.1)
    cost = MC.MeanSquaredError()

    i = ML.Input(8, name='inp')
    h = ML.Hidden(3,
                  activation=MA.ReLU(),
                  initializations=[MI.SmallUniformWeights(),
                                   MI.ZeroBias()],
                  name="hid")
    o = ML.Regression(
        8,
        activation=MA.ReLU(),
        initializations=[MI.SmallUniformWeights(),
                         MI.ZeroBias()],
        learningScenario=ls,
        costObject=cost,
        name="out")

    ae = i > h > o

    for epoch in xrange(1000):
        # `start` avoids shadowing the Input layer `i`
        for start in xrange(0, len(data), miniBatchSize):
            ae.train(o,
                     inp=data[start:start + miniBatchSize],
                     targets=data[start:start + miniBatchSize])

    return ae, o
Example #3
    def _setOutputs(self):
        from theano.tensor.nnet import conv

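        # this layer's input is the sum of the outputs of every incoming layer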
        for layer in self.network.inConnections[self]:

            if self.inputs is None:
                self.inputs = layer.outputs
            else:
                self.inputs += layer.outputs

        if self.filterHeight > self.inputHeight:
            raise ValueError(
                "Filter height for '%s' cannot be bigger than its input height: '%s' > '%s'"
                % (self.name, self.filterHeight, self.inputHeight))

        if self.filterWidth > self.inputWidth:
            raise ValueError(
                "Filter width for '%s' cannot be bigger than its input width: '%s' > '%s'"
                % (self.name, self.filterWidth, self.inputWidth))

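        # convolve the summed inputs with this layer's filter bank W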
        self.convolution = conv.conv2d(
            input=self.inputs,
            filters=self.W,
            filter_shape=self.getParameterShape('W'))
        self.pooled = self.pooler.apply(self)
        self.nbFlatOutputs = self.nbChannels * self.height * self.width

        if self.b is None:
            MI.ZeroBias().apply(self)

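        # reshape the bias to (1, nbChannels, 1, 1) so it broadcasts over the batch and spatial dimensions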
        self.b = self.b.dimshuffle('x', 0, 'x', 'x')

        self.outputs = self.pooled + self.b
        self.testOutputs = self.pooled + self.b
Example #4
    def __init__(self,
                 WInitialization=MI.SmallUniformWeights(),
                 bInitialization=MI.ZeroBias(),
                 epsilon=1e-6):
        Decorator_ABC.__init__(self)
        self.epsilon = epsilon
        self.WInitialization = WInitialization
        self.bInitialization = bInitialization
        self.W = None
        self.b = None
        self.paramShape = None
Example #5
    def __init__(self,
                 size,
                 layerTypes,
                 initializations=[MI.SmallUniformWeights(),
                                  MI.ZeroBias()],
                 **kwargs):
        super(WeightBias_ABC, self).__init__(size,
                                             layerTypes=layerTypes,
                                             initializations=initializations,
                                             **kwargs)
        self.testInputs = None
        self.parameters = {"W": None, "b": None}
Example #6
    def test_ae_reg(self):

        data = []
        # `k` avoids clashing with the Input layer `i` defined below
        for k in xrange(8):
            zeros = numpy.zeros(8)
            zeros[k] = 1
            data.append(zeros)

        ls = MS.GradientDescent(lr=0.1)
        cost = MC.MeanSquaredError()

        i = ML.Input(8, name='inp')
        h = ML.Hidden(
            3,
            activation=MA.ReLU(),
            initializations=[MI.SmallUniformWeights(),
                             MI.ZeroBias()],
            name="hid")
        o = ML.Regression(
            8,
            activation=MA.ReLU(),
            initializations=[MI.SmallUniformWeights(),
                             MI.ZeroBias()],
            learningScenario=ls,
            costObject=cost,
            name="out")

        ae = i > h > o

        miniBatchSize = 1
        for epoch in xrange(2000):
            for start in xrange(0, len(data), miniBatchSize):
                ae.train(o,
                         inp=data[start:start + miniBatchSize],
                         targets=data[start:start + miniBatchSize])

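        # forward pass over the whole dataset; the result dict exposes the activations under "outputs"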
        res = ae.propagate(o, inp=data)["outputs"]
        for k in xrange(len(res)):
            self.assertEqual(numpy.argmax(data[k]), numpy.argmax(res[k]))
Example #7
    def __init__(self,
                 WInitialization=MI.SmallUniformWeights(),
                 bInitialization=MI.ZeroBias(),
                 epsilon=1e-6,
                 onTrain=True,
                 onTest=True):
        Decorator_ABC.__init__(self)
        self.epsilon = epsilon
        self.WInitialization = WInitialization
        self.bInitialization = bInitialization
        self.W = None
        self.b = None
        self.paramShape = None

        self.onTrain = onTrain
        self.onTest = onTest
        self.hyperParameters.extend(["onTrain", "onTest", "epsilon"])
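The epsilon, W (scale) and b (shift) parameters here are the usual ingredients of batch normalisation, with onTrain/onTest toggling whether the decorator is applied during training and/or testing. What such a decorator typically computes, as a minimal Theano sketch (an illustration of the technique, not Mariana's exact code):

import theano.tensor as T

def batch_normalize(outputs, W, b, epsilon):
    # normalise each unit over the mini-batch, then apply the learned
    # scale (W) and shift (b); epsilon keeps the denominator away from zero
    mean = outputs.mean(axis=0)
    std = T.sqrt(outputs.var(axis=0) + epsilon)
    return W * ((outputs - mean) / std) + b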
Example #8
* automatically saves the model if the training halts because of an error or if the process is killed
* saves a log if the process dies unexpectedly
* training results and hyperparameter values are recorded to a file
* allows you to define custom stop criteria
* training info is printed at each epoch, including best scores and at which epoch they were achieved

"""

if __name__ == "__main__":

	# Let's define the network
	ls = MS.GradientDescent(lr=0.01)
	cost = MC.NegativeLogLikelihood()

	i = ML.Input(28 * 28, name='inp')
	h = ML.Hidden(500, activation=MA.Tanh(), initializations=[MI.GlorotTanhInit(), MI.ZeroBias()], regularizations=[MR.L1(0), MR.L2(0.0001)], name="hid")
	o = ML.SoftmaxClassifier(10, learningScenario=ls, costObject=cost, name="out", regularizations=[MR.L1(0), MR.L2(0.0001)])

	mlp = i > h > o

	mlp.saveDOT("mnist_mlp")
	mlp.saveHTML("mnist_mlp")
	# And then map sets to the inputs and outputs of our network
	train_set, validation_set, test_set = load_mnist()

	trainData = MDM.Series(images=train_set[0], numbers=train_set[1])
	trainMaps = MDM.DatasetMapper("train", miniBatchSize=500)
	trainMaps.mapInput(i, trainData.images)
	trainMaps.mapOutput(o, trainData.numbers)

	testData = MDM.Series(images=test_set[0], numbers=test_set[1])
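The snippet is truncated here; by symmetry with the train mapping above, the test series would be mapped the same way (a sketch following the pattern shown, not the file's actual continuation):

    testMaps = MDM.DatasetMapper("test", miniBatchSize=500)
    testMaps.mapInput(i, testData.images)
    testMaps.mapOutput(o, testData.numbers)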
Example #9
    def __init__(self, size, layerType, initializations=[MI.HeWeights(), MI.ZeroBias()], **kwargs):
        super(WeightBias_ABC, self).__init__(size, layerType=layerType, initializations=initializations, **kwargs)

        self.W = None
        self.b = None