Example #1
def ae1(data):
    '''Using a regression layer. This layer needs an explicit target'''

    miniBatchSize = 2

    ls = MS.GradientDescent(lr=0.1)
    cost = MC.MeanSquaredError()

    i = ML.Input(8, name='inp')
    h = ML.Hidden(3,
                  activation=MA.ReLU(),
                  initializations=[MI.SmallUniformWeights(),
                                   MI.ZeroBias()],
                  name="hid")
    o = ML.Regression(
        8,
        activation=MA.ReLU(),
        initializations=[MI.SmallUniformWeights(),
                         MI.ZeroBias()],
        learningScenario=ls,
        costObject=cost,
        name="out")

    ae = i > h > o

    for e in xrange(1000):
        for i in xrange(0, len(data), miniBatchSize):
            ae.train(o,
                     inp=data[i:i + miniBatchSize],
                     targets=data[i:i + miniBatchSize])

    return ae, o
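
The function above returns the trained network and its output layer, but it does not show how the toy data is built or how reconstructions are read back. A minimal usage sketch, assuming ae1 is defined as above and that the usual Mariana aliases (ML, MA, MC, MS, MI) are already imported; the one-hot dataset and the ae.propagate call mirror the test case in Example #3.

import numpy

# Toy dataset: eight one-hot vectors of length 8, as in Example #3.
data = []
for k in xrange(8):
    row = numpy.zeros(8)
    row[k] = 1
    data.append(row)

ae, out = ae1(data)

# Propagate the whole dataset and check that each input is reconstructed
# with its peak at the right position.
res = ae.propagate(out, inp=data)["outputs"]
for k in xrange(len(res)):
    assert numpy.argmax(res[k]) == numpy.argmax(data[k])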
Example #2
def ae2(data):
    """This one uses an Autoencode layer. This layer is a part of the graph and does not need a specific traget"""

    miniBatchSize = 1

    ls = MS.GradientDescent(lr=0.1)
    cost = MC.MeanSquaredError()

    i = ML.Input(8, name='inp')
    h = ML.Hidden(3,
                  activation=MA.ReLU(),
                  initializations=[MI.SmallUniformWeights(),
                                   MI.ZeroBias()],
                  name="hid")
    o = ML.Autoencode(
        i.name,
        activation=MA.ReLU(),
        initializations=[MI.SmallUniformWeights(),
                         MI.ZeroBias()],
        learningScenario=ls,
        costObject=cost,
        name="out")

    ae = i > h > o
    # ae.init()
    # o.train.printGraph()
    for e in xrange(2000):
        for i in xrange(0, len(data), miniBatchSize):
            ae.train(o, inp=data[i:i + miniBatchSize])

    return ae, o
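
The Autoencode variant trains without an explicit target: the layer reconstructs the layer whose name it was given ('inp'), so only inp is passed to train. A minimal usage sketch under the same assumptions as the sketch after Example #1:

# Same one-hot toy data as before; only the training call differs (no targets).
ae, out = ae2(data)

# Reconstructions are read back exactly as with the Regression output layer.
res = ae.propagate(out, inp=data)["outputs"]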
Example #3
File: tests.py    Project: Solertis/Mariana
    def test_ae_reg(self) :

        data = []
        for i in xrange(8) :
            zeros = numpy.zeros(8)
            zeros[i] = 1
            data.append(zeros)

        ls = MS.GradientDescent(lr = 0.1)
        cost = MC.MeanSquaredError()

        i = ML.Input(8, name = 'inp')
        h = ML.Hidden(3, activation = MA.ReLU(), initializations=[MI.SmallUniformWeights(), MI.ZeroBias()], name = "hid")
        o = ML.Regression(8, activation = MA.ReLU(), initializations=[MI.SmallUniformWeights(), MI.ZeroBias()], learningScenario = ls, costObject = cost, name = "out" )

        ae = i > h > o

        miniBatchSize = 1
        for e in xrange(2000) :
            for i in xrange(0, len(data), miniBatchSize) :
                ae.train(o, inp = data[i:i+miniBatchSize], targets = data[i:i+miniBatchSize] )

        res = ae.propagate(o, inp = data)["outputs"]
        for i in xrange(len(res)) :
            self.assertEqual( numpy.argmax(data[i]), numpy.argmax(res[i]))
Example #4
	def __init__(self, WInitialization=MI.SmallUniformWeights(), bInitialization=MI.ZeroBias(), epsilon=1e-6) :
		Decorator_ABC.__init__(self)
		self.epsilon = epsilon
		self.WInitialization = WInitialization
		self.bInitialization = bInitialization
		self.W = None
		self.b = None
		self.paramShape = None
Example #5
File: layers.py    Project: StatML/Mariana
 def __init__(self,
              size,
              layerTypes,
              initializations=[MI.SmallUniformWeights(),
                               MI.ZeroBias()],
              **kwargs):
     super(WeightBias_ABC, self).__init__(size,
                                          layerTypes=layerTypes,
                                          initializations=initializations,
                                          **kwargs)
     self.testInputs = None
     self.parameters = {"W": None, "b": None}
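
Since WeightBias_ABC supplies SmallUniformWeights and ZeroBias as default initializations, concrete layers that derive from it can be created without passing initializations at all. A short sketch, assuming Hidden derives from WeightBias_ABC (suggested by the identical initializations argument used throughout these examples) and that the ML/MA/MI aliases are imported as before:

# Defaults inherited from WeightBias_ABC...
h_default = ML.Hidden(3, activation=MA.ReLU(), name="hid_default")

# ...or an explicit list that overrides them.
h_custom = ML.Hidden(3,
                     activation=MA.ReLU(),
                     initializations=[MI.SmallUniformWeights(), MI.ZeroBias()],
                     name="hid_custom")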
Example #6
    def __init__(self,
                 size,
                 layerType,
                 initializations=[MI.SmallUniformWeights(),
                                  MI.ZerosBias()],
                 **kwargs):
        super(WeightBias_ABC, self).__init__(size,
                                             layerType=layerType,
                                             initializations=initializations,
                                             **kwargs)

        self.W = None
        self.b = None
Example #7
    def __init__(self,
                 WInitialization=MI.SmallUniformWeights(),
                 bInitialization=MI.ZeroBias(),
                 epsilon=1e-6,
                 onTrain=True,
                 onTest=True):
        Decorator_ABC.__init__(self)
        self.epsilon = epsilon
        self.WInitialization = WInitialization
        self.bInitialization = bInitialization
        self.W = None
        self.b = None
        self.paramShape = None

        self.onTrain = onTrain
        self.onTest = onTest
        self.hyperParameters.extend(["onTrain", "onTest", "epsilon"])
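
The constructor above looks like a batch-normalization style decorator: it learns its own W and b, keeps an epsilon for numerical stability, and the onTrain/onTest flags control when it is applied. A hedged sketch of how such a decorator would be attached to a layer, assuming it is exposed as BatchNormalization in a Mariana.decorators module aliased MD and that layers accept a decorators keyword (both are assumptions, not confirmed by the snippets above):

# Hypothetical attachment of the decorator shown above to a hidden layer.
h = ML.Hidden(3,
              activation=MA.ReLU(),
              decorators=[MD.BatchNormalization(epsilon=1e-6, onTrain=True, onTest=True)],
              name="hid_bn")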