Example #1
    def test_ae_reg(self):
        # Assumes numpy plus Mariana's usual module aliases (MS, MC, ML, MA, MI);
        # see the import sketch after this example
        powerOf2 = 3
        nbUnits = 2**powerOf2

        # Build a one-hot dataset: each example is a basis vector of length nbUnits
        data = []
        for i in xrange(nbUnits):
            zeros = numpy.zeros(nbUnits)
            zeros[i] = 1
            data.append(zeros)

        ls = MS.GradientDescent(lr=0.1)
        cost = MC.MeanSquaredError()

        inp = ML.Input(nbUnits, name='inp')
        h = ML.Hidden(powerOf2,
                      activation=MA.ReLU(),
                      initializations=[
                          MI.Uniform('W', small=True),
                          MI.SingleValue('b', 0)
                      ],
                      name="hid")
        o = ML.Regression(nbUnits,
                          activation=MA.ReLU(),
                          initializations=[
                              MI.Uniform('W', small=True),
                              MI.SingleValue('b', 0)
                          ],
                          learningScenari=[ls],
                          cost=cost,
                          name="out")

        ae = inp > h > o  # the > operator chains layers into a network
        ae.init()

        miniBatchSize = 1
        # Train for 2000 epochs; the target of each mini-batch is the input itself
        for e in xrange(2000):
            for i in xrange(0, len(data), miniBatchSize):
                miniBatch = data[i:i + miniBatchSize]
                ae["out"].train({
                    "inp.inputs": miniBatch,
                    "out.targets": miniBatch
                })["out.drive.train"]

        # A trained autoencoder should reconstruct each one-hot vector:
        # the argmax of every output must match the argmax of its input
        res = ae["out"].propagate["test"]({
            "inp.inputs": data
        })["out.propagate.test"]
        for i in xrange(len(res)):
            self.assertEqual(numpy.argmax(data[i]), numpy.argmax(res[i]))
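All of these snippets rely on Mariana's conventional module aliases; a likely import preamble is sketched below (the module paths are assumptions based on Mariana's documented conventions, not taken from the snippets themselves):

import numpy

import Mariana.activations as MA
import Mariana.costs as MC
import Mariana.initializations as MI
import Mariana.layers as ML
import Mariana.scenari as MS

# The wrapper classes in Examples #2-#6 additionally assume Lasagne aliases,
# presumably along the lines of:
# import lasagne.layers as LasagneLayers   # RecurrentLayer, LSTMLayer
# import lasagne.layers as LasagneCONV     # Conv1DLayer, Conv3DLayer
# Example #3 also uses an MTYPES alias for Mariana's Parameter type.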
Example #2
    def __init__(
            self,
            numFilters,
            filterHeight,
            filterWidth,
            filterDepth,
            name,
            stride=(1, 1, 1),
            pad=0,
            untieBiases=False,
            initializations=[MI.GlorotNormal('W'),
                             MI.SingleValue('b', 0)],
            flipFilters=True,
            **kwargs):
        super(Convolution3D, self).__init__(
            LasagneCONV.Conv3DLayer,
            initializations=initializations,
            lasagneHyperParameters={
                # Lasagne expects the snake_case keyword "num_filters"
                # (as in Convolution1D below), not "numFilters"
                "num_filters": numFilters,
                "filter_size": (filterHeight, filterWidth, filterDepth),
                "stride": stride,
                "pad": pad,
                "untie_biases": untieBiases,
                "flip_filters": flipFilters
            },
            lasagneKwargs={},
            name=name,
            **kwargs)
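A minimal usage sketch with hypothetical values (Lasagne's pad accepts an int, a 3-tuple, or the strings 'same'/'full'/'valid'):

conv3d = Convolution3D(
    numFilters=16,
    filterHeight=3,
    filterWidth=3,
    filterDepth=3,
    name="conv3d_1",
    stride=(1, 1, 1),
    pad=1  # pad each spatial dimension by one voxel
)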
Example #3
    def __init__(
            self,
            size,
            initializations=[MI.GlorotNormal('W'),
                             MI.SingleValue('b', 0)],
            **kwargs):
        super(Dense, self).__init__(initializations=initializations, **kwargs)
        # Normalise size into a symbolic output shape: ints and floats
        # become (None, size), and any iterable becomes (None,) + tuple(size)
        if isinstance(size, int):
            sh = (None, size)
        elif isinstance(size, float):
            sh = (None, int(size))
        else:
            sh = [None]
            sh.extend(list(size))
            sh = tuple(sh)

        self.size = size
        self.setHP("shape", sh)

        self.setParameters({
            "W": MTYPES.Parameter("%s.W" % (self.name)),
            "b": MTYPES.Parameter("%s.b" % (self.name))
        })

        self.inputShape = None
        self.originalInputShape = None
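A usage sketch (hypothetical; assumes the base class accepts a name keyword through **kwargs, as the other layers here do):

dense = Dense(128, name="dense_1")         # shape hyper-parameter (None, 128)
stack = Dense((4, 32), name="dense_4x32")  # iterable sizes give (None, 4, 32)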
Example #4
    def __init__(
        self,
        size,
        name,
        initializations=[MI.Uniform('W_in_to_hid'), MI.Uniform('W_hid_to_hid'), MI.SingleValue('b', 0)],
        backwards=False,
        learnInit=False,
        gradientSteps=-1,
        gradClipping=0,
        unrollScan=False,
        # precomputeInput=False,
        onlyReturnFinal=False,
        **kwargs
    ):
        super(RecurrentDense, self).__init__(
                LasagneLayers.RecurrentLayer,
                lasagneHyperParameters={
                    "num_units": size,
                    "backwards": backwards,
                    "learn_init": learnInit,
                    "gradient_steps": gradientSteps,
                    "grad_clipping": gradClipping,
                    "unroll_scan": unrollScan,
                    "precompute_input": False,
                    "mask_input": None,
                    "only_return_final": onlyReturnFinal,
                },
                initializations=initializations,
                name=name,
                lasagneKwargs={},
                **kwargs
            )

        self.addHyperParameters(
            {
                # "maxSequenceLength": maxSequenceLength,
                "backwards": backwards,
                "learnInit": learnInit,
                "gradientSteps": gradientSteps,
                "gradClipping": gradClipping,
                "unrollScan": unrollScan,
                # "precomputeInput": precomputeInput,
                "onlyReturnFinal": onlyReturnFinal
            }
        )
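A usage sketch with hypothetical values:

rnn = RecurrentDense(
    64,
    name="rnn_1",
    gradClipping=5,       # clip gradients during backpropagation through time
    onlyReturnFinal=True  # keep only the last step's output
)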
Example #5
    def __init__(
        self,
        size,
        name,
        inGateConfig=GateConfig(),
        forgetGateConfig=GateConfig(),
        # Per the Lasagne signature quoted below, the *cell* gate is the one
        # that defaults to W_cell=None with a tanh activation
        cellGateConfig=GateConfig(W_cell=None, activation=MA.Tanh()),
        outGateConfig=GateConfig(),
        initializations=[MI.Uniform('W_in_to_hid'), MI.Uniform('W_hid_to_hid'), MI.SingleValue('b', 0)],
        backwards=False,
        learnInit=False,
        gradientSteps=-1,
        gradClipping=0,
        unrollScan=False,
        # precomputeInput=False,
        onlyReturnFinal=False,
        **kwargs
    ):
        # The body is missing in the source; the sketch below is an assumption
        # modelled on RecurrentDense above, with the class name taken to be
        # LSTM. A full implementation would also translate the four GateConfig
        # objects into lasagne.layers.Gate instances and pass them as the
        # ingate/forgetgate/cell/outgate hyper-parameters.
        super(LSTM, self).__init__(
            LasagneLayers.LSTMLayer,
            lasagneHyperParameters={
                "num_units": size,
                "backwards": backwards,
                "learn_init": learnInit,
                "gradient_steps": gradientSteps,
                "grad_clipping": gradClipping,
                "unroll_scan": unrollScan,
                "precompute_input": False,
                "mask_input": None,
                "only_return_final": onlyReturnFinal,
            },
            initializations=initializations,
            name=name,
            lasagneKwargs={},
            **kwargs
        )

# Reference: the wrapped Lasagne signature
# class lasagne.layers.LSTMLayer(incoming,
#     num_units,
#     ingate=lasagne.layers.Gate(),
#     forgetgate=lasagne.layers.Gate(),
#     cell=lasagne.layers.Gate(W_cell=None, nonlinearity=lasagne.nonlinearities.tanh),
#     outgate=lasagne.layers.Gate(),
#     nonlinearity=lasagne.nonlinearities.tanh,
#     cell_init=lasagne.init.Constant(0.),
#     hid_init=lasagne.init.Constant(0.),
#     peepholes=True,
#     backwards=False,
#     learn_init=False,
#     gradient_steps=-1,
#     grad_clipping=0,
#     unroll_scan=False,
#     precompute_input=True,
#     mask_input=None,
#     only_return_final=False,
#     **kwargs
# )
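A usage sketch under the same assumptions (the class name LSTM is inferred, values hypothetical):

lstm = LSTM(128, name="lstm_1", gradClipping=5, onlyReturnFinal=True)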
Example #6
    def __init__(
            self,
            numFilters,
            filterSize,
            name,
            stride=1,
            pad=0,
            untieBiases=False,
            flipFilters=True,
            initializations=[MI.GlorotNormal('W'),
                             MI.SingleValue('b', 0)],
            **kwargs):
        super(Convolution1D, self).__init__(
            LasagneCONV.Conv1DLayer,
            initializations=initializations,
            lasagneHyperParameters={
                "num_filters": numFilters,
                "filter_size": filterSize,
                "stride": stride,
                "pad": pad,
                "untie_biases": untieBiases,
                "flip_filters": flipFilters
            },
            lasagneKwargs={},
            name=name,  # forwarded to the base layer, as Convolution3D does
            **kwargs)
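A usage sketch with hypothetical values:

conv1d = Convolution1D(
    numFilters=32,
    filterSize=5,
    name="conv1d_1",
    pad=2  # preserves sequence length for a size-5 filter at stride 1
)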
Example #7
    def __init__(self,
                 initializations=[MI.Normal('W_in', 0.1, 0), MI.Normal('W_hid', 0.1, 0), MI.Normal('W_cell', 0.1, 0), MI.SingleValue('b', 0)],
                 activation=MA.Sigmoid()):  # activations live in MA, not MI
        super(GateConfig, self).__init__()
        self.initializations = initializations
        self.activation = activation
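A usage sketch; note that Example #5 passes a W_cell argument which this signature does not accept, so the two snippets likely come from different revisions of the class:

inGate = GateConfig()                        # sigmoid gate, Normal(0.1, 0) weights
tanhGate = GateConfig(activation=MA.Tanh())  # override only the activation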