def __init__(self, nInputs, nHidden, outputActivation=Sigmoid):
    """
    Constructs a new `AutoEncoder` network.

    :Parameters:
        nInputs : int
            The `inputs` size.
        nHidden : int
            The size of the hidden representation.
        outputActivation : class derived from `Activation`
            The type of activation for the backprojection layer.

    :attention: `outputActivation` parameter is not an instance but a class.
    """
    Sequential.__init__(self, nInputs=nInputs)
    self.nHidden = nHidden
    # Encoder maps inputs -> hidden (Tanh-squashed); decoder projects
    # hidden -> inputs and applies the caller-chosen output activation.
    layers = (
        Linear(nHidden, nInputs),
        Tanh(nHidden),
        Linear(nInputs, nHidden),
        outputActivation(nInputs),
    )
    for layer in layers:
        self.add(layer)
def __init__(self, nUnits, outputActivation=Sigmoid):
    """
    Constructs a new `MultiLayerPerceptron` network.

    :Parameters:
        nUnits : int list
            The sizes of the (input, hidden and output) representations.
        outputActivation : class derived from `Activation`
            The type of activation for the output layer.

    :attention: `outputActivation` parameter is not an instance but a class.
    """
    Sequential.__init__(self, nInputs=nUnits[0])
    self.nUnits = nUnits

    # Hidden layers: each one is a linear map followed by a Tanh squashing.
    hiddenSizes = nUnits[1:-1]
    for width in hiddenSizes:
        self.add(Linear(width))
        self.add(Tanh(width))

    # Output layer uses the caller-chosen activation class.
    outputSize = nUnits[-1]
    self.add(Linear(outputSize))
    self.add(outputActivation(outputSize))