def _setOutputs(self):
    """Defines the outputs as the pooled convolution of the summed inputs plus a broadcasted bias."""
    from theano.tensor.nnet import conv

    # sum the outputs of all incoming layers to form this layer's input
    for layer in self.network.inConnections[self]:
        if self.inputs is None:
            self.inputs = layer.outputs
        else:
            self.inputs += layer.outputs

    if self.filterHeight > self.inputHeight:
        raise ValueError(
            "Filter height for '%s' cannot be bigger than its input height: %s > %s" % (self.name, self.filterHeight, self.inputHeight))

    if self.filterWidth > self.inputWidth:
        raise ValueError(
            "Filter width for '%s' cannot be bigger than its input width: %s > %s" % (self.name, self.filterWidth, self.inputWidth))

    self.convolution = conv.conv2d(
        input=self.inputs,
        filters=self.W,
        filter_shape=self.getParameterShape('W'))

    self.pooled = self.pooler.apply(self)
    self.nbFlatOutputs = self.nbChannels * self.height * self.width

    # default to a zero bias if no initialization provided one
    if self.b is None:
        MI.ZerosBias().apply(self)

    # insert broadcastable axes so the per-channel bias adds over (batch, channel, row, col)
    self.b = self.b.dimshuffle('x', 0, 'x', 'x')
    self.outputs = self.pooled + self.b
    self.testOutputs = self.pooled + self.b
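# A minimal standalone sketch (not part of Mariana) of the graph the method above builds:
# a 4D input convolved with a 4D filter bank, plus a per-channel bias broadcasted with
# dimshuffle('x', 0, 'x', 'x'). The shapes, names, and small-uniform initialization below
# are illustrative assumptions only; the pooling step handled by self.pooler is omitted.
#
# import numpy
# import theano
# import theano.tensor as tt
# from theano.tensor.nnet import conv
#
# rng = numpy.random.RandomState(0)
# batch_size, in_channels, in_h, in_w = 2, 1, 28, 28
# nb_filters, filter_h, filter_w = 4, 5, 5
# filter_shape = (nb_filters, in_channels, filter_h, filter_w)
#
# X = tt.tensor4('X')  # (batch, channel, row, col)
# W = theano.shared(rng.uniform(-0.01, 0.01, filter_shape).astype(theano.config.floatX), name='W')
# b = theano.shared(numpy.zeros(nb_filters, dtype=theano.config.floatX), name='b')
#
# convolved = conv.conv2d(input=X, filters=W, filter_shape=filter_shape)
# # 'x' inserts broadcastable axes so the (nb_filters,) bias adds over (batch, channel, row, col)
# out = convolved + b.dimshuffle('x', 0, 'x', 'x')
#
# f = theano.function([X], out)
# print(f(rng.rand(batch_size, in_channels, in_h, in_w).astype(theano.config.floatX)).shape)
# # -> (2, 4, 24, 24) with the default 'valid' border mode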
def __init__(self, WInitialization=MI.SmallUniformWeights(), bInitialization=MI.ZerosBias(), epsilon=1e-6):
    Decorator_ABC.__init__(self)
    self.epsilon = epsilon
    self.WInitialization = WInitialization
    self.bInitialization = bInitialization
    self.W = None
    self.b = None
    self.paramShape = None
def __init__(self, size, layerType, initializations=[MI.SmallUniformWeights(), MI.ZerosBias()], **kwargs):
    super(WeightBias_ABC, self).__init__(size, layerType=layerType, initializations=initializations, **kwargs)
    self.W = None
    self.b = None
def _setOutputs(self): """initializes weights and bias. By default weights are setup to random low values, use Mariana decorators to change this behaviour.""" for layer in self.network.inConnections[self]: if self.inputs is None: self.inputs = layer.outputs else: self.inputs += layer.outputs if self.W is None: raise ValueError( "No initialization was defined for weights (self.W)") if self.b is None: MI.ZerosBias().apply(self) self.outputs = tt.dot(self.inputs, self.W) + self.b self.testOutputs = tt.dot(self.inputs, self.W) + self.b