def _make_loss_functions(self, mode=None):
    """Return pair (f_loss, f_d_loss) of functions.

     - f_loss returns the current loss,
     - f_d_loss returns the gradient of that loss wrt the parameters.
    """
    rng = T.shared_randomstreams.RandomStreams()

    # Drop out inputs.
    inpt = self.exprs['inpt']
    inpt_dropped_out = corrupt.mask(inpt, self.p_dropout_inpt, rng)
    givens = {inpt: inpt_dropped_out}
    loss = theano.clone(self.exprs['loss'], givens)

    # Drop out hidden units, layer by layer.
    n_layers = len(self.n_hiddens)
    for i in range(n_layers - 1):
        hidden = self.exprs['hidden_%i' % i]
        hidden_dropped_out = corrupt.mask(hidden, self.p_dropout_hidden, rng)
        givens = {hidden: hidden_dropped_out}
        loss = theano.clone(loss, givens)

    d_loss = T.grad(loss, self.parameters.flat)

    f_loss = self.function(['inpt', 'target'], loss, explicit_pars=True,
                           mode=mode)
    f_d_loss = self.function(['inpt', 'target'], d_loss, explicit_pars=True,
                             mode=mode)
    return f_loss, f_d_loss
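
# The method above injects dropout by cloning the existing loss graph with
# `givens` substitutions instead of rebuilding the network. Below is a
# minimal, self-contained sketch of that pattern in plain Theano; `mask` is
# a hypothetical stand-in for corrupt.mask, not the library's implementation,
# and the toy model (single shared weight, squared error) is illustrative only.
import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams


def mask(expr, p_drop, rng):
    # Zero out entries of ``expr`` independently with probability ``p_drop``.
    keep = rng.binomial(size=expr.shape, p=1 - p_drop, dtype=expr.dtype)
    return expr * keep


rng = RandomStreams(seed=0)
inpt = T.matrix('inpt')
target = T.matrix('target')
w = theano.shared(0.5, name='w')

output = inpt * w
loss = ((output - target) ** 2).mean()

# Substitute the dropped-out input for every occurrence of the input in the
# loss graph; the rest of the expression stays as it is.
loss_dropout = theano.clone(loss, {inpt: mask(inpt, 0.2, rng)})
f_loss = theano.function([inpt, target], loss_dropout)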
def _init_exprs(self):
    # Here we need to replace the input with a corrupted version. If we
    # did so naively by calling clone on the loss, the targets (which are
    # identical to the inputs in the sense of identity in programming)
    # would be replaced as well. Instead, we only want the inputs
    # replaced. Thus we first clone the output of the model and replace
    # the input with the corrupted input; this does not change the
    # targets. Afterwards, we put that corrupted output into the loss as
    # well.
    super(DenoisingAutoEncoder, self)._init_exprs()
    if self.noise_type == 'gauss':
        corrupted_inpt = corrupt.gaussian_perturb(
            self.exprs['inpt'], self.c_noise)
    elif self.noise_type == 'mask':
        corrupted_inpt = corrupt.mask(
            self.exprs['inpt'], self.c_noise)

    output_from_corrupt = theano.clone(
        self.exprs['output'],
        {self.exprs['inpt']: corrupted_inpt})

    score = self.exprs['loss']
    loss = theano.clone(
        self.exprs['loss'],
        {self.exprs['output']: output_from_corrupt})

    self.exprs.update(get_named_variables(locals(), overwrite=True))
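
# The two-step clone above matters because, for an autoencoder, the target
# node in the loss graph is literally the same variable as the input node, so
# a single clone of the loss with {inpt: corrupted_inpt} would corrupt the
# targets too. Minimal sketch of the pattern (illustrative names only, with a
# simple additive perturbation standing in for corrupt.gaussian_perturb):
import theano
import theano.tensor as T

inpt = T.matrix('inpt')
w = theano.shared(0.5, name='w')
output = inpt * w

# Reconstruction loss: the target is the very same variable as the input.
loss = ((output - inpt) ** 2).mean()

corrupted_inpt = inpt + 0.1

# Step 1: rebuild only the model output on top of the corrupted input.
output_from_corrupt = theano.clone(output, {inpt: corrupted_inpt})

# Step 2: swap that output into the loss; the ``inpt`` that serves as the
# target inside the loss is left untouched.
loss_from_corrupt = theano.clone(loss, {output: output_from_corrupt})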