Example #1
    def _init_exprs(self):
        # Here we need to replace the input with a corrupted version. If we do
        # so naively by calling clone on the loss, the targets (which are
        # identical to the inputs in the sense of identity in programming)
        # will be replaced as well. Instead, we just want to have the inputs
        # replaced. Thus we first clone the output of the model and replace
        # the input with the corrupted input. This will not change the
        # targets. Afterwards, we substitute that corrupted output into the
        # loss as well.
        super(DenoisingAutoEncoder, self)._init_exprs()
        if self.noise_type == 'gauss':
            corrupted_inpt = corrupt.gaussian_perturb(
                self.exprs['inpt'], self.c_noise)
        elif self.noise_type == 'mask':
            corrupted_inpt = corrupt.mask(
                self.exprs['inpt'], self.c_noise)

        output_from_corrupt = theano.clone(
            self.exprs['output'],
            {self.exprs['inpt']: corrupted_inpt}
        )

        score = self.exprs['loss']
        loss = theano.clone(
            self.exprs['loss'],
            {self.exprs['output']: output_from_corrupt})

        self.exprs.update(get_named_variables(locals(), overwrite=True))
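The key step above is the two-step theano.clone: cloning the output first substitutes the corrupted input only where it feeds the model, and cloning the loss afterwards swaps in that corrupted output while the target occurrence of the input stays untouched. The toy graph below is a minimal, self-contained sketch of that substitution pattern; inpt, output, corrupted and denoising_loss are illustrative names, not breze expressions.

import theano
import theano.tensor as T

# Toy graph: the reconstruction target is the input itself.
inpt = T.matrix('inpt')
output = T.nnet.sigmoid(inpt)            # stands in for exprs['output']
loss = ((output - inpt) ** 2).sum()      # stands in for exprs['loss']

# Cloning the loss directly with {inpt: corrupted} would also replace the
# target occurrence of inpt. Cloning the output first avoids that.
corrupted = inpt + 0.1                   # placeholder for a real corruption
output_from_corrupt = theano.clone(output, {inpt: corrupted})
denoising_loss = theano.clone(loss, {output: output_from_corrupt})

f = theano.function([inpt], denoising_loss)

In the compiled function, only the encoder side of the graph sees the corrupted input; the squared-error target is still the clean inpt, which is exactly what the comment in the method describes.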
Example #2
    def _init_exprs(self):
        super(ContractiveAutoEncoder, self)._init_exprs()
        jacobian_loss = T.sum(T.grad(self.exprs['feature'].mean(axis=0).sum(),
                              self.exprs['inpt']).mean(axis=0) ** 2)
        loss = self.exprs['loss'] + self.c_jacobian * jacobian_loss

        self.exprs.update(get_named_variables(locals(), overwrite=True))
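The jacobian_loss term above is the contraction penalty: the squared gradient of the batch-mean feature activations with respect to the input, which penalizes how strongly the features react to small changes of the input. The standalone sketch below recomputes the same expression on a toy one-layer encoder; W, feature and the shapes are illustrative assumptions, not breze internals.

import numpy as np
import theano
import theano.tensor as T

inpt = T.matrix('inpt')
W = theano.shared(np.random.randn(5, 3).astype(theano.config.floatX), 'W')
feature = T.nnet.sigmoid(T.dot(inpt, W))     # stands in for exprs['feature']

# Squared gradient of the batch-mean feature activations w.r.t. the input,
# averaged over the batch and summed over input dimensions.
jacobian_loss = T.sum(
    T.grad(feature.mean(axis=0).sum(), inpt).mean(axis=0) ** 2)

f = theano.function([inpt], jacobian_loss)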
Example #3
    def _init_exprs(self):
        super(ContractiveAutoEncoder, self)._init_exprs()
        jacobian_loss = T.sum(
            T.grad(self.exprs['feature'].mean(axis=0).sum(),
                   self.exprs['inpt']).mean(axis=0)**2)
        loss = self.exprs['loss'] + self.c_jacobian * jacobian_loss

        self.exprs.update(get_named_variables(locals(), overwrite=True))
Example #4
    def _init_exprs(self):
        super(SparseAutoEncoder, self)._init_exprs()
        f_sparsity_loss = lookup(self.sparsity_loss, loss_)
        sparsity_loss = f_sparsity_loss(
            self.sparsity_target, self.exprs['feature'].mean(axis=0)).sum()
        loss = self.exprs['loss'] + self.c_sparsity * sparsity_loss

        self.exprs.update(get_named_variables(locals(), overwrite=True))
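Here lookup(self.sparsity_loss, loss_) resolves the configured loss name to a function from breze's loss module, which is then applied between the sparsity target and the mean feature activation over the batch. The sketch below shows one common choice for such a penalty, the Bernoulli KL divergence; whether the configured sparsity_loss resolves to exactly this form depends on the model's settings, and all names in the sketch are illustrative.

import theano
import theano.tensor as T

feature = T.matrix('feature')            # stands in for exprs['feature']
sparsity_target = 0.05

# Mean activation of each hidden unit over the batch, compared against the
# target sparsity with a Bernoulli KL divergence, summed over units.
rho_hat = feature.mean(axis=0)
sparsity_loss = T.sum(
    sparsity_target * T.log(sparsity_target / rho_hat)
    + (1 - sparsity_target) * T.log((1 - sparsity_target) / (1 - rho_hat)))

f = theano.function([feature], sparsity_loss)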
Example #5
    def _init_exprs(self):
        super(SparseAutoEncoder, self)._init_exprs()
        f_sparsity_loss = lookup(self.sparsity_loss, loss_)
        sparsity_loss = f_sparsity_loss(
            self.sparsity_target, self.exprs['feature'].mean(axis=0)).sum()
        loss = self.exprs['loss'] + self.c_sparsity * sparsity_loss

        self.exprs.update(get_named_variables(locals(), overwrite=True))
Example #6
    def _init_exprs(self):
        # Here we need to replace the input with a corrupted version. If we do
        # so naively by calling clone on the loss, the targets (which are
        # identical to the inputs in the sense of identity in programming)
        # will be replaced as well. Instead, we just want to have the inputs
        # replaced. Thus we first clone the output of the model and replace
        # the input with the corrupted input. This will not change the
        # targets. Afterwards, we substitute that corrupted output into the
        # loss as well.
        super(DenoisingAutoEncoder, self)._init_exprs()
        if self.noise_type == 'gauss':
            corrupted_inpt = corrupt.gaussian_perturb(self.exprs['inpt'],
                                                      self.c_noise)
        elif self.noise_type == 'mask':
            corrupted_inpt = corrupt.mask(self.exprs['inpt'], self.c_noise)

        output_from_corrupt = theano.clone(
            self.exprs['output'], {self.exprs['inpt']: corrupted_inpt})

        score = self.exprs['loss']
        loss = theano.clone(self.exprs['loss'],
                            {self.exprs['output']: output_from_corrupt})

        self.exprs.update(get_named_variables(locals(), overwrite=True))