Example 1
                                    nvis=dim)

        # generate the noise samples: one shared noise batch per clean batch
        noise_func = function([], noise_distribution.random_design_matrix(X))
        Y = []
        for i in xrange(noise_per_clean):
            Y.append(sharedX(noise_func()))

        # build the DNCE objective from the model, data, and noise batches
        nce = DNCE(noise_distribution)
        J = nce(model, X, Y)

        # diagnostic: how often does the free-energy classifier prefer
        # the clean batch X over each paired noise batch Y_i?
        accs = []
        for Y_i in Y:
            pos_prob = 1. / (
                1. + T.exp(model.free_energy(X) - model.free_energy(Y_i)))
            acc = (pos_prob > .5).mean()
            accs.append(acc)
        acc = sum(accs) / float(len(accs))

        print '\tinit accuracy ', function([], acc)()

        # minimize the objective function with batch gradient descent
        minimizer = BatchGradientDescent(
            objective=J,
            params=model.get_params(),
            param_constrainers=[model.censor_updates])

        print '\tinit obj:', minimizer.obj()
        #minimizer.verbose = True
        minimizer.minimize()
        print '\tfinal obj:', minimizer.obj()
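Both examples track the same diagnostic: the probability that the model, viewed as a data-vs-noise classifier, assigns lower free energy to the clean batch than to its paired noise batch. Below is a minimal NumPy sketch of that check, with a hypothetical quadratic free energy standing in for model.free_energy:

import numpy as np

def sigmoid(z):
    return 1. / (1. + np.exp(-z))

def classifier_accuracy(free_energy, data, noise):
    # P(clean | {clean, noise}) = sigmoid(F(noise) - F(clean)),
    # i.e. 1. / (1. + exp(F(clean) - F(noise))) as in the snippet above
    pos_prob = sigmoid(free_energy(noise) - free_energy(data))
    return (pos_prob > .5).mean()

# toy stand-in: quadratic free energy, lowest at the origin
free_energy = lambda Z: .5 * (Z ** 2).sum(axis=1)

rng = np.random.RandomState(0)
data = rng.randn(100, 5) * .1    # concentrated near the mode
noise = rng.randn(100, 5) * 3.   # spread far from it
print classifier_accuracy(free_energy, data, noise)  # close to 1.0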
Example 2
        for i in xrange(noise_per_clean):
            Y.append(sharedX(noise_func()))

        # build the NCE objective; the diagonal Gaussian noise model is
        # frozen by pinning beta (min_beta == max_beta == init_beta)
        nce = NCE(
            DiagonalMND(nvis=dim,
                        init_beta=beta,
                        init_mu=0.,
                        min_beta=beta,
                        max_beta=beta), -1)
        J = nce(model, X, T.concatenate(Y, axis=0))

        # same classifier-accuracy diagnostic as in Example 1
        accs = []
        for Y_i in Y:
            pos_prob = 1. / (
                1. + T.exp(model.free_energy(X) - model.free_energy(Y_i)))
            acc = (pos_prob > .5).mean()
            accs.append(acc)
        acc = sum(accs) / float(len(accs))

        print '\tinit accuracy ', function([], acc)()

        # minimize the objective function with batch gradient descent
        minimizer = BatchGradientDescent(
            objective=J,
            params=model.get_params(),
            param_constrainers=[model.censor_updates])

        print '\tinit obj:', minimizer.obj()
        #minimizer.verbose = True
        minimizer.minimize()
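The objective J that BatchGradientDescent drives down is the negative log-likelihood of this data-vs-noise classifier. Here is a sketch of a pairwise logistic loss consistent with the pos_prob diagnostic above; the names are hypothetical, and pylearn2's actual DNCE/NCE costs include noise-side terms and details not reproduced here:

import numpy as np

def sigmoid(z):
    return 1. / (1. + np.exp(-z))

def pairwise_logistic_loss(free_energy, X, Y_list):
    # negative log-probability of labelling the clean batch correctly
    # in each (clean, noise) pairing, averaged over the noise batches
    losses = []
    for Y_i in Y_list:
        pos_prob = sigmoid(free_energy(Y_i) - free_energy(X))
        losses.append(-np.log(pos_prob).mean())
    return np.mean(losses)

Driving this loss down lowers free energy on data relative to noise, which is what the obj printouts before and after minimize() make visible in Example 1.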