Example #1
def set_parameter(self, mean_parameter, noise_variance):
    """
    Set $\beta_E^*$.
    """
    mean_parameter = np.squeeze(mean_parameter)
    # Gaussian likelihood as a quadratic loss centered at mean_parameter,
    # composed with the affine response selector.
    likelihood_loss = rr.signal_approximator(mean_parameter, coef=1. / noise_variance)
    self.likelihood_loss = rr.affine_smooth(likelihood_loss, self._response_selector)
Example #2

def set_parameter(self, mean_parameter, noise_variance):

    mean_parameter = np.squeeze(mean_parameter)

    # Same pattern as Example #1: quadratic likelihood, here composed with
    # the permuted design matrix times the inverse of B_p.
    self.likelihood_loss = rr.signal_approximator(mean_parameter,
                                                  coef=1. / noise_variance)

    self.likelihood_loss = rr.affine_smooth(
        self.likelihood_loss, self.X_permute.dot(self.B_p_inv))
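
Examples #1 and #2 use the same building block. As a quick orientation (this check is illustrative and not taken from either repository), rr.signal_approximator(center, coef) represents the quadratic (coef / 2) * ||x - center||^2:

import numpy as np
import regreg.api as rr

center = np.array([1., 2., 3.])
loss = rr.signal_approximator(center, coef=2.)

# At x = 0 the value should be (coef / 2) * ||center||^2 = 14
# under regreg's usual quadratic convention.
print(loss.smooth_objective(np.zeros(3), 'func'))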
Example #3
def smooth_objective_post(self,
                          sel_param,
                          mode='both',
                          check_feasibility=False):

    sel_param = self.apply_offset(sel_param)

    # Embed the selected parameter in the full p-dimensional space.
    generative_mean = np.zeros(self.p_shape)
    generative_mean[:self.param_shape] = sel_param

    cov_data_inv = self.solver.score_cov_inv

    # Selection probability term: solve the primal problem and keep
    # its optimal value and minimizer.
    sel_lasso = selection_probability_random_lasso(self.solver,
                                                   generative_mean)
    sel_prob_primal = sel_lasso.minimize2(nstep=100)[::-1]
    optimal_primal = (sel_prob_primal[1])[:self.p_shape]
    sel_prob_val = -sel_prob_primal[0]

    full_gradient = cov_data_inv.dot(optimal_primal - generative_mean)
    optimizer = full_gradient[:self.param_shape]

    # Gaussian log-likelihood of the observed target.
    likelihood_loss = log_likelihood(
        self.solver.target_observed,
        self.solver.score_cov[:self.param_shape, :self.param_shape],
        self.param_shape)

    likelihood_loss_value = likelihood_loss.smooth_objective(sel_param, 'func')
    likelihood_loss_grad = likelihood_loss.smooth_objective(sel_param, 'grad')

    # Gaussian prior centered at zero with variance prior_variance,
    # expressed as a quadratic loss.
    log_prior_loss = rr.signal_approximator(np.zeros(self.param_shape),
                                            coef=1. / self.prior_variance)

    log_prior_loss_value = log_prior_loss.smooth_objective(sel_param, 'func')
    log_prior_loss_grad = log_prior_loss.smooth_objective(sel_param, 'grad')

    # Posterior objective and its gradient.
    f = likelihood_loss_value + log_prior_loss_value + sel_prob_val
    g = likelihood_loss_grad + log_prior_loss_grad + optimizer

    if mode == 'func':
        return self.scale(f)
    elif mode == 'grad':
        return self.scale(g)
    elif mode == 'both':
        return self.scale(f), self.scale(g)
    else:
        raise ValueError("mode incorrectly specified")
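
The 'func' / 'grad' / 'both' branches above follow regreg's standard smooth_objective convention, so the posterior objective can be handed to any generic first-order routine. A minimal sketch of such a caller (post and init are hypothetical placeholders for an instance of the class above and a starting parameter vector):

# Hypothetical driver: plain gradient descent on the posterior objective.
param = init.copy()
step = 1e-3
for _ in range(200):
    value, grad = post.smooth_objective_post(param, mode='both')
    param = param - step * grad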
Example #4
def set_prior(self, prior_variance):
    # Gaussian prior centered at zero, expressed as a quadratic loss.
    self.log_prior_loss = rr.signal_approximator(np.zeros(self.param_shape),
                                                 coef=1. / prior_variance)
Example #5
def set_likelihood(self, y, noise_variance, generative_X):
    # Quadratic likelihood in y, composed with the generative design matrix.
    likelihood_loss = rr.signal_approximator(y, coef=1. / noise_variance)
    self.likelihood_loss = rr.affine_smooth(likelihood_loss, generative_X)
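
The affine_smooth composition in Example #5 is the general way to turn signal_approximator into a regression loss evaluated at generative_X.dot(beta). A self-contained sketch of the same pattern (the data, shapes, and lagrange value are illustrative assumptions, not from the original repository):

import numpy as np
import regreg.api as rr

np.random.seed(0)
n, p = 100, 20
X = np.random.standard_normal((n, p))
y = X.dot(np.ones(p)) + np.random.standard_normal(n)

quadratic = rr.signal_approximator(y, coef=1.)  # (1/2) * ||y - arg||^2
loss = rr.affine_smooth(quadratic, X)           # evaluate the loss at X.dot(beta)
penalty = rr.l1norm(p, lagrange=2.)

# Solve the resulting lasso problem.
problem = rr.simple_problem(loss, penalty)
beta_hat = problem.solve()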
Example #6
import numpy as np
import pylab
from scipy import sparse

import regreg.api as R

# Noisy signal with two elevated segments.
Y = np.random.standard_normal(500)
Y[100:150] += 7
Y[250:300] += 14

# Squared-error data fidelity.
loss = R.signal_approximator(Y)

# l1 penalty on the coefficients themselves (sparsity) ...
sparsity = R.l1norm(len(Y), lagrange=0.8)

# ... and on their first differences (piecewise constancy).
D = (np.identity(500) + np.diag([-1] * 499, k=1))[:-1]
D = sparse.csr_matrix(D)
fused = R.l1norm.linear(D, lagrange=25.5)

# Combine the loss with both penalties and solve via ADMM.
problem = R.container(loss, sparsity, fused)
solver = R.admm_problem(problem)
solver.fit(max_its=1000, tol=1e-8)
solution = solver.beta

# Plot the fit over the raw data.
pylab.plot(solution, c='g', linewidth=3)
pylab.scatter(np.arange(Y.shape[0]), Y)
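
Together the two penalties implement a sparse fused lasso: the plain l1norm shrinks individual coefficients toward zero, while the l1norm applied to the first-difference matrix D pushes neighboring coefficients to be equal, so the fitted solution is piecewise constant and tracks the two shifted segments of Y.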