Example 1
    def __init__(self,
                 map,
                 generative_mean,
                 coef=1.,
                 offset=None,
                 quadratic=None):

        self.map = map
        self.q = map.p - map.nactive
        self.r = map.p + map.nactive
        self.p = map.p

        rr.smooth_atom.__init__(self, (2 * self.p, ),
                                offset=offset,
                                quadratic=quadratic,
                                initial=self.map.feasible_point,
                                coef=coef)

        self.coefs[:] = self.map.feasible_point

        opt_vars_0 = np.zeros(self.r, bool)
        opt_vars_0[self.p:] = 1
        opt_vars = np.append(opt_vars_0, np.ones(self.q, bool))

        opt_vars_active = np.append(opt_vars_0, np.zeros(self.q, bool))
        opt_vars_inactive = np.zeros(2 * self.p, bool)
        opt_vars_inactive[self.r:] = 1

        self._response_selector = rr.selector(~opt_vars, (2 * self.p, ))
        self._opt_selector_active = rr.selector(opt_vars_active,
                                                (2 * self.p, ))
        self._opt_selector_inactive = rr.selector(opt_vars_inactive,
                                                  (2 * self.p, ))

        nonnegative = nonnegative_softmax_scaled(self.map.nactive)
        self.nonnegative_barrier = nonnegative.linear(
            self._opt_selector_active)

        cube_objective = smooth_cube_barrier(self.map.inactive_lagrange)
        self.cube_barrier = rr.affine_smooth(cube_objective,
                                             self._opt_selector_inactive)

        linear_map = np.hstack(
            [self.map._score_linear_term, self.map._opt_linear_term])
        randomization_loss = log_likelihood(np.zeros(self.p),
                                            self.map.randomization_cov, self.p)
        self.randomization_loss = rr.affine_smooth(
            randomization_loss,
            rr.affine_transform(linear_map, self.map._opt_affine_term))

        likelihood_loss = log_likelihood(generative_mean, self.map.score_cov,
                                         self.p)

        self.likelihood_loss = rr.affine_smooth(likelihood_loss,
                                                self._response_selector)

        self.total_loss = rr.smooth_sum([
            self.randomization_loss, self.likelihood_loss,
            self.nonnegative_barrier, self.cube_barrier
        ])
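
The three boolean masks built above simply partition the length-2p argument into a response (score) block, an active-subgradient block and an inactive-subgradient block; each rr.selector then indexes one of those blocks. A minimal NumPy sketch of what each mask picks out, with made-up sizes standing in for map.p and map.nactive:

import numpy as np

p, nactive = 5, 2                                    # stand-ins for map.p, map.nactive
r, q = p + nactive, p - nactive

opt_vars_0 = np.zeros(r, bool)
opt_vars_0[p:] = 1                                   # active optimization variables
opt_vars = np.append(opt_vars_0, np.ones(q, bool))   # all optimization variables

opt_vars_active = np.append(opt_vars_0, np.zeros(q, bool))
opt_vars_inactive = np.zeros(2 * p, bool)
opt_vars_inactive[r:] = 1

state = np.arange(2 * p)               # stand-in for the full (2p,) argument
print(state[~opt_vars])                # response block, length p
print(state[opt_vars_active])          # active subgradient block, length nactive
print(state[opt_vars_inactive])        # inactive subgradient block, length q
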
Example 2
    def sel_prob_smooth_objective(self, param, mode='both', check_feasibility=False):

        param = self.apply_offset(param)

        data = np.squeeze(self.t * self.map.A)

        offset_active = self.map.offset_active + data[:self.map.nactive]
        offset_inactive = self.map.offset_inactive + data[self.map.nactive:]

        active_conj_loss = rr.affine_smooth(self.active_conjugate,
                                            rr.affine_transform(self.map.B_active, offset_active))

        cube_loss = neg_log_cube_probability_fs(self.q, offset_inactive,
                                                randomization_scale=self.map.randomization_scale)

        total_loss = rr.smooth_sum([active_conj_loss,
                                    cube_loss,
                                    self.nonnegative_barrier])

        if mode == 'func':
            f = total_loss.smooth_objective(param, 'func')
            return self.scale(f)
        elif mode == 'grad':
            g = total_loss.smooth_objective(param, 'grad')
            return self.scale(g)
        elif mode == 'both':
            f, g = total_loss.smooth_objective(param, 'both')
            return self.scale(f), self.scale(g)
        else:
            raise ValueError("mode incorrectly specified")
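
The func/grad/both branching at the end follows regreg's smooth_objective convention: one method returns the value, the gradient, or both, optionally rescaled. A standalone sketch of that dispatch pattern (the quadratic objective here is purely illustrative, not the selection objective above):

import numpy as np

def smooth_objective(param, mode='both', scale=2.):
    # Toy smooth objective f(x) = ||x||^2 / 2 with the usual mode dispatch.
    f = 0.5 * np.sum(param ** 2)
    g = param.copy()
    if mode == 'func':
        return scale * f
    elif mode == 'grad':
        return scale * g
    elif mode == 'both':
        return scale * f, scale * g
    else:
        raise ValueError("mode incorrectly specified")

print(smooth_objective(np.array([1., 2.]), mode='func'))   # 5.0
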
Example 3
    def randomize(self, loss, epsilon=0):
        """
        Randomize the loss.
        """
        randomized_loss = rr.smooth_sum([loss])
        _randomZ = self.sample()
        randomized_loss.quadratic = rr.identity_quadratic(
            epsilon, 0, -_randomZ, 0)
        return randomized_loss
Example 4
    def randomize(self, loss, epsilon=0, perturb=None):
        """
        Randomize the loss.
        """
        randomized_loss = rr.smooth_sum([loss])
        if perturb is None:
            perturb = self.sample()
        randomized_loss.quadratic = rr.identity_quadratic(
            epsilon, 0, -perturb, 0)
        return randomized_loss, perturb
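
Both randomize variants attach rr.identity_quadratic(epsilon, 0, -perturb, 0) to the loss. In regreg an identity_quadratic(coef, center, linear, constant) stands for coef/2 * ||x - center||^2 + linear^T x + constant, so here it adds a ridge term plus the linear randomization term. A plain-NumPy sketch of the resulting randomized objective (base_loss and the sizes are hypothetical):

import numpy as np

rng = np.random.default_rng(0)
p, epsilon = 3, 0.5
omega = rng.standard_normal(p)            # plays the role of self.sample() / perturb

def base_loss(beta):                      # hypothetical smooth loss
    return 0.5 * np.sum(beta ** 2)

def randomized_loss(beta):
    # loss(beta) + (epsilon / 2) * ||beta||^2 - omega^T beta
    return base_loss(beta) + 0.5 * epsilon * beta.dot(beta) - omega.dot(beta)

beta = rng.standard_normal(p)
print(randomized_loss(beta))
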
Example 5
    def __init__(self,
                 y,
                 grad_map,
                 prior_variance,
                 coef=1.,
                 offset=None,
                 quadratic=None,
                 nstep=10):

        generative_X = grad_map.generative_X
        self.param_shape = generative_X.shape[1]

        y = np.squeeze(y)

        E_1 = grad_map.E_1

        E_2 = grad_map.E_2

        self.E = E_2

        self.generative_X = grad_map.generative_X

        initial = np.zeros(self.E)

        #initial[:E_1] = np.squeeze(grad_map.feasible_point[:E_1]* grad_map.active_signs_1[None,:])

        initial = np.squeeze(grad_map.feasible_point[E_1:] *
                             grad_map.active_signs_2[None, :])

        rr.smooth_atom.__init__(self, (self.param_shape, ),
                                offset=offset,
                                quadratic=quadratic,
                                initial=initial,
                                coef=coef)

        self.coefs[:] = initial

        noise_variance = grad_map.noise_variance

        self.set_likelihood(y, noise_variance, generative_X)

        self.set_prior(prior_variance)

        self.initial_state = initial

        self.total_loss = rr.smooth_sum(
            [self.likelihood_loss, self.log_prior_loss, grad_map])
Example 6
    def __init__(self,
                 y,
                 grad_map,
                 prior_variance,
                 coef=1.,
                 offset=None,
                 quadratic=None,
                 nstep=10):

        generative_X = grad_map.generative_X
        self.param_shape = generative_X.shape[1]

        y = np.squeeze(y)

        self.E = grad_map.E

        self.generative_X = grad_map.generative_X

        initial = np.zeros(self.E)

        rr.smooth_atom.__init__(self,
                                (self.param_shape,),
                                offset=offset,
                                quadratic=quadratic,
                                initial=initial,
                                coef=coef)

        self.coefs[:] = initial

        noise_variance = grad_map.noise_variance

        self.set_likelihood(y, noise_variance, generative_X)

        self.set_prior(prior_variance)

        self.initial_state = initial

        self.total_loss_0 = rr.smooth_sum([self.likelihood_loss,
                                           self.log_prior_loss,
                                           grad_map])
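
Examples 5 and 6 assemble a selective posterior objective: a Gaussian likelihood in y, a mean-zero Gaussian (ridge) log-prior with variance prior_variance, and the selection correction supplied by grad_map. set_likelihood and set_prior are defined elsewhere in the class; a hedged sketch of the two terms they plausibly construct (the library's exact atoms and scalings may differ):

import numpy as np

def neg_log_likelihood(beta, y, X, noise_variance):
    # Gaussian likelihood term: ||y - X beta||^2 / (2 * sigma^2)
    resid = y - X.dot(beta)
    return resid.dot(resid) / (2. * noise_variance)

def neg_log_prior(beta, prior_variance):
    # mean-zero Gaussian prior term: ||beta||^2 / (2 * tau^2)
    return beta.dot(beta) / (2. * prior_variance)

rng = np.random.default_rng(0)
n, p = 10, 3
X = rng.standard_normal((n, p))
y = rng.standard_normal(n)
beta = rng.standard_normal(p)
print(neg_log_likelihood(beta, y, X, 1.) + neg_log_prior(beta, 0.5))
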
Example 7
    def __init__(
            self,
            X,
            feasible_point,
            active,  # the active set chosen by randomized lasso
            active_sign,  # the set of signs of active coordinates chosen by lasso
            lagrange,  # in R^p
            mean_parameter,  # in R^n
            noise_variance,  #noise_level in data
            randomizer,  #specified randomization
            epsilon,  # ridge penalty for randomized lasso
            coef=1.,
            offset=None,
            quadratic=None,
            nstep=10):

        n, p = X.shape

        self._X = X

        E = active.sum()
        self.q = p - E

        self.active = active
        self.noise_variance = noise_variance
        self.randomization = randomizer
        self.inactive_conjugate = self.active_conjugate = randomizer.CGF_conjugate
        if self.active_conjugate is None:
            raise ValueError(
                'randomization must know its CGF_conjugate -- currently only isotropic_gaussian and laplace are implemented and are assumed to be randomization with IID coordinates'
            )

        initial = np.zeros(n + E, )
        initial[n:] = feasible_point
        self.n = n

        rr.smooth_atom.__init__(self, (n + E, ),
                                offset=offset,
                                quadratic=quadratic,
                                initial=initial,
                                coef=coef)

        self.coefs[:] = initial

        opt_vars = np.zeros(n + E, bool)
        opt_vars[n:] = 1

        nonnegative = nonnegative_softmax_scaled(E)

        self._opt_selector = rr.selector(opt_vars, (n + E, ))
        self.nonnegative_barrier = nonnegative.linear(self._opt_selector)
        self._response_selector = rr.selector(~opt_vars, (n + E, ))

        self.set_parameter(mean_parameter, noise_variance)

        X_E = X[:, active]
        B = X.T.dot(X_E)

        B_E = B[active]
        B_mE = B[~active]

        self.A_active = np.hstack([
            -X[:, active].T,
            (B_E + epsilon * np.identity(E)) * active_sign[None, :]
        ])

        self.A_inactive = np.hstack(
            [-X[:, ~active].T, (B_mE * active_sign[None, :])])

        self.offset_active = active_sign * lagrange[active]

        self.offset_inactive = np.zeros(p - E)

        self.active_conj_loss = rr.affine_smooth(
            self.active_conjugate,
            rr.affine_transform(self.A_active, self.offset_active))

        cube_obj = neg_log_cube_probability(self.q,
                                            lagrange[~active],
                                            randomization_scale=1.)

        self.cube_loss = rr.affine_smooth(cube_obj, self.A_inactive)

        self.total_loss = rr.smooth_sum([
            self.active_conj_loss, self.cube_loss, self.likelihood_loss,
            self.nonnegative_barrier
        ])
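
The affine pieces above encode the randomized-lasso KKT map: A_active pairs the negative active score -X_E^T with the signed, ridge-augmented Gram block, and A_inactive does the same for the inactive rows. A quick NumPy shape check using random stand-ins for X, active, active_sign and epsilon:

import numpy as np

rng = np.random.default_rng(1)
n, p, epsilon = 20, 6, 0.1
X = rng.standard_normal((n, p))
active = np.zeros(p, bool)
active[:2] = True
active_sign = np.array([1., -1.])
E = active.sum()

X_E = X[:, active]
B = X.T.dot(X_E)
B_E, B_mE = B[active], B[~active]

A_active = np.hstack([-X_E.T,
                      (B_E + epsilon * np.identity(E)) * active_sign[None, :]])
A_inactive = np.hstack([-X[:, ~active].T, B_mE * active_sign[None, :]])

print(A_active.shape)    # (E, n + E)
print(A_inactive.shape)  # (p - E, n + E)
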
Example 8
    def __init__(self,
                 X,
                 feasible_point,
                 active,  # the active set chosen by randomized marginal screening
                 active_signs,  # the set of signs of active coordinates chosen by ms
                 threshold,  # in R^p
                 mean_parameter,
                 noise_variance,
                 randomizer,
                 coef=1.,
                 offset=None,
                 quadratic=None,
                 nstep=10):

        n, p = X.shape
        self._X = X

        E = active.sum()
        self.q = p - E
        sigma = np.sqrt(noise_variance)

        self.active = active

        self.noise_variance = noise_variance
        self.randomization = randomizer
        self.inactive_conjugate = self.active_conjugate = randomizer.CGF_conjugate
        if self.active_conjugate is None:
            raise ValueError(
                'randomization must know its CGF_conjugate -- currently only isotropic_gaussian and laplace are implemented and are assumed to be randomization with IID coordinates')

        initial = np.zeros(n + E, )
        initial[n:] = feasible_point
        self.n = n

        rr.smooth_atom.__init__(self,
                                (n + E,),
                                offset=offset,
                                quadratic=quadratic,
                                initial=initial,
                                coef=coef)

        self.coefs[:] = initial
        nonnegative = nonnegative_softmax_scaled(E)

        opt_vars = np.zeros(n + E, bool)
        opt_vars[n:] = 1

        self._opt_selector = rr.selector(opt_vars, (n + E,))
        self.nonnegative_barrier = nonnegative.linear(self._opt_selector)
        self._response_selector = rr.selector(~opt_vars, (n + E,))

        self.set_parameter(mean_parameter, noise_variance)

        self.A_active = np.hstack([np.true_divide(-X[:, active].T, sigma), np.identity(E) * active_signs[None, :]])

        self.A_inactive = np.hstack([np.true_divide(-X[:, ~active].T, sigma), np.zeros((p - E, E))])

        self.offset_active = active_signs * threshold[active]
        self.offset_inactive = np.zeros(p - E)

        self.active_conj_loss = rr.affine_smooth(self.active_conjugate,
                                                 rr.affine_transform(self.A_active, self.offset_active))

        cube_obj = neg_log_cube_probability(self.q, threshold[~active], randomization_scale=1.)

        self.cube_loss = rr.affine_smooth(cube_obj, rr.affine_transform(self.A_inactive, self.offset_inactive))

        self.total_loss = rr.smooth_sum([self.active_conj_loss,
                                         self.cube_loss,
                                         self.likelihood_loss,
                                         self.nonnegative_barrier])
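
nonnegative_softmax_scaled(E) is the smooth barrier that keeps the E optimization variables nonnegative; its exact form lives elsewhere in this codebase. The sketch below shows the kind of log-barrier it represents (the library's scaling may differ): finite for strictly positive coordinates and infinite otherwise.

import numpy as np

def softmax_barrier(z, scale=1.):
    # Smooth stand-in for the constraint z >= 0: blows up as a coordinate
    # approaches 0 from above and is +inf for nonpositive coordinates.
    # Illustrative only -- not necessarily the exact nonnegative_softmax_scaled form.
    if np.any(z <= 0):
        return np.inf
    return np.sum(np.log(1. + scale / z))

print(softmax_barrier(np.array([0.5, 2.0])))   # finite
print(softmax_barrier(np.array([0.5, -1.0])))  # inf
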
Example 9
    def __init__(
            self,
            X,
            feasible_point,  #in R^{|E|_1 + |E|_2}
            active_1,  #the active set chosen by randomized marginal screening
            active_2,  #the active set chosen by randomized lasso
            active_signs_1,  #the set of signs of active coordinates chosen by ms
            active_signs_2,  #the set of signs of active coordinates chosen by lasso
            lagrange,  #in R^p
            threshold,  #in R^p
            mean_parameter,  # in R^n
            noise_variance,
            randomizer,
            epsilon,  #ridge penalty for randomized lasso
            coef=1.,
            offset=None,
            quadratic=None,
            nstep=10):

        n, p = X.shape
        self._X = X

        E_1 = active_1.sum()
        E_2 = active_2.sum()

        sigma = np.sqrt(noise_variance)

        self.active_1 = active_1
        self.active_2 = active_2
        self.noise_variance = noise_variance
        self.randomization = randomizer
        self.inactive_conjugate = self.active_conjugate = randomizer.CGF_conjugate
        if self.active_conjugate is None:
            raise ValueError(
                'randomization must know its CGF_conjugate -- currently only isotropic_gaussian and laplace are implemented and are assumed to be randomization with IID coordinates'
            )

        initial = np.zeros(n + E_1 + E_2, )
        initial[n:] = feasible_point
        self.n = n

        rr.smooth_atom.__init__(self, (n + E_1 + E_2, ),
                                offset=offset,
                                quadratic=quadratic,
                                initial=initial,
                                coef=coef)

        self.coefs[:] = initial
        nonnegative = nonnegative_softmax_scaled(E_1 + E_2)
        opt_vars = np.zeros(n + E_1 + E_2, bool)
        opt_vars[n:] = 1

        self._opt_selector = rr.selector(opt_vars, (n + E_1 + E_2, ))
        self.nonnegative_barrier = nonnegative.linear(self._opt_selector)
        self._response_selector = rr.selector(~opt_vars, (n + E_1 + E_2, ))

        self.set_parameter(mean_parameter, noise_variance)

        arg_ms = np.zeros(self.n + E_1 + E_2, bool)
        arg_ms[:self.n + E_1] = 1
        arg_lasso = np.zeros(self.n + E_1, bool)
        arg_lasso[:self.n] = 1
        arg_lasso = np.append(arg_lasso, np.ones(E_2, bool))

        self.A_active_1 = np.hstack([
            np.true_divide(-X[:, active_1].T, sigma),
            np.identity(E_1) * active_signs_1[None, :]
        ])

        self.A_inactive_1 = np.hstack([
            np.true_divide(-X[:, ~active_1].T, sigma),
            np.zeros((p - E_1, E_1))
        ])

        self.offset_active_1 = active_signs_1 * threshold[active_1]
        self.offset_inactive_1 = np.zeros(p - E_1)

        self._active_ms = rr.selector(
            arg_ms, (self.n + E_1 + E_2, ),
            rr.affine_transform(self.A_active_1, self.offset_active_1))

        self._inactive_ms = rr.selector(
            arg_ms, (self.n + E_1 + E_2, ),
            rr.affine_transform(self.A_inactive_1, self.offset_inactive_1))

        self.active_conj_loss_1 = rr.affine_smooth(self.active_conjugate,
                                                   self._active_ms)

        self.q_1 = p - E_1

        cube_obj_1 = neg_log_cube_probability(self.q_1,
                                              threshold[~active_1],
                                              randomization_scale=1.)

        self.cube_loss_1 = rr.affine_smooth(cube_obj_1, self._inactive_ms)

        X_step2 = X[:, active_1]
        X_E_2 = X_step2[:, active_2]
        B = X_step2.T.dot(X_E_2)

        B_E = B[active_2]
        B_mE = B[~active_2]

        self.A_active_2 = np.hstack([
            -X_step2[:, active_2].T,
            (B_E + epsilon * np.identity(E_2)) * active_signs_2[None, :]
        ])
        self.A_inactive_2 = np.hstack(
            [-X_step2[:, ~active_2].T, (B_mE * active_signs_2[None, :])])

        self.offset_active_2 = active_signs_2 * lagrange[active_2]

        self.offset_inactive_2 = np.zeros(E_1 - E_2)

        self._active_lasso = rr.selector(
            arg_lasso, (self.n + E_1 + E_2, ),
            rr.affine_transform(self.A_active_2, self.offset_active_2))

        self._inactive_lasso = rr.selector(
            arg_lasso, (self.n + E_1 + E_2, ),
            rr.affine_transform(self.A_inactive_2, self.offset_inactive_2))

        self.active_conj_loss_2 = rr.affine_smooth(self.active_conjugate,
                                                   self._active_lasso)

        self.q_2 = E_1 - E_2

        cube_obj_2 = neg_log_cube_probability(self.q_2,
                                              lagrange[~active_2],
                                              randomization_scale=1.)

        self.cube_loss_2 = rr.affine_smooth(cube_obj_2, self._inactive_lasso)

        self.total_loss = rr.smooth_sum([
            self.active_conj_loss_1, self.active_conj_loss_2, self.cube_loss_1,
            self.cube_loss_2, self.likelihood_loss, self.nonnegative_barrier
        ])
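
Example 9 chains two selection events (marginal screening, then lasso on the screened columns), so the optimization state has n + E_1 + E_2 coordinates and the arg_ms / arg_lasso masks route the appropriate sub-vector into each stage's affine map. A small NumPy sketch of what the two masks select (sizes are made up):

import numpy as np

n, E_1, E_2 = 8, 3, 2                      # stand-ins for self.n, E_1, E_2
state = np.arange(n + E_1 + E_2)

arg_ms = np.zeros(n + E_1 + E_2, bool)
arg_ms[:n + E_1] = 1                       # data block + screening opt. variables

arg_lasso = np.zeros(n + E_1, bool)
arg_lasso[:n] = 1
arg_lasso = np.append(arg_lasso, np.ones(E_2, bool))   # data block + lasso opt. variables

print(state[arg_ms])      # the first n + E_1 coordinates
print(state[arg_lasso])   # the first n coordinates plus the last E_2
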
Example 10
    def __init__(self,
                 map,
                 generative_mean,
                 coef=1.,
                 offset=None,
                 quadratic=None):

        self.map = map
        self.q = map.p - map.nactive
        self.r = map.p + map.nactive
        self.p = map.p

        self.inactive_conjugate = self.active_conjugate = map.randomization.CGF_conjugate

        if self.active_conjugate is None:
            raise ValueError(
                'randomization must know its CGF_conjugate -- currently only isotropic_gaussian and laplace are implemented and are assumed to be randomization with IID coordinates')

        self.inactive_lagrange = self.map.inactive_lagrange

        rr.smooth_atom.__init__(self,
                                (self.r,),
                                offset=offset,
                                quadratic=quadratic,
                                initial=self.map.feasible_point,
                                coef=coef)

        self.coefs[:] = self.map.feasible_point

        nonnegative = nonnegative_softmax_scaled(self.map.nactive)

        opt_vars = np.zeros(self.r, bool)
        opt_vars[map.p:] = 1

        self._opt_selector = rr.selector(opt_vars, (self.r,))
        self._response_selector = rr.selector(~opt_vars, (self.r,))

        self.nonnegative_barrier = nonnegative.linear(self._opt_selector)

        self.active_conj_loss = rr.affine_smooth(self.active_conjugate,
                                                 rr.affine_transform(np.hstack([self.map.A_active, self.map.B_active]),
                                                                     self.map.offset_active))

        cube_obj = neg_log_cube_probability(self.q, self.inactive_lagrange, randomization_scale=1.)
        self.cube_loss = rr.affine_smooth(cube_obj, np.hstack([self.map.A_inactive, self.map.B_inactive]))

        # w_1, v_1 = np.linalg.eig(self.map.score_cov)
        # self.score_cov_inv_half = (v_1.T.dot(np.diag(np.power(w_1, -0.5)))).dot(v_1)
        # likelihood_loss = rr.signal_approximator(np.squeeze(np.zeros(self.p)), coef=1.)
        # scaled_response_selector = rr.selector(~opt_vars, (self.r,), rr.affine_transform(self.score_cov_inv_half,
        #                                                                                  self.score_cov_inv_half.
        #                                                                                  dot(np.squeeze(generative_mean))))
        #print("cov", self.map.score_cov.shape )
        likelihood_loss = log_likelihood(generative_mean, self.map.score_cov, self.p)

        self.likelihood_loss = rr.affine_smooth(likelihood_loss, self._response_selector)

        self.total_loss = rr.smooth_sum([self.active_conj_loss,
                                         self.likelihood_loss,
                                         self.nonnegative_barrier,
                                         self.cube_loss])
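
Examples 1 and 10 both compose a log_likelihood(mean, cov, dim) atom with the response selector. That atom is defined elsewhere in this codebase; assuming it is the usual Gaussian negative log-likelihood up to constants, its value and gradient are:

import numpy as np

def gaussian_neg_log_likelihood(z, mean, cov):
    # (z - mean)^T cov^{-1} (z - mean) / 2 and its gradient in z (constants dropped).
    prec = np.linalg.inv(cov)
    resid = z - mean
    value = 0.5 * resid.dot(prec).dot(resid)
    grad = prec.dot(resid)
    return value, grad

rng = np.random.default_rng(2)
p = 4
cov = np.identity(p)
mean = np.zeros(p)
z = rng.standard_normal(p)
print(gaussian_neg_log_likelihood(z, mean, cov))
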
Example 11
    def __init__(
            self,
            X,
            feasible_point,
            active,  # the active set chosen by randomized lasso
            active_sign,  # the set of signs of active coordinates chosen by lasso
            lagrange,  # in R^p
            mean_parameter,  # in R^n
            noise_variance,  # noise_level in data
            randomizer,  # specified randomization
            epsilon,  # ridge penalty for randomized lasso
            coef=1.,
            offset=None,
            quadratic=None,
            nstep=10):

        n, p = X.shape
        E = active.sum()
        self._X = X
        self.active = active
        self.noise_variance = noise_variance
        self.randomization = randomizer

        self.CGF_randomization = randomizer.CGF

        if self.CGF_randomization is None:
            raise ValueError(
                'randomization must know its cgf -- currently only isotropic_gaussian and laplace are implemented and are assumed to be randomization with IID coordinates'
            )

        self.inactive_lagrange = lagrange[~active]

        initial = feasible_point

        self.feasible_point = feasible_point

        rr.smooth_atom.__init__(self, (p, ),
                                offset=offset,
                                quadratic=quadratic,
                                initial=initial,
                                coef=coef)

        self.coefs[:] = feasible_point

        mean_parameter = np.squeeze(mean_parameter)

        self.active = active

        X_E = self.X_E = X[:, active]
        self.X_permute = np.hstack([self.X_E, self._X[:, ~active]])
        B = X.T.dot(X_E)

        B_E = B[active]
        B_mE = B[~active]

        self.active_slice = np.zeros_like(active, bool)
        self.active_slice[:active.sum()] = True

        self.B_active = np.hstack([
            (B_E + epsilon * np.identity(E)) * active_sign[None, :],
            np.zeros((E, p - E))
        ])
        self.B_inactive = np.hstack(
            [B_mE * active_sign[None, :],
             np.identity(p - E)])
        self.B_p = np.vstack((self.B_active, self.B_inactive))

        self.B_p_inv = np.linalg.inv(self.B_p.T)

        self.offset_active = active_sign * lagrange[active]
        self.inactive_subgrad = np.zeros(p - E)

        self.cube_bool = np.zeros(p, bool)

        self.cube_bool[E:] = 1

        self.dual_arg = self.B_p_inv.dot(
            np.append(self.offset_active, self.inactive_subgrad))

        self._opt_selector = rr.selector(~self.cube_bool, (p, ))

        self.set_parameter(mean_parameter, noise_variance)

        _barrier_star = barrier_conjugate_softmax_scaled_rr(
            self.cube_bool, self.inactive_lagrange)

        self.conjugate_barrier = rr.affine_smooth(_barrier_star,
                                                  np.identity(p))

        self.CGF_randomizer = rr.affine_smooth(self.CGF_randomization,
                                               -self.B_p_inv)

        self.constant = np.true_divide(mean_parameter.dot(mean_parameter),
                                       2 * noise_variance)

        self.linear_term = rr.identity_quadratic(0, 0, self.dual_arg,
                                                 -self.constant)

        self.total_loss = rr.smooth_sum([
            self.conjugate_barrier, self.CGF_randomizer, self.likelihood_loss
        ])

        self.total_loss.quadratic = self.linear_term
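
Example 11 works on the dual scale: it stacks the signed active and inactive blocks of the ridge-augmented Gram matrix into B_p, inverts its transpose, and pushes the offsets through that inverse to form dual_arg. A NumPy shape check of that construction with random stand-ins (sizes, signs and the Lagrange value are chosen only for illustration):

import numpy as np

rng = np.random.default_rng(3)
n, p, epsilon = 30, 5, 0.1
X = rng.standard_normal((n, p))
active = np.zeros(p, bool)
active[:2] = True
active_sign = np.array([1., -1.])
E = active.sum()

X_E = X[:, active]
B = X.T.dot(X_E)
B_E, B_mE = B[active], B[~active]

B_active = np.hstack([(B_E + epsilon * np.identity(E)) * active_sign[None, :],
                      np.zeros((E, p - E))])
B_inactive = np.hstack([B_mE * active_sign[None, :], np.identity(p - E)])
B_p = np.vstack((B_active, B_inactive))
B_p_inv = np.linalg.inv(B_p.T)

offset_active = active_sign * 1.           # stand-in for active_sign * lagrange[active]
dual_arg = B_p_inv.dot(np.append(offset_active, np.zeros(p - E)))
print(B_p.shape, dual_arg.shape)           # (p, p) (p,)
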
Example 12
    def __init__(self,
                 X,
                 feasible_point,
                 active,
                 active_sign,
                 mean_parameter,  # in R^n
                 noise_variance,
                 randomizer,
                 coef=1.,
                 offset=None,
                 quadratic=None,
                 nstep=10):


        self.n, p = X.shape
        E = 1
        self.q = p - 1
        self._X = X
        self.active = active
        self.noise_variance = noise_variance
        self.randomization = randomizer

        self.inactive_conjugate = self.active_conjugate = randomizer.CGF_conjugate
        if self.active_conjugate is None:
            raise ValueError(
                'randomization must know its CGF_conjugate -- currently only isotropic_gaussian and laplace are implemented and are assumed to be randomization with IID coordinates')

        initial = np.zeros(self.n + E, )
        initial[self.n:] = feasible_point

        rr.smooth_atom.__init__(self,
                                (self.n + E,),
                                offset=offset,
                                quadratic=quadratic,
                                initial=initial,
                                coef=coef)

        self.coefs[:] = initial

        nonnegative = nonnegative_softmax_scaled(E)

        opt_vars = np.zeros(self.n + E, bool)
        opt_vars[self.n:] = 1

        self._opt_selector = rr.selector(opt_vars, (self.n + E,))
        self._response_selector = rr.selector(~opt_vars, (self.n + E,))

        self.nonnegative_barrier = nonnegative.linear(self._opt_selector)

        sign = np.zeros((1, 1))
        sign[0:, :] = active_sign
        self.A_active = np.hstack([-X[:, active].T, sign])
        self.active_conj_loss = rr.affine_smooth(self.active_conjugate, self.A_active)

        self.A_in_1 = np.hstack([-X[:, ~active].T, np.zeros((p - 1, 1))])
        self.A_in_2 = np.hstack([np.zeros((self.n, 1)).T, np.ones((1, 1))])
        self.A_inactive = np.vstack([self.A_in_1, self.A_in_2])

        cube_loss = neg_log_cube_probability_fs(self.q, p)
        self.cube_loss = rr.affine_smooth(cube_loss, self.A_inactive)

        self.set_parameter(mean_parameter, noise_variance)

        self.total_loss = rr.smooth_sum([self.active_conj_loss,
                                         self.cube_loss,
                                         self.likelihood_loss,
                                         self.nonnegative_barrier])