Example #1
    def fun(W):
        # glue W and inds together
        glued_together = np.concatenate((W, inds), axis=1)

        # separate W and inds back out
        new_W = glued_together[:, :-1]
        new_inds = np.int64(glued_together[:, -1])

        assert new_inds.dtype == np.int64
        return new_W[new_inds].sum()
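The function above references `np` and `inds` as free variables from an enclosing scope that is not shown. A minimal sketch of that missing context, assuming plain NumPy imported as `np` and an `inds` column holding integer-valued row indices (the shapes and values here are illustrative assumptions, not part of the original):

    import numpy as np

    # Assumed enclosing scope: `inds` stores integer row indices as a float
    # column so it can be concatenated with W along axis=1.
    inds = np.arange(5.0)[:, None]   # shape (5, 1)
    W = np.random.randn(5, 10)       # shape (5, 10)

    print(fun(W))  # scalar: sum of the rows of new_W selected by new_inds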
Example #2
    def fun(W):
        # glue W and inds together
        glued_together = np.concatenate((W, inds), axis=1)

        # separate W and inds back out
        new_W = glued_together[:, :-1]
        new_inds = np.int64(glued_together[:, -1])

        assert new_inds.dtype == np.int64
        return new_W[new_inds].sum()
Example #3
def fun(W):
    # glue W and inds together, then separate them back out
    W = np.concatenate((W, inds), axis=1)
    new_inds = np.int64(W[:, -1])
    new_W = W[:, :-1]
    return new_W[new_inds].sum()
Example #4
    def fit(self):
        group = self.group
        lambdas = self.reg_lambda
        X = self.xs
        y = self.ys
        rng = np.random.RandomState(0)  # seeded RNG for reproducible init
        group = np.asarray(group, dtype=np.int64)
        if group.shape[0] != X.shape[1]:
            raise ValueError('group should be (n_features,)')

        # type check for data matrix
        if not isinstance(X, np.ndarray):
            raise ValueError('Input data should be of type ndarray (got %s).' %
                             type(X))

        n_features = np.int64(X.shape[1])
        if y.ndim == 1:
            y = y[:, np.newaxis]

        n_classes = 1
        n_classes = np.int64(n_classes)

        # Initialize parameters
        beta_hat = 1 / n_features * rng.normal(0.0, 1.0,
                                               [n_features, n_classes])
        #print('betahat 0,1', beta_hat[0:2])
        fit_params = list()

        for l, rl in enumerate(lambdas):
            #print(l, rl)
            fit_params.append({'beta': beta_hat})

            # Warm initialize parameters
            if l == 0:
                fit_params[-1]['beta'] = beta_hat
            else:
                fit_params[-1]['beta'] = fit_params[-2]['beta']

            tol = self.tol
            alpha = 1.

            # Temporary parameters to update
            beta = np.zeros([n_features, n_classes])
            beta = fit_params[-1]['beta']
            g = np.zeros([n_features, n_classes])

            # Initialize loss accumulators
            L, DL = list(), list()
            #pdb.set_trace()
            for t in range(0, self.max_iter):
                #print(t)
                grad_beta = self._grad_L2loss(beta=beta,
                                              reg_lambda=rl,
                                              X=X,
                                              y=y)
                #print('before grad',beta[0:2])
                beta = beta - self.learning_rate * grad_beta
                #beta = beta - self.learning_rate * (1 / np.sqrt(t + 1)) * g
                #print('after grad',beta[0:2])
                beta = self._prox(beta, rl * self.learning_rate)
                #print('after prox',beta[0:2])
                #print(beta[0])
                L.append(self._loss(beta, rl, X, y))
                #print(self._loss(beta, rl, X, y))
                if t > 1:
                    if (L[-1] > L[-2]):
                        break
                    DL.append(L[-1] - L[-2])
                    #print('DL, L-1, L-2',DL, L[-1], L[-2])
                    if np.abs(DL[-1] / L[-1]) < tol:
                        print('converged', rl)
                        msg = ('\tConverged. Loss function:'
                               ' {0:.2f}').format(L[-1])
                        msg += '\tdL/L: {0:.6f}\n'.format(DL[-1] / L[-1])
                        break
            #print(beta)
            fit_params[-1]['beta'] = beta
            self.lossresults[rl] = L
            self.dls[rl] = DL
            #print(L)
        # Update the estimated variables

        self.fit_ = fit_params
        self.ynull_ = np.mean(y)

        # Return
        return self
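The inner loop of this `fit` is proximal gradient descent over a path of regularization strengths: a gradient step on the smooth loss (`_grad_L2loss`) followed by a proximal step (`_prox`), with warm starts carried from one `reg_lambda` to the next. A self-contained sketch of that update pattern on a plain lasso problem (least-squares loss with an L1 penalty; the function names and the soft-thresholding prox below are assumptions standing in for the class's own helpers, not its actual code):

    import numpy as np

    def soft_threshold(z, thresh):
        # Proximal operator of the L1 penalty: shrink each coefficient
        # toward zero by `thresh`.
        return np.sign(z) * np.maximum(np.abs(z) - thresh, 0.0)

    def prox_gradient_lasso(X, y, reg_lambda, learning_rate=1e-3,
                            max_iter=1000, tol=1e-6):
        # Minimize 0.5/n * ||X beta - y||^2 + reg_lambda * ||beta||_1.
        n_samples, n_features = X.shape
        beta = np.zeros(n_features)
        loss_prev = np.inf
        for t in range(max_iter):
            grad = X.T @ (X @ beta - y) / n_samples       # smooth gradient
            beta = beta - learning_rate * grad            # gradient step
            beta = soft_threshold(beta, reg_lambda * learning_rate)  # prox
            loss = (0.5 * np.mean((X @ beta - y) ** 2)
                    + reg_lambda * np.sum(np.abs(beta)))
            if abs(loss_prev - loss) / max(abs(loss), 1e-12) < tol:
                break
            loss_prev = loss
        return beta

Warm starting across a decreasing lambda path amounts to reusing the previous solution as the initial beta, which is what the `fit_params[-2]['beta']` carry-over above does.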
Example #5
def fun(W):
    # glue W and inds together, then separate them back out
    W = np.concatenate((W, inds), axis=1)
    new_inds = np.int64(W[:, -1])
    new_W = W[:, :-1]
    return new_W[new_inds].sum()
Example #6
    def fit(self):

        group = self.group
        print(group.shape)
        lambdas = self.reg_lambda
        parameter = self.parameter
        X = self.xs
        #print(X.shape)
        y = self.ys

        rng = np.random.RandomState(0)  # seeded RNG for reproducible init
        group = np.asarray(group, dtype=np.int64)

        #print(group.shape[0])
        #print(group.shape[0])
        #print(X.shape[1])
        if group.shape[0] != X.shape[1]:
            raise ValueError('group should be (n_features,)')

        # type check for data matrix
        if not isinstance(X, np.ndarray):
            raise ValueError('Input data should be of type ndarray (got %s).' %
                             type(X))

        n_features = np.int64(X.shape[1])
        if y.ndim == 1:
            y = y[:, np.newaxis]
            self.ys = y
        #print(y.shape)
        n_classes = 1
        n_classes = np.int64(n_classes)

        beta_hat = 1 / n_features * rng.normal(0.0, 1.0,
                                               [n_features, n_classes])
        fit_params = list()

        for l, rl in enumerate(lambdas):
            fit_params.append({'beta': beta_hat})
            if l == 0:
                fit_params[-1]['beta'] = beta_hat
            else:
                fit_params[-1]['beta'] = fit_params[-2]['beta']
            tol = self.tol
            alpha = 1.
            beta = np.zeros([n_features, n_classes])
            beta = fit_params[-1]['beta']
            #print('losser',self._L2loss(beta,X,y))
            g = np.zeros([n_features, n_classes])
            L, DL, L2, PEN = list(), list(), list(), list()
            lamb = self.learning_rate
            bm1 = beta.copy()
            bm2 = beta.copy()
            for t in range(0, self.max_iter):
                L.append(self._loss(beta, rl, X, y))
                L2.append(self._L2loss(beta, X, y))
                PEN.append(self._L1penalty(beta))
                w = (t / (t + 3))
                yk = beta + w * (bm1 - bm2)
                #print('losser',self._L2loss(yk,X,y))
                #print('beforebt',np.linalg.norm(yk),np.linalg.norm(X),np.linalg.norm(y))
                beta, lamb = self._btalgorithm(yk, lamb, .5, 1000, rl)
                #X = self.xs
                #y = self.ys
                #print('losser2',self._L2loss(beta,X,y))
                bm2 = bm1.copy()
                bm1 = beta.copy()
                if t > 1:
                    DL.append(L[-1] - L[-2])
                    if np.abs(DL[-1] / L[-1]) < tol:
                        print('converged', rl)
                        msg = ('\tConverged. Loss function:'
                               ' {0:.2f}').format(L[-1])
                        msg += '\tdL/L: {0:.6f}\n'.format(DL[-1] / L[-1])
                        break

            #print(beta)
            fit_params[-1]['beta'] = beta
            self.lossresults[rl] = L
            self.l2loss[rl] = L2
            self.penalty[rl] = PEN
            self.dls[rl] = DL
            #print(L)
        # Update the estimated variables

        self.fit_ = fit_params
        self.ynull_ = np.mean(y)

        # Return
        return self
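Compared with Example #4, this variant adds Nesterov-style momentum (`w = t / (t + 3)`, extrapolated point `yk`) and delegates the step size to a backtracking routine (`_btalgorithm`), whose signature and body are not shown. A self-contained sketch of that accelerated, backtracking proximal scheme on a plain lasso problem (the soft-thresholding prox and the sufficient-decrease rule below are assumptions about what such a routine commonly does, not the actual `_btalgorithm`):

    import numpy as np

    def soft_threshold(z, thresh):
        # Proximal operator of the L1 penalty.
        return np.sign(z) * np.maximum(np.abs(z) - thresh, 0.0)

    def bt_prox_step(yk, step, shrink, max_tries, reg_lambda, loss, grad):
        # Backtracking proximal step from the extrapolated point yk:
        # shrink the step size until the quadratic upper bound on the
        # smooth loss holds at the candidate point.
        f_yk, g_yk = loss(yk), grad(yk)
        for _ in range(max_tries):
            beta = soft_threshold(yk - step * g_yk, reg_lambda * step)
            d = beta - yk
            bound = f_yk + np.sum(g_yk * d) + np.sum(d * d) / (2.0 * step)
            if loss(beta) <= bound:
                break
            step *= shrink
        return beta, step

    def fista_lasso(X, y, reg_lambda, step=1.0, max_iter=500):
        # Accelerated loop mirroring the w = t/(t+3) momentum and the
        # backtracking call in the fit() above.
        n, p = X.shape
        loss = lambda b: 0.5 * np.mean((X @ b - y) ** 2)
        grad = lambda b: X.T @ (X @ b - y) / n
        beta = np.zeros(p)
        bm1, bm2 = beta.copy(), beta.copy()
        for t in range(max_iter):
            w = t / (t + 3.0)
            yk = beta + w * (bm1 - bm2)          # momentum extrapolation
            beta, step = bt_prox_step(yk, step, 0.5, 1000, reg_lambda,
                                      loss, grad)
            bm2, bm1 = bm1, beta.copy()
        return beta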