Example #1
    def __call__(self, A, Y, rng=np.random, E=None):
        import sklearn.linear_model
        tstart = time.time()
        Y = self.mul_encoders(Y, E, copy=True)  # copy since 'fit' may modify Y

        # TODO: play around with regularization constants (I just guessed).
        #   Do we need to scale regularization by number of neurons, to get
        #   the same level of sparsity? esp. with weights? Currently, setting
        #   l1=1e-3 works well with weights when connecting 1D populations
        #   with 100 neurons each.
        a = self.l1 * A.max()  # L1 regularization
        b = self.l2 * A.max()**2  # L2 regularization
        alpha = a + b
        l1_ratio = a / (a + b)

        # --- solve least-squares A * X = Y
        model = sklearn.linear_model.ElasticNet(alpha=alpha,
                                                l1_ratio=l1_ratio,
                                                fit_intercept=False,
                                                max_iter=self.max_iter)
        model.fit(A, Y)
        X = model.coef_.T
        X.shape = (A.shape[1], Y.shape[1]) if Y.ndim > 1 else (A.shape[1], )
        t = time.time() - tstart
        infos = {'rmses': rmses(A, X, Y), 'time': t}
        return X, infos
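
The method above assumes module-level imports of `time` and `numpy as np`, plus the module's `rmses` helper. The only subtle step is mapping the (l1, l2) pair onto ElasticNet's (alpha, l1_ratio) pair; below is a minimal standalone sketch of that mapping, assuming scikit-learn's documented objective ||A X - Y||^2 / (2 m) + alpha * l1_ratio * ||X||_1 + alpha * (1 - l1_ratio) * ||X||^2 / 2, with illustrative shapes and constants (not taken from the class):

    import numpy as np
    import sklearn.linear_model

    rng = np.random.RandomState(0)
    A = rng.uniform(0, 1, size=(500, 100))  # stand-in activity matrix
    Y = rng.uniform(-1, 1, size=(500, 2))   # stand-in target matrix
    l1, l2 = 1e-3, 1e-6                     # guessed regularization constants

    a = l1 * A.max()        # effective L1 penalty weight
    b = l2 * A.max() ** 2   # effective L2 penalty weight
    model = sklearn.linear_model.ElasticNet(
        alpha=a + b, l1_ratio=a / (a + b),
        fit_intercept=False, max_iter=1000)
    model.fit(A, Y)
    X = model.coef_.T       # (100, 2) decoder matrix
    print("fraction of zero weights:", np.mean(X == 0))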
Example #2
    def __call__(self, A, Y, rng=np.random, E=None):
        tstart = time.time()
        Y, m, n, _, matrix_in = format_system(A, Y)

        # solve for coefficients using standard solver
        X, info0 = self.solver1(A, Y, rng=rng)
        X = self.mul_encoders(X, E)

        # drop weights close to zero, based on `drop` ratio
        Xabs = np.sort(np.abs(X.flat))
        threshold = Xabs[int(np.round(self.drop * Xabs.size))]
        X[np.abs(X) < threshold] = 0

        # retrain nonzero weights
        Y = self.mul_encoders(Y, E)
        info1 = None  # stays None if every column of X was dropped to zero
        for i in range(X.shape[1]):
            nonzero = X[:, i] != 0
            if nonzero.sum() > 0:
                X[nonzero, i], info1 = self.solver2(A[:, nonzero],
                                                    Y[:, i],
                                                    rng=rng)

        t = time.time() - tstart
        info = {
            'rmses': rmses(A, X, Y),
            'info0': info0,
            'info1': info1,
            'time': t
        }
        return X if matrix_in or X.shape[1] > 1 else X.ravel(), info
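
Here `solver1` and `solver2` are pluggable least-squares solvers held by the class. A standalone sketch of the same drop-and-retrain idea, with `np.linalg.lstsq` standing in for both solvers (an assumption; any least-squares solver works) and illustrative shapes and drop ratio:

    import numpy as np

    rng = np.random.RandomState(0)
    A = rng.uniform(0, 1, size=(500, 100))
    Y = rng.uniform(-1, 1, size=(500, 2))
    drop = 0.5

    X = np.linalg.lstsq(A, Y, rcond=None)[0]         # initial solve
    Xabs = np.sort(np.abs(X.flat))
    threshold = Xabs[int(np.round(drop * Xabs.size))]
    X[np.abs(X) < threshold] = 0                     # drop small weights

    for i in range(X.shape[1]):                      # retrain the survivors
        nonzero = X[:, i] != 0
        if nonzero.any():
            X[nonzero, i] = np.linalg.lstsq(
                A[:, nonzero], Y[:, i], rcond=None)[0]
    print("sparsity:", np.mean(X == 0))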
Example #3
    def __call__(self, A, Y, rng=np.random, E=None):
        tstart = time.time()
        Y = self.mul_encoders(Y, E)
        X, residuals2, rank, s = np.linalg.lstsq(A, Y, rcond=self.rcond)
        t = time.time() - tstart
        return X, {
            'rmses': rmses(A, X, Y),
            'residuals': np.sqrt(residuals2),
            'rank': rank,
            'singular_values': s,
            'time': t
        }
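
A quick usage note: `np.linalg.lstsq` returns the sum of squared residuals (hence the `np.sqrt` above), and returns an empty array instead when `A` is rank-deficient or has no more rows than columns. A minimal sketch with illustrative shapes:

    import numpy as np

    rng = np.random.RandomState(0)
    A = rng.uniform(0, 1, size=(500, 100))
    Y = rng.uniform(-1, 1, size=(500, 2))
    X, residuals2, rank, s = np.linalg.lstsq(A, Y, rcond=None)
    print(X.shape, np.sqrt(residuals2))  # (100, 2), one residual per column
    print(rank, s.max() / s.min())       # rank and condition number of A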
Example #4
    def __call__(self, A, Y, rng=np.random, E=None):
        import scipy.optimize

        tstart = time.time()
        Y, m, n, _, matrix_in = format_system(A, Y)
        Y = self.mul_encoders(Y, E, copy=True)
        d = Y.shape[1]

        X = np.zeros((n, d))
        residuals = np.zeros(d)
        for i in range(d):
            X[:, i], residuals[i] = scipy.optimize.nnls(A, Y[:, i])

        t = time.time() - tstart
        info = {'rmses': rmses(A, X, Y), 'residuals': residuals, 'time': t}
        return X if matrix_in or X.shape[1] > 1 else X.ravel(), info
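
`scipy.optimize.nnls` only accepts a single right-hand-side vector, which is why the solver loops over the columns of Y. A minimal standalone sketch with illustrative shapes:

    import numpy as np
    import scipy.optimize

    rng = np.random.RandomState(0)
    A = rng.uniform(0, 1, size=(500, 100))
    Y = rng.uniform(0, 1, size=(500, 2))
    X = np.column_stack(
        [scipy.optimize.nnls(A, Y[:, i])[0] for i in range(Y.shape[1])])
    assert (X >= 0).all()  # weights are nonnegative by construction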
Example #5
    def _solve(self, A, Y, rng, E, sigma=0.):
        import scipy.optimize

        tstart = time.time()
        Y, m, n, _, matrix_in = format_system(A, Y)
        Y = self.mul_encoders(Y, E, copy=True)
        d = Y.shape[1]

        # form Gram matrix so we can add regularization
        GA = np.dot(A.T, A)
        np.fill_diagonal(GA, GA.diagonal() + A.shape[0] * sigma**2)
        GY = np.dot(A.T, Y.clip(0, None))
        # ^ TODO: why is it better if we clip Y to be positive here?

        X = np.zeros((n, d))
        residuals = np.zeros(d)
        for i in range(d):
            X[:, i], residuals[i] = scipy.optimize.nnls(GA, GY[:, i])

        t = time.time() - tstart
        info = {'rmses': rmses(A, X, Y), 'residuals': residuals, 'time': t}
        return X if matrix_in or X.shape[1] > 1 else X.ravel(), info
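
Filling the diagonal of the Gram matrix amounts to solving the L2-regularized normal equations (A^T A + m sigma^2 I) X = A^T Y under the nonnegativity constraint, which also keeps each per-column NNLS problem at size n x n instead of m x n. A standalone sketch with illustrative shapes and sigma:

    import numpy as np
    import scipy.optimize

    rng = np.random.RandomState(0)
    A = rng.uniform(0, 1, size=(500, 100))
    Y = rng.uniform(0, 1, size=(500, 2))
    sigma = 0.1

    GA = A.T.dot(A) + A.shape[0] * sigma ** 2 * np.eye(A.shape[1])
    GY = A.T.dot(Y.clip(0, None))  # same clip as above; see the TODO there
    X = np.column_stack(
        [scipy.optimize.nnls(GA, GY[:, i])[0] for i in range(GY.shape[1])])
    print(X.shape)  # (100, 2)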