Example #1
    def __call__(self, A, Y, rng=np.random):
        import sklearn.linear_model
        tstart = time.time()
        Y = np.array(Y)  # copy since 'fit' may modify Y

        # TODO: play around with regularization constants (I just guessed).
        #   Do we need to scale regularization by number of neurons, to get
        #   same level of sparsity? esp. with weights? Currently, setting
        #   l1=1e-3 works well with weights when connecting 1D populations
        #   with 100 neurons each.
        a = self.l1 * A.max()      # L1 regularization
        b = self.l2 * A.max()**2   # L2 regularization
        alpha = a + b
        l1_ratio = a / (a + b)

        # --- solve least-squares A * X = Y
        model = sklearn.linear_model.ElasticNet(
            alpha=alpha, l1_ratio=l1_ratio, fit_intercept=False,
            max_iter=self.max_iter)
        model.fit(A, Y)
        X = model.coef_.T
        X.shape = (A.shape[1], Y.shape[1]) if Y.ndim > 1 else (A.shape[1],)
        t = time.time() - tstart
        infos = {'rmses': rmses(A, X, Y), 'time': t}
        return X, infos
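A note on the regularization mapping above: scikit-learn's `ElasticNet` takes a single `alpha` plus an `l1_ratio`, so the separate L1 and L2 strengths `a` and `b` have to be folded into those two parameters. A minimal standalone sketch (the values for `l1`, `l2`, and `A_max` are made up, standing in for `self.l1`, `self.l2`, and `A.max()`) showing that the mapping is invertible:

    import numpy as np

    # Hypothetical stand-ins for self.l1, self.l2, and A.max() in the example.
    l1, l2 = 1e-3, 1e-4
    A_max = 50.0

    a = l1 * A_max        # L1 strength
    b = l2 * A_max ** 2   # L2 strength
    alpha = a + b
    l1_ratio = a / (a + b)

    # sklearn's ElasticNet penalty is alpha * l1_ratio * ||x||_1
    # + 0.5 * alpha * (1 - l1_ratio) * ||x||_2^2, so the original
    # strengths can be read back off (alpha, l1_ratio):
    assert np.isclose(alpha * l1_ratio, a)
    assert np.isclose(alpha * (1 - l1_ratio), b)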
Example #2
    def __call__(self, A, Y, rng=None, E=None):
        tstart = time.time()
        Y, m, n, d, matrix_in = format_system(A, Y)

        # solve for coefficients using standard solver
        X, info0 = self.solver1(A, Y, rng=rng)
        X = self.mul_encoders(X, E)

        # drop weights close to zero, based on `drop` ratio
        Xabs = np.sort(np.abs(X.flat))
        threshold = Xabs[int(np.round(self.drop * Xabs.size))]
        X[np.abs(X) < threshold] = 0

        # retrain nonzero weights
        Y = self.mul_encoders(Y, E)
        for i in range(X.shape[1]):
            nonzero = X[:, i] != 0
            if nonzero.sum() > 0:
                X[nonzero, i], info1 = self.solver2(
                    A[:, nonzero], Y[:, i], rng=rng)

        t = time.time() - tstart
        info = {'rmses': rmses(A, X, Y), 'info0': info0, 'info1': info1,
                'time': t}
        return X if matrix_in else X.flatten(), info
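The drop threshold above is essentially a quantile of the absolute weights: sorting `|X|` and indexing at `drop * size` picks the value below which roughly a `drop` fraction of the weights lie (note that `int(np.round(self.drop * Xabs.size))` would index past the end of the array for `drop == 1.0`). A small standalone sketch with made-up `X` and `drop`, comparing against `np.quantile`:

    import numpy as np

    rng = np.random.default_rng(0)
    X = rng.normal(size=(100, 5))   # stand-in for the decoders from solver1
    drop = 0.5                      # fraction of weights to zero out

    Xabs = np.sort(np.abs(X.flat))
    threshold = Xabs[int(np.round(drop * Xabs.size))]

    # Roughly the `drop` quantile of |X| (np.quantile interpolates,
    # so the two values differ slightly).
    print(threshold, np.quantile(np.abs(X), drop))

    X_sparse = np.where(np.abs(X) < threshold, 0.0, X)
    print("fraction zeroed:", np.mean(X_sparse == 0))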
Example #3
 def __call__(self, A, Y, rng=np.random):
     tstart = time.time()
     X, residuals2, rank, s = np.linalg.lstsq(A, Y, rcond=self.rcond)
     t = time.time() - tstart
     return X, {'rmses': rmses(A, X, Y),
                'residuals': np.sqrt(residuals2),
                'rank': rank,
                'singular_values': s,
                'time': t}
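One caveat for the `np.linalg.lstsq` examples: the second return value holds the sums of squared residuals per column, and NumPy returns it as an empty array when `A` is rank-deficient or has no more rows than columns, so the `'residuals'` entry above can be empty. A minimal standalone sketch with made-up matrices illustrating both cases:

    import numpy as np

    rng = np.random.default_rng(0)

    # Overdetermined, full-rank system: residuals are reported per column.
    A = rng.normal(size=(20, 5))
    Y = rng.normal(size=(20, 2))
    X, residuals2, rank, s = np.linalg.lstsq(A, Y, rcond=None)
    print(residuals2.shape)   # (2,) -- one sum of squared residuals per column of Y

    # Square (or rank-deficient) system: residuals come back empty.
    A = rng.normal(size=(5, 5))
    Y = rng.normal(size=(5, 2))
    X, residuals2, rank, s = np.linalg.lstsq(A, Y, rcond=None)
    print(residuals2.shape)   # (0,)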
Example #4
 def _solve(self, A, Y, rng, E, sigma):
     tstart = time.time()
     # form Gram matrix so we can add regularization
     GA = np.dot(A.T, A)
     GY = np.dot(A.T, Y)
     np.fill_diagonal(GA, GA.diagonal() + A.shape[0] * sigma**2)
     X, info = super(NnlsL2, self).__call__(GA, GY, rng=rng, E=E)
     t = time.time() - tstart
     # recompute the RMSE in terms of the original matrices
     info = {'rmses': rmses(A, X, Y), 'gram_info': info, 'time': t}
     return X, info
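The Gram-matrix trick in `_solve` above is ridge (L2) regularization written through the normal equations: adding `A.shape[0] * sigma**2` to the diagonal of `A.T @ A` amounts to solving `(A^T A + m sigma^2 I) X = A^T Y`. A small standalone sketch with made-up data, using an ordinary linear solve (not the non-negative solver used above) just to show what the diagonal term does, checked against the equivalent row-augmented formulation:

    import numpy as np

    rng = np.random.default_rng(0)
    m, n, d = 50, 10, 3
    A = rng.normal(size=(m, n))
    Y = rng.normal(size=(m, d))
    sigma = 0.1

    # Regularized normal equations, as in the example above.
    GA = A.T @ A + m * sigma**2 * np.eye(n)
    GY = A.T @ Y
    X_gram = np.linalg.solve(GA, GY)

    # Equivalent formulation: append sqrt(m)*sigma*I rows to A and zeros to Y.
    A_aug = np.vstack([A, np.sqrt(m) * sigma * np.eye(n)])
    Y_aug = np.vstack([Y, np.zeros((n, d))])
    X_aug = np.linalg.lstsq(A_aug, Y_aug, rcond=None)[0]

    assert np.allclose(X_gram, X_aug)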
Example #5
 def __call__(self, A, Y, rng=np.random):
     tstart = time.time()
     X, residuals2, rank, s = np.linalg.lstsq(A, Y, rcond=self.rcond)
     t = time.time() - tstart
     return X, {
         'rmses': rmses(A, X, Y),
         'residuals': np.sqrt(residuals2),
         'rank': rank,
         'singular_values': s,
         'time': t
     }
Example #6
 def _solve(self, A, Y, rng, E, sigma):
     tstart = time.time()
     # form Gram matrix so we can add regularization
     GA = np.dot(A.T, A)
     GY = np.dot(A.T, Y)
     np.fill_diagonal(GA, GA.diagonal() + A.shape[0] * sigma**2)
     X, info = super(NnlsL2, self).__call__(GA, GY, rng=rng, E=E)
     t = time.time() - tstart
     # recompute the RMSE in terms of the original matrices
     info = {'rmses': rmses(A, X, Y), 'gram_info': info, 'time': t}
     return X, info
Example #7
    def __call__(self, A, Y, rng=None, E=None):
        tstart = time.time()
        Y, m, n, _, matrix_in = format_system(A, Y)

        sigma = A.max() * self.reg  # magnitude of noise

        Y = self.mul_encoders(Y, E, copy=True)
        n_post = Y.shape[1]
        n_inh = int(self.p_inh * n)

        # form Gram matrix so we can add regularization
        GA = np.dot(A.T, A)
        np.fill_diagonal(GA, GA.diagonal() + A.shape[0] * sigma**2)
        GY = np.dot(A.T, Y)

        # flip the sign of the inhibitory neurons so we can do all
        #  the solving at once as a non-negative minimization
        GA[:, :n_inh] *= -1

        X = np.zeros((n, n_post))
        residuals = np.zeros(n_post)
        for j in range(n_post):
            if self.sparsity > 0:
                # choose random indices to keep
                N = GY.shape[0]
                S = N - int(N * self.sparsity)
                indices = rng.choice(np.arange(N), S, replace=False)
                sA = GA[indices, :][:, indices]
                sY = GY[indices, j]
            else:
                sA = GA
                sY = GY[:, j]
                indices = slice(None)

            # call nnls to do the non-negative least-squares minimization
            X[indices, j], residuals[j] = nnls(sA, sY)

        # flip the sign of the weights for the inhibitory neurons
        X[:n_inh, :] *= (-1)

        # compute the resulting rmse
        rms = rmses(A, X, Y)

        t = time.time() - tstart
        info = {
            'rmses': rms,
            'residuals': residuals / Y.shape[0],
            'time': t,
            'n_inh': n_inh
        }

        return X, info
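The sign-flip trick in the solver above lets a single non-negative least-squares call handle both excitatory and inhibitory neurons: the first `n_inh` columns are negated before the solve, and the corresponding rows of `X` are flipped back afterwards, so those weights end up non-positive. A tiny standalone sketch with made-up data, using `scipy.optimize.nnls` directly on the sign-flipped activity matrix rather than the Gram-matrix system used above:

    import numpy as np
    from scipy.optimize import nnls

    rng = np.random.default_rng(0)
    m, n, n_inh = 30, 8, 3          # the n_inh leading columns are 'inhibitory'
    A = rng.uniform(0, 1, size=(m, n))
    y = rng.normal(size=m)

    A_flip = A.copy()
    A_flip[:, :n_inh] *= -1          # negate inhibitory columns
    x, _ = nnls(A_flip, y)           # x >= 0 elementwise

    w = x.copy()
    w[:n_inh] *= -1                  # flip back: inhibitory weights are now <= 0

    assert np.all(w[:n_inh] <= 0) and np.all(w[n_inh:] >= 0)
    # A_flip @ x and A @ w give the same fit to y.
    assert np.allclose(A_flip @ x, A @ w)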
Example #8
 def __call__(self, A, Y, rng=np.random):
     tstart = time.time()
     X, residuals2, rank, s = np.linalg.lstsq(A, Y, rcond=self.rcond)
     t = time.time() - tstart
     return (
         X,
         {
             "rmses": rmses(A, X, Y),
             "residuals": np.sqrt(residuals2),
             "rank": rank,
             "singular_values": s,
             "time": t,
         },
     )
Example #9
    def __call__(self, A, Y, rng=np.random):
        import scipy.optimize  # pylint: disable=import-outside-toplevel

        tstart = time.time()
        Y, _, n, _, matrix_in = format_system(A, Y)
        d = Y.shape[1]

        X = np.zeros((n, d))
        residuals = np.zeros(d)
        for i in range(d):
            X[:, i], residuals[i] = scipy.optimize.nnls(A, Y[:, i])

        t = time.time() - tstart
        info = {"rmses": rmses(A, X, Y), "residuals": residuals, "time": t}
        return X if matrix_in or X.shape[1] > 1 else X.ravel(), info
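The column loop in the NNLS examples is there because `scipy.optimize.nnls` only accepts a single right-hand-side vector, unlike `np.linalg.lstsq`, which handles every column of `Y` at once. A minimal standalone sketch of the same pattern with made-up data:

    import numpy as np
    from scipy.optimize import nnls

    rng = np.random.default_rng(0)
    A = rng.uniform(0, 1, size=(40, 6))
    Y = rng.normal(size=(40, 3))

    X = np.zeros((A.shape[1], Y.shape[1]))
    residuals = np.zeros(Y.shape[1])
    for i in range(Y.shape[1]):
        # nnls takes one target vector at a time, so solve column by column.
        X[:, i], residuals[i] = nnls(A, Y[:, i])

    print(X.shape, residuals)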
Example #10
    def __call__(self, A, Y, rng=None, E=None):
        import scipy.optimize

        tstart = time.time()
        Y, m, n, d, matrix_in = format_system(A, Y)
        Y = self.mul_encoders(Y, E)

        X = np.zeros((n, d))
        residuals = np.zeros(d)
        for i in range(d):
            X[:, i], residuals[i] = scipy.optimize.nnls(A, Y[:, i])

        t = time.time() - tstart
        info = {'rmses': rmses(A, X, Y), 'residuals': residuals, 'time': t}
        return X if matrix_in else X.flatten(), info
Example #11
    def __call__(self, A, Y, rng=np.random):
        import scipy.optimize

        tstart = time.time()
        Y, m, n, _, matrix_in = format_system(A, Y)
        d = Y.shape[1]

        X = np.zeros((n, d))
        residuals = np.zeros(d)
        for i in range(d):
            X[:, i], residuals[i] = scipy.optimize.nnls(A, Y[:, i])

        t = time.time() - tstart
        info = {'rmses': rmses(A, X, Y), 'residuals': residuals, 'time': t}
        return X if matrix_in or X.shape[1] > 1 else X.ravel(), info
Example #12
    def __call__(self, A, Y, rng=None, E=None):
        import scipy.optimize

        tstart = time.time()
        Y, m, n, d, matrix_in = format_system(A, Y)
        Y = self.mul_encoders(Y, E)

        X = np.zeros((n, d))
        residuals = np.zeros(d)
        for i in range(d):
            X[:, i], residuals[i] = scipy.optimize.nnls(A, Y[:, i])

        t = time.time() - tstart
        info = {'rmses': rmses(A, X, Y), 'residuals': residuals, 'time': t}
        return X if matrix_in else X.flatten(), info
Example #13
    def _solve(self, A, Y, sigma=0.0):
        import scipy.optimize  # pylint: disable=import-outside-toplevel

        tstart = time.time()
        Y, _, n, _, matrix_in = format_system(A, Y)
        d = Y.shape[1]

        # form Gram matrix so we can add regularization
        GA = np.dot(A.T, A)
        np.fill_diagonal(GA, GA.diagonal() + A.shape[0] * sigma**2)
        GY = np.dot(A.T, np.maximum(Y, 0))
        # ^ TODO: why is it better if we clip Y to be positive here?

        X = np.zeros((n, d))
        residuals = np.zeros(d)
        for i in range(d):
            X[:, i], residuals[i] = scipy.optimize.nnls(GA, GY[:, i])

        t = time.time() - tstart
        info = {"rmses": rmses(A, X, Y), "residuals": residuals, "time": t}
        return X if matrix_in or X.shape[1] > 1 else X.ravel(), info
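For comparison with the Gram form above (which hands the normal-equations system `GA`, `GY` straight to `nnls`), the L2-regularized non-negative problem can also be written exactly as an ordinary NNLS on a row-augmented system. A standalone sketch with made-up data; this is not how the class above does it, just the textbook alternative shown side by side:

    import numpy as np
    from scipy.optimize import nnls

    rng = np.random.default_rng(0)
    m, n = 40, 8
    A = rng.uniform(0, 1, size=(m, n))
    y = rng.normal(size=m)
    sigma = 0.1

    # min_{x >= 0} ||A x - y||^2 + m * sigma^2 * ||x||^2,
    # written as plain NNLS on an augmented system.
    A_aug = np.vstack([A, np.sqrt(m) * sigma * np.eye(n)])
    y_aug = np.concatenate([y, np.zeros(n)])
    x_aug, _ = nnls(A_aug, y_aug)

    # Gram-matrix variant, as in the example above (single column, no clipping).
    GA = A.T @ A + m * sigma**2 * np.eye(n)
    Gy = A.T @ y
    x_gram, _ = nnls(GA, Gy)

    print(np.round(x_aug, 3))
    print(np.round(x_gram, 3))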
Example #14
    def _solve(self, A, Y, sigma=0.):
        import scipy.optimize

        tstart = time.time()
        Y, m, n, _, matrix_in = format_system(A, Y)
        d = Y.shape[1]

        # form Gram matrix so we can add regularization
        GA = np.dot(A.T, A)
        np.fill_diagonal(GA, GA.diagonal() + A.shape[0] * sigma**2)
        GY = np.dot(A.T, Y.clip(0, None))
        # ^ TODO: why is it better if we clip Y to be positive here?

        X = np.zeros((n, d))
        residuals = np.zeros(d)
        for i in range(d):
            X[:, i], residuals[i] = scipy.optimize.nnls(GA, GY[:, i])

        t = time.time() - tstart
        info = {'rmses': rmses(A, X, Y), 'residuals': residuals, 'time': t}
        return X if matrix_in or X.shape[1] > 1 else X.ravel(), info
Example #15
    def __call__(self, A, Y, rng=np.random):
        tstart = time.time()
        Y, m, n, _, matrix_in = format_system(A, Y)

        # solve for coefficients using standard solver
        X, info0 = self.solver1(A, Y, rng=rng)

        # drop weights close to zero, based on `drop` ratio
        Xabs = np.sort(np.abs(X.flat))
        threshold = Xabs[int(np.round(self.drop * Xabs.size))]
        X[np.abs(X) < threshold] = 0

        # retrain nonzero weights
        for i in range(X.shape[1]):
            nonzero = X[:, i] != 0
            if nonzero.sum() > 0:
                X[nonzero, i], info1 = self.solver2(A[:, nonzero], Y[:, i], rng=rng)

        t = time.time() - tstart
        info = {"rmses": rmses(A, X, Y), "info0": info0, "info1": info1, "time": t}
        return X if matrix_in or X.shape[1] > 1 else X.ravel(), info
Example #16
def test_accuracy():
    N = 200
    p_inh = 0.2
    sparsity = 0.5
    tols = [None, 1e-80, 1e-40, 1e-30, 1e-12, 1e-10, 1e-8, 1e-6, 1e-4, 1e-2]
    model = nengo.Network(seed=0)
    with model:
        a = nengo.Ensemble(n_neurons=N, dimensions=3, seed=1)
        b = nengo.Ensemble(n_neurons=N, dimensions=3, seed=2)
        cs = [
            nengo.Connection(
                a, b, solver=nengo_solver_dales.DalesL2(
                    reg=0.1, p_inh=p_inh, sparsity=sparsity, tol=tol))
            for tol in tols]
    sim = nengo.Simulator(model)

    import pylab
    # note: this rebinds `a` from the Ensemble to its activity matrix
    x, a = nengo.utils.ensemble.tuning_curves(a, sim, inputs=sim.data[a].eval_points)
    enc = sim.data[b].scaled_encoders
    target = np.dot(x, enc.T)

    ws = [sim.data[c].weights for c in cs]
    ts = [sim.data[c].solver_info['time'] for c in cs]

    actuals = [np.dot(a, w.T) for w in ws]

    rms = [np.mean(rmses(a, w.T, target)) for w in ws]

    print(rms)
    print(ts)

    for i in range(len(tols)):
        pylab.subplot(1, len(tols), i+1)
        pylab.scatter(target, actuals[i], s=1)
        pylab.title(rms[i])

    pylab.figure()
    pylab.plot(rms)
    pylab.twinx()
    pylab.plot(ts)
    pylab.xticks(range(len(tols)), tols)
    pylab.show()
Example #17
    def __call__(self, A, Y, rng=None, E=None):
        import sklearn.linear_model
        tstart = time.time()
        Y = self.mul_encoders(Y, E, copy=True)  # copy since 'fit' may modify Y

        # TODO: play around with regularization constants (I just guessed).
        #   Do we need to scale regularization by number of neurons, to get
        #   same level of sparsity? esp. with weights? Currently, setting
        #   l1=1e-3 works well with weights when connecting 1D populations
        #   with 100 neurons each.
        a = self.l1 * A.max()      # L1 regularization
        b = self.l2 * A.max()**2   # L2 regularization
        alpha = a + b
        l1_ratio = a / (a + b)

        # --- solve least-squares A * X = Y
        model = sklearn.linear_model.ElasticNet(
            alpha=alpha, l1_ratio=l1_ratio, fit_intercept=False, max_iter=1000)
        model.fit(A, Y)
        X = model.coef_.T
        X.shape = (A.shape[1], Y.shape[1]) if Y.ndim > 1 else (A.shape[1],)
        t = time.time() - tstart
        infos = {'rmses': rmses(A, X, Y), 'time': t}
        return X, infos
Example #18
    def __call__(self, A, Y, rng=None, E=None):
        tstart = time.time()
        Y, m, n, _, matrix_in = format_system(A, Y)
        
        sigma = A.max() * self.reg    # magnitude of noise

        Y = self.mul_encoders(Y, E, copy=True)
        n_post = Y.shape[1]
        n_inh = int(self.p_inh * n)

        # flip the sign of the inhibitory neurons so we can do all
        #  the solving at once as a non-negative minimization
        A[:, :n_inh] *= -1

        # form Gram matrix so we can add regularization
        GA = np.dot(A.T, A)
        np.fill_diagonal(GA, GA.diagonal() + A.shape[0] * sigma ** 2)
        GY = np.dot(A.T, Y)

        X = np.zeros((n, n_post))
        residuals = np.zeros(n_post)

        if self.multiprocess:
            pool = multiprocessing.Pool(processes=multiprocessing.cpu_count())
            args = []
            if self.sparsity > 0:
                all_indices = []
                for j in range(n_post):
                    N = GY.shape[0]
                    S = N - int(N * self.sparsity)
                    indices = rng.choice(np.arange(N), S, replace=False)
                    args.append((GA[indices, :][:, indices], GY[indices, j]))
                    all_indices.append(indices)
                r = pool.starmap(nnls, args)
                for j, (XX, res) in enumerate(r):
                    X[all_indices[j], j] = XX
                    residuals[j] = res
            else:
                for j in range(n_post):
                    args.append((GA, GY[:, j]))
                if self.tol is None:
                    r = pool.starmap(nnls, args)
                else:
                    args2 = [(a[0], a[1], self.tol) for a in args]
                    r2 = pool.starmap(nnls_predotted, args2)
                    r = [(rr, 0) for rr in r2]
                for j, (XX, res) in enumerate(r):
                    X[:, j] = XX
                    residuals[j] = res
        else:
            for j in range(n_post):
                if self.sparsity > 0:
                    # choose random indices to keep
                    N = GY.shape[0]
                    S = N - int(N * self.sparsity)
                    indices = rng.choice(np.arange(N), S, replace=False)
                    sA = GA[indices, :][:, indices]
                    sY = GY[indices, j]
                else:
                    sA = GA
                    sY = GY[:, j]
                    indices = slice(None)

                # call nnls to do the non-negative least-squares minimization
                if self.tol is None:
                    X[indices, j], residuals[j] = nnls(sA, sY)
                else:
                    X[indices, j] = nnls_predotted(sA, sY, self.tol)

        # flip the sign of the weights for the inhibitory neurons
        X[:n_inh, :] *= (-1)
        
        # compute the resulting rmse
        rms = rmses(A, X, Y)
        
        t = time.time() - tstart
        info = {'rmses': rms,
                'residuals': residuals / Y.shape[0],
                'time': t,
                'n_inh': n_inh}

        return X, info
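One note on the multiprocessing branch above: the `Pool` created there is never closed, so a long-running process can accumulate idle workers. A hedged standalone sketch of the same `starmap` pattern with the pool used as a context manager; the helper `solve_columns` and the made-up data are illustrations, not part of the solver above:

    import multiprocessing

    import numpy as np
    from scipy.optimize import nnls

    def solve_columns(GA, GY):
        """Solve each column of GY against GA with nnls, in parallel."""
        args = [(GA, GY[:, j]) for j in range(GY.shape[1])]
        # The context manager shuts the pool down when the block exits.
        with multiprocessing.Pool(processes=multiprocessing.cpu_count()) as pool:
            results = pool.starmap(nnls, args)
        X = np.column_stack([x for x, _ in results])
        residuals = np.array([res for _, res in results])
        return X, residuals

    if __name__ == "__main__":
        rng = np.random.default_rng(0)
        A = rng.uniform(0, 1, size=(50, 10))
        Y = rng.normal(size=(50, 4))
        GA = A.T @ A + 0.1 * np.eye(10)
        GY = A.T @ Y
        X, residuals = solve_columns(GA, GY)
        print(X.shape, residuals)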