Example no. 1
0
def test(n=10):
    """ 
    the model to be optimized here is a quadratic form
    f(x) = 1/2 (Ax-a)^2, the gradient is A'(Ax-a). Find parameters x given data A, a
    """
    a = np.random.randn(n)
    x = np.abs(np.random.randn(n)*3)
    A = np.random.randn(n,n)
    A = np.dot(A.T, A)
    A = np.eye(n) + A
    
    print 'Eigenvalues: ', np.linalg.eigvals(A)

    args  = (A, a) # parameters of the model to optimize: plug in PCA data here.
    lam   = .1 # this is a sparseness parameter?!
    m     = 8 #would increase this to the dimension of problem to 
    maxit = 300
    obj   = Obj(f, args) # creating an instantiation of 
    # to call qn.owLBFGS need to package the function to optimize in an object.
    q = qn.owlbfgs(obj, n, lam=lam * np.ones((n)),
                   debug=True, maxit=maxit, m=m, pos=True) # orthant wise LBFGS, dimensions, lambdas, iterations...
    ret = q.run(x) # q is a c function, no idea what this does

    b = np.dot(np.linalg.inv(A), a) # true minimumu
    print 'True min: ', f(b, np.zeros_like(x), (A, a))    
    print 'True argmin: ', b
    print 'Initial value: ', x
    print 'My solution: ', ret
    print 'Error: ', np.linalg.norm(ret-b)
Example no. 2
0
def sparseqn_batch(phi,
                   X,
                   lam=1.,
                   maxit=25,
                   positive=False,
                   Sin=None,
                   debug=False,
                   delta=0.01,
                   past=6,
                   mask=None):
    """Infer sparse coefficients for every batch in X with orthant-wise LBFGS.

    phi       - basis, shape (C, N, P)
    X         - array of batches; X.shape[0] batches, time length X.shape[2]
    lam       - scalar L1 penalty, broadcast over all coefficients
    maxit     - maximum quasi-newton steps
    positive  - positive only coefficients
    Sin       - warm start for coefficients (not modified)
    debug     - print debug info
    delta     - sufficient decrease condition
    past      - history used by the decrease test
    mask      - an array of dims of coefficients used
                to mask derivative

    Returns the (npats, N, alen) coefficient array.
    """
    C, N, P = phi.shape
    npats = X.shape[0]
    T = X.shape[2]
    alen = T + P - 1

    # objective wraps basis, data and the optional derivative mask
    obj = Objective(phi, X, mask)

    # start from the warm-start values when given, zeros otherwise
    A = np.zeros((npats, N, alen)) if Sin is None else Sin.copy()

    # broadcast the scalar penalty over every coefficient ...
    lam = lam * np.ones(N * alen)
    # ... but don't regularize coefficients that are masked out
    if mask is not None:
        lam *= mask.flatten()

    q = qn.owlbfgs(obj, N * alen, lam=lam, debug=debug, maxit=maxit,
                   delta=delta, past=past, pos=positive)

    # solve each batch independently, resetting solver state between runs
    for i in range(npats):
        q.clear()
        A[i] = q.run(A[i].flatten()).reshape(A[i].shape)
        obj.indx += 1

    return A
Example no. 3
0
  def sparseqn_batch(self, phi, X, lam=1., maxit=25,
                     positive=False, Sin=None, debug=True,
                     delta=0.01, past=6, mask=None):
      """Use quasinewton method to infer coefficients.

      Parameters
      ----------
      phi : 3d array
        Basis, shape (C, N, P)
      X : 2d array
        Patch; only the last axis (time length T) is used here
      lam : float
        L1 penalty, broadcast over all N * (T + P - 1) coefficients
      maxit : int
        Maximum number of quasi-newton iterations
      positive : bool
        If True, only allow positive coefficients
      Sin : 2d array
        Starting value for coefficients; if None, zeros are used.
        Sin is not modified.
      debug : bool
        Print debugging information
      delta : float
        Sufficient-decrease condition passed to the solver
        (presumably as in libLBFGS OWL-QN -- TODO confirm)
      past : int
        History length for the sufficient-decrease test
        (presumably as in libLBFGS OWL-QN -- TODO confirm)
      mask : 2d array
        An array of dims of coefficients used to mask derivative.

      Returns
      -------
      2d array
        Coefficients for this dataset and this basis, shape (N, alen).
      """
      C, N, P = phi.shape
      T = X.shape[-1]
      alen = T + P - 1
      # BUG FIX: the previous `Sin if Sin else ...` raised ValueError
      # (ambiguous truth value) for any multi-element warm-start array and
      # silently fell back to zeros for an empty/all-zero one.  Test against
      # None instead, and copy so the caller's Sin is never modified
      # (matching the module-level sparseqn_batch variants).
      A = Sin.copy() if Sin is not None else np.zeros((N, alen))
      lam = lam * np.ones(N * alen)

      # don't regularize coefficients that are masked out
      if mask is not None:
          lam *= mask.flatten()

      # instantiate objective class
      obj = Owlbfgs.Objective(phi, X, mask)

      q = qn.owlbfgs(obj, N * alen, lam=lam,
                     debug=debug, maxit=maxit, delta=delta,
                     past=past, pos=positive)

      A = q.run(A.flatten()).reshape(N, alen)
      return A
Example no. 4
0
def sparseqn_batch(phi, X, lam=1., maxit=25,
                   positive=False, Sin=None, debug=False,
                   delta=0.01, past=6, mask=None):
    """Quasi-newton (orthant-wise LBFGS) coefficient inference per batch.

    phi       - basis, shape (C, N, P)
    X         - array of batches
    maxit     - maximum quasi-newton steps
    positive  - positive only coefficients
    Sin       - warm start for coefficients
    debug     - print debug info
    delta     - sufficient decrease condition
    past      - history used by the decrease test
    mask      - an array of dims of coefficients used
                to mask derivative

    Sin is not modified
    """
    C, N, P = phi.shape
    npats, T = X.shape[0], X.shape[2]
    alen = T + P - 1

    # objective wrapping the basis, data and optional mask
    obj = Objective(phi, X, mask)

    if Sin is None:
        A = np.zeros((npats, N, alen))  # cold start
    else:
        A = Sin.copy()                  # warm start; leave Sin untouched

    # scalar penalty broadcast over every coefficient; masked-out
    # coefficients get zero penalty
    penalty = lam * np.ones(N * alen)
    if mask is not None:
        penalty *= mask.flatten()

    solver = qn.owlbfgs(obj, N * alen, lam=penalty,
                        debug=debug, maxit=maxit, delta=delta,
                        past=past, pos=positive)

    for batch in range(npats):
        solver.clear()  # reset solver state between batches
        flat = solver.run(A[batch].flatten())
        A[batch] = flat.reshape(A[batch].shape)
        obj.indx += 1

    return A
Example no. 5
0
def test(n):
    """Debug/benchmark script: solve a penalized quadratic with owLBFGS.

    Builds a test matrix, runs qn.owlbfgs from a random start, and prints
    the solution against the analytic minimizer b = A^-1 a.  Most of the
    alternative solvers below are commented out.
    """
    display = True if n == 2 else False
    a = np.random.randn(n)
    x = np.abs(np.random.randn(n) * 3)
    A = np.random.randn(n, n)
    A = np.dot(A.T, A)
    A = np.eye(n) + A

    # NOTE(review): this overwrites the SPD matrix constructed just above,
    # so those four lines only consume RNG draws; `power` is defined
    # elsewhere -- presumably returns an n x n test matrix. Confirm intent.
    A = power(n, 2)
    # A = np.eye(n)

    print "Eigenvalues: ", np.linalg.eigvals(A)

    args = (A, a)   # data defining the quadratic model
    lam = 0.02      # L1 penalty weight
    m = 8           # LBFGS memory
    maxit = 300
    obj = Obj(f, args)  # wrap f so the solver can call it
    #    q = qn.l1_penalty(obj, lam=lam * np.ones((n)))
    q = qn.owlbfgs(obj, n, lam=lam * np.ones((n)), debug=False, maxit=maxit, m=m, pos=False)
    #   q = qn.owlbfgs(f, n, args=args, lam=lam * np.ones((n)), debug=True, maxit=maxit, m=m, stype='wolfe')
    x2 = x.copy()   # separate copies so each solver starts from the same x
    x3 = x.copy()
    tic = now()
    ret = q.run(x2)
    # NOTE(review): T1 is never read by live code (the commented prints
    # below use lowercase t1/t2) -- leftover timing scaffolding.
    T1 = now() - tic

    # (ret, allvec) = q.run(x)

    b = np.dot(np.linalg.inv(A), a)  # analytic minimizer of the smooth part
    print "True min: ", f(b, np.zeros_like(x), (A, a))
    print "True argmin: ", b
    print "Initial value: ", x
    print "My solution: ", ret
    print "Error: ", np.linalg.norm(ret - b)
    if False:  # disabled scipy BFGS comparison
        print "BFGS:"
        res = fmin_bfgs(f2, x, fprime=df2, args=args, full_output=1, retall=1)
        print res[0]
        print "Error: ", np.linalg.norm(res[0] - b)
        # print 'Hessian: ', np.linalg.inv(res[3])

    # path used by the commented-out h5 logging below
    logfile = os.path.join(os.path.expanduser("~"), "sn", "py", "spikes", "qn", "test.h5")

    #     tic = now()
    #     res2 = lbfgs.bfgsl1(f3, x3, lam = lam, args=args, debug=False, log=None)
    #     t2 = now() - tic
    #     print 'bfgsl1: ', t1, f4(res2, A, a, lam)[0], res2
    #     print 'qn: ', t2, f4(ret, A, a, lam)[0], ret

    #     bounds = ((0, None),) * n
    #     tic = now()
    #     x, _, _ = fmin_l_bfgs_b(f4, x3,args=(A,a,lam), bounds=bounds, maxfun=maxit)
    #     t3 = now() - tic
    #     print 'lbfgsb: ', t3, f4(x, A, a, lam)[0], x

    #     if n < 10:
    #         print 'A.T A: ', np.dot(A.T, A)
    #         B = q.unroll()
    #         print 'B^-1: ', np.linalg.inv(B)

    # h5 = h5py.File(logfile, 'r')
    # allvec5 = h5['x'][:]
    # h5.close()

    # NOTE(review): dead branch -- `allvec` is only assigned in the
    # commented-out run above, so enabling this would raise NameError.
    if False and display:
        plot_path(allvec, args, 1, f)
        # plot_path(allvec2, args, 2, f)

    # shadows the builtin `input`; blocks until the user presses Enter
    input = raw_input()