Example #1
# Imports inferred for this snippet: hstack and dot come from NumPy, and
# linalg.cg is scipy.sparse.linalg.cg; jacobian, residuals, and discr are
# helper functions defined elsewhere in the source module.
from numpy import hstack, dot
from scipy.sparse import linalg

def iteration(points, v, sig, tvector):
    x = hstack((v, tvector))
    J = jacobian(v, sig, tvector)
    r = residuals(points[:, 0], points[:, 1], v, sig, tvector)
    dell = dot(J.T, r)
    # Solve the normal equations J'J pk = -J'r with a conjugate gradient method.
    # Hopefully the zero entries lead to some savings, but I haven't really checked.
    # There may also be a way to get savings by not multiplying out J'J,
    # but I don't know how to do that.
    pk = linalg.cg(dot(J.T, J), -dell)[0]
    #Using backtracking to get alpha
    #Using this method because it's what I know.
    #http://en.wikipedia.org/wiki/Wolfe_conditions
    alpha = 1.0  #"For Newton and quasi-Newton methods, the step \(\alpha_0 = 1\) should always be used as the initial trial step length."--pg. 59
    rho = 0.7  #between 0 and 1
    c = 0.1**6  # The "c_1" used in the first Wolfe condition; between 0 and 1, typically "quite small".
    xnew = x + alpha * pk
    rnew = residuals(points[:, 0], points[:, 1], xnew[0:9], sig,
                     xnew[9:x.shape[0]])
    # This is to prevent the step from sticking when the solution is outside the boundary.
    # I don't think it works very well.
    tired = False
    while (dot(rnew, rnew) > dot(r, r) + c * alpha * dot(dell, pk)
           or not (discr(xnew[0], xnew[1], xnew[2], xnew[3], xnew[4],
                         xnew[9:x.shape[0]]) >= 0).all()) and not tired:
        alpha = rho * alpha
        xnew = x + alpha * pk
        rnew = residuals(points[:, 0], points[:, 1], xnew[0:9], sig,
                         xnew[9:x.shape[0]])
        # Bail out once the step has shrunk this far and the discriminant constraint holds.
        if (alpha < 0.1**2) and (discr(xnew[0], xnew[1], xnew[2], xnew[3],
                                       xnew[4], xnew[9:x.shape[0]]) >= 0).all():
            print "warning: Boundary condition issues"
            tired = True
    return xnew[0:9], xnew[9:x.shape[0]]
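
The comments above wonder whether forming J'J explicitly can be avoided. One way, sketched below with scipy.sparse.linalg.LinearOperator, is to hand cg only the matrix-vector product v -> J'(J v); the gauss_newton_step helper and its names are illustrative, not part of the original example:

from scipy.sparse.linalg import LinearOperator, cg

def gauss_newton_step(J, r):
    # J is the (m x n) Jacobian, r the residual vector; J'J is never formed.
    m, n = J.shape
    normal_eq = LinearOperator((n, n), matvec=lambda v: J.T @ (J @ v), dtype=float)
    rhs = -J.T @ r  # the negative gradient, i.e. -dell in the function above
    pk, info = cg(normal_eq, rhs)
    if info != 0:
        print("warning: cg did not converge (info=%d)" % info)
    return pk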
Example #3
    def _compute_grad_solve_iterative(self, G, H, grad_loss_params, tol=1e-6):
        """Solve H v = grad_loss_params with conjugate gradient, warm-started
        from the previous solution when one is available, and return -G v."""
        from scipy.sparse import linalg

        if self._warm_start is None:
            v, convergence = linalg.cg(
                H.tondarray(), grad_loss_params.tondarray(), tol=tol)
        else:
            v, convergence = linalg.cg(
                H.tondarray(), grad_loss_params.tondarray(), tol=tol,
                x0=self._warm_start.tondarray())

        if convergence != 0:
            warnings.warn('Convergence of poisoning algorithm not reached!')

        v = CArray(v.ravel())

        # store v to be used as warm start at the next iteration
        self._warm_start = v

        gt = -G.dot(v.T)
        return gt.ravel()
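
Example #3 feeds the previous CG solution back in as x0, so each call starts from the last answer rather than from zero. A stripped-down sketch of the same warm-start pattern with plain NumPy arrays in place of CArray; the WarmStartedSolver class is illustrative, not part of that library:

from scipy.sparse.linalg import cg

class WarmStartedSolver:
    def __init__(self):
        self._warm_start = None  # last solution, reused as the next initial guess

    def solve(self, H, rhs):
        # x0 is None on the first call; afterwards cg starts from the previous solution.
        x, info = cg(H, rhs, x0=self._warm_start)
        if info != 0:
            print("warning: cg did not converge (info=%d)" % info)
        self._warm_start = x
        return x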
Example #4
File: cgtest.py  Project: ucb-sejits/akx
    import scipy.io.mmio
    matrix = scipy.io.mmio.mmread(filename).tocsr()
print >>sys.stderr, "done"

b = numpy.ones(matrix.shape[0])

calc_time = 0

if options.scipy:
    if scipy.version.version == '0.6.0':
        import scipy.linalg as linalg
    else:
        import scipy.sparse.linalg as linalg
    for i in xrange(5):
        cg_time = time.time()
        x, info = linalg.cg(matrix, b, maxiter=options.m)
        cg_time = time.time() - cg_time
        calc_time += cg_time
        print("time = " + str(cg_time))
    for i in xrange(5):
        cg_time = time.time()
        for j in xrange(options.m):
            dummy = matrix * x
        cg_time = time.time() - cg_time
        calc_time += cg_time
        print("mul_time = " + str(cg_time))
else:
    print >>sys.stderr, "Initializing akx...",
    import akxconfig
    if options.tb_num:
        akxconfig.threadcounts = [options.tb_num]
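
Example #4 depends on an options object and on a Matrix Market file parsed earlier in cgtest.py, and is written for Python 2. A self-contained Python 3 sketch of the same timing pattern, with a synthetic sparse system standing in for the file; the matrix size and iteration counts are arbitrary:

import time
import numpy
import scipy.sparse
import scipy.sparse.linalg as linalg

# Synthetic SPD tridiagonal matrix in place of the Matrix Market file.
n = 10000
matrix = scipy.sparse.diags([-1.0, 2.0, -1.0], [-1, 0, 1], shape=(n, n), format="csr")
b = numpy.ones(matrix.shape[0])

for i in range(5):
    cg_time = time.time()
    x, info = linalg.cg(matrix, b, maxiter=100)
    cg_time = time.time() - cg_time
    print("time = " + str(cg_time))

for i in range(5):
    cg_time = time.time()
    for j in range(100):
        dummy = matrix @ x
    cg_time = time.time() - cg_time
    print("mul_time = " + str(cg_time))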