def TestGradientWithFD(self, curr):
    analytic_gr = np.zeros(curr.shape[0], dtype='f')
    fd_gr = np.zeros(curr.shape[0], dtype='f')
    self.AddGradientTo(curr, analytic_gr)
    self.AddEstimatedGradientTo(curr, fd_gr)
    # print("Analytic grad: ", analytic_gr)
    # print("FD grad: ", fd_gr)
    na = norm_2(analytic_gr)
    nf = norm_2(fd_gr)
    if np.abs(na) < 1e-10:
        if np.abs(nf) > 1e-10:
            print(
                "B A D  (Zero gradient) !! Objective Function: testing gradients...norms: analytic: {0:9.8f}, FD: {1:9.8f}."
                .format(na, nf))
        else:
            print("Both gradients are 0")
    elif np.abs(na - nf) / na < 1e-5:
        print(
            "Objective Function: testing gradients...norms: analytic: {0:9.8f}, FD: {1:9.8f}."
            .format(na, nf))
    else:
        print(
            "B A D  !! Objective Function: testing gradients...norms: analytic: {0:9.8f}, FD: {1:9.8f}."
            .format(na, nf))
        print("Analytic gr:", analytic_gr)
        print("FD gr:", fd_gr)
        print((na - nf) / na)
        print(analytic_gr - fd_gr)
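
# Illustration only: a central-difference estimate like the one TestGradientWithFD
# compares against. The real AddEstimatedGradientTo lives elsewhere in this codebase;
# treating `curr` as a flat NumPy vector is an assumption made for this sketch.
def AddEstimatedGradientTo(self, curr, grad, h=1e-6):
    for i in range(curr.shape[0]):
        orig = curr[i]
        curr[i] = orig + h
        f_plus = self.ComputeValue(curr)
        curr[i] = orig - h
        f_minus = self.ComputeValue(curr)
        curr[i] = orig  # restore the perturbed entry
        grad[i] += (f_plus - f_minus) / (2.0 * h)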
def TestHessianWithFD(self, curr):
    hess_shape = (curr.data.shape[0], curr.data.shape[0])
    analytic_hess = np.zeros(hess_shape, np.float64)
    fd_hess = np.zeros(hess_shape, np.float64)
    self.AddHessianTo(curr, analytic_hess)
    self.AddEstimatedHessianTo(curr, fd_hess)
    # size = 12
    # print(analytic_hess[0:size, 0:size])
    # print(fd_hess[0:size, 0:size])
    na = norm_2(analytic_hess)
    nf = norm_2(fd_hess)

    if np.abs(na) < 1e-10:
        if np.abs(nf) > 1e-10:
            print(
                "B A D  (Zero hessian) !! Objective Function: testing hessians...norms: analytic: {0:9.8f}, FD: {1:9.8f}."
                .format(na, nf))
        else:
            print("Both hessians are 0")
    elif np.abs(na - nf) / na < 1e-5:
        print(
            "Objective Function: testing hessians...norms: analytic: {0:9.8f}, FD: {1:9.8f}."
            .format(na, nf))
    else:
        print(
            "B A D  !! Objective Function: testing hessians...norms: analytic: {0:9.8f}, FD: {1:9.8f}."
            .format(na, nf))
        print((na - nf) / na)
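
# Illustration only: the FD Hessian could be estimated by central-differencing the
# analytic gradient column by column. This is a sketch, not this codebase's
# AddEstimatedHessianTo; it assumes `curr.data` is the flat parameter vector
# (matching hess_shape above) and that AddGradientTo accumulates into its buffer.
def AddEstimatedHessianTo(self, curr, hess, h=1e-5):
    n = curr.data.shape[0]
    for i in range(n):
        orig = curr.data[i]
        g_plus = np.zeros(n)
        curr.data[i] = orig + h
        self.AddGradientTo(curr, g_plus)
        g_minus = np.zeros(n)
        curr.data[i] = orig - h
        self.AddGradientTo(curr, g_minus)
        curr.data[i] = orig  # restore the perturbed entry
        hess[:, i] += (g_plus - g_minus) / (2.0 * h)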
def ComputeValue(self, var):
    firstPos = var.GetEE(0)
    if firstPos.shape[0] != 3:
        raise ValueError("StartObj currently only works with an EE dimension of 3!")
    diff = firstPos - self.ee_start
    res = norm_2(diff) * self.weight
    return res
def ComputeValue(self, curr):
    val = 0
    for i in range(self.npts):
        theta_i = curr.GetTheta(i)
        ee_i = curr.GetEE(i)
        fk_i = FK(self.links, self.axes, theta_i)
        tres = norm_2(fk_i - ee_i)
        val += tres
    return self.inner_w * val * self.weight
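
# Illustration only: FK(self.links, self.axes, theta) above is the forward-kinematics
# map of the serial chain. Its real implementation is not shown on this page; a minimal
# sketch for revolute joints, assuming `links` are 3D link vectors and `axes` are unit
# rotation axes, could look like this.
import numpy as np

def rodrigues(axis, angle):
    # Rotation matrix about a unit axis by `angle` (Rodrigues' formula).
    K = np.array([[0.0, -axis[2], axis[1]],
                  [axis[2], 0.0, -axis[0]],
                  [-axis[1], axis[0], 0.0]])
    return np.eye(3) + np.sin(angle) * K + (1.0 - np.cos(angle)) * (K @ K)

def FK(links, axes, theta):
    # Accumulate rotations down the chain and sum the rotated link vectors to get
    # the end-effector position.
    R = np.eye(3)
    pos = np.zeros(3)
    for link, axis, t in zip(links, axes, theta):
        R = R @ rodrigues(np.asarray(axis, dtype=float), t)
        pos = pos + R @ np.asarray(link, dtype=float)
    return pos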
Example #5
    def minimize(self, objective, p, regFactor):

        optimizationConverged = False
        currentPoint = copy.deepcopy(p)
        for i in range(0, self.maxiterations):
            searchDirection = self.computeSearchDirection(
                objective, currentPoint, regFactor)
            # `epsilon` is a convergence tolerance on the step norm, defined elsewhere
            if norm_2(searchDirection) < epsilon:
                optimizationConverged = True
                break
            alpha, currentPoint = self.doLineSearch(objective, currentPoint,
                                                    searchDirection)

        return currentPoint, optimizationConverged
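
    # Illustration only: doLineSearch is not shown here. A simple backtracking sketch
    # consistent with how it is called above could look like the following; the `.data`
    # attribute used to take the step is an assumption about the point object.
    def doLineSearch(self, objective, point, direction, alpha0=1.0, beta=0.5, max_steps=20):
        base_val = objective.ComputeValue(point)
        alpha = alpha0
        for _ in range(max_steps):
            candidate = copy.deepcopy(point)
            candidate.data = candidate.data + alpha * direction
            if objective.ComputeValue(candidate) < base_val:
                return alpha, candidate
            alpha *= beta
        return alpha, point  # no decrease found; keep the current point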

        gr_newton = np.dot(np.transpose(J_newton), (forwardK_newton - tgt))
        gr_thesis = np.dot(np.transpose(J_thesis), (forwardK_thesis - tgt))

        p_newton = np.dot(-np.linalg.pinv(J_newton), (forwardK_newton - tgt))
        p_newton_threshold = np.linalg.norm(p_newton)
        p_thesis = -np.dot(np.linalg.pinv(A), gr_thesis)
        p_thesis_threshold = np.linalg.norm(p_thesis)

        stop_threshold = 0.01

        #        objective_values_newton.append(np.linalg.norm(forwardK_newton-tgt,2))
        #        objective_values_thesis.append(np.linalg.norm(forwardK_thesis-tgt,2))

        objective_values_newton.append(norm_2(forwardK_newton - tgt) * 0.5)
        objective_values_thesis.append(norm_2(forwardK_thesis - tgt) * 0.5)

        if not newton_reached and p_newton_threshold < stop_threshold:
            print("Newton reached the stop threshold")
            #            print("Newton threshold",p_newton_threshold)
            #            print("Thesis threshold",p_thesis_threshold)
            print("Iteration #", num_of_iterations)
            #            print ("Target position:",tgt)
            #            print ("Current position:",FK(links,w,theta_newton))
            #            print ("Current theta:",np.rad2deg(theta_newton))
            #            print("---------------------------------\n")
            newton_reached = True
            #break

        if not thesis_reached and p_thesis_threshold < stop_threshold:
            print("Thesis reached the stop threshold")
            print("Iteration #", num_of_iterations)
            thesis_reached = True
    NHess = objN.CalculateHessian(
        pN
    )  # this one may be different from the one used in computeSearchDirection
    THess = objT.CalculateHessian(
        pT
    )  # this one may be different from the one used in computeSearchDirection

    AddEpsToDiagonal(THess, Tfactor)
    AddEpsToDiagonal(NHess, Nfactor)
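
    # Illustration only: AddEpsToDiagonal above is the usual diagonal (Levenberg-style)
    # regularization of the Hessian before computing a search direction. A minimal
    # sketch, assuming a square NumPy matrix modified in place:
    def AddEpsToDiagonal_sketch(H, eps):
        H[np.diag_indices_from(H)] += eps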

    #print (np.linalg.eigvals(NHess))

    #print("N: descentDirection? ",NisDirectionDescent)
    #print("T: descentDirection? ",TisDirectionDescent)

    NStepSize = norm_2(NSearchDir)
    TStepSize = norm_2(TSearchDir)

    #print("Step N",step_newton)
    #print("P N",NSearchDir)

    #break

    #    isSparsityOk = testSparsityPattern(NHess,THess)
    #    print ("IsSparsityOK: ",isSparsityOk)

    #    print("Obj N value:",nVal)
    #    print("pN:",pN)
    #print("FK(pN):",FK(li,ax,pN))
    #    print("Obj T value:",tVal)
    #    print("N Step size:",NStepSize)
def ComputeValue(self, curr):
    forwardK = FK(self.links, self.axes, curr)
    res = norm_2(forwardK - self.tgt)
    return res * 0.5
def ComputeValue(self, curr):
    lastPos = curr.GetEE(curr.LastIndex)
    if lastPos.shape[0] != 3:
        raise ValueError("FinalObj currently only works with an EE dimension of 3!")
    diff = lastPos - self.ee_final
    return norm_2(diff) * self.weight
Example #10
def ComputeValue(self, curr):
    m = np.dot(self.A, curr.data)
    return norm_2(m) * self.weight
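
# Illustration only: the analytic gradient matching the objective above depends on what
# norm_2 returns. Assuming norm_2(x) == np.dot(x, x) (a squared Euclidean norm; an
# assumption, not confirmed by this page), the chain rule gives 2 * weight * A^T A x:
def AddGradientTo(self, curr, grad):
    m = np.dot(self.A, curr.data)
    grad += 2.0 * self.weight * np.dot(self.A.T, m)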