Example #1
def anglerotation(R):
    """
    Compute the rotation angle of the rotation matrix R.
    :param R: the rotation matrix
    :return: the angle of rotation in degrees
    """
    theta = np.rad2deg(np.arccos((np.trace(R) - 1) / 2))
    return theta
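A quick sanity check of the formula theta = arccos((trace(R) - 1) / 2) used above, on a 90-degree rotation about the z-axis (a minimal usage sketch, not part of the original example):

import numpy as np

Rz = np.array([[0.0, -1.0, 0.0],
               [1.0,  0.0, 0.0],
               [0.0,  0.0, 1.0]])  # rotation of 90 degrees about z
print(anglerotation(Rz))           # expected output: 90.0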
Example #2
def rot_matrix_error(R0, R1, method = 'unit_quaternion_product'):
    """ R0, R1 are 3x3 or 4x4 homogeneous Rotation matrixes
        returns: the value of the error depending on the method """

    if ((R0.shape != (4,4)) and (R0.shape != (3,3))):
        print ("Error in the R0 input rotation matrix shape, must be 3x3 or 4x4")
        print(R0)
        return -1
    if ((R1.shape != (4,4)) and (R1.shape != (3,3))):
        print ("Error in the R1 input rotation matrix shape, must be 3x3 or 4x4")
        print(R1)
        return -1

    if R0.shape == (3,3):
        R = np.eye(4)
        R[:3,:3] = R0
        R0 = R

    if R1.shape == (3,3):
        R = np.eye(4)
        R[:3,:3] = R1
        R1 = R



    if method == 'unit_quaternion_product':
        # From the paper "Metrics for 3D Rotations: Comparison and Analysis", D. Huynh.
        # The 3D rotation error is computed using the inner product of unit quaternions.


        # We use the ROS tf library to convert the rotation matrices into quaternions
        from tf import transformations
        q0 = transformations.quaternion_from_matrix(R0)
        q1 = transformations.quaternion_from_matrix(R1)

        # We convert into unit quaternions
        q0 = q0 / np.linalg.norm(q0)
        q1 = q1 / np.linalg.norm(q1)

        # Find the error as defined in the paper: 1 - |<q0, q1>|
        rot_error = 1 - np.abs(np.dot(q0, q1))

    if method == 'angle':
        # Option 2: find the angle of the relative rotation. The quaternion metric above
        # is invalid for very large error angles (error > 90 degrees) and is imprecise
        # for large error angles (angle > 45 degrees).

        E = R1.dot(R0.T)
        from cv2 import Rodrigues
        rot_vector, J = Rodrigues(E[:3,:3])

        angle = np.linalg.norm(rot_vector)

        rot_error = np.rad2deg(angle)


    return rot_error
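The example above pulls in the ROS tf package and OpenCV for the conversions. If those dependencies are unavailable, the 'angle' variant can be sketched with SciPy alone (an assumed alternative, not part of the original code; requires scipy >= 1.4):

import numpy as np
from scipy.spatial.transform import Rotation

def rot_angle_error_scipy(R0, R1):
    # Angle of the relative rotation R1 * R0^T, in degrees.
    rel = Rotation.from_matrix(R1[:3, :3]) * Rotation.from_matrix(R0[:3, :3]).inv()
    return np.rad2deg(rel.magnitude())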
Example #3
def rotate(arr, theta, axis=0, backend='autograd', device=None):
    """
    A rotate function that allows taking gradient with regards to theta.

    :param arr: a 3D object in [len_y, len_x, len_z, n_channels].
    """
    if backend == 'autograd':
        warnings.warn('Rotate (with grad) in Autograd is not yet implemented. Use Pytorch backend instead.')
        axes = []
        for i in range(3):
            if i != axis:
                axes.append(i)
        return scipy.ndimage.rotate(arr, -anp.rad2deg(theta), reshape=False, axes=axes, mode='nearest', order=1)
    elif backend == 'pytorch':
        try:
            theta = theta.view(1)
        except AttributeError:
            # theta was passed as a plain scalar rather than a tensor.
            theta = tc.tensor(theta, requires_grad=False, device=device)
            theta = theta.view(1)
        axis_arrangement = [0, 1, 2, 3]
        # Move channel to the 2nd dimension.
        axis_arrangement[1], axis_arrangement[3] = axis_arrangement[3], axis_arrangement[1]
        # Move invariant axis to front.
        if axis != 0:
            q = axis_arrangement.index(axis)
            axis_arrangement[0], axis_arrangement[q] = axis_arrangement[q], axis_arrangement[0]
        if axis_arrangement[2] < axis_arrangement[3]:
            theta = -theta
        arr = permute_axes(arr, axis_arrangement, override_backend='pytorch')
        naught = cast(tc.tensor([0.], device=device), pytorch_dtype_query_mapping_dict[theta.dtype], override_backend='pytorch')
        m0 = tc.cat([tc.cos(theta), -tc.sin(theta), naught])
        m1 = tc.cat([tc.sin(theta), tc.cos(theta), naught])
        m = tc.stack([m0, m1]).view(1, 2, 3)
        m = cast(tile(m, [arr.shape[0], 1, 1], override_backend='pytorch'), pytorch_dtype_query_mapping_dict[arr.dtype], override_backend='pytorch')
        g = tc.nn.functional.affine_grid(m, arr.shape, align_corners=False)

        arr = tc.nn.functional.grid_sample(arr, g, padding_mode='border', align_corners=False)
        arr = permute_axes(arr, [axis_arrangement.index(0), axis_arrangement.index(1),
                                 axis_arrangement.index(2), axis_arrangement.index(3)], override_backend='pytorch')
        return arr
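The PyTorch branch above relies on helpers defined elsewhere in its repository (permute_axes, cast, tile, pytorch_dtype_query_mapping_dict). The core differentiable rotation it performs can be sketched in plain PyTorch as follows (a minimal sketch, assuming an input batch of shape [N, C, H, W] and theta given as a tensor so gradients can flow through it):

import torch

def rotate_2d_batch(x, theta):
    # x: tensor of shape [N, C, H, W]; theta: 0-d or 1-element tensor, angle in radians.
    t = theta.reshape(1).to(dtype=x.dtype, device=x.device)
    zero = torch.zeros(1, dtype=x.dtype, device=x.device)
    m0 = torch.cat([torch.cos(t), -torch.sin(t), zero])
    m1 = torch.cat([torch.sin(t), torch.cos(t), zero])
    m = torch.stack([m0, m1]).unsqueeze(0).repeat(x.shape[0], 1, 1)   # [N, 2, 3]
    grid = torch.nn.functional.affine_grid(m, x.shape, align_corners=False)
    return torch.nn.functional.grid_sample(x, grid, padding_mode='border', align_corners=False)

# Usage sketch: theta = torch.tensor(0.3, requires_grad=True) keeps the rotation angle differentiable.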
Example #4
File: camera.py Project: EgalYue/ivs_sim
 def fov(self):
     """ Calculate field of view angles (grads) from camera matrix """
     fovx = np.rad2deg(2 * atan(self.img_width / (2. * self.fx)))
     fovy = np.rad2deg(2 * atan(self.img_height / (2. * self.fy)))
     return fovx, fovy
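For the default camera used elsewhere on this page (f = 800 px, 640x480 image), this gives roughly 43.6 degrees horizontally and 33.4 degrees vertically. A standalone check (illustrative values, not from the original file):

import numpy as np
from math import atan

f, w, h = 800.0, 640.0, 480.0
fovx = np.rad2deg(2 * atan(w / (2.0 * f)))   # ~43.6 degrees
fovy = np.rad2deg(2 * atan(h / (2.0 * f)))   # ~33.4 degrees
print(fovx, fovy)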
Example #5
def test_rad2deg():
    fun = lambda x: 3.0 * np.rad2deg(x)
    check_grads(fun)(10.0 * npr.rand())
Example #6
def create_cam_distribution_in_YZ(cam=None,
                                  plane_size=(0.3, 0.3),
                                  theta_params=(0, 180, 10),
                                  r_params=(0.25, 1.0, 4),
                                  plot=False):
    """
    cam distritubution in YZ plane
    :param cam:
    :param plane_size:
    :param theta_params:
    :param phi_params:
    :param r_params:
    :param plot:
    :return:
    """
    if cam is None:
        # Create an initial camera on the center of the world
        cam = Camera()
        f = 800
        cam.set_K(fx=f, fy=f, cx=320, cy=240)  # Camera Matrix
        cam.img_width = 320 * 2
        cam.img_height = 240 * 2

    # We create a default plane with 4 points with a side length of w (meters)
    plane = Plane(origin=np.array([0, 0, 0]),
                  normal=np.array([0, 0, 1]),
                  size=plane_size,
                  n=(2, 2))
    # We extend the size of this plane to account for the deviation from a uniform pattern
    # plane.size = (plane.size[0] + deviation, plane.size[1] + deviation)

    d_space = np.linspace(r_params[0], r_params[1], r_params[2])
    t_list = []
    for d in d_space:
        xx, yy, zz = uniform_halfCircle_in_YZ(theta_params, d,
                                              False)  # YZ plane
        sphere_points = np.array(
            [xx.ravel(), yy.ravel(), zz.ravel()], dtype=np.float32)
        t_list.append(sphere_points)
    t_space = np.hstack(t_list)
    acc_row = r_params[2]
    acc_col = theta_params[2]
    accuracy_mat = np.zeros([
        acc_row, acc_col
    ])  # accuracy_mat is used to describe accuracy degree for marker area

    cams = []
    for t in t_space.T:
        cam = cam.clone()
        cam.set_t(-t[0], -t[1], -t[2])
        cam.set_R_mat(Rt_matrix_from_euler_t.R_matrix_from_euler_t(0.0, 0, 0))
        cam.look_at([0, 0, 0])

        radius = sqrt(t[0] * t[0] + t[1] * t[1] + t[2] * t[2])
        angle = np.rad2deg(np.arccos(t[1] / radius))
        cam.set_radius(radius)
        cam.set_angle(angle)

        plane.set_origin(np.array([0, 0, 0]))
        plane.uniform()
        objectPoints = plane.get_points()
        # print "objectPoints",objectPoints
        imagePoints = cam.project(objectPoints)
        # print "imagePoints\n",imagePoints
        if ((imagePoints[0, :] < cam.img_width) &
            (imagePoints[0, :] > 0)).all():
            if ((imagePoints[1, :] < cam.img_height) &
                (imagePoints[1, :] > 0)).all():
                cams.append(cam)

    if plot:
        planes = []
        plane.uniform()
        planes.append(plane)
        # plot3D(cams, planes) #TODO comment because of from mayavi import mlab

    return cams, accuracy_mat


# ==============================Test=================================================
#cams = create_cam_distribution(cam = None, plane_size = (0.3,0.3), theta_params = (0,360,10), phi_params =  (0,70,5), r_params = (0.25,1.0,4), plot=True)
#create_cam_distribution_in_YZ(cam = None, plane_size = (0.3,0.3), theta_params = (0,180,3), r_params = (0.3,0.9,3), plot=False)
# print "cams size: ",len(cams)
# -----------------------------Test for cam look at method------------------------------
# cam = Camera()
# f = 800
# cam.set_K(fx = f, fy = f, cx = 320, cy = 240)  #Camera Matrix
# cam.img_width = 320*2
# cam.img_height = 240*2
# cam.set_t(1,1,1,"world")
# cam.set_R_mat(Rt_matrix_from_euler_t.R_matrix_from_euler_t(0,np.deg2rad(0),0))
# cam.look_at([0,0,0])
# plane_size = (0.3,0.3)
# plane =  Plane(origin=np.array([0, 0, 0] ), normal = np.array([0, 0, 1]), size=plane_size, n = (2,2))
# plane.set_origin(np.array([0, 0, 0]))
# plane.uniform()
# planes = []
# planes.append(plane)
# cams = []
# cams.append(cam)
# plot3D(cams,planes)
#
# print "cam.R",cam.R
# print "cam.Rt",cam.Rt
# print "cam.P",cam.P
# ------------------Code End-----------Test for cam look at method------------------------------

# create_cam_distribution_rotation_around_Z(cam=None, plane_size=(0.3, 0.3), theta_params=(0, 360, 10), phi_params=(45, 45, 1),
#                             r_params=(3.0, 3.0, 1), plot=False)
# create_cam_distribution_square_in_XY(cam=None, plane_size=(0.3, 0.3), theta_params=(0, 360, 5), phi_params=(45, 45, 1),
#                             r_params=(3.0, 3.0, 1), plot=False)
Example #7
def rotate(im, angle):
    return _rotate(im, np.rad2deg(angle), reshape=False)
Example #8
def test_rad2deg():
    fun = lambda x : 3.0 * np.rad2deg(x)
    d_fun = grad(fun)
    check_grads(fun, 10.0*npr.rand())
    check_grads(d_fun, 10.0*npr.rand())
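What check_grads verifies here is a constant derivative: d/dx (3 * np.rad2deg(x)) = 3 * 180 / pi, roughly 171.887. A plain-NumPy finite-difference sketch of the same check (no autograd required):

import numpy as np

x = 10.0 * np.random.rand()
eps = 1e-6
fd = (3.0 * np.rad2deg(x + eps) - 3.0 * np.rad2deg(x - eps)) / (2.0 * eps)
print(fd, 3.0 * 180.0 / np.pi)   # both approximately 171.887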
Example #9
def test_rad2deg():
    unary_ufunc_check(lambda x: np.rad2deg(x) / 50.0)
Example #10
         theta = keypoints[0][j].angle
         dx_, dy_ = np.cos(np.deg2rad(theta)), -np.sin(np.deg2rad(theta))
         x_0, y_0, _ = tuple(
             h_apply(H[0, i], (x0 + s * dx_, y0 + s * dy_, 1)))
         x_1, y_1, _ = tuple(
             h_apply(H[0, i], (x0 - s * dy_, y0 + s * dx_, 1)))
         x_2, y_2, _ = tuple(
             h_apply(H[0, i], (x0 + s * dy_, y0 - s * dx_, 1)))
         x_3, y_3, _ = tuple(
             h_apply(H[0, i], (x0 - s * dx_, y0 - s * dy_, 1)))
         s_new = np.mean([
             np.linalg.norm((x_0 - x_3, y_0 - y_3)) / 2,
             np.linalg.norm(((x_1 - x_2, y_1 - y_2))) / 2
         ])
         angle_new = np.arctan2(-y_0 + y_3, x_0 - x_3)
         angle_new = np.rad2deg(angle_new + 2 * np.pi * (angle_new < 0))
         myplot.append((s_new, s, octave, layer, scale, theta, angle_new))
 # Angles
 a_orig = np.array([x[5] for x in myplot])
 a_new = np.array([x[6] for x in myplot])
 plt.scatter(a_orig, a_new)
 plt.title('Angle differences')
 plt.xlabel('Original')
 plt.ylabel('New')
 plt.show()
 sys.exit()
 # Plot sizes and octaves
 z = np.array([0 for x in myplot])
 sorig = np.array([x[1] for x in myplot])
 snew = np.array([x[0] for x in myplot])
 c_octaves = np.array([x[2] for x in myplot])
Example #11
def invar(mdl, args, inputs_train, targets_train, prev_hess, prev_eigval,
          prev_eigvec, coeff, ang_np, ang_sb, p_angles):
    # calculating hessian
    hess = mdl.hessian(mdl.params_flat)  # Calculating Hessian
    # Converting the Hessian to Tensor
    hess = torch.tensor(hess).float()
    # Extracting the eigenvalues and Eigen Vectors from the Calculated Hessian
    eigenvalues, eigenvec = torch.symeig(hess, eigenvectors=True)

    top = args.top  # This decides how many top eigenvectors are considered
    # Negative indexing is used because torch.symeig returns eigenvectors in
    # increasing order of eigenvalue, so the top (largest) ones come last.
    dom = eigenvec[:, -top:]
    dom = dom.float()
    # A random vector which is of the dim of variable "top" is being initialized
    alpha = torch.rand(top)

    # Finding the top vector
    # Representing alpha onto dominant eigen vector
    vec = (alpha * dom.float()).sum(1)
    vec = vec / torch.sqrt((vec * vec).sum())  # Normalization of top vector

    # Dummy Model for calculating gradient
    mdl_test = model.create_model(args, inputs_train, targets_train)

    # Finding gradient at top vec using Dummy network.
    mdl_test.params_flat = np.array(vec)

    # Find coeff and append.
    top_vec = mdl_test.params_flat
    c = torch.mv(hess.transpose(0, 1),
                 torch.tensor(mdl_test.params_flat).float())
    if np.size(coeff) == 0:
        coeff = c.detach().cpu().numpy()
        coeff = np.expand_dims(coeff, axis=0)
    else:
        coeff = np.concatenate(
            (coeff, np.expand_dims(c.detach().cpu().numpy(), axis=0)), 0)

    # Statistics of subspaces, (1) Angle between top subpaces
    eigenvalues_prev, eigenvec_prev = torch.symeig(prev_hess,
                                                   eigenvectors=True)
    # Is it not the same as the variable "dom" that was calculated earlier ?
    dom_prev = eigenvec_prev[:, -top:]

    # Compute the 1-norm of the product of the dominant subspaces (used as a measure of the angle between them)
    ang = np.linalg.norm(torch.mm(dom_prev, dom.transpose(0, 1)).numpy(), 1)
    ang_sb.append(ang)
    ang = np.rad2deg(subspace_angles(dom_prev, dom))
    ang_np.append(ang)

    # Calculating principal angles
    u, s, v = torch.svd(torch.mm(dom.transpose(0, 1), dom_prev))

    # arccos of the clamped singular values gives the principal angles in radians
    s = torch.acos(torch.clamp(s, min=-1, max=1))
    s = s * 180 / math.pi  # convert to degrees

    # Attach 's' to p_angles
    if np.size(p_angles) == 0:
        p_angles = s.detach().cpu().numpy()
        p_angles = np.expand_dims(p_angles, axis=0)
    else:
        p_angles = np.concatenate(
            (p_angles, np.expand_dims(s.detach().cpu().numpy(), axis=0)), 0)
    prev_hess = hess
    prev_eigval = eigenvalues
    prev_eigvec = eigenvec

    return hess, eigenvalues, eigenvec, coeff, ang_np, ang_sb, p_angles, top_vec
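torch.symeig, used above, has been deprecated and later removed in recent PyTorch releases. A small compatibility sketch (an assumption about the caller's environment, not part of the original example) that preserves the ascending eigenvalue order the slicing eigenvec[:, -top:] relies on:

import torch

def sym_eig(h):
    # Eigendecomposition of a symmetric matrix; eigenvalues returned in ascending order.
    try:
        return torch.linalg.eigh(h)                  # PyTorch >= 1.8
    except AttributeError:
        return torch.symeig(h, eigenvectors=True)    # older PyTorch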
Example #12
def get_angle_subspaces(U, V, verbose=True):
    """
    Gets the angle between two subspaces.
    """

    use_simple_meth = True

    U = orth(U)
    V = orth(V)

    len_1_u, len_2_u = U.shape
    len_1_v, len_2_v = V.shape

    if len_2_u > len_2_v:
        a = U.copy()
        U = V.copy()
        V = a.copy()

        len_1_u, len_2_u = U.shape
        len_1_v, len_2_v = V.shape

    if use_simple_meth:
        print(180 * subspace_angles(U, V) /
              np.pi)  #180 * orthogonality_matrix / np.pi
        angle = np.rad2deg(max(subspace_angles(U, V)))

    else:

        # normalizing subspaces:
        for i in range(0, len_2_u):
            U[:, i] *= (1 / np.linalg.norm(U[:, i]))
        for i in range(0, len_2_v):
            V[:, i] *= (1 / np.linalg.norm(V[:, i]))

        # construct M
        M = np.zeros((len_2_u, len_2_v))

        for i in range(0, len_2_u):
            for j in range(0, len_2_v):
                M[i, j] = np.dot(V[:, j], U[:, i])

        MMt = M.dot(M.transpose())

        det_MMt = np.linalg.det(MMt)

        # construct uu, the Gram matrix of U's columns
        uu = np.zeros((len_2_u, len_2_u))

        for i in range(0, len_2_u):
            for j in range(0, len_2_u):
                uu[i, j] = np.dot(U[:, j], U[:, i])
                #print(i,j,np.dot(V[:,j], U[:,i]))

        det_uu = np.linalg.det(uu)

        cos_square_theta = det_MMt / (det_uu + 1e-14)
        # print("cos_square_theta",cos_square_theta)
        theta = np.sqrt(cos_square_theta)

        # print("theta",theta)

        # print("angle",np.arccos(theta))
        angle = np.rad2deg(np.arccos(theta))

    if verbose:
        print("angle", angle)

    return angle
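A small standalone check of the scipy.linalg.subspace_angles call used in the "simple" branch above (illustrative data, not from the original example): two planes in R^3 that share one basis vector have principal angles of 0 and 90 degrees, so the maximum angle is 90.

import numpy as np
from scipy.linalg import subspace_angles

U = np.array([[1.0, 0.0], [0.0, 1.0], [0.0, 0.0]])   # span{e1, e2}
V = np.array([[1.0, 0.0], [0.0, 0.0], [0.0, 1.0]])   # span{e1, e3}
print(np.rad2deg(max(subspace_angles(U, V))))        # ~90.0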
Example #13
def rotate(im, angle):
    return _rotate(im, np.rad2deg(angle), reshape=False)
Example #14
def test_rad2deg(): unary_ufunc_check(lambda x : np.rad2deg(x)/50.0)
def test_sign():    unary_ufunc_check(np.sign)
Example #15
def test_rad2deg():
    unary_ufunc_check(lambda x: np.rad2deg(x) / 50.0, test_complex=False)
Example #16
def test_rad2deg(): unary_ufunc_check(lambda x : np.rad2deg(x)/50.0, test_complex=False)
def test_radians(): unary_ufunc_check(np.radians, test_complex=False)
Example #17
def train_model(args, mdl, mdl_test, results):
    coeff = []
    ang_sb=[]
    ang_np=[]
    p_angles = []
    all_w=[]
    results['args'] = args
    init_loss = mdl.loss(mdl.params_flat)
    init_grad_norm = np.linalg.norm(mdl.gradient(mdl.params_flat))
    print("\n===============================================================================================\n")

    print('\nInitial loss: {}, norm grad: {}\n'.format(init_loss, init_grad_norm))
    results['init_full_loss'] = init_loss
    results['init_full_grad_norm'] = init_grad_norm

    results['history1'] = []
    results['history1_columns'] = ['iter_no', 'batch_loss', 'batch_grad_norm', 'batch_param_norm']
    results['history2'] = []
    results['history2_columns'] = ['full_hessian', 'full_hessian_evals']

    for iter_no in tqdm(range(args.max_iterations), desc="Training Progress", dynamic_ncols=True):
        inputs, targets = get_batch_samples(iter_no, args, mdl)
        batch_loss = mdl.loss(mdl.params_flat, inputs, targets)
        batch_grad = mdl.gradient(mdl.params_flat, inputs, targets)
        batch_grad_norm = np.linalg.norm(batch_grad)
        batch_param_norm = np.linalg.norm(mdl.params_flat)

        if iter_no % args.freq == 0:

            # calculating hessian
            layer_interest = 2
            hess_len, hess_start = layer_weights(layer_size(args), layer_interest)

            hess = mdl.hessian(mdl.params_flat)                                 # Calculating Hessian

            new_hess = hess[hess_start:(hess_start + hess_len), hess_start:(hess_start + hess_len)]
            new_hess = torch.tensor(new_hess).float()                           # Converting the Hessian to Tensor
            eigenvalues, eigenvec = torch.symeig(new_hess, eigenvectors=True)   # Extracting the eigenvalues and eigenvectors from the calculated Hessian
            
            if iter_no == 0:
                prev_hess = new_hess
                prev_eigval = eigenvalues
                prev_eigvec = eigenvec
            
            top = args.top      # This decides how many top eigenvectors are considered
            # Negative indexing is used because torch.symeig returns eigenvectors in
            # increasing order of eigenvalue, so the top (largest) ones come last.
            dom = eigenvec[:, -top:]
            dom = dom.float()
            alpha=torch.rand(top)       # A random vector which is of the dim of variable "top" is being initialized

            # Finding the top vector
            vec = (alpha * dom.float()).sum(1)          # Projecting alpha onto the dominant eigenvectors
            vec = vec / torch.sqrt((vec * vec).sum())   # Normalization of the top vector

            # Finding the gradient at the top vector using the dummy network.
            mdl_test.params_flat = mdl.params_flat
            mdl_test.params_flat[hess_start:(hess_start + hess_len)] = np.array(vec)

            batch_grad_mdl_test = mdl_test.gradient(mdl_test.params_flat, inputs, targets)

            # Gradients are updated only for the layer of interest.
            mdl_test.params_flat[hess_start:(hess_start + hess_len)] -= batch_grad_mdl_test[hess_start:(hess_start + hess_len)] * args.learning_rate

            # Find coeff and append. But why do we need to find the coeffs?
            c = torch.mv(new_hess.transpose(0, 1), torch.tensor(mdl_test.params_flat[hess_start:(hess_start + hess_len)]).float())
            if np.size(coeff) == 0:
                coeff = c.detach().cpu().numpy()
                coeff = np.expand_dims(coeff, axis=0)
            else:
                coeff = np.concatenate((coeff,np.expand_dims(c.detach().cpu().numpy(),axis=0)),0) 

            # Statistics of subspaces: (1) angle between top subspaces
            eigenvalues_prev, eigenvec_prev = torch.symeig(prev_hess, eigenvectors=True)
            dom_prev = eigenvec_prev[:, -top:]          # Is it not the same as the variable "dom" that was calculated earlier?
            # The 1-norm of the product of the dominant subspaces, used as a measure of the angle between them
            ang = np.linalg.norm(torch.mm(dom_prev, dom.transpose(0, 1)).numpy(), 1)
            ang_sb.append(ang)
            ang = np.rad2deg(subspace_angles(dom_prev, dom))
            ang_np.append(ang)
            # Calculating principal angles
            u, s, v = torch.svd(torch.mm(dom.transpose(0, 1), dom_prev))
            s = torch.acos(torch.clamp(s, min=-1, max=1))   # principal angles in radians
            s = s * 180 / math.pi                           # convert to degrees
            # Attach 's' to p_angles
            if np.size(p_angles) == 0:
                p_angles = s.detach().cpu().numpy()
                p_angles = np.expand_dims(p_angles, axis=0)
            else:
                p_angles = np.concatenate((p_angles, np.expand_dims(s.detach().cpu().numpy(), axis=0)), 0)
            prev_hess = new_hess
            prev_eigval = eigenvalues
            prev_eigvec = eigenvec

        # Saving weights for all iterations
        if batch_grad_norm <= args.stopping_grad_norm:
            break
        mdl.params_flat -= batch_grad * args.learning_rate
        all_w.append(np.power(math.e,mdl.params_flat))
        #print('{:06d} {} loss: {:.8f}, norm grad: {:.8f}'.format(iter_no, datetime.now(), batch_loss, batch_grad_norm))

    final_loss = mdl.loss(mdl.params_flat)
    final_grad_norm = np.linalg.norm(mdl.gradient(mdl.params_flat))
    print('\nFinal loss: {}, norm grad: {}'.format(final_loss, final_grad_norm))
    args.suffix=args.results_folder+'/coeff.npy'
    np.save(args.suffix,coeff)
    args.suffix=args.results_folder+'/ang_sb.npy'
    np.save(args.suffix,ang_sb)
    args.suffix=args.results_folder+'/ang_np.npy'
    np.save(args.suffix,ang_np)   
    args.suffix=args.results_folder+'/p_angles.npy'
    np.save(args.suffix,p_angles)   
    args.suffix=args.results_folder+'/all_weights.npy'
    np.save(args.suffix,np.array(all_w))
    


    # Saving png plots
    coeff = torch.tensor(coeff)
    for i in range(coeff.shape[0]):
        a=torch.zeros(coeff[i].shape[0]).long()
        b=torch.arange(0, coeff[i].shape[0])
        c=torch.where(((coeff[i] > -0.1) & (coeff[i] < 0.1)),b,a)
        z = torch.zeros(coeff[i].shape[0]).fill_(0)
        z[torch.nonzero(c)] = coeff[i][torch.nonzero(c)]
        z = np.array(z)
        plt.plot(z)
    plt.xlabel('Dimension',fontsize=14)
    plt.ylabel('Coefficient',fontsize=14)
    pnpy = args.results_folder+'/plot1'
    plt.savefig(pnpy, format='png', pad_inches=5)

    return args.results_folder
Example #18
def test_rad2deg():
    fun = lambda x : 3.0 * np.rad2deg(x)
    d_fun = grad(fun)
    check_grads(fun, 10.0*npr.rand())
    check_grads(d_fun, 10.0*npr.rand())
Example #19
def test_rad2deg():
    fun = lambda x : 3.0 * np.rad2deg(x)
    check_grads(fun)(10.0*npr.rand())