# Exemplo n.º 1
# 0
def print_trans_rot_errors(gts, obj_id, ts_est, ts_est_old, Rs_est, Rs_est_old):
    """Print translation/rotation errors of the top pose estimate against the
    closest ground-truth pose of the given object.

    Args:
        gts: list of GT dicts with keys 'obj_id', 'cam_t_m2c', 'cam_R_m2c'.
        obj_id: object id whose GT instances are matched against.
        ts_est, ts_est_old: estimated translations after/before refinement;
            only index 0 is evaluated.
        Rs_est, Rs_est_old: estimated rotations after/before refinement;
            only index 0 is evaluated.

    Returns:
        (t_err, R_err): translation error vector w.r.t. the closest GT, and
        the rotation error (None if it could not be computed).
    """
    t_errs = []
    obj_gts = []

    # Collect the translation error against every GT instance of this object.
    for gt in gts:
        if gt['obj_id'] == obj_id:
            t_errs.append(ts_est[0] - gt['cam_t_m2c'].squeeze())
            obj_gts.append(gt)

    # Associate the estimate with the GT pose of smallest translation error.
    min_t_err_idx = np.argmin(np.linalg.norm(np.array(t_errs), axis=1))
    print(min_t_err_idx)
    print(np.array(t_errs).shape)
    print(len(obj_gts))
    gt = obj_gts[min_t_err_idx].copy()

    # Bug fix: R_err was previously unbound at the return statement whenever
    # the try-block failed (the bare except swallowed the error and the
    # return then raised NameError).
    R_err = None
    try:
        print('Translation Error before refinement')
        print(ts_est_old[0] - gt['cam_t_m2c'].squeeze())
        print('Translation Error after refinement')
        print(t_errs[min_t_err_idx])
        print('Rotation Error before refinement')
        print(pose_error.re(Rs_est_old[0], gt['cam_R_m2c']))
        print('Rotation Error after refinement')
        R_err = pose_error.re(Rs_est[0], gt['cam_R_m2c'])
        print(R_err)
    except Exception as e:
        # Best-effort diagnostics: keep evaluation running on malformed
        # input, but surface the problem instead of silently swallowing it.
        print('print_trans_rot_errors failed: {}'.format(e))

    return (t_errs[min_t_err_idx], R_err)
def main2():
    """Visualize rotation-error statistics for random and sampled rotations."""
    R_errors = []

    # Errors between independent pairs of uniformly random rotations.
    for _ in range(100000):
        R_gt = transform.random_rotation_matrix()[:3, :3]
        R_est = transform.random_rotation_matrix()[:3, :3]
        R_errors.append(pose_error.re(R_est, R_gt))
    plot_R_err_hist2(R_errors, '', bins=90, save=False)

    # Errors between viewsphere samples and randomly chosen partner views.
    azimuth_range = (0, 2 * np.pi)
    elev_range = (-0.5 * np.pi, 0.5 * np.pi)
    views, _ = view_sampler.sample_views(
        2563,
        100,
        azimuth_range,
        elev_range
    )

    Rs = []
    for view in views:
        partner = views[np.random.randint(0, len(views))]
        R_errors.append(pose_error.re(view['R'], partner['R']))
        Rs.append(view['R'])
    plot_R_err_hist2(R_errors, '', bins=45, save=False)

    plot_viewsphere_for_embedding(np.array(Rs), '', np.array(R_errors),
                                  save=False)

    plt.show()
    def refined_nearest_rotation(self,
                                 session,
                                 target_view,
                                 top_n,
                                 R_init=None,
                                 t_init=None,
                                 budget=10,
                                 epochs=3,
                                 high=6. / 180 * np.pi,
                                 obj_id=0,
                                 top_n_refine=1,
                                 target_bb=None):
        """Iteratively refine codebook rotation estimates for a target view.

        Seeds are the nearest-neighbor codebook rotations (or ``R_init``).
        Each epoch renders randomly perturbed rotations, scores them by
        cosine similarity between the target view's embedding and the
        rendered views' embeddings, and keeps the best candidates as seeds
        for the next epoch with a shrinking perturbation angle.

        Args:
            session: TF session used to run the encoder/similarity ops.
            target_view: query image; uint8 input is rescaled to [0, 1] and a
                batch dimension is added if missing.
            top_n: number of refined rotations (and boxes) to return.
            R_init: optional initial rotation that replaces codebook seeding.
            t_init: optional translation used when rendering with
                ``target_bb``.
            budget: renderings per seed rotation per epoch.
            epochs: number of refinement rounds; perturbation range shrinks
                as ``high / (epoch + 1)``.
            high: maximum perturbation angle (radians) in the first epoch.
            obj_id: object index passed to the renderer.
            top_n_refine: how many top candidates to keep per round.
            target_bb: optional bounding box; together with ``t_init`` the
                object is rendered at full resolution and cropped to it.

        Returns:
            Tuple ``(Rs, bbs)``: up to ``top_n`` refined rotation matrices
            and the corresponding object bounding boxes.
        """
        from sixd_toolkit.pysixd import transform, pose_error
        from sklearn.metrics.pairwise import cosine_similarity

        # Normalize the query image to float [0, 1] with a batch dimension.
        if target_view.dtype == 'uint8':
            target_view = target_view / 255.
        if target_view.ndim == 3:
            target_view = np.expand_dims(target_view, 0)

        # Embed the query and score it against the whole codebook at once.
        cosine_similar, orig_in_emb = session.run(
            [self.cos_similarity, self.normalized_embedding_query],
            {self._encoder.x: target_view})

        if top_n_refine == 1:
            idcs = np.argmax(cosine_similar, axis=1)
            # orig_cosine_sim = cosine_similar[0,idcs]
        else:
            # Partial sort: pick the top_n_refine highest similarities, then
            # order those indices by descending similarity.
            unsorted_max_idcs = np.argpartition(-cosine_similar.squeeze(),
                                                top_n_refine)[:top_n_refine]
            idcs = unsorted_max_idcs[np.argsort(
                -cosine_similar.squeeze()[unsorted_max_idcs])]
            # orig_cosine_sim = cosine_similar[0,idcs[0]]
        print('original cosine sim: ', cosine_similar[0, idcs])

        ### intitializing rotation estimates from existing codebook
        Rs = self._dataset.viewsphere_for_embedding[idcs].copy()
        # Keep only seeds that are far apart (> 80 deg) from every seed kept
        # so far, so the search covers distinct pose hypotheses rather than
        # near-duplicates.
        Rs_new = [Rs[0]]
        for R in Rs:
            res = [pose_error.re(R_new, R) for R_new in Rs_new]
            if np.min(res) > 80:
                Rs_new.append(R)

        if R_init is None:
            Rs = Rs_new[:]
        ######
        else:
            Rs = [R_init]

        top_n_new = len(Rs)
        max_cosine_sim = 0.0
        # Camera and rendering parameters from the dataset configuration.
        # NOTE(review): eval() on config strings — assumes a trusted config.
        K = eval(self._dataset._kw['k'])
        K = np.array(K).reshape(3, 3)
        render_dims = eval(self._dataset._kw['render_dims'])
        clip_near = float(self._dataset._kw['clip_near'])
        clip_far = float(self._dataset._kw['clip_far'])
        pad_factor = float(self._dataset._kw['pad_factor'])

        R_perts = []
        fine_views = []
        bbs = []

        for j in range(epochs):
            noof_perts = budget * top_n_new
            for i in range(noof_perts):
                if j > 0 and i == 0:
                    # From the second epoch on, re-seed the perturbation list
                    # with the current best rotations and skip re-rendering
                    # them (their views/bbs are already in fine_views/bbs).
                    R_perts = Rs
                    print(noof_perts)
                    # fine_views = list(view_best) if not isinstance(view_best, list) else view_best
                    # bbs = list(bb_best) if not isinstance(bbs, list) else bbs
                    continue
                if i < top_n_new and j == 0:
                    # First epoch: evaluate each unperturbed seed once.
                    R_off = np.eye(3, 3)
                else:
                    # Random axis-angle perturbation; the angle range shrinks
                    # with each epoch (high / (j + 1)).
                    rand_direction = transform.make_rand_vector(3)
                    rand_angle = np.random.uniform(0, high / (j + 1))
                    R_off = transform.rotation_matrix(rand_angle,
                                                      rand_direction)[:3, :3]

                R_pert = np.dot(R_off, Rs[i % top_n_new])
                R_perts.append(R_pert)

                if target_bb is not None and t_init is not None:
                    # Render at full resolution with the given translation
                    # and crop the target box so scale/offset match the query.
                    bgr_full, _ = self._dataset.renderer.render(
                        obj_id=obj_id,
                        W=render_dims[0],
                        H=render_dims[1],
                        K=K.copy(),
                        R=R_pert,
                        t=t_init,
                        near=clip_near,
                        far=clip_far,
                        random_light=False)
                    bgr = self._dataset.extract_square_patch(
                        bgr_full, target_bb, pad_factor)
                    obj_bb = np.array([0, 0, 1, 1])
                else:
                    bgr, obj_bb = self._dataset.render_rot(R_pert,
                                                           downSample=1,
                                                           obj_id=obj_id,
                                                           return_bb=True)

                fine_views.append(bgr)
                bbs.append(obj_bb)

            # Embed all rendered candidates and score them against the query.
            float_imgs = session.run(self.image_ph_tofloat,
                                     {self._image_ph: np.array(fine_views)})
            normalized_embedding_query = session.run(
                self.normalized_embedding_query, {self._encoder.x: float_imgs})
            cosine_sim = cosine_similarity(orig_in_emb,
                                           normalized_embedding_query)
            idx = np.argmax(cosine_sim, axis=1)
            R_perts = np.array(R_perts)

            # Accept the epoch's best candidates if they match or improve the
            # best similarity seen so far.
            if cosine_sim[0, idx] >= max_cosine_sim:

                max_cosine_sim = cosine_sim[0, idx]

                fine_views = np.array(fine_views)
                bbs = np.array(bbs)

                unsorted_max_idcs = np.argpartition(
                    -cosine_sim.squeeze(), top_n_refine)[:top_n_refine]
                idcs = unsorted_max_idcs[np.argsort(
                    -cosine_sim.squeeze()[unsorted_max_idcs])]

                Rs = R_perts[idcs]
                view_best = fine_views[idcs[0]]
                bb_best = bbs[idcs[0]]
                # if top_n_new > 1:´

                # cv2.imshow('refined',view_best)
                # cv2.imshow('orig_rendered',fine_views[0])
                # cv2.imshow('orig_in',x[0])
                # cv2.waitKey(0)

                view_best_new = [view_best.squeeze()]
                bb_best_new = [bb_best.squeeze()]

                ##  if more than one neighbor, look for far apart alternatives
                Rs_new = [Rs[0]]
                for r, R in enumerate(Rs):
                    res = [pose_error.re(R_new, R) for R_new in Rs_new]
                    if np.min(res) > 80:
                        Rs_new.append(R)
                        view_best_new.append(fine_views[idcs[r]])
                        bb_best_new.append(bbs[idcs[r]])

                Rs = Rs_new[:]
                fine_views = view_best_new[:]
                bbs = bb_best_new[:]

                top_n_new = len(view_best_new)

                print('refined')
                # cv2.imshow('chosen', fine_views[idx])
                # cv2.waitKey(0)

                # Rs_best = list(Rs)
            else:
                print('not refined')
        print('final cosine sim: ', max_cosine_sim)
        # idx = np.argmax(cosine_sim)

        return np.array(Rs)[0:top_n], bbs[0:top_n]
def eval_calc_errors(eval_args, eval_dir):
    """Calculate pose errors for all SIXD result files under ``eval_dir``.

    For every error type listed in the [METRIC] section, iterates over the
    per-scene result directories inside ``eval_dir``, compares each pose
    estimate against the GT poses of the same object, and writes the
    per-scene errors to ``errors_{scene_id:02d}.yml`` files below an
    error-signature subdirectory of ``eval_dir``.

    Args:
        eval_args: configparser-like object with [METHOD], [DATA],
            [EVALUATION] and [METRIC] sections.
        eval_dir: directory containing one numeric subdirectory per scene,
            each holding ``{im_id}_{obj_id}.yml`` result files.

    Returns:
        True on completion.
    """
    # Results for which the errors will be calculated
    #-------------------------------------------------------------------------------

    # result_base = '/path/to/results/'
    # result_paths = [
    #     pjoin(result_base, 'hodan-iros15_hinterstoisser'),
    #     # pjoin(result_base, 'hodan-iros15_tless_primesense'),
    # ]

    #[METHOD]
    method = eval_args.get('METHOD', 'METHOD')

    #[DATA]
    dataset = eval_args.get('DATA', 'DATASET')
    test_type = eval_args.get('DATA', 'CAM_TYPE')

    #[METRIC]
    # Top N pose estimates (with the highest score) to be evaluated for each
    # object in each image
    n_top = eval_args.getint(
        'EVALUATION', 'TOP_N_EVAL'
    )  # 0 = all estimates, -1 = given by the number of GT poses
    # NOTE(review): despite the "_str" name this is an int (getint); it is
    # only used via str() in the error signature below.
    n_top_str = eval_args.getint('METRIC', 'TOP_N')
    # Pose error function
    # NOTE(review): eval() on a config string — assumes a trusted config file.
    error_types = eval(eval_args.get(
        'METRIC', 'ERROR_TYPE'))  # 'vsd', 'adi', 'add', 'cou', 're', 'te'
    # VSD parameters
    vsd_delta = eval_args.getint('METRIC', 'VSD_DELTA')
    vsd_tau = eval_args.getint('METRIC', 'VSD_TAU')
    vsd_cost = eval_args.get('METRIC', 'VSD_COST')  # 'step', 'tlinear'

    result_path = eval_dir
    print('Processing: ' + result_path)
    # Other paths
    #-------------------------------------------------------------------------------

    for error_type in error_types:

        # Mask of path to the output file with calculated errors
        # errors_mpath = pjoin('{result_path}', '..', '..', 'eval', '{result_name}',
        #                      '{error_sign}', 'errors_{scene_id:02d}.yml')
        errors_mpath = '{result_path}/{error_sign}/errors_{scene_id:02d}.yml'

        # Error signature
        error_sign = 'error=' + error_type + '_ntop=' + str(n_top_str)
        if error_type == 'vsd':
            error_sign += '_delta={}_tau={}_cost={}'.format(
                vsd_delta, vsd_tau, vsd_cost)

        # Error calculation
        #-------------------------------------------------------------------------------

        # Select data type
        if dataset == 'tless':
            cam_type = test_type
            # ADD/ADI need denser geometry; other metrics use the plain CAD.
            if error_type in ['adi', 'add']:
                model_type = 'cad_subdivided'
            else:
                model_type = 'cad'
        else:
            model_type = ''
            cam_type = ''

        # Load dataset parameters
        dp = get_dataset_params(dataset,
                                model_type=model_type,
                                test_type=test_type,
                                cam_type=cam_type)

        # Load object models (only for metrics that need the 3D model points)
        if error_type in ['vsd', 'add', 'adi', 'cou', 'proj', 'projamb']:
            print('Loading object models...')
            models = {}
            for obj_id in range(1, dp['obj_count'] + 1):
                models[obj_id] = inout.load_ply(
                    dp['model_mpath'].format(obj_id))

        test_sensor = pjoin(dp['base_path'], dp['test_dir'])
        # Directories with results for individual scenes
        scene_dirs = sorted([
            d for d in glob.glob(os.path.join(result_path, '*'))
            if os.path.isdir(d) and os.path.basename(d).isdigit()
        ])
        # NOTE(review): Python 2 print statement — the rest of this function
        # uses print(); this line requires Python 2 to run as-is.
        print scene_dirs

        for scene_dir in scene_dirs:
            scene_id = int(os.path.basename(scene_dir))

            # Load info and GT poses for the current scene
            scene_info = inout.load_info(
                dp['scene_info_mpath'].format(scene_id))
            scene_gt = inout.load_gt(dp['scene_gt_mpath'].format(scene_id))

            res_paths = sorted(glob.glob(os.path.join(scene_dir, '*.yml')))

            errs = []
            im_id = -1
            depth_im = None
            for res_id, res_path in enumerate(res_paths):
                # t = time.time()

                # Parse image ID and object ID from the filename
                filename = os.path.basename(res_path).split('.')[0]
                im_id_prev = im_id
                im_id, obj_id = map(int, filename.split('_'))

                # Progress log every 10 result files.
                if res_id % 10 == 0:
                    dataset_str = dataset
                    if test_type != '':
                        dataset_str += ' - {}'.format(test_type)
                    print('Calculating error: {}, {}, {}, {}, {}, {}'.format(
                        error_type, method, dataset_str, scene_id, im_id,
                        obj_id))

                # Load depth image if VSD is selected
                # (only when the image id changed, to avoid reloading).
                if error_type == 'vsd' and im_id != im_id_prev:
                    depth_path = dp['test_depth_mpath'].format(scene_id, im_id)
                    # depth_im = inout.load_depth(depth_path)
                    depth_im = inout.load_depth2(depth_path)  # Faster
                    depth_im *= dp['cam']['depth_scale']  # to [mm]

                # Load camera matrix (only metrics that project points need K)
                if error_type in ['vsd', 'cou', 'proj', 'projamb']:
                    K = scene_info[im_id]['cam_K']

                # Load pose estimates
                res = inout.load_results_sixd17(res_path)
                ests = res['ests']

                # Sort the estimates by score (in descending order)
                ests_sorted = sorted(enumerate(ests),
                                     key=lambda x: x[1]['score'],
                                     reverse=True)

                # Select the required number of top estimated poses
                if n_top == 0:  # All estimates are considered
                    n_top_curr = None
                elif n_top == -1:  # Given by the number of GT poses
                    n_gt = sum(
                        [gt['obj_id'] == obj_id for gt in scene_gt[im_id]])
                    n_top_curr = n_gt
                else:
                    n_top_curr = n_top
                ests_sorted = ests_sorted[slice(0, n_top_curr)]

                # Evaluate each selected estimate against every GT pose of
                # the same object in this image.
                for est_id, est in ests_sorted:
                    est_errs = []
                    R_e = est['R']
                    t_e = est['t']

                    errs_gts = {}  # Errors w.r.t. GT poses of the same object
                    for gt_id, gt in enumerate(scene_gt[im_id]):
                        if gt['obj_id'] != obj_id:
                            continue

                        e = -1.0
                        R_g = gt['cam_R_m2c']
                        t_g = gt['cam_t_m2c']

                        # Dispatch to the configured pose-error function.
                        if error_type == 'vsd':
                            e = pose_error.vsd(R_e, t_e, R_g, t_g,
                                               models[obj_id], depth_im, K,
                                               vsd_delta, vsd_tau, vsd_cost)
                        elif error_type == 'add':
                            e = pose_error.add(R_e, t_e, R_g, t_g,
                                               models[obj_id])
                        elif error_type == 'adi':
                            e = pose_error.adi(R_e, t_e, R_g, t_g,
                                               models[obj_id])
                        elif error_type == 'proj':
                            e = pose_error.arp_2d(R_e, t_e, R_g, t_g,
                                                  models[obj_id], K)
                        elif error_type == 'projamb':
                            e = pose_error.arpi_2d(R_e, t_e, R_g, t_g,
                                                   models[obj_id], K)
                        elif error_type == 'cou':
                            e = pose_error.cou(R_e, t_e, R_g, t_g,
                                               models[obj_id],
                                               dp['test_im_size'], K)
                        elif error_type == 're':
                            e = pose_error.re(R_e, R_g)
                        elif error_type == 'te':
                            e = pose_error.te(t_e, t_g)

                        errs_gts[gt_id] = e

                    errs.append({
                        'im_id': im_id,
                        'obj_id': obj_id,
                        'est_id': est_id,
                        'score': est['score'],
                        'errors': errs_gts
                    })
                # print('Evaluation time: {}s'.format(time.time() - t))

            print('Saving errors...')
            errors_path = errors_mpath.format(result_path=result_path,
                                              error_sign=error_sign,
                                              scene_id=scene_id)

            misc.ensure_dir(os.path.dirname(errors_path))
            inout.save_errors(errors_path, errs)

            print('')
    print('Done.')
    return True
# Exemplo n.º 5
# 0
def _frac_within(R_errs, t_errs, max_deg=5, max_mm=50):
    """Fraction of samples with rotation error <= ``max_deg`` degrees AND
    translation error <= ``max_mm`` mm (the '/ 1.0 /' keeps the division a
    float division under Python 2)."""
    return len(
        R_errs[(R_errs <= max_deg) & (t_errs <= max_mm)]) / 1.0 / len(R_errs)


def compute_pose_errors(res_dict, args_latent, dataset):
    """Compute rotation/translation errors and ADD / projection recalls for
    the pose predictions stored in ``res_dict``.

    Args:
        res_dict: dict with ``res_dict['preds'][obj]['R_*' / 't_*'][view]``
            holding the target pose ('R_init'/'t_init'), the perturbed
            initialization ('*_init_pert') and refinement iterations 1..3.
        args_latent: config parser with a 'Data' section (num_obj,
            num_views, test_class) and a 'Visualization' section
            (rot_err_histogram).
        dataset: dataset object providing ``_kw`` and ``renderer.verts``.

    Returns:
        dict of summary metrics: mean ADD/projection recalls,
        <5deg & <5cm rates per refinement stage, and mean/median rotation
        error of the final iteration.
    """
    num_obj = args_latent.getint('Data', 'num_obj')
    num_views = args_latent.getint('Data', 'num_views')
    test_class = args_latent.get('Data', 'test_class')

    # NOTE(review): eval() on a config string — assumes a trusted config.
    K = eval(dataset._kw['k'])
    K = np.array(K).reshape(3, 3)
    # Hard-coded LineMOD intrinsics deliberately override the config value
    # above; drop this line to use the dataset's own camera matrix.
    K = np.array([[572.4114, 0, 320.], [0, 573.57043, 240], [0, 0, 1]])  # LM

    R_init_errs = []
    R_1_errs = []
    R_2_errs = []
    R_3_errs = []
    t_init_errs = []
    t_1_errs = []
    t_2_errs = []
    t_3_errs = []
    add_recalls_init = []
    add_recalls = []
    proj_recalls_init = []
    proj_recalls = []
    proj_recalls2 = []

    all_model_pts = [np.array(v) for v in dataset.renderer.verts]

    # Model "diameter" = length of the axis-aligned bounding-box diagonal.
    diameters = []
    for model_pts in all_model_pts:
        vec = model_pts.max(0) - model_pts.min(0)
        print(vec)
        diameters.append(np.linalg.norm(vec))
    print(diameters)

    for i in range(0, num_obj):
        for j in range(num_views):
            R_target = res_dict['preds'][i]['R_init'][j]
            t_target = res_dict['preds'][i]['t_init'][j]

            # Rotation errors: perturbed init and refinement iterations 1-3.
            R_init_errs.append(
                pose_error.re(R_target,
                              res_dict['preds'][i]['R_init_pert'][j]))
            R_1_errs.append(
                pose_error.re(R_target, res_dict['preds'][i]['R_1'][j]))
            R_2_errs.append(
                pose_error.re(R_target, res_dict['preds'][i]['R_2'][j]))
            R_3_errs.append(
                pose_error.re(R_target, res_dict['preds'][i]['R_3'][j]))
            # Translation errors for the same stages.
            t_init_errs.append(
                pose_error.te(t_target,
                              res_dict['preds'][i]['t_init_pert'][j]))
            t_1_errs.append(
                pose_error.te(t_target, res_dict['preds'][i]['t_1'][j]))
            t_2_errs.append(
                pose_error.te(t_target, res_dict['preds'][i]['t_2'][j]))
            t_3_errs.append(
                pose_error.te(t_target, res_dict['preds'][i]['t_3'][j]))

            # ADD / projection recalls before and after refinement.
            add_recalls_init.append(
                add_recall_diameter(res_dict['preds'][i]['R_init_pert'][j],
                                    res_dict['preds'][i]['t_init_pert'][j],
                                    R_target, t_target,
                                    {'pts': all_model_pts[i]}, diameters[i]))
            add_recalls.append(
                add_recall_diameter(res_dict['preds'][i]['R_3'][j],
                                    res_dict['preds'][i]['t_3'][j], R_target,
                                    t_target, {'pts': all_model_pts[i]},
                                    diameters[i]))
            proj_recalls_init.append(
                proj_recall_diameter(res_dict['preds'][i]['R_init_pert'][j],
                                     res_dict['preds'][i]['t_init_pert'][j],
                                     R_target, t_target,
                                     {'pts': all_model_pts[i]}, diameters[i],
                                     K))
            proj_recalls.append(
                proj_recall_diameter(res_dict['preds'][i]['R_3'][j],
                                     res_dict['preds'][i]['t_3'][j], R_target,
                                     t_target, {'pts': all_model_pts[i]},
                                     diameters[i], K))
            # NOTE(review): mixes the final rotation R_3 with the
            # second-iteration translation t_2 — presumably intentional
            # (matches the 'mean_proj_recall2' metric); confirm with caller.
            proj_recalls2.append(
                proj_recall_diameter(res_dict['preds'][i]['R_3'][j],
                                     res_dict['preds'][i]['t_2'][j], R_target,
                                     t_target, {'pts': all_model_pts[i]},
                                     diameters[i], K))

    R_init_errs = np.array(R_init_errs)
    R_1_errs = np.array(R_1_errs)
    R_2_errs = np.array(R_2_errs)
    R_3_errs = np.array(R_3_errs)
    t_init_errs = np.array(t_init_errs)
    t_1_errs = np.array(t_1_errs)
    t_2_errs = np.array(t_2_errs)
    t_3_errs = np.array(t_3_errs)

    res = {}

    res['mean_add_recall_init'] = np.mean(add_recalls_init)
    res['mean_add_recall'] = np.mean(add_recalls)
    res['mean_proj_recall_init'] = np.mean(proj_recalls_init)
    res['mean_proj_recall'] = np.mean(proj_recalls)
    res['mean_proj_recall2'] = np.mean(proj_recalls2)
    # <=5 deg rotation AND <=50 mm (5 cm) translation rates per stage.
    # (The original code assigned '<5deg_<5cm_R2_t2' twice with the same
    # value; the duplicate has been removed.)
    res['<5deg_<5cm_init'] = _frac_within(R_init_errs, t_init_errs)
    res['<5deg_<5cm_R1'] = _frac_within(R_1_errs, t_init_errs)
    res['<5deg_<5cm_R1_t1'] = _frac_within(R_1_errs, t_1_errs)
    res['<5deg_<5cm_R2_t1'] = _frac_within(R_2_errs, t_1_errs)
    res['<5deg_<5cm_R2_t2'] = _frac_within(R_2_errs, t_2_errs)
    res['<5deg_<5cm_R3_t2'] = _frac_within(R_3_errs, t_2_errs)
    res['<5deg_<5cm'] = _frac_within(R_3_errs, t_3_errs)
    res['mean_rot_err'] = np.mean(R_3_errs)
    res['median_rot_err'] = np.median(R_3_errs)
    print(res)

    print(('pose_errs_init: median: ' + str(np.median(R_init_errs)) +
           ', mean: ' + str(np.mean(R_init_errs)) + ', <5deg & <5cm: ' +
           str(_frac_within(R_init_errs, t_init_errs)) + ', <5deg: ' +
           str(len(R_init_errs[(R_init_errs <= 5)]) / 1.0 / len(R_init_errs)) +
           ', <5cm: ' +
           str(len(t_init_errs[t_init_errs <= 50]) / 1.0 / len(t_init_errs))))

    print(('pose_errs_final: median: ' + str(np.median(R_3_errs)) +
           ', mean: ' + str(np.mean(R_3_errs)) + ', <5deg & <5cm: ' +
           str(_frac_within(R_3_errs, t_3_errs)) + ', <5deg: ' +
           str(len(R_3_errs[(R_3_errs <= 5)]) / 1.0 / len(R_3_errs)) +
           ', <5cm: ' +
           str(len(t_3_errs[t_3_errs <= 50]) / 1.0 / len(t_3_errs))))

    if args_latent.getboolean('Visualization', 'rot_err_histogram'):

        plt.figure(1)
        plt.hist(R_1_errs, bins=180)
        plt.title('R_1_errs: median: ' + str(np.median(R_1_errs)) +
                  ', mean: ' + str(np.mean(R_1_errs)) + ', <5deg: ' +
                  str(len(R_1_errs[R_1_errs < 5]) / 1.0 / len(R_1_errs)))

        plt.figure(2)
        plt.hist(R_3_errs, bins=180)
        plt.title('R_3_errs: median: ' + str(np.median(R_3_errs)) +
                  ', mean: ' + str(np.mean(R_3_errs)) + ', <5deg & <5cm: ' +
                  str(_frac_within(R_3_errs, t_2_errs)) + ', <5deg: ' +
                  str(len(R_3_errs[(R_3_errs <= 5)]) / 1.0 / len(R_3_errs)))

        plt.figure(3)
        plt.hist(t_2_errs, bins=180)
        plt.title('t_2_errs: median: ' + str(np.median(t_2_errs)) +
                  ', mean: ' + str(np.mean(t_2_errs)) + ', <5cm: ' +
                  str(len(t_2_errs[t_2_errs <= 50]) / 1.0 / len(t_2_errs)))

        plt.show()

    return res
from sixd_toolkit.pysixd import pose_error, transform, view_sampler

# Rotation error of every sampled viewsphere rotation against the first
# sampled view's rotation.
R_errors = []

azimuth_range = (0, 2 * np.pi)
elev_range = (-0.5 * np.pi, 0.5 * np.pi)
views, _ = view_sampler.sample_views(2562, 100, azimuth_range, elev_range)

reference_R = views[0]['R']
Rs = [view['R'] for view in views]
R_errors.extend(pose_error.re(R, reference_R) for R in Rs)

from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt


def cart2sph(x, y, z):
    dxy = np.sqrt(x**2 + y**2)
    r = np.sqrt(dxy**2 + z**2)
    theta = np.arctan2(y, x)
    phi = np.arctan2(z, dxy)
    theta, phi = np.rad2deg([theta, phi])