Example #1
def gsp_dataset(root='/home/lou00015/data/gsp', exp_coord=True):
    f = open(os.path.join(root, 'label.txt'))
    label_str = f.readline()
    f.close()
    label = [int(s) for s in label_str]
    # label = np.fromstring(label_str, dtype=int, sep=',')
    print(len(label), np.sum(label))
    x1, x2, y = [], [], []
    for i in range(4000):
        cloud = np.load(root+'/cloud_{}.npy'.format(str(i)))
        state = np.load(root+'/action_{}.npy'.format(str(i)))
        if exp_coord:
            pt = [state[3], state[7], state[11]]
            pose = np.array([[state[0], state[1], state[2]],
                             [state[4], state[5], state[6]],
                             [state[8], state[9], state[10]]])
            txyz = rot2vec(pose)
            action = np.concatenate((txyz, pt))
            cloud = cloud - pt
            v = voxelize(cloud, 0.1)

            x1.append(v)
            x2.append(action)
            y.append(label[i])
    np.savez_compressed(root+'/gsp_train.npz', x1=x1, x2=x2, y=y)
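The voxelize() helper itself is not shown on this page. Judging from the later examples, which reshape its output to (1, 32, 32, 32), it appears to turn a centred point cloud into a fixed 32x32x32 occupancy grid whose physical edge length is the second argument. A minimal sketch under that assumption (the name voxelize_sketch and the dim parameter are illustrative, not the original API):

import numpy as np

def voxelize_sketch(points, extent, dim=32):
    # Hypothetical stand-in for voxelize(points, extent): bin points from the
    # cube [-extent/2, extent/2)^3 into a dim x dim x dim occupancy grid.
    grid = np.zeros((dim, dim, dim), dtype=np.float32)
    idx = np.floor((points + extent / 2.0) / extent * dim).astype(int)
    inside = np.all((idx >= 0) & (idx < dim), axis=1)
    i, j, k = idx[inside].T
    grid[i, j, k] = 1.0
    return grid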
Example #2
def zb_data(root):
    env_cloud = np.load(root+'env.npy')
    # poses = [filename for filename in os.listdir(root) if filename.startswith("hand")]
    # objects = [filename for filename in os.listdir(root) if filename.startswith("object")]
    f = open(os.path.join(root, 'label.txt'))
    label_str = f.readline()
    f.close()
    label = np.fromstring(label_str, dtype=int, sep=',')
    # label = np.ones_like(label) - np.floor(label/100)
    # label = np.load(os.path.join(root, 'balanced_label.npy'))
    # label = label
    print(len(label), np.sum(label))
    x1 = []
    x2 = []
    lb = []
    for idx in range(3332):
        state = np.load(os.path.join(root, 'hand_pose_'+str(idx)+'.npy'))
        # print(state)
        # print([state[2], state[6], state[10]])
        # print(label[idx])
        # pcd = np.load(os.path.join(root, objects[idx]))
        # tmp_cloud = np.concatenate((env_cloud, pcd), axis=0)
        # print(np.shape(tmp_cloud))

        tmp_cloud = np.delete(env_cloud, np.where(env_cloud[:, 2] < 0.001), axis=0)
        pt = np.asarray([state[3], state[7], state[11]], dtype=float)
        pose = np.asarray([[state[0], state[1], state[2]],
                           [state[4], state[5], state[6]],
                           [state[8], state[9], state[10]]])
        # print(pose)
        rotated_cloud = rotate_cloud(pt, pose, tmp_cloud)
        vg = voxelize(rotated_cloud, 1.0)

        fig = plt.figure()
        ax = fig.add_subplot(projection='3d')
        ax.grid(False)
        ax.voxels(vg, facecolors=(1.0, 0.0, 0.0, 1.0), edgecolors='k')
        plt.axis('off')
        plt.savefig('log/voxel_'+str(idx)+'.png')
        plt.close(fig)  # avoid accumulating thousands of open figures across the loop

        x1.append(vg)
        x2.append(state)
        # print(label[idx])
        lb.append(label[idx])
        print(idx, label[idx])

        # sys.stdout.write("\r Processing {}/{}".format(idx, len(poses)))
        # sys.stdout.flush()
    np.savez_compressed(root+'zb.npz', x1=x1, x2=x2, y=lb)
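rotate_cloud(pt, pose, cloud) is another external helper used throughout these examples. From the call sites (a grasp point pt and a 3x3 rotation pose taken from the hand pose), it plausibly expresses the cloud in the gripper frame before voxelization. A hedged sketch, assuming pose is a rotation matrix and leaving the row/column convention open:

import numpy as np

def rotate_cloud_sketch(pt, pose, cloud):
    # Hypothetical version of rotate_cloud(): shift the cloud so the grasp
    # point becomes the origin, then rotate it into the gripper frame.
    # Depending on the convention, pose.T may be the correct factor instead.
    pt = np.asarray(pt, dtype=float)
    pose = np.asarray(pose, dtype=float)  # 3x3 rotation
    return (np.asarray(cloud, dtype=float) - pt) @ pose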
Example #3
def grasping(self, cloud, pt, pose_set):
    vg_set = []
    for pose in pose_set:
        r_cloud = rotate_cloud(pt, pose, cloud)
        vg = voxelize(r_cloud, 0.1)
        vg_set.append(vg)
    vg_set = np.reshape(vg_set, (len(vg_set), 1, 32, 32, 32))
    vg_set = torch.from_numpy(vg_set)
    vg_set = vg_set.type(torch.FloatTensor)
    vg_set = vg_set.cuda()
    grasping_scores = self.cnn3d(vg_set)
    grasping_scores = grasping_scores.detach().cpu().numpy()
    idx = np.argmax(grasping_scores)
    pose = pose_set[idx]
    return pose
Example #4
def carp(self, cloud, pt, pose_set):
    scores, collision_free_pose = [], []
    for i in range(len(pose_set)):
        pose = pose_set[i]
        tmp_cloud = rotate_cloud(pt, pose, cloud)
        env_vox = voxelize(tmp_cloud, 1.0)
        env_vox = np.asarray(env_vox, dtype=float)
        env_vox = torch.tensor(env_vox.reshape((1, 1, 32, 32, 32)))
        env_vox = env_vox.type(torch.FloatTensor)
        env_vox = env_vox.cuda()
        yhat = self.carp_model(env_vox)
        yhat = yhat.detach().cpu().numpy()
        scores.append(yhat)
    scores = np.asarray(scores)
    scores = scores.reshape((len(pose_set), ))
    for idx in list(np.where(scores > 0.8)[0]):
        collision_free_pose.append(pose_set[idx])
    return np.asarray(collision_free_pose)
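A plausible way the two methods above fit together: carp() keeps the candidate poses that the collision model scores above 0.8, and grasping() then ranks the survivors with the 3D CNN. The names below (planner, grasp_point, candidate_poses) are assumptions for illustration:

collision_free = planner.carp(cloud, grasp_point, candidate_poses)
if len(collision_free) > 0:
    best_pose = planner.grasping(cloud, grasp_point, collision_free)
else:
    print('No collision-free candidate pose found')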
Example #5
def feature_refinement(pc):
    """
    chessboard pointcloud refinement
    :param pc:
    :return:
    """
    # Fit the chessboard (calibration target) plane
    indices, coefficients = fit_chessboard_plane(pc)
    pc = pc[indices, :]

    # Fit the gap in the intensity distribution; it is later used as the pivot for binarizing the point cloud
    intensity_pivot = utils.fit_intensity_pivot(pc, DEBUG)

    # Normalize the plane parameters
    coefficients = np.array(coefficients) / -coefficients[-1]
    A, B, C, D = coefficients

    # # Project the noisy points onto the plane along the x-axis
    # pc[:, 0] = (1 - B * pc[:, 1] - C * pc[:, 2]) / A

    # Project the points onto the plane according to the ray model
    pc = ray_projection(pc, A, B, C, D)

    if DEBUG > 2:
        utils.visualize(pc, mode='cube', show=True)
    # Transform the chessboard onto the XOY plane to make registration against the chessboard model easier
    # With everything on the XOY plane, a 2D xy offset and a rotation angle are enough for registration
    rot_3d, transed_pcd = transfer_by_pca(pc[:, :3])
    # rot1_3d, transed_pcd = transfer_by_normal_vector(pc[:, :3], norm=np.array([A, B, C]))
    trans_3d = transed_pcd.mean(axis=0)
    pc[:, :3] = transed_pcd - trans_3d

    # uniformly re-sample the pointcloud
    pc = resample(pc)

    if DEBUG > 2:
        utils.visualize(pc, mode='cube', show=True)

    # voxelize to reduce the amount of computation
    pc = utils.voxelize(pc, voxel_size=0.002)

    print('Points after voxelize: {}'.format(len(pc)))

    return pc, trans_3d, rot_3d, intensity_pivot
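ray_projection() is not shown here either. Since the plane coefficients are normalized and the points appear to come from a LiDAR, a natural reading is that every point is scaled along its ray from the sensor origin until it lies exactly on the fitted plane. A sketch under that assumption (the function name and the column layout, xyz in the first three columns, are assumed):

import numpy as np

def ray_projection_sketch(pc, A, B, C, D):
    # Hypothetical ray-model projection: scale each point along the ray from
    # the sensor origin so it lands on the plane A*x + B*y + C*z + D = 0.
    # Intensity and any extra columns are kept unchanged.
    xyz = pc[:, :3]
    scale = -D / (xyz @ np.array([A, B, C], dtype=float))
    out = pc.copy()
    out[:, :3] = xyz * scale[:, None]
    return out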
Example #6
def carp_data(root):
    env_cloud = np.load(root+'env.npy')
    poses = [filename for filename in os.listdir(root) if filename.startswith("hand")]
    f = open(os.path.join(root, 'label.txt'))
    label_str = f.readline()
    f.close()
    label = np.fromstring(label_str, dtype=int, sep=',')
    # label = np.ones_like(label) - np.floor(label/100)
    # label = np.load(os.path.join(root, 'balanced_label.npy'))
    # label = label
    print(len(poses), len(label), label, np.sum(label))
    x1 = []
    x2 = []
    for idx in range(len(poses)):
        state = np.load(os.path.join(root, poses[idx]))
        # pptk.viewer(pcd)
        # env_cloud = get_pointcloud(depth_raw, cam_intrinsics, panda.depth_m)
        pt = np.asarray([state[3], state[7], state[11]], dtype=float)
        pose = np.asarray([[state[0], state[1], state[2]],
                           [state[4], state[5], state[6]],
                           [state[8], state[9], state[10]]])
        rotated_cloud = rotate_cloud(pt, pose, env_cloud)
        vg = voxelize(rotated_cloud, 0.2)

        # fig = plt.figure()
        #
        # ax = fig.gca(projection='3d')
        # ax.voxels(vg, facecolors='green', edgecolors='k')
        # plt.show()
        # input('s')
        # xyz = np.asarray([state[3], state[7], state[11]])
        # print(xyz, label[idx])
        x1.append(vg)
        x2.append(state)
        sys.stdout.write("\r Processing {}/{}".format(idx, len(poses)))
        sys.stdout.flush()
    np.savez_compressed(root+'carp_20.npz', x1=x1, x2=x2, y=label)
Example #7
def locate_chessboard(input):
    # limit
    pc = input[np.where((BOUND[0] < input[:, 0]) & (input[:, 0] < BOUND[1])
                        & (BOUND[2] < input[:, 1]) & (input[:, 1] < BOUND[3])
                        & (BOUND[4] < input[:, 2]) & (input[:, 2] < BOUND[5]))]
    pc = utils.voxelize(pc, voxel_size=configs['calibration']['RG_VOXEL'])

    # region growing segmentation
    segmentation = segmentation_ext.region_growing_kernel(
        pc, configs['calibration']['RG_GROUND_REMOVAL'],
        configs['calibration']['RG_NUM_NEIGHBOR'],
        configs['calibration']['RG_MIN_REGION_SIZE'],
        configs['calibration']['RG_MAX_REGION_SIZE'],
        configs['calibration']['RG_SMOOTH_TH'],
        configs['calibration']['RG_CURV_TH'])
    segmentation = segmentation[np.where(segmentation[:, 4] > -1)]

    if DEBUG > 1:
        seg_vis = np.copy(segmentation)
        seg_vis[:, 3] = seg_vis[:, 4] + 10
        utils.visualize(seg_vis, show=True)

    # find the segment that best fits the chessboard
    std_diag = LA.norm([(W + 1) * GRID_SIZE, (H + 1) * GRID_SIZE], ord=2)
    seg_costs = []
    for label_id in range(int(segmentation[:, 4].max() + 1)):
        seg = segmentation[np.where(segmentation[:, 4] == label_id)]

        # if len(seg) > 1500:
        #     continue

        # remove components that are too big
        diag = LA.norm(np.max(seg[:, :3], axis=0) - np.min(seg[:, :3], axis=0),
                       ord=2)
        if diag > std_diag * 2:
            continue

        # transfer to XOY plane
        rot1_3d, transed_pcd = transfer_by_pca(seg[:, :3])
        trans_3d = transed_pcd.mean(axis=0)
        seg[:, :3] = transed_pcd - trans_3d

        fixed_intensity_pivot = 50
        # Optimize the transform parameters between the actual point cloud and the chessboard model
        rst = minimize(matching_loss,
                       x0=np.zeros(3),
                       args=(
                           seg,
                           fixed_intensity_pivot,
                           H + 1,
                           W + 1,
                           GRID_SIZE,
                       ),
                       method='L-BFGS-B',
                       tol=1e-10,
                       options={"maxiter": 10000000})

        seg_costs.append([label_id, rst.fun / len(seg) * 1000])

        if DEBUG > 4:
            print(rst.fun / len(seg) * 1000)
            utils.visualize(seg, show=True, mode='cube')

    if len(seg_costs) == 0:
        return 0, 0, 0, 0, 0, 0
    # find the one with minimal cost
    seg_costs = np.array(seg_costs)
    label_id = seg_costs[np.argmin(seg_costs[:, 1]), 0]
    segmentation = segmentation[np.where(segmentation[:, 4] == label_id)]
    print('\nLocalization done. min cost={}'.format(seg_costs[:, 1].min()))

    X_MAX, Y_MAX, Z_MAX = np.max(segmentation[:, :3], axis=0) + 0.03
    X_MIN, Y_MIN, Z_MIN = np.min(segmentation[:, :3], axis=0) - 0.03
    return X_MIN, X_MAX, Y_MIN, Y_MAX, Z_MIN, Z_MAX
Example #8
    def __init__(self, directory_path_train, directory_path_test, out_path, clearance=10, preload=False,
                 height_min_dif=0.5, max_height=15.0, device="cpu", n_samples=2048, final_voxel_size=[3., 3., 4.],
                 rotation_augment=True, n_samples_context=2048, context_voxel_size=[3., 3., 4.],
                 mode='train', verbose=False, voxel_size_final_downsample=0.07, include_all=False, self_pairs_train=True):

        print(f'Dataset mode: {mode}')
        self.mode = mode
     
        if self.mode == 'train':
            directory_path = directory_path_train
        elif self.mode == 'test':
            directory_path = directory_path_test
        else:
            raise Exception('Invalid mode')
        self.verbose = verbose 
        self.include_all = include_all
        self.voxel_size_final_downsample = voxel_size_final_downsample
        self.n_samples_context = n_samples_context
        self.context_voxel_size = torch.tensor(context_voxel_size)
        self.directory_path = directory_path
        self.clearance = clearance
        self.self_pairs_train = self_pairs_train
        self.out_path = out_path
        self.height_min_dif = height_min_dif
        self.max_height = max_height
        self.rotation_augment = rotation_augment
        self.save_name = f'ams_{mode}_save_dict_{clearance}.pt'
        name_insert = self.save_name.split('.')[0]
        self.filtered_scan_path = os.path.join(
            out_path, f'{name_insert}_filtered_scans.pt')
        if self.mode == 'train':
            self.all_valid_combs_path = os.path.join(
                out_path, f'{name_insert}_all_valid_combs_{self_pairs_train}.pt')
        else:
            self.all_valid_combs_path = os.path.join(
                out_path, f'{name_insert}_all_valid_combs.pt')
        self.years = [2019, 2020]
   
        
        self.n_samples = n_samples
        self.final_voxel_size = torch.tensor(final_voxel_size)
        save_path = os.path.join(self.out_path, self.save_name)
        voxel_size_icp = 0.05

        if not preload:
            with open(os.path.join(directory_path, 'args.json')) as f:
                self.args = json.load(f)
            with open(os.path.join(directory_path, 'response.json')) as f:
                self.response = json.load(f)

            print(f"Recreating dataset, saving to: {self.out_path}")
            self.scans = [Scan(x, self.directory_path)
                          for x in self.response['RecordingProperties']]
            self.scans = [
                x for x in self.scans if x.datetime.year in self.years]
            if os.path.isfile(self.filtered_scan_path):
                with open(self.filtered_scan_path, "rb") as fp:
                    self.filtered_scans = pickle.load(fp)
            else:
                self.filtered_scans = filter_scans(self.scans, 3)
                with open(self.filtered_scan_path, "wb") as fp:
                    pickle.dump(self.filtered_scans, fp)

            self.save_dict = {}
            save_id = -1

            for scene_number, scan in enumerate(tqdm(self.filtered_scans)):
                # Gather scans within certain distance of scan center
                relevant_scans = [x for x in self.scans if np.linalg.norm(
                    x.center-scan.center) < 7]

                relevant_times = set([x.datetime for x in relevant_scans])

                # Group by dates
                time_partitions = {time: [
                    x for x in relevant_scans if x.datetime == time] for time in relevant_times}

                # Load and combine clouds from same date
                clouds_per_time = [torch.from_numpy(np.concatenate([load_las(
                    x.path) for x in val])).double().to(device) for key, val in time_partitions.items()]

                # Make xy 0 at center to avoid large values
                center_trans = torch.cat(
                    (torch.from_numpy(scan.center), torch.tensor([0, 0, 0, 0]))).double().to(device)
                
                clouds_per_time = [x-center_trans for x in clouds_per_time]
                # Extract square at center since only those will be used for grid
                clouds_per_time = [x[extract_area(x, center=np.array(
                    [0, 0]), clearance=self.clearance, shape='square'), :] for x in clouds_per_time]
                # Apply registration between each cloud and first in list, store transforms
                # First cloud does not need to be transformed

                clouds_per_time = registration_pipeline(
                    clouds_per_time, voxel_size_icp, self.voxel_size_final_downsample)

                # Remove below ground and above cutoff
                # Cut off slightly under ground height
                ground_cutoff = scan.ground_height - 0.05
                height_cutoff = ground_cutoff + max_height
                clouds_per_time = [x[torch.logical_and(
                    x[:, 2] > ground_cutoff, x[:, 2] < height_cutoff), ...] for x in clouds_per_time]

                clouds_per_time = [x.float().cpu() for x in clouds_per_time]

                save_id += 1
                save_entry = {'clouds': clouds_per_time,
                              'ground_height': scan.ground_height}

                self.save_dict[save_id] = save_entry
                if scene_number % 100 == 0 and scene_number != 0:
                    print(f"Progressbackup: {scene_number}!")
                    torch.save(self.save_dict, save_path)

            print(f"Saving to {save_path}!")
            torch.save(self.save_dict, save_path)
        else:
            self.save_dict = torch.load(save_path)

        if os.path.isfile(self.all_valid_combs_path):
            self.all_valid_combs = torch.load(self.all_valid_combs_path)
        else:
            self.all_valid_combs = []
            for idx, (save_id, save_entry) in enumerate(tqdm(self.save_dict.items())):
                clouds = save_entry['clouds']
                clouds = {index: x for index, x in enumerate(clouds) if x.shape[0] > 5000}

                if len(clouds) < 2:
                    if self.verbose:
                        print(f'Not enough clouds {idx}, skipping ')
                    continue
                
                cluster_min = torch.stack([torch.min(x, dim=0)[0][:3] for x in clouds.values()]).min(dim=0)[0]
                cluster_max = torch.stack([torch.max(x, dim=0)[0][:3] for x in clouds.values()]).max(dim=0)[0]
                clusters = {}
                for index, x in clouds.items():
                    labels, voxel_centers = voxelize(x[:, :3], start=cluster_min, end=cluster_max, size=self.final_voxel_size)
                    clusters[index] = labels

                valid_voxels = {}
                for ind, cluster in clusters.items():
                    cluster_indices, counts = cluster.unique(return_counts=True)
                    valid_indices = cluster_indices[counts > self.n_samples_context]
                    valid_voxels[ind] = valid_indices
                common_voxels = []
                for ind_0, ind_1 in combinations(valid_voxels.keys(), 2):
                    if ind_0 == ind_1:
                        continue
                    common_voxels.append([ind_0, ind_1, [x.item() for x in valid_voxels[ind_0] if x in valid_voxels[ind_1]]])
                valid_combs = []
                for val in common_voxels:
                    valid_combs.extend([(val[0], val[1], x) for x in val[2]])
                    # Self predict (only on index,since clouds shuffled and 1:1 other to same)
                    if self.mode == 'train' and self.self_pairs_train:
                        valid_combs.extend([(val[0], val[0], x) for x in val[2]])

                if len(valid_combs) < 1:
                    # No valid voxel combinations for this scene, skip it
                    continue

                for draw_ind, draw in enumerate(valid_combs):
                    voxel_center = voxel_centers[draw[2]]
                    cloud_ind_0 = draw[0]
                    voxel_0 = get_voxel(clouds[cloud_ind_0], voxel_center, self.context_voxel_size)
                    if voxel_0.shape[0] >= self.n_samples_context:
                        final_comb = (save_id, draw[0], draw[1], draw[2])
                        self.all_valid_combs.append({'combination': final_comb, 'voxel_center': voxel_center})
                    else:
                        print('Invalid')
                        continue

                n_same = 0
                n_dif = 0
                for comb_dict in self.all_valid_combs:
                    if comb_dict['combination'][1] == comb_dict['combination'][2]:
                        n_same += 1
                    else:
                        n_dif += 1
                print(f"n_same/n_dif: {n_same/n_dif}")

                torch.save(self.all_valid_combs, self.all_valid_combs_path)

        print('Loaded dataset!')
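Note that the voxelize() used in this dataset class has a different signature from the occupancy-grid variant in the earlier examples: it is called with start/end bounds and a per-axis voxel size, and is expected to return a per-point voxel label together with the centers of all voxels in the grid (indexed by that label). A minimal PyTorch sketch of such a helper, purely as an assumption about its behaviour:

import torch

def voxelize_labels_sketch(points, start, end, size):
    # Hypothetical grid-binning voxelize(): assign each point a flat voxel
    # index over the box [start, end) with per-axis edge lengths `size`, and
    # return the centers of every voxel in that grid.
    dims = torch.ceil((end - start) / size).long()            # voxels per axis
    idx3 = ((points - start) / size).long()
    idx3 = torch.minimum(torch.maximum(idx3, torch.zeros_like(idx3)), dims - 1)
    labels = idx3[:, 0] * dims[1] * dims[2] + idx3[:, 1] * dims[2] + idx3[:, 2]
    grid = torch.stack(torch.meshgrid(
        [torch.arange(int(d)) for d in dims], indexing='ij'), dim=-1).reshape(-1, 3)
    voxel_centers = start + (grid + 0.5) * size
    return labels, voxel_centers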
Example #9
def gsp_test():
    wd = '/home/lou00015/data/gsp_test/'
    model = GSP3d().cuda()
    model.load_state_dict(torch.load('gsp.pt'))
    model.eval()
    cid = vrep.simxStart('127.0.0.1', 19997, True, True, 5000, 5)
    eid = 0
    nb_grasp = 300
    if cid != -1:
        pos = [0, 0, 0.15]
        while True:
            vrep.simxStartSimulation(cid, vrep.simx_opmode_blocking)
            panda = Robot(cid)
            obj_name, obj_hdl = add_object(cid, 'imported_part_0', pos)
            time.sleep(1.0)
            cloud = panda.get_pointcloud()
            centroid = np.average(cloud, axis=0)
            if len(cloud) == 0:
                print('no cloud found')
                continue
            elif centroid[2] > 0.045:
                print('perception error')
                continue
            # np.save(wd + 'cloud_' + str(eid) + '.npy', cloud) # save point cloud
            cloud = np.delete(cloud, np.where(cloud[:, 2] < 0.015), axis=0)
            v = voxelize(cloud - centroid, 0.1)
            pose_set, pt_set = grasp_pose_generation(45, cloud, nb_grasp)
            x1, x2 = [], []
            emptyBuff = bytearray()
            for i in range(nb_grasp):
                pose = pose_set[i]
                pt = pt_set[i]
                landing_mtx = np.asarray([
                    pose[0][0], pose[0][1], pose[0][2], pt[0], pose[1][0],
                    pose[1][1], pose[1][2], pt[1], pose[2][0], pose[2][1],
                    pose[2][2], pt[2]
                ])
                x1.append(v)
                x2.append(landing_mtx)
            x1, x2 = np.stack(x1), np.stack(x2)
            X1 = torch.tensor(x1.reshape((x2.shape[0], 1, 32, 32, 32)),
                              dtype=torch.float,
                              device=device)
            X2 = torch.tensor(x2.reshape((x2.shape[0], 12)),
                              dtype=torch.float,
                              device=device)
            yhat = model(X1, X2)
            yhat = yhat.detach().cpu().numpy()
            scores = np.asarray(yhat)
            scores = scores.reshape((nb_grasp, ))
            g_index = np.argmax(scores)
            print('Highest score: {}, the {}th.'.format(
                str(scores[g_index]), str(g_index)))
            pose = pose_set[g_index]
            pt = centroid
            landing_mtx = np.asarray([
                pose[0][0], pose[0][1], pose[0][2], pt[0], pose[1][0],
                pose[1][1], pose[1][2], pt[1], pose[2][0], pose[2][1],
                pose[2][2], pt[2]
            ])
            vrep.simxCallScriptFunction(cid, 'landing',
                                        vrep.sim_scripttype_childscript,
                                        'setlanding', [], landing_mtx, [],
                                        emptyBuff, vrep.simx_opmode_blocking)
            ending_mtx = [
                pose[0][0], pose[0][1], pose[0][2], pt[0], pose[1][0],
                pose[1][1], pose[1][2], pt[1], pose[2][0], pose[2][1],
                pose[2][2], pt[2] + 0.15
            ]
            vrep.simxCallScriptFunction(cid, 'ending',
                                        vrep.sim_scripttype_childscript,
                                        'setending', [], ending_mtx, [],
                                        emptyBuff, vrep.simx_opmode_blocking)
            time.sleep(1.0)
            print('executing experiment %d: ' % g_index)
            print('at: ', pt)
            vrep.simxCallScriptFunction(cid, 'Sphere',
                                        vrep.sim_scripttype_childscript,
                                        'grasp', [], [], [], emptyBuff,
                                        vrep.simx_opmode_blocking)
            while True:
                res, finish = vrep.simxGetIntegerSignal(
                    cid, "finish", vrep.simx_opmode_oneshot_wait)
                if finish == 18:
                    res, end_pos = vrep.simxGetObjectPosition(
                        cid, obj_hdl, -1, vrep.simx_opmode_blocking)
                    break
            if end_pos[2] > 0.05:
                label = 1
            else:
                label = 0
            print(label)
            # f = open(wd + 'label.txt', 'a+')
            # f.write(str(label))
            # f.close()
            eid += 1
    else:
        print(
            'Failed to connect to simulation (V-REP remote API server). Exiting.'
        )
    exit()
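For reference, the 12-element landing_mtx built above is the row-major flattening of a 3x4 [R | t] matrix, which is exactly the layout the dataset loaders in Examples #1, #2 and #6 unpack (indices 3, 7 and 11 hold the translation). A small illustrative helper pair, not part of the original code:

import numpy as np

def pack_hand_pose(pose, pt):
    # Flatten a 3x3 rotation `pose` and a 3-vector grasp point `pt` into the
    # 12-element row-major [R | t] layout used above.
    m = np.hstack([np.asarray(pose, dtype=float),
                   np.asarray(pt, dtype=float).reshape(3, 1)])
    return m.reshape(-1)

def unpack_hand_pose(state):
    # Inverse of pack_hand_pose(): recover the rotation matrix and translation.
    m = np.asarray(state, dtype=float).reshape(3, 4)
    return m[:, :3], m[:, 3]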