# ---- Example 1 ----
    def __init__(self, root, camera='kinect', split='train', num_points=20000, voxel_size=0.005, remove_outlier=True):
        """Set up dataset parameters and preload GraspNet paths and grasp labels.

        Args:
            root (str): GraspNet dataset root directory.
            camera (str): camera name, e.g. 'kinect'.
            split (str): dataset split, e.g. 'train'.
            num_points (int or None): point budget per cloud; None keeps all points.
            voxel_size (float): voxel size used for downsampling.
            remove_outlier (bool): whether to filter outlier points.
        """
        assert num_points is None or num_points <= 50000

        # Basic configuration.
        self.root = root
        self.split = split
        self.num_points = num_points
        self.voxel_size = voxel_size
        self.remove_outlier = remove_outlier

        # Sampling / scoring hyper-parameters.
        self.num_sample = 10
        self.dist_thresh = 0.01
        self.score_thresh = 0.11

        # Index the requested split and fetch its file lists and labels.
        dataset = GraspNet(root, camera=camera, split=split)
        (self.rgbpath, self.depthpath, self.labelpath,
         self.metapath, self.scenename) = dataset.loadData()
        # NOTE(review): 'retrun_collision' looks like a typo for
        # 'return_collision' -- kept as-is; verify against the installed API.
        self.grasp_labels = dataset.loadGraspLabels(retrun_collision=True)

        # Discretization of the grasp label space: views x angles x depths.
        self.num_views, self.num_angles, self.num_depths = 300, 12, 4
        self.viewpoints = generate_views(self.num_views)
def generate_scene_rectangle_grasp(sceneId, dump_folder, camera):
    """Dump per-annotation rectangle grasps for one scene.

    Converts each of the 256 6d grasp annotations of ``sceneId`` into a
    rectangle grasp group and saves it to
    ``<dump_folder>/scene_%04d/<camera>/%04d.npy``.

    Args:
        sceneId (int): scene index to process.
        dump_folder (str): root output directory.
        camera (str): camera name, e.g. 'kinect' or 'realsense'.
    """
    g = GraspNet(graspnet_root, camera=camera, split='all')
    objIds = g.getObjIds(sceneIds=sceneId)
    grasp_labels = g.loadGraspLabels(objIds)
    collision_labels = g.loadCollisionLabels(sceneIds=sceneId)
    # makedirs(..., exist_ok=True) replaces the exists()/mkdir pair: it is
    # race-free and also creates any missing parent of dump_folder.
    camera_dir = os.path.join(dump_folder, 'scene_%04d' % sceneId, camera)
    os.makedirs(camera_dir, exist_ok=True)
    for annId in tqdm(range(256), 'Scene:{}, Camera:{}'.format(sceneId, camera)):
        # fric_coef_thresh=1.0 keeps every friction level (no filtering here).
        _6d_grasp = g.loadGrasp(sceneId=sceneId, annId=annId, format='6d',
                                camera=camera, grasp_labels=grasp_labels,
                                collision_labels=collision_labels,
                                fric_coef_thresh=1.0)
        rect_grasp_group = _6d_grasp.to_rect_grasp_group(camera)
        rect_grasp_group.save_npy(os.path.join(camera_dir, '%04d.npy' % annId))
# ---- Example 3 ----
__author__ = 'mhgou'
__version__ = '1.0'

# GraspNetAPI example for checking the data completeness.
# change the graspnet_root path

from graspnetAPI import GraspNet

if __name__ == '__main__':

    ####################################################################
    graspnet_root = '/home/gmh/graspnet'  ### ROOT PATH FOR GRASPNET ###
    ####################################################################

    # Run the same completeness check once per camera.
    for camera in ('kinect', 'realsense'):
        g = GraspNet(graspnet_root, camera, 'all')
        if g.checkDataCompleteness():
            print('Check for {} passed'.format(camera))
# ---- Example 4 ----
# Setup section of a grasp-generation script. NOTE(review): this appears to be
# a file fragment -- GraspNet is used below but its import is not visible here.
from tqdm import tqdm
import os
import time
import cv2

# Gripper geometry (presumably meters -- TODO confirm units) and sample sizes.
height = 0.02
depth_base = 0.02
grasp_depth = 0.02
grasp_width = 0.08
num_sample = 2000          # number of grasp candidates to sample
grasp_points_num = 750     # number of points per grasp fed downstream
DUMP_DIR = './dump/'       # output directory for results
CAMERA = 'kinect'

#g = GraspNet('/home/minghao/graspnet/', camera='kinect', split='test')
g = GraspNet('/ssd1/graspnet/', camera=CAMERA, split='test')
sceneIds = g.getSceneIds()
# Dead code kept as a no-op module-level string: torch model-loading logic
# referencing 'args', which is not defined in this fragment.
'''
is_resume = 0
if args.load_model and args.load_epoch != -1:
    is_resume = 1

if is_resume or args.mode == 'test':
    model = torch.load(args.load_model, map_location='cuda:{}'.format(args.gpu))
    model.device_ids = [args.gpu]
    print('load model {}'.format(args.load_model))
else:
    model = PointNetCls(num_points=grasp_points_num, input_chann=point_channel, k=3)
'''

input_channels = 3
# ---- Example 5 ----
# Load 6d grasps for one annotation, filter them by collision with the full
# object models, and dump the surviving grasp array to disk.
from graspnetAPI import GraspNet
from graspnetAPI import GraspGroup
import numpy as np
import sys
import os
# Make the repository root importable so 'rgbmatter' resolves.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(ROOT_DIR)
from rgbmatter.configs import get_config_rgbmatter
from rgbmatter.util import collision_detection_with_full_models

config = get_config_rgbmatter()
graspnet_root = config['dataset_path']  # ROOT PATH FOR GRASPNET

sceneId = 22
annId = 100

# initialize a GraspNet instance
g = GraspNet(graspnet_root, camera='kinect', split='train')

# load 6d grasps of scene 22 with annotation id = 100, camera = kinect and fric_coef_thresh = 0.1
grasp = g.loadGrasp(sceneId=sceneId,
                    annId=annId,
                    format='6d',
                    camera='kinect',
                    fric_coef_thresh=0.1)
print('grasp num after load grasp: ', grasp.grasp_group_array.shape)
# Drop grasps that collide with the full object models of this annotation.
grasp = collision_detection_with_full_models(grasp, annId)
print('grasp after collision detection: ', grasp.grasp_group_array.shape)
# Save the raw grasp array (not the GraspGroup wrapper).
np.save('a.npy', grasp.grasp_group_array)
# ---- Example 6 ----
# GraspNetAPI example for loading grasp for a scene.
# change the graspnet_root path

from graspnetAPI import GraspNet  # fix: GraspNet was used below without being imported
import open3d as o3d
import cv2
import numpy as np

####################################################################
graspnet_root = '/disk1/graspnet'  # ROOT PATH FOR GRASPNET
####################################################################

sceneId = 1
annId = 3

# initialize a GraspNet instance
g = GraspNet(graspnet_root, camera='kinect', split='train')

# load grasps of scene 1 with annotation id = 3, camera = kinect and fric_coef_thresh = 0.2
_6d_grasp = g.loadGrasp(sceneId=sceneId,
                        annId=annId,
                        format='6d',
                        camera='kinect',
                        fric_coef_thresh=0.2)
print('6d grasp:\n{}'.format(_6d_grasp))

# _6d_grasp is a GraspGroup instance defined in grasp.py
print('_6d_grasp:\n{}'.format(_6d_grasp))

# index a single Grasp out of the group
grasp = _6d_grasp[0]
print('_6d_grasp[0](grasp):\n{}'.format(grasp))
# ---- Example 7 ----
from graspnetAPI import GraspNet
import open3d as o3d
import cv2

# GraspNetAPI example for loading grasp for a scene.
# change the graspnet_root path

####################################################################
graspnet_root = '/home/gmh/graspnet'  # ROOT PATH FOR GRASPNET
####################################################################

sceneId = 1
annId = 3

# initialize a GraspNet instance
g = GraspNet(graspnet_root, camera='kinect', split='train')

# load the scene's 6d grasps at fric_coef_thresh = 0.2
_6d_grasp = g.loadGrasp(sceneId=sceneId, annId=annId, format='6d',
                        camera='kinect', fric_coef_thresh=0.2)
print('6d grasp:\n{}'.format(_6d_grasp))

# visualize with open3d: the scene point cloud plus 20 randomly sampled grasps
scene_cloud = g.loadScenePointCloud(sceneId=sceneId, annId=annId, camera='kinect')
sampled_grasps = _6d_grasp.random_sample(numGrasp=20)
o3d.visualization.draw_geometries([scene_cloud] + sampled_grasps.to_open3d_geometry_list())
# ---- Example 8 ----
__author__ = 'mhgou'
__version__ = '1.0'

# GraspNetAPI example for visualization.
# change the graspnet_root path

from graspnetAPI import GraspNet

####################################################################
graspnet_root = '/home/gmh/graspnet'  # ROOT PATH FOR GRASPNET
####################################################################

# initialize a GraspNet instance
g = GraspNet(graspnet_root, camera='kinect', split='train')

# grasps of object 0
g.showObjGrasp(objIds=0, show=True)

# 6d object poses in scene 0
g.show6DPose(sceneIds=0, show=True)

# rectangle grasps of scene 0 from the realsense camera
g.showSceneGrasp(sceneId=0, camera='realsense', annId=0, format='rect', numGrasp=20)

# 6d grasps of scene 4 (rendering can take several minutes)
g.showSceneGrasp(sceneId=4, camera='kinect', annId=2, format='6d')
# ---- Example 9 ----
__author__ = 'mhgou'
__version__ = '1.0'

# GraspNetAPI example for loading grasp for a scene.
# change the graspnet_root path
# NOTE(review): this example targets an older graspnetAPI version -- loadGrasp
# takes 'grasp_thresh' and returns a dict keyed by object id, unlike the
# GraspGroup-based examples elsewhere in this file; verify the API version.

####################################################################
graspnet_root = '/DATA1/Benchmark/graspnet' # ROOT PATH FOR GRASPNET
####################################################################

sceneId = 1
from graspnetAPI import GraspNet

# initialize a GraspNet instance  
g = GraspNet(graspnet_root, camera='kinect', split='train')

# load grasp of scene 1 with annotation id = 0, camera = kinect and grasp_thresh = 0.4
_6d_grasp = g.loadGrasp(sceneId = sceneId, annId = 0, format = '6d', camera = 'kinect', grasp_thresh = 0.4)
print('Object ids in scene %d:' % sceneId, _6d_grasp.keys())

# Per-object dump of grasp attributes: points, rotations, depths, widths.
# NOTE(review): the example appears truncated here -- the friction
# coefficients print has no value line following it in this chunk.
for k in _6d_grasp.keys():
    print('=======================\nobject id=%d, grasps number = %d format = 6d\n=======================' % (k,_6d_grasp[k]['depths'].shape[0]))
    print('points:')
    print(_6d_grasp[k]['points'])
    print('Rs:')
    print(_6d_grasp[k]['Rs'])
    print('depths:')
    print(_6d_grasp[k]['depths'])
    print('widths:')
    print(_6d_grasp[k]['widths'])
    print('friction coefficients:')
# ---- Example 10 ----
from graspnetAPI import GraspNet
import cv2
import open3d as o3d

# GraspNetAPI example for loading a rectangle grasp and drawing it on the image.
# change the graspnet_root path

camera = 'kinect'
sceneId = 5
annId = 3

####################################################################
graspnet_root = '/home/gmh/graspnet'  # ROOT PATH FOR GRASPNET
####################################################################

g = GraspNet(graspnet_root, camera=camera, split='all')

# load the color (BGR) and depth frames of the chosen annotation
bgr = g.loadBGR(sceneId=sceneId, camera=camera, annId=annId)
depth = g.loadDepth(sceneId=sceneId, camera=camera, annId=annId)

# load the rectangle grasps for the scene/annotation
rect_grasp_group = g.loadGrasp(sceneId=sceneId,
                               camera=camera,
                               annId=annId,
                               fric_coef_thresh=0.2,
                               format='rect')

# sample one rectangle grasp and draw it onto the BGR image
# NOTE(review): 'img' is neither shown nor saved in this chunk -- the example
# is presumably truncated here.
rect_grasp = rect_grasp_group.random_sample(1)[0]
img = rect_grasp.to_opencv_image(bgr)
# ---- Example 11 ----
# Setup for a grasp-classification pipeline: dataset handle, sampling
# constants, and a pretrained network wrapped in DataParallel.
import os
import sys
import time

import cv2
import torch
import torch.multiprocessing
import torch.nn as nn  # fix: nn.DataParallel is used below but nn was never imported
from tqdm import tqdm

from graspnetAPI import GraspNet, GraspNetEval, GraspGroup
#torch.multiprocessing.set_start_method('spawn')
from grasptoolbox.grasp_sampling.gen_grasp_cloud import estimate_normals, estimate_darboux_frame
from grasptoolbox.grasp_sampling.gen_grasp_image import transform_cloud_to_image
from grasptoolbox.collision_detection.collision_detector import ModelFreeCollisionDetector
from json_dataset import JsonDataset
from network import Net, NetCCFFF

g = GraspNet('/home/minghao/graspnet/', camera='kinect', split='test')

# Gripper geometry (presumably meters -- TODO confirm units) and sample count.
height = 0.02
depth_base = 0.02
grasp_depth = 0.02
grasp_width = 0.08
num_sample = 2000
DUMP_DIR = './dump/'
sceneIds = g.getSceneIds()

# Build the network, move it to GPU when available, and load trained weights.
input_channels = 3
model = Net(input_channels)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = nn.DataParallel(model)
model.to(device)
model.load_state_dict(torch.load('model_new_2.pwf'))
# ---- Example 12 ----
__author__ = 'mhgou'
__version__ = '1.0'

# GraspNetAPI example for loading grasp for a scene.
# change the graspnet_root path
# NOTE(review): showSceneGrasp is called with 'sceneIds='/'show='/'numGrasp='
# keywords here, while other examples in this file pass 'sceneId='/'camera='/
# 'annId=' -- this likely targets a different API version; verify before use.

####################################################################
graspnet_root = '/home/gmh/Desktop/graspnet' # ROOT PATH FOR GRASPNET
####################################################################

sceneId = 1
from graspnetAPI import GraspNet

# initialize a GraspNet instance  
g = GraspNet(graspnet_root, camera='kinect', split='train')

# show object grasps
g.showObjGrasp(objIds = 0, show=True)

# show 6d poses
g.show6DPose(sceneIds=0, show=True)

# show scene rectangle grasps
# g.showSceneGrasp(sceneIds=0, format = 'rect', show = True, numGrasp = 20)

# show scene 6d grasps(You may need to wait several minutes)
g.showSceneGrasp(sceneIds=0, format = '6d', show = True, numGrasp = 2)