Example #1
import argparse
import math
import time, datetime

import torch
from torch import cuda
from torch.autograd import Variable

import onmt
import onmt.Markdown
from onmt.train_utils.trainer import XETrainer
from onmt.train_utils.fp16_trainer import FP16XETrainer
from onmt.train_utils.multiGPUtrainer import MultiGPUXETrainer
from onmt.modules.Loss import NMTLossFunc, NMTAndCTCLossFunc
from onmt.ModelConstructor import build_model, init_model_parameters

parser = argparse.ArgumentParser(description='train.py')
onmt.Markdown.add_md_help_argument(parser)

from options import make_parser
# Please look at the options file to see the options regarding models and data
parser = make_parser(parser)

opt = parser.parse_args()

print(opt)

# An ugly hack to have weight norm on / off
onmt.Constants.weight_norm = opt.weight_norm
onmt.Constants.checkpointing = opt.checkpointing
onmt.Constants.max_position_length = opt.max_position_length

# Use static dropout if checkpointing > 0
if opt.checkpointing > 0:
    onmt.Constants.static = True

if torch.cuda.is_available() and not opt.gpus:
    print("WARNING: You have a CUDA device, so you should probably run with -gpus 0")
Example #2
import os

import cv2
import numpy as np
from mayavi import mlab  # only required for --vis_method mayavi

# make_parser, the load_pose_* loaders, pos_offset_along_approach_vec and
# draw_scene_mayavi come from this repo's own helper files; ut is likely
# airobot.utils.common (to_rot_mat, euler2rot).


def main():
    args = make_parser().parse_args()
    pc = np.load(os.path.join(args.data_path, 'pc.npy'))  # scene point cloud
    print('loaded %s successfully' % os.path.join(args.data_path, 'pc.npy'))
    if args.method == 'GPNet':
        grasp_poses, grasp_scores = load_pose_GPNet(
            os.path.join(args.data_path, args.pose_file))
    elif args.method == '6dof-graspnet':
        grasp_poses, grasp_scores = load_pose_6dofgraspnet(
            os.path.join(args.data_path, args.pose_file))
    elif args.method == 'graspnet_baseline':
        if args.vis_method == 'mayavi':
            grasp_poses, grasp_scores = load_pose_graspnet_baseline(
                os.path.join(args.data_path, args.pose_file))
        else:
            grasp_poses, grasp_scores = None, None
    else:
        raise NotImplementedError
    print('loaded %s successfully' %
          os.path.join(args.data_path, args.pose_file))

    # if grasp_poses is None, only the point cloud is visualized
    if args.vis_method == 'mayavi':
        image = cv2.imread(os.path.join(args.data_path, 'rgb.jpg'))
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)  # OpenCV reads BGR
        pc_colors = image.copy()
        pc_colors = np.reshape(pc_colors, [-1, 3])
        mlab.figure(bgcolor=(1, 1, 1))
        draw_scene_mayavi(
            pc=pc,
            pc_color=pc_colors,
            grasps=grasp_poses,
            grasp_scores=grasp_scores,
        )
        mlab.show()
    elif args.vis_method == 'open3d':
        from PIL import Image
        import open3d as o3d
        from graspnetAPI import GraspGroup

        color = np.array(Image.open(os.path.join(args.data_path, 'rgb.jpg')),
                         dtype=np.float32) / 255.0
        color = color.reshape((-1, 3))
        gg_array = []
        if args.method == 'graspnet_baseline':
            gg_array = np.load(os.path.join(args.data_path, args.pose_file))
        else:
            offset_along_approach_vec = -0.02
            for index, pose in enumerate(grasp_poses):
                center = pose[0] + pos_offset_along_approach_vec(
                    pose[2], offset_along_approach_vec)
                rot = ut.to_rot_mat(pose[1])
                te_1 = ut.euler2rot(np.array([0, -np.pi / 2, 0]))
                te_2 = ut.euler2rot(np.array([0, 0, np.pi / 2]))
                final_rot = np.dot(np.dot(rot, te_2), te_1)
                # grasp pose format is recorded in https://graspnetapi.readthedocs.io/en/latest/grasp_format.html
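                # each row of the GraspGroup array is a 17-dim vector:
                # [score, width, height, depth, 3x3 rotation (row-major, 9 values),
                #  translation (3 values), object_id]; object_id -1 means unassigned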
                te = [1, 0.065, 0.02, 0.02] if grasp_scores is None else [
                    grasp_scores[index], 0.1, 0.02, 0.02
                ]
                te = te + list(final_rot.flatten()) + list(center) + [-1]
                gg_array.append(te)
        gg = GraspGroup(np.array(gg_array))
        gg.nms()
        gg.sort_by_score()  # red for high score, blue for low
        max_grasps = 30
        if len(gg) > max_grasps:
            print('too many grasps; randomly keeping only {}'.format(max_grasps))
            gg = gg.random_sample(max_grasps)
        grippers = gg.to_open3d_geometry_list()
        cloud = o3d.geometry.PointCloud()
        cloud.points = o3d.utility.Vector3dVector(pc.astype(np.float32))
        cloud.colors = o3d.utility.Vector3dVector(color.astype(np.float32))
        o3d.visualization.draw_geometries([cloud, *grippers])
    else:
        raise NotImplementedError
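
A typical invocation, assuming the example is saved as vis_grasp.py (the flag
names mirror the args accessed above; file and folder names are placeholders):

python vis_grasp.py --data_path data_0 --pose_file grasp_poses.npy \
    --method GPNet --vis_method open3d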
Example #3
import os
import time

import cv2
import numpy as np
import airobot as ar

# make_parser, parse_robot_type, load_multi_object, load_single_object and
# encode_depth_to_image come from this repo's own helper files.


def main():
    """
    In the current dir (test_grasp), save the whole capture (depth, point
    cloud, info.npy, info.txt) to a separate folder named data_x, where x is
    chosen automatically from the number of folders already present.
    """
    # load robot and object, set camera parameter
    args = make_parser().parse_args()
    robot_type = parse_robot_type(args.robot_arm)
    robot = ar.Robot(robot_type)
    robot.arm.go_home()
    robot.pb_client.load_urdf('plane.urdf')

    if args.multi_obj:
        _, poses = load_multi_object(robot)
    else:
        load_single_object(robot, args)
    time.sleep(1.3)

    robot.cam.setup_camera(focus_pt=args.cam_focus_pt,
                           camera_pos=args.cam_pos,
                           height=args.cam_height,
                           width=args.cam_width)
    # create an automatically numbered output folder data_x (see docstring)
    current_dir = os.path.dirname(os.path.abspath(__file__))
    folder_num = len([
        lists for lists in os.listdir(current_dir)
        if os.path.isdir(os.path.join(current_dir, lists))
    ]) - 1
    save_path = os.path.join(current_dir, 'data_' + str(folder_num))
    os.mkdir(save_path)
    # save camera info to info.npy; in addition, save object and camera info to info.txt
    info = {
        'cam_intrinsic': robot.cam.cam_int_mat,
        'cam_view': robot.cam.view_matrix,
        'cam_external': robot.cam.cam_ext_mat,
        'cam_pos': args.cam_pos,
        'cam_focus_pt': args.cam_focus_pt,
        'cam_height': args.cam_height,
        'cam_width': args.cam_width
    }
    if args.multi_obj:
        info['object'] = poses
    else:
        info['object'] = [args.object_pos, args.object_ori]

    np.save(os.path.join(save_path, 'info.npy'), info)

    with open(os.path.join(save_path, 'info.txt'), 'w') as ci:
        if args.multi_obj:
            for i, pose in enumerate(poses):
                ci.write('object_{} position:\n'.format(i))
                ci.write(str(pose[0]))
                ci.write('\nobject_{} rotation:\n'.format(i))
                ci.write(str(pose[1]))
        else:
            ci.write('object position: \n')
            ci.write(str(args.object_pos) + '\n')
            ci.write('object rotation: \n')
            ci.write(str(args.object_ori) + '\n')
        ci.write('\ncamera position: \n')
        ci.write(str(args.cam_pos) + '\n')
        ci.write('camera focus point: \n')
        ci.write(str(args.cam_focus_pt) + '\n')
        ci.write('camera resolution: \n')
        ci.write(str(args.cam_height) + ' * ' + str(args.cam_width) + '\n')
        ci.write('camera view matrix: \n')
        ci.write(str(robot.cam.view_matrix) + '\n')
        ci.write('camera intrinsic matrix: ' + '\n')
        ci.write(str(robot.cam.cam_int_mat) + '\n')
        ci.write('camera external matrix: ' + '\n')
        ci.write(str(robot.cam.cam_ext_mat))
        ci.flush()

    # save rgb, depth, segmentation, point cloud
    img, depth, seg = robot.cam.get_images(get_rgb=True,
                                           get_depth=True,
                                           get_seg=True)
    print('image shape: {}, depth shape: {}'.format(img.shape, depth.shape))
    cv2.imwrite(os.path.join(save_path, 'depth.jpg'),
                encode_depth_to_image(depth))
    img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)  # camera returns RGB; imwrite expects BGR
    cv2.imwrite(os.path.join(save_path, 'rgb.jpg'), img)
    ar.log_info(f'Depth image min:{depth.min()} m, max: {depth.max()} m.')
    np.save(os.path.join(save_path, 'depth.npy'), depth)
    np.save(os.path.join(save_path, 'seg.npy'), seg)  # segmentation
    # get point cloud data in the world frame
    pts = robot.cam.depth_to_point_cloud(depth, in_world=True)
    ar.log_info('point cloud shape: {}'.format(pts.shape))
    np.save(os.path.join(save_path, 'pc.npy'), pts)
    print('saved %s successfully' % os.path.join(save_path, 'pc.npy'))

    input()  # keep the pybullet session alive until Enter is pressed
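
The saved artifacts can be loaded back as follows. Since info.npy stores a
dict, allow_pickle and .item() are required (a minimal sketch; the data_0
folder name is a placeholder):

import numpy as np

info = np.load('data_0/info.npy', allow_pickle=True).item()
pc = np.load('data_0/pc.npy')        # (N, 3) points in the world frame
depth = np.load('data_0/depth.npy')  # depth in meters
print(info['cam_intrinsic'])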
Example #4
                      robot_category=args.robot_arm,
                      control_mode=args.control_mode,
                      move_up=True)
        time.sleep(3)
        robot.pb_client.reset_body(obj_id,
                                   base_pos=args.object_pos,
                                   base_quat=ut.euler2quat(args.object_ori))
        time.sleep(0.8)
    print('all done!!!!')
    input()


if __name__ == '__main__':
    # when the orientation is set to Euler angles [0, 0, 0], the robot's gripper
    # points vertically upward and the gripper plane is the x-z plane
    # multi-object grasping is complex (path planning etc.); try manual_control() instead
    args = make_parser().parse_args()
    robot_type = parse_robot_type(args.robot_arm)

    robot = ar.Robot(robot_type)
    robot.arm.go_home()
    robot.pb_client.load_urdf('plane.urdf')

    #  ***the object pose must equal the pose you set when running get_data.py***
    #  because the grasp poses were generated for the object at that pose.
    #  Otherwise, add the relative transformation manually in the auto_control()
    #  function (see the sketch after this example).
    if args.multi_obj:
        objects_id = load_multi_object(
            robot)  # object pose in load_multi_object() function
    else:
        object_id = load_single_object(robot,
                                       args)  # object pose in options.py
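
If the object pose at grasp time differs from the one used in get_data.py, the
recorded grasps can be re-based with a relative transform instead of being
regenerated. A minimal sketch, assuming grasp and object poses as 4x4
homogeneous matrices (the helper name is hypothetical):

import numpy as np

def rebase_grasps(grasps_old, T_obj_old, T_obj_new):
    """Re-express grasps recorded at T_obj_old for an object now at T_obj_new."""
    T_rel = T_obj_new @ np.linalg.inv(T_obj_old)  # old object frame -> new
    return [T_rel @ T for T in grasps_old]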
Example #5
# PEAK framework code (Python 2). binding, protocols, adapt and the
# ICmdLineApp/ARGV/... names are conventionally pulled in like this
# (module paths assumed):
from peak.api import *
from peak.running.interfaces import *
from peak.running import options


class AbstractCommand(binding.Component):
    """Simple, commandline-driven process"""

    protocols.advise(instancesProvide=[ICmdLineApp],
                     classProvides=[ICmdLineAppFactory])

    argv = binding.Obtain(ARGV, offerAs=[ARGV])
    stdin = binding.Obtain(STDIN, offerAs=[STDIN])
    stdout = binding.Obtain(STDOUT, offerAs=[STDOUT])
    stderr = binding.Obtain(STDERR, offerAs=[STDERR])
    environ = binding.Obtain(ENVIRON, offerAs=[ENVIRON])

    def _run(self):
        """Override this in subclasses to implement desired behavior"""
        raise NotImplementedError

    usage = """\
Either this is an abstract command class, or somebody forgot to
define a usage message for their subclass.
"""

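    # binding.Make attributes are computed lazily on first access and then
    # cached per instance (PEAK idiom), so the parser is only built if used.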
    option_parser = binding.Make(lambda self: options.make_parser(self))

    parsed_args = binding.Make(
        lambda self: self.option_parser.parse_args(self.argv[1:], self)[1])

    def showHelp(self):
        """Display usage message on stderr"""
        print >> self.stderr, self.usage
        print >> self.stderr, self.option_parser.format_help()
        return 0

    def isInteractive(self):
        """True if 'stdin' is a terminal"""
        try:
            isatty = self.stdin.isatty
        except AttributeError:
            return False
        else:
            return isatty()

    isInteractive = binding.Make(isInteractive)

    def getSubcommand(self, executable, **kw):
        """Return a 'ICmdLineApp' with our environment as its defaults

        Any 'IExecutable' may be supplied as the basis for creating
        the 'ICmdLineApp'.  'NotImplementedError' is raised if the
        supplied object is not an 'IExecutable'.
        """

        factory = adapt(executable, ICmdLineAppFactory)

        for k in 'argv stdin stdout stderr environ'.split():
            if k not in kw:
                kw[k] = getattr(self, k)

        if 'parentComponent' not in kw:
            kw['parentComponent'] = self.getCommandParent()

        return factory(**kw)

    def _invocationError(self, msg):
        """Write msg and usage to stderr if interactive, otherwise re-raise"""

        if self.isInteractive:
            self.showHelp()
            print >> self.stderr, '\n%s: %s\n' % (self.argv[0], msg)
            # XXX output last traceback frame?
            return 1  # exit errorlevel
        else:
            raise  # XXX is this really appropriate?

    def getCommandParent(self):
        """Get or create a component to be used as the subcommand's parent"""
        # Default is to use the interpreter as the parent
        return self

    def run(self):
        """Run the command"""

        try:
            return self._run() or 0

        except SystemExit, v:
            return v.args and v.args[0] or 0

        except InvocationError, msg:
            return self._invocationError(msg)