Example #1

# Standard-library and third-party imports used below; the project-specific helpers
# (DataLogger, simCam, SimUR5, Sim_Scene, get_sim_config, invRt, get_mask_real,
# get_pos, can_collide, can_push, get_random, get_dist, get_onehot, depth_smooth,
# set_friction_and_mass, set_position_and_color, mkdir) are assumed to be
# importable from the project's own modules.
import argparse
import math
import os

import joblib
import numpy as np
import pybullet as p

def generate_data(arg_step=10):
    # argparser
    parser = argparse.ArgumentParser()

    parser.add_argument('--step', type=int, default=arg_step)
    parser.add_argument('--nogui', action='store_true')
    parser.add_argument('--path', type=str, default='./log')
    parser.add_argument('--file_name', type=str, default=None)

    args = parser.parse_args()

    # logger
    if args.file_name is not None:
        mkdir(args.path)
    logger = DataLogger()

    # set robot & camera
    cfg = get_sim_config()

    view_matrix = tuple(invRt(cfg.Camera_pose).T.reshape(16))
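
    # convert the pinhole intrinsics (image size 640x360, focal length f = 462 px)
    # into horizontal/vertical field-of-view angles in degrees: fov = 2 * atan(size / (2 * f))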
    imsize_w, imsize_h = 640, 360
    f = 462
    fov_w = math.atan(imsize_w / 2 / f) * 2 / math.pi * 180
    fov_h = math.atan(imsize_h / 2 / f) * 2 / math.pi * 180

    proj_matrix = p.computeProjectionMatrixFOV(fov=fov_h,
                                               aspect=imsize_w / imsize_h,
                                               nearVal=0.01,
                                               farVal=5.0)

    cam = simCam(imsize=[imsize_w, imsize_h],
                 view_matrix=view_matrix,
                 proj_matrix=proj_matrix,
                 z_near=0.01,
                 z_far=5.0)
    robot = SimUR5(center=cfg.Table_center,
                   prepare_height=cfg.Gripper_prepare_height,
                   real_time_sim=False,
                   GUI=not args.nogui)

    obj_num = cfg.Object_num

    # set object
    scene = Sim_Scene(view_matrix=view_matrix,
                      fov_w=fov_w,
                      imsize=[imsize_w, imsize_h],
                      cube_num=cfg.Object_num)

    image_RGB_ini, image_depth_ini, _ = cam.get_data()
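    # this initial RGB-D frame is the reference later passed to get_mask_real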

    # set property
    friction_discrete, mass_discrete = set_friction_and_mass(
        obj_num, scene.object_ids, cfg)
    set_position_and_color(obj_num, scene.object_ids)

    # reset the gripper (open, then close) before starting
    robot.open_gripper(blocking=True)
    robot.close_gripper(blocking=True)

    logger.save_attribute('friction', friction_discrete)
    logger.save_attribute('mass', mass_discrete)
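    # per-primitive execution counters and proposal pools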
    cnt_dict = {'collide': 0, 'push': 0}
    all_proposal = {'collide': [], 'push': []}

    # start action
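    # each step: segment the scene, propose collide/push actions, execute one of
    # them, and log depth, mask, action and the resulting optical flow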
    for step in range(args.step):
        robot.close_gripper(blocking=True)
        image_RGB, image_depth, _ = cam.get_data()

        mask = get_mask_real(image_RGB=image_RGB,
                             image_RGB_ini=image_RGB_ini,
                             image_depth=image_depth,
                             image_depth_ini=image_depth_ini,
                             Object_num=cfg.Object_num)

        # depth normalization
        tmp_depth = image_depth.copy()
        tmp_depth -= np.min(tmp_depth)
        tmp_depth /= np.max(tmp_depth)

        obj_pos, points_list = [], []
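        # recover each object's 3D position and point cloud from its mask, the
        # RGB-D frame and the camera extrinsics/intrinsics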
        for index in range(cfg.Object_num):
            cur_pos, points = get_pos((mask == (index + 1)).astype(int),
                                      image_RGB, image_depth, cfg.Camera_pose,
                                      cfg.Camera_intrinsic)

            obj_pos.append(cur_pos)
            points_list.append(points)
        scene.reset_coord_prev()
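        # presumably records the current object coordinates so comput_2dflow can
        # later measure the motion caused by the chosen action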

        # generate proposal

        # collide
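        # a collision is proposed for every object/side pair that can_collide
        # accepts within the configured collide region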
        for i in range(cfg.Object_num):
            for dir in range(2):
                if can_collide(obj_pos, i, cfg.Collide_region, dir):
                    all_proposal['collide'].append({
                        'action_type': 'collide',
                        'obj_id': i,
                        'dir': dir
                    })

        # push
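        # pushes are proposed over direction_num evenly spaced angles around
        # each object, filtered by can_push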
        for i in range(cfg.Object_num):
            for dir in range(cfg.direction_num):
                if can_push(obj_pos, points_list, i,
                            dir / cfg.direction_num * 2 * np.pi,
                            cfg.Table_center):
                    all_proposal['push'].append({
                        'action_type': 'push',
                        'obj_id': i,
                        'dir': dir,
                    })

        np.random.shuffle(all_proposal['push'])
        np.random.shuffle(all_proposal['collide'])

        proposal_list = all_proposal['push'] + all_proposal['collide']
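        # if pushes have outnumbered collisions so far, try collision proposals
        # first to keep the two primitives balanced; with probability 0.2 ignore
        # the ordering and shuffle the combined list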
        if cnt_dict['push'] > cnt_dict['collide']:
            proposal_list = all_proposal['collide'] + all_proposal['push']
        if get_random(0, 1) < 0.2:
            np.random.shuffle(proposal_list)

        # choose action randomly
        if len(proposal_list) == 0:
            print('No Proposal')
            exit()
        action = proposal_list[0]
        cnt_dict[action['action_type']] += 1

        # prepare
        cur_pos = obj_pos[action['obj_id']]
        points = points_list[action['obj_id']]

        logger.save_data(step, 'depth', image_depth)
        logger.save_data(step, 'mask', mask)
        logger.save_data(step, 'action_type', action['action_type'])
        logger.save_data(step, 'obj_id', action['obj_id'])

        if action['action_type'] == 'collide':
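            # collide primitive: grasp the auxiliary cylinder and release it
            # next to the target object so that it runs into it from side `dir`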
            dir = action['dir']
            aux_init = cfg.Auxiliary_initialization[dir]

            # set the auxiliary cylinder
            p.resetBasePositionAndOrientation(
                scene.object_ids[cfg.Object_num],
                posObj=aux_init + [cfg.Auxiliary_initialization_height],
                ornObj=p.getQuaternionFromEuler([0, 0, np.pi / 2]))

            # start action
            robot.primitive_topdown_grasp(
                aux_init + [cfg.Gripper_grasp_height], np.pi / 2)
            robot.primitive_place([
                cur_pos[0], cfg.Gripper_place_y[dir], cfg.Gripper_place_height
            ], np.pi / 2, cfg.Slow_speed)
            robot.close_gripper(True)

            # finish action
            robot.primitive_gohome(speed=cfg.Fast_speed)
            p.resetBasePositionAndOrientation(
                scene.object_ids[cfg.Object_num],
                posObj=cfg.Auxiliary_initialization[0] +
                [cfg.Auxiliary_initialization_height],
                ornObj=cfg.Orientation_initialization)

            # save info
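            # action vector layout: [collide flag, push start xyz,
            # 30-d one-hot push direction, push speed, collide x-coordinate, collide side]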
            logger.save_data(step, 'action', [1] + [0, 0, 0] + [0] * 30 + [0] +
                             [cur_pos[0], action['dir']])
            logger.save_data(step, 'direction', action['dir'])

        elif action['action_type'] == 'push':
            angle = action['dir'] / cfg.direction_num * 2 * np.pi
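            # start the push behind the object: back off from its centre along the
            # push direction by get_dist (presumably the object's extent in that
            # direction) plus a 0.075 m margin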
            delta = -1 * np.asarray([
                np.cos(angle), np.sin(angle)
            ]) * (get_dist(points, cur_pos,
                           [np.cos(angle), np.sin(angle)]) + 0.075)
            speed_discrete = np.random.choice(len(cfg.Push_speed))
            push_speed = cfg.Push_speed[speed_discrete]

            start = [
                cur_pos[0] + delta[0], cur_pos[1] + delta[1], cfg.Push_height
            ]

            robot.primitive_push_tilt(start, angle, push_speed)
            robot.primitive_gohome(speed=cfg.Fast_speed)

            # save info
            logger.save_data(step, 'action',
                             [0] + start + get_onehot(action['dir'], 30) +
                             [push_speed] + [0, 0])
            logger.save_data(step, 'direction', action['dir'])
            logger.save_data(step, 'speed', speed_discrete)

        # calc the optical flow
        flowim, depth_im_prev, depth_im_curr = scene.comput_2dflow(
            get_mask=False)
        _, flow = depth_smooth(depth_im_prev, flowim)
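        # keep only the flow on the manipulated object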
        flow = flow * (mask == (action['obj_id'] + 1)).astype(np.float32)
        logger.save_data(step, 'flow', flow)

    if args.file_name is not None:
        joblib.dump((logger.attribute_dict, logger.data_dict),
                    os.path.join(args.path, args.file_name),
                    compress=True)