Пример #1
0
def gentle_move(dir_index, q_initial, pos, gripperOri_list, tip_hand_transform,
                listener, point_counter):
    """Jog the gripper 2mm at a time along a fixed axis until the operator
    stops it, then record the touched coordinate into storage_corners.yaml.

    dir_index selects the motion axis from axis_list (index 4, -z, is
    treated as a bin-floor probe; other indices as wall/corner probes).
    Returns '[IK] Error' when motion planning fails, otherwise None.
    """
    plans = []
    axis_list = [[0, 1, 0], [-1, 0, 0], [0, -1, 0], [1, 0, 0], [0, 0, -1],
                 [0, 0, 1]]
    axis = axis_list[dir_index]
    # Map the calibration point to its bin index.  The original elif chain
    # left bin_counter unbound for any other point_counter, which raised
    # NameError later when dir_index == 4; .get() makes that case explicit.
    bin_counter = {4: 0, 9: 1, 14: 2, 19: 3}.get(point_counter)

    for _ in range(0, 10000):  # effectively "until the operator stops"
        # Step 2mm along the selected axis (use k: do not shadow the
        # outer loop variable as the original code did).
        for k in range(0, 3):
            pos[k] = pos[k] + 0.002 * axis[k]
        with Timer('generatePlan'):
            plan, qf, plan_possible = generatePlan(q_initial, pos,
                                                   gripperOri_list,
                                                   tip_hand_transform,
                                                   'fastest')
            if plan_possible:
                plans.append(plan)
                q_initial = qf
            else:
                return '[IK] Error'
        #~execute plan
        with Timer('executePlanForward'):
            executePlanForward(plans, False, False)
        value = raw_input(
            "Enter: Move 2mm closer to wall, s: Stop robot and record value ")
        #save data if s
        if value == 's':
            fpath = os.environ[
                'CODE_BASE'] + '/catkin_ws/src/apc_config/storage_corners.yaml'
            # NOTE(review): yaml.load without an explicit Loader is unsafe on
            # untrusted input; this is a local config file, but consider
            # yaml.safe_load.
            with open(fpath, 'r') as stream:
                data = yaml.load(stream)
            tcp_pose = get_tcp_pose(listener)
            if dir_index == 4:
                terms = ['z']
            else:
                terms = ['x', 'y']
            for i in range(0, len(terms)):
                if dir_index == 4:
                    # Bin floor height: tcp z minus the spatula tip offset.
                    param_name = '/bin' + str(bin_counter) + '/' + terms[i]
                    value_tmp = tcp_pose[2] - rospy.get_param(
                        '/gripper/spatula_tip_to_tcp_dist')
                else:
                    param_name = '/corner' + str(
                        point_counter) + '/' + terms[i]
                    value_tmp = tcp_pose[i]
                # Coerce to a builtin float so yaml serializes it plainly.
                # The old .astype(type('float', ...)) hack crashed whenever
                # the value was already a plain Python float.
                data[param_name] = float(value_tmp)
            with open(fpath, 'w') as yaml_file:
                yaml_file.write(yaml.dump(data, default_flow_style=False))
            break
        else:
            # Operator wants another 2mm step: discard executed plans.
            plans = []
Пример #2
0
    def update_real_height_map(self, b):
        """Refresh self.HeightMap[b] from passive vision (or from a debug
        depth image when vision is unavailable), then recompute the
        derived variance maps."""
        # Pointer copy: mutating HM updates self.HeightMap[b] in place.
        HM = self.HeightMap[b]
        # Get height map from vision
        if self.available[b]:
            with Timer('Call passive vision to request new height map for %d' % (b)):
                print('getPassiveVisionEstimate ', 'request', '',  b)
                # Retry until the vision service answers.  Catch Exception
                # (not a bare except) so Ctrl-C can still break the loop.
                while True:
                    try:
                        self.passive_vision_state = self.getPassiveVisionEstimate('request', '',  b)
                        break
                    except Exception:
                        print('Keep waiting.')
            M = np.asarray(self.passive_vision_state.height_map)
            if len(M) > 0:
                # Vision sends a flat 200x300 map; transpose into HM layout.
                M = M.reshape((200,300))
                M = M.transpose()
            else:
                # Vision returned nothing: keep the previous height map.
                # (A leftover `import pdb; pdb.set_trace()` debug breakpoint
                # was removed from this path.)
                print('There is no HM, we will be using the old one....')
                return
        else:
            # No camera for this bin: fall back to the debug top-view image.
            file_name =os.environ['ARCDATA_BASE']+'/graspdata/debug/foreground-top-view.depth.png'
            M = img.imread(file_name)
            M = np.transpose(M)

        # Downsample M into HM.  Each HM cell takes the max of the four
        # corner samples of its corresponding window in the vision map
        # (disc vs vis_disc are the two grid resolutions).
        for i in range(len(HM)-2):
            for j in range(len(HM[i])-2):
                [x_ini, x_fin, y_ini, y_fin] = [int(floor(i*self.disc/self.vis_disc)), int(ceil((i+1)*self.disc/self.vis_disc)), int(floor(j*self.disc/self.vis_disc)), int(ceil((j+1)*self.disc/self.vis_disc))]
                x_fin = max(x_ini+1, x_fin)
                y_fin = max(y_ini+1, y_fin)
                HM[i][j] = np.max(M[[x_ini,x_fin],:][:,[y_ini,y_fin]])

        # Now that we have the right HM, update the derived maps.
        self.compute_variance_height_maps(b)
Пример #3
0
    def update_real_height_map(self, b=None, img_path=None):
        """Refresh self.HeightMap[b] from passive vision (or from a debug
        depth image when vision is unavailable), then recompute the
        derived variance maps.  Called with b=None it updates every bin.

        b        -- integer bin index (0-based; the vision service is 1-based)
        img_path -- unused here; kept for interface compatibility
        """
        if b is None:  # `b == None` replaced by the correct identity test
            for i in range(3):  # There are only 3 bins and 3 boxes
                self.update_real_height_map(i)
            return
        else:
            assert isinstance(b, int), 'Can only update HM if b=None or an integer'
            # Pointer copy: mutating HM updates self.HeightMap[b] in place.
            HM = self.HeightMap[b]
        # Get a fresh height map from vision, if this bin has a camera.
        if self.available[b]:
            with Timer('Call passive vision to request new height map for %d' %
                       (b + 1)):
                print('getPassiveVisionEstimate ', 'request', '', b + 1)
                # Retry until the vision service answers.  Catch Exception
                # (not a bare except) so Ctrl-C can still break the loop.
                while True:
                    try:
                        self.passive_vision_state = self.getPassiveVisionEstimate(
                            'request', '', b + 1)
                        break
                    except Exception:
                        # NOTE(review): `e` appears to be a debug/log helper
                        # module -- confirm it is imported at file level.
                        e.db('Keep waiting.')
            M = np.asarray(self.passive_vision_state.height_map)
            if len(M) > 0:
                # Vision sends a flat 200x300 map; transpose into HM layout.
                M = M.reshape((200, 300))
                M = M.transpose()
            else:
                # Vision returned nothing: keep the previous height map.
                print('There is no HM, we will be using the old one....')
                return
        else:
            # No camera for this bin: fall back to the debug top-view image.
            file_name = os.environ[
                'ARCDATA_BASE'] + '/graspdata/debug/foreground-top-view.depth.png'
            M = img.imread(file_name)
            M = np.transpose(M)

        # Downsample M into HM.  Each HM cell takes the max of the four
        # corner samples of its corresponding window in the vision map
        # (disc vs vis_disc are the two grid resolutions).
        for i in range(len(HM) - 2):
            for j in range(len(HM[i]) - 2):
                [x_ini, x_fin, y_ini, y_fin] = [
                    int(floor(i * self.disc / self.vis_disc)),
                    int(ceil((i + 1) * self.disc / self.vis_disc)),
                    int(floor(j * self.disc / self.vis_disc)),
                    int(ceil((j + 1) * self.disc / self.vis_disc))
                ]
                x_fin = max(x_ini + 1, x_fin)
                y_fin = max(y_ini + 1, y_fin)
                HM[i][j] = np.max(M[[x_ini, x_fin], :][:, [y_ini, y_fin]])

        # Now that you have the right HM, update other maps.
        self.compute_variance_height_maps(b)
Пример #4
0
def get_filtered_pointcloud(obj_ids, bin_num, kinect_num):
    global _pointcloud2_service_srv
    with Timer('pointcloud2_service'):
        service_name = '/getpc_%d/getpc/get_filtered_pointcloud2_service' % kinect_num
        req = GetObjectPointCloud2Request()
        req.bin_num = bin_num
        req.obj_id = obj_ids[0]  # peterkty: need to pass in a list
        print '\tWaiting for service up: ', service_name
        rospy.wait_for_service(service_name)
        try:
            print '\tCalling service:', service_name
            response = _pointcloud2_service_srv[kinect_num - 1](req)
            return response.pc2, response.foreground_mask
        except:
            print '\tCalling service:', service_name, 'failed'
            print '\tencounters errors:', traceback.format_exc()
            print '\tDid you call capsen.capsen.init()? Is camera connection good?'
            return None, None
Пример #5
0
    def call_passive_vision(self, bin_id=0):
        """Request grasp proposals for bin_id from passive vision, publish
        the raw proposal array, then store the proposals reshaped to one
        row of self.param_grasping floats each, sorted by their last
        column (the score) in descending order."""
        print(
            'Calling passive vision system for grasp proposals. The bin considered is: ',
            bin_id)
        with Timer('getPassiveVisionEstimate ' + 'request hm sg %d' % bin_id):
            self.passive_vision_state = self.request_passive_vision_wait(
                bin_id)

        print('Received grasp proposals.')
        self.all_grasp_proposals = np.asarray(
            self.passive_vision_state.grasp_proposals)
        # Publish the raw (still flat) proposal array.
        grasp_all_proposals_msgs = Float32MultiArray()
        grasp_all_proposals_msgs.data = self.all_grasp_proposals
        self.grasp_all_proposals_pub.publish(grasp_all_proposals_msgs)
        # Reshape flat array into one row per proposal.  Floor division is
        # required: plain '/' yields a float row count under Python 3 and
        # makes reshape raise.
        self.all_grasp_proposals = self.all_grasp_proposals.reshape(
            len(self.all_grasp_proposals) // self.param_grasping,
            self.param_grasping)
        # Sort proposals by the score column, best first.
        grasp_permutation = self.all_grasp_proposals[:, self.param_grasping -
                                                     1].argsort()[::-1]
        self.all_grasp_proposals = self.all_grasp_proposals[grasp_permutation]
Пример #6
0
#!/usr/bin/env python
import gripper
from ik.helper import Timer

# Exercise the gripper across increasing speeds, timing each close/open.
gripper.homing()

# Start from a known-open state before the first timed cycle.
gripper.open(speed=50)

for speed in (50, 100, 200, 400):
    with Timer('close%d' % speed):
        gripper.close(speed=speed)
    with Timer('open%d' % speed):
        gripper.open(speed=speed)
#gripper.grasp()
#gripper.grasp(move_pos=10, move_speed=50)
#gripper.release(speed=50)
Пример #7
0
def _detectOneObject(obj_id, bin_num, kinect_num):
    global _detect_one_object_srv
    global br

    print 'In', bcolors.WARNING, '_detectOneObject', bcolors.ENDC, 'obj_ids:', obj_id, 'bin_num:', bin_num
    # filter the point cloud
    pc, foreground_mask = get_filtered_pointcloud([obj_id], bin_num,
                                                  kinect_num)
    if pc is None:
        return (None, None)

    # string[] model_names
    # sensor_msgs/PointCloud2 cloud   # Note: this must be an organized point cloud (e.g., 640x480)
    # bool[] foreground_mask
    # bool find_exact_object_list     # Set to true if you only want to consider scene hypotheses that contain exactly the objects in 'model_names'
    # ObjectConstraint[] constraints  # These apply to all objects
    # ---
    # SceneHypothesis[] detections

    # 2. prepare constraints

    bin_cnstr = get_bin_cnstr(
    )[bin_num]  # a list of right \ # left \ # back \  # front  \ # bottom \ # top
    ccnstr = []

    tol = 0.9  # larger is more strict
    (trans, rot) = lookupTransform(pc.header.frame_id, '/shelf', _tflistener)
    # 2.1 right
    ccnstr.append(
        createCapsenConstraint(
            ObjectConstraint.HALF_SPACE,
            transformPlane([1, 0, 0], [bin_cnstr[0], 0, 0], trans, rot,
                           pc.header.frame_id), tol, bin_num))
    # 2.2 left
    ccnstr.append(
        createCapsenConstraint(
            ObjectConstraint.HALF_SPACE,
            transformPlane([-1, 0, 0], [bin_cnstr[1], 0, 0], trans, rot,
                           pc.header.frame_id), tol, bin_num))
    # 2.3 back
    ccnstr.append(
        createCapsenConstraint(
            ObjectConstraint.HALF_SPACE,
            transformPlane([0, 1, 0], [0, bin_cnstr[2], 0], trans, rot,
                           pc.header.frame_id), tol, bin_num))
    # 2.4 front
    ccnstr.append(
        createCapsenConstraint(
            ObjectConstraint.HALF_SPACE,
            transformPlane([0, -1, 0], [0, bin_cnstr[3], 0], trans, rot,
                           pc.header.frame_id), tol, bin_num))
    # 2.5 floor
    ccnstr.append(
        createCapsenConstraint(
            ObjectConstraint.HALF_SPACE,
            transformPlane([0, 0, 1], [0, 0, bin_cnstr[4]], trans, rot,
                           pc.header.frame_id), tol, bin_num))
    # 2.6 top
    ccnstr.append(
        createCapsenConstraint(
            ObjectConstraint.HALF_SPACE,
            transformPlane([0, 0, -1], [0, 0, bin_cnstr[5]], trans, rot,
                           pc.header.frame_id), tol, bin_num))
    # 2.7 on floor
    floor_thick = 0.03
    ccnstr.append(
        createCapsenConstraint(
            ObjectConstraint.SUPPORTING_PLANE,
            transformPlane([0, 0, 1], [0, 0, bin_cnstr[4] - floor_thick / 2],
                           trans, rot, pc.header.frame_id), tol, bin_num))
    # string model_name
    # sensor_msgs/PointCloud2 cloud   # Note: this must be an organized point cloud (e.g., 640x480)
    # bool[] foreground_mask
    # ObjectConstraint[] constraints
    # geometry_msgs/Pose true_pose    # for testing
    # ---
    #
    # ObjectHypothesis[] detections

    with Timer('detect_one_object'):
        # detect using capsen
        service_name = '/detection_service/detect_one_object'
        req = DetectOneObjectRequest()
        req.model_name = obj_id
        req.cloud = pc
        req.constraints = ccnstr
        req.foreground_mask = foreground_mask
        #req.foreground_mask = [True for i in xrange(req.cloud.height*req.cloud.width)]

        print 'Waiting for service up: ', service_name
        rospy.wait_for_service(service_name)
        try:
            print 'Calling service:', service_name
            ret = _detect_one_object_srv(req)
            # ret.detections is a list of capsen_vision/ObjectHypothesis
            # string name
            # geometry_msgs/Pose pose
            # float32 score
            # float32[] score_components
            if len(ret.detections) > 0:
                print len(
                    ret.detections
                ), 'ObjectHypothesis returned, max score', ret.detections[
                    0].score
                for i in range(len(ret.detections)):
                    poselist_capsen_world = poseTransform(
                        pose2list(ret.detections[i].pose), pc.header.frame_id,
                        'map', _tflistener)

                    cap_T_our = get_obj_capsentf(obj_id)  # x,y,z,qx,qy,qz,qw
                    poselist_world = transformBack(
                        cap_T_our,
                        poselist_capsen_world)  # transform to our desired pose

                    # check whether inside bin
                    poselist_shelf = poseTransform(poselist_world, 'map',
                                                   'shelf', _tflistener)
                    if inside_bin(poselist_shelf[0:3], bin_num):
                        #pubFrame(br, poselist_world, 'obj', 'map')
                        return (poselist_world, ret.detections[i].score)
                    else:
                        print 'reject hypo', i, 'because it is outside the target bin'
                print 'No ObjectHypothesis satisfy hard bin constraint'
                return (None, None)
            else:
                print 'No ObjectHypothesis returned'
                return (None, None)
        except:
            print 'Calling service:', service_name, 'failed'
            print 'encounters errors:', traceback.format_exc()
            return (None, None)
Пример #8
0
def _detectObjects(obj_ids, bin_num, kinect_num):
    # return pose, retScore
    global _detect_objects_srv
    global br
    global _tflistener

    print 'In', '_detectObjects', 'obj_ids:', obj_ids, 'bin_num:', bin_num

    # 1. filter the point cloud
    pc, foreground_mask = get_filtered_pointcloud(
        obj_ids, bin_num, kinect_num)  # need to pass in list
    if pc is None or foreground_mask is None:
        return (None, None)

    # 2. prepare constraints

    bin_cnstr = get_bin_cnstr(
    )[bin_num]  # a list of right \ # left \ # back \  # front  \ # bottom \ # top
    ccnstr = []

    tol = 0.9  # larger is more strict
    (trans, rot) = lookupTransform(pc.header.frame_id, '/shelf', _tflistener)
    # 2.1 right
    ccnstr.append(
        createCapsenConstraint(
            ObjectConstraint.HALF_SPACE,
            transformPlane([1, 0, 0], [bin_cnstr[0], 0, 0], trans, rot,
                           pc.header.frame_id), tol, bin_num))
    # 2.2 left
    ccnstr.append(
        createCapsenConstraint(
            ObjectConstraint.HALF_SPACE,
            transformPlane([-1, 0, 0], [bin_cnstr[1], 0, 0], trans, rot,
                           pc.header.frame_id), tol, bin_num))
    # 2.3 back
    ccnstr.append(
        createCapsenConstraint(
            ObjectConstraint.HALF_SPACE,
            transformPlane([0, 1, 0], [0, bin_cnstr[2], 0], trans, rot,
                           pc.header.frame_id), tol, bin_num))
    # 2.4 front
    ccnstr.append(
        createCapsenConstraint(
            ObjectConstraint.HALF_SPACE,
            transformPlane([0, -1, 0], [0, bin_cnstr[3], 0], trans, rot,
                           pc.header.frame_id), tol, bin_num))
    # 2.5 floor
    ccnstr.append(
        createCapsenConstraint(
            ObjectConstraint.HALF_SPACE,
            transformPlane([0, 0, 1], [0, 0, bin_cnstr[4]], trans, rot,
                           pc.header.frame_id), tol, bin_num))
    # 2.6 top
    ccnstr.append(
        createCapsenConstraint(
            ObjectConstraint.HALF_SPACE,
            transformPlane([0, 0, -1], [0, 0, bin_cnstr[5]], trans, rot,
                           pc.header.frame_id), tol, bin_num))
    # 2.7 on floor
    floor_thick = 0.03
    ccnstr.append(
        createCapsenConstraint(
            ObjectConstraint.SUPPORTING_PLANE,
            transformPlane([0, 0, 1], [0, 0, bin_cnstr[4] + floor_thick * 2],
                           trans, rot, pc.header.frame_id), tol, bin_num))
    #visualizeConstraint(ccnstr[6], pc.header.frame_id)
    #pause()

    # 3. detect using capsen
    with Timer('detect_objects'):
        service_name = '/detection_service/detect_objects'
        req = DetectObjectsRequest()
        req.model_names = obj_ids
        req.constraints = ccnstr
        req.cloud = pc
        req.foreground_mask = foreground_mask

        sum_pt = sum(foreground_mask)
        if allFalse(foreground_mask, sum_pt):
            return (None, None)
        foreground_mask = subsample(foreground_mask, sum_pt)

        # outputfile = '/tmp/foreground_mask'
        # with open(outputfile, 'w') as outfile:
        # json.dump(foreground_mask, outfile)
        # pause()
        #req.foreground_mask = [True for i in xrange(req.cloud.height*req.cloud.width)]  # hack
        req.find_exact_object_list = True

        print '\tWaiting for service up: ', service_name
        rospy.wait_for_service(service_name)

        #pdb.set_trace()
        try:
            print '\tCalling service:', service_name
            ret = _detect_objects_srv(req)
            # ret.detections is a list of capsen_vision/SceneHypothesis
            # [capsen_vision/SceneHypothesis]:
            # std_msgs/Header header
            # uint32 seq
            # time stamp
            # string frame_id
            # capsen_vision/ObjectHypothesis[] objects
            # string name
            # geometry_msgs/Pose pose
            # geometry_msgs/Point position
            # float64 x
            # float64 y
            # float64 z
            # geometry_msgs/Quaternion orientation
            # float64 x
            # float64 y
            # float64 z
            # float64 w
            # float32 score
            # float32[] score_components
            # float32[2] errors
            # float32 score

            if len(ret.detections) > 0:
                print '\t', len(
                    ret.detections
                ), 'SceneHypothesis returned, max score', ret.detections[
                    0].score
                #print ret.detections
                for i in range(len(ret.detections)):
                    scene = ret.detections[i]
                    nobj = len(scene.objects)

                    scene_desired = transformObjectsFromCapsenToDesiredFrame(
                        scene, pc.header.frame_id)
                    if allObjectsInsideBin(scene_desired, bin_num):
                        return (scene_desired.objects, scene_desired.score)
                    #else:
                    #print 'reject scene hypo', i, 'because one object of it is outside the target bin'
                print '\tNo SceneHypothesis satisfy hard bin constraint'
                return (None, None)
            else:
                print '\tNo SceneHypothesis returned'
                return (None, None)
        except:
            print '\tCalling service:', service_name, 'failed'
            print '\tencounters errors:', traceback.format_exc()
            return (None, None)
Пример #9
0
import sys
# Capture configuration: by default run one pass over every known camera.
# (The previous hard-coded camids list was a dead store -- it was
# immediately overwritten by the comprehension below.)
times = 1
camids = [camid for camid, _ in cams.iteritems()]
# A single command-line argument selects that one camera and loops 100x.
if len(sys.argv) == 2:
    camids = [sys.argv[1]]
    times = 100

for i in xrange(times):
    for camid in camids:
        rospy.loginfo('capturing %s  time %d', camid, i + 1)
        cv2.namedWindow("imgrgb")
        cv2.namedWindow("imgdepth")
        cv2.moveWindow("imgdepth", 0, 500)
        with Timer():
            res = capture(camid)

        try:
            rows = 480
            cols = 640
            value = struct.unpack('B' * rows * cols * 3, res.point_cloud_rgb)
            img = np.array(value, dtype=np.uint8)
            #img = img / 255.0
            img = img.reshape((rows, cols, 3))
            img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
            cv2.imshow('imgrgb', img)

            depthimg = np.array(res.point_cloud_xyz)
            depthimg = depthimg.reshape((rows, cols, 3))
            cv2.imshow('imgdepth', depthimg[:, :, 2])