Example No. 1
    def planned_place(self):
        # Place approx. at the center bin
        binId = 0
        drop_pose = get_params_yaml('bin{}_pose'.format(binId))
        drop_pose[1] = drop_pose[1] + 0.03

        # Place depending on the position of the COM of the contact patch
        image_list = self.capture_images()
        img0, aux, contact = crop_contact(self.back_img_list[0],
                                          image_list[0],
                                          gel_id=1,
                                          is_zeros=True)
        img1, aux, contact = crop_contact(self.back_img_list[1],
                                          image_list[1],
                                          gel_id=2,
                                          is_zeros=True)

        # NOTE: scipy.misc.imresize was removed in SciPy 1.3; these examples assume an older SciPy
        img0 = scipy.misc.imresize(img0, (224, 224, 3))
        img1 = scipy.misc.imresize(img1, (224, 224, 3))

        pos_pixel_img0 = get_center_of_mass(img0)
        pos_pixel_img1 = get_center_of_mass(img1)
        pos_pixel_avg = np.mean(np.array([pos_pixel_img0, pos_pixel_img1]),
                                axis=0)
        # Compute actual motion for the robot and check if there is collision
        # (x in pixel frame -> y in hand frame, y in pixel frame -> -z in hand frame)
        pos_world_avg = np.nan_to_num(
            convert_image2world([225 / 2 - pos_pixel_avg[1],
                                 0]))  #np.array([y,-z])
        print('Pixel positions: ', pos_pixel_avg, '. World positions: ',
              pos_world_avg)
        drop_pose[1] = drop_pose[1] + pos_world_avg[0]
        print(drop_pose)
        # Place object using grasping
        self.rel_pose, self.BoxBody = vision_transform_precise_placing_with_visualization(
            self.bbox_info, viz_pub=self.viz_array_pub, listener=self.listener)
        place(listener=self.listener,
              br=self.br,
              isExecute=self.isExecute,
              binId=binId,
              withPause=self.withPause,
              rel_pose=self.rel_pose,
              BoxBody=self.BoxBody,
              place_pose=drop_pose,
              viz_pub=self.viz_array_pub,
              is_drop=False,
              recorder=self.gdr)
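
The helper get_center_of_mass is called in every one of these examples, but its definition is not shown. A minimal sketch of what such a helper could look like, assuming the contact image is an intensity image whose bright pixels mark the contact patch (an inference, not the repository's actual implementation):

import numpy as np

def get_center_of_mass(img):
    # Collapse color channels (if any) into a single intensity mask.
    img = np.asarray(img, dtype=float)
    if img.ndim == 3:
        img = img.mean(axis=2)
    total = img.sum()
    if total == 0:
        # No contact: return NaNs, which the callers clean up with np.nan_to_num.
        return np.array([np.nan, np.nan])
    # Intensity-weighted average of the (row, col) pixel coordinates.
    rows, cols = np.indices(img.shape)
    return np.array([(rows * img).sum() / total,
                     (cols * img).sum() / total])

The returned order is (row, col), which matches how the examples index pos_pixel[1] for the horizontal offset.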
Example No. 2
def flip_image2(data, topic):
    # Flip the image using OpenCV
    bridge = CvBridge()
    cv_image = bridge.imgmsg_to_cv2(data, "bgr8")
    # flip_cv_image = cv2.flip(cv_image,1)
    flip_cv_image = cv2.flip(cv_image, 0)
    outImage = flip_cv_image
    # cv2.imwrite('/home/mcube/background_1.png',outImage)
    if rospy.get_param('/is_high_viz', True):
        back_cv_image = cv2.imread('/home/mcube/background_1.png', 1)
        patch_cv_image, initial_img, contact_img = crop_contact(back_cv_image,
                                                                flip_cv_image,
                                                                gel_id=2,
                                                                is_zeros=True)
        COM_pos = get_center_of_mass(contact_img)
        COM_pos = np.nan_to_num(COM_pos)
        print(COM_pos)

        # Build an overlay on the initial image before publishing
        foreground = initial_img
        #foreground = foreground[:-140,55:-70]  #im_crop_grasp = img_grasp[0:-120,135:-135]
        # Scale the contact mask to 0-255 and cast to uint8 for the OpenCV calls below
        img = (contact_img * 255).astype(np.uint8)

        # threshold image
        ret, threshed_img = cv2.threshold(
            cv2.cvtColor(img, cv2.COLOR_BGR2GRAY), 127, 255, cv2.THRESH_BINARY)
        # find contours (cv2.RETR_TREE returns the full hierarchy, not just external ones)
        # NOTE: the 3-value return is the OpenCV 3 signature; OpenCV 4 returns (contours, hier)
        image, contours, hier = cv2.findContours(threshed_img, cv2.RETR_TREE,
                                                 cv2.CHAIN_APPROX_SIMPLE)

        # Keep the min-area rectangle with the largest area among all contours
        biggest_area = 0
        biggest_rect = None
        for c in contours:

            # get the min area rect
            rect = cv2.minAreaRect(c)
            area = float(rect[1][0]) * float(rect[1][1])
            if biggest_area < area:
                biggest_rect = rect
                biggest_area = area
        if biggest_rect:
            box = cv2.boxPoints(biggest_rect)
            # convert the floating-point corner coordinates to int
            box = np.int0(box)
            # draw the tilted min-area rectangle in red
            cv2.drawContours(img, [box], 0, (0, 0, 255), 2)
            cv2.rectangle(
                img,
                (int(biggest_rect[0][0] - 10), int(biggest_rect[0][1] - 10)),
                (int(biggest_rect[0][0] + 10), int(biggest_rect[0][1] + 10)),
                (0, 255, 0), -1)
            cv2.rectangle(img, (int(COM_pos[1] - 10), int(COM_pos[0] - 10)),
                          (int(COM_pos[1] + 10), int(COM_pos[0] + 10)),
                          (0, 0, 255), -1)

            # Draw all contours in cyan (drawContours modifies img in place)
            cv2.drawContours(img, contours, -1, (255, 255, 0), 2)
            # Blending weight for the overlay
            alpha = 0.8

            #img = cv2.resize(img, (foreground.shape[1], foreground.shape[0]))
            # Add the masked foreground and background.
            outImage = cv2.addWeighted(foreground, alpha, img, 1 - alpha, 0)

            # Load the nut overlay icon; it is rotated below to match the contact angle
            rotated = cv2.pyrDown(
                cv2.imread('/home/mcube/vertical_nut.png',
                           cv2.IMREAD_UNCHANGED))
            rows = rotated.shape[0]
            cols = rotated.shape[1]

            if (gripper.getGripperopening() > 0.03):
                # Rotate by the angle of the largest rectangle (biggest_rect, not
                # the loop-leftover rect, which belongs to the last contour visited)
                if biggest_rect[1][0] > biggest_rect[1][1]:
                    M = cv2.getRotationMatrix2D((cols / 2, rows / 2),
                                                biggest_rect[2] + 90, 1)
                else:
                    M = cv2.getRotationMatrix2D((cols / 2, rows / 2),
                                                biggest_rect[2], 1)
                rotated = cv2.warpAffine(rotated, M, (cols, rows))
            else:
                rotated = cv2.imread('/home/mcube/hor2_nut.png',
                                     cv2.IMREAD_UNCHANGED)

            rotated = cv2.resize(rotated,
                                 (outImage.shape[1], outImage.shape[0]))
            outImage = np.concatenate([outImage, rotated], axis=1)

        else:
            outImage = foreground

    flip_image_pub = rospy.Publisher(topic,
                                     sensor_msgs.msg.Image,
                                     queue_size=10)
    flip_image_pub.publish(bridge.cv2_to_imgmsg(outImage, "bgr8"))
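
crop_contact, used by every example, is also not shown. A plausible background-subtraction sketch that matches the three-value unpacking seen above; the threshold value and the gel_id/is_zeros semantics are assumptions:

import cv2
import numpy as np

def crop_contact(background, current, gel_id=1, is_zeros=True):
    # Difference between the live GelSight frame and the stored background.
    diff = cv2.absdiff(current, background)
    gray = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
    # Pixels that changed noticeably are treated as the contact patch.
    _, mask = cv2.threshold(gray, 30, 255, cv2.THRESH_BINARY)
    if is_zeros:
        # Zero out everything outside the contact patch.
        patch = cv2.bitwise_and(current, current, mask=mask)
    else:
        patch = current.copy()
    # 3-channel float mask in [0, 1], matching the contact_img * 255 usage above.
    contact = np.dstack([mask] * 3).astype(np.float32) / 255.0
    return patch, current, contact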
Example No. 3
    def use_center_patch(self, model, image_list, x_validation, rgb_img,
                         out_dict, tcp_pose, binId):
        # Crop and Resize
        img0 = scipy.misc.imresize(image_list[0], (224, 224, 3))
        img1 = scipy.misc.imresize(image_list[1], (224, 224, 3))
        img0 = scipy.misc.imresize(
            crop_gelsight(img0,
                          bottom_edge=40,
                          top_edge=0,
                          left_edge=15,
                          right_edge=18), (224, 224, 3))
        img1 = scipy.misc.imresize(
            crop_gelsight(img1,
                          bottom_edge=25,
                          top_edge=10,
                          left_edge=37,
                          right_edge=25), (224, 224, 3))
        pos_pixel_img0 = get_center_of_mass(img0)
        pos_pixel_img1 = get_center_of_mass(img1)
        pos_pixel_avg = np.mean(np.array([pos_pixel_img0, pos_pixel_img1]),
                                axis=0)  #TODO: check axis

        # Compute actual motion for the robot and check if there is collision
        pos_world_0 = convert_image2world(pos_pixel_img0)  #np.array([y,-z])
        delta_pos_0 = np.array([0, pos_world_0[0], -pos_world_0[1]])
        is_collision_0 = check_collision(tcp_pose, delta_pos_0, self.listener,
                                         self.br, binId)
        pos_world_1 = convert_image2world(pos_pixel_img1)  #np.array([y,-z])
        delta_pos_1 = np.array([0, pos_world_1[0], -pos_world_1[1]])
        is_collision_1 = check_collision(tcp_pose, delta_pos_1, self.listener,
                                         self.br, binId)
        pos_world_avg = convert_image2world(pos_pixel_avg)  #np.array([y,-z])
        delta_pos_avg = np.array([0, pos_world_avg[0], -pos_world_avg[1]])
        is_collision_avg = check_collision(tcp_pose, delta_pos_avg,
                                           self.listener, self.br, binId)

        # Translate in all the possible options
        img0_0 = translate_image(img0, 225 / 2 - pos_pixel_img0[1],
                                 0)  #, borderValue=0#img0.mean(1).mean(0))
        img1_0 = translate_image(img1, 225 / 2 - pos_pixel_img0[1],
                                 0)  #, borderValue=0#img1.mean(1).mean(0))
        img0_1 = translate_image(img0, 225 / 2 - pos_pixel_img1[1],
                                 0)  #, borderValue=0#img0.mean(1).mean(0))
        img1_1 = translate_image(img1, 225 / 2 - pos_pixel_img1[1],
                                 0)  #, borderValue=0#img1.mean(1).mean(0))
        img0_avg = translate_image(img0, 225 / 2 - pos_pixel_avg[1],
                                   0)  #, borderValue=0#img0.mean(1).mean(0))
        img1_avg = translate_image(img1, 225 / 2 - pos_pixel_avg[1],
                                   0)  #, borderValue=0#img1.mean(1).mean(0))

        # Preprocess all the images
        img0_0 = preprocess_image(img0_0)
        img1_0 = preprocess_image(img1_0)
        img0_1 = preprocess_image(img0_1)
        img1_1 = preprocess_image(img1_1)
        img0_avg = preprocess_image(img0_avg)
        img1_avg = preprocess_image(img1_avg)
        img0 = preprocess_image(img0)
        img1 = preprocess_image(img1)

        # Evaluate the score of each image
        score_0 = model.predict(
            [np.expand_dims(img0_0, axis=0),
             np.expand_dims(img1_0, axis=0)])[0][0]
        score_1 = model.predict(
            [np.expand_dims(img0_1, axis=0),
             np.expand_dims(img1_1, axis=0)])[0][0]
        score_avg = model.predict([
            np.expand_dims(img0_avg, axis=0),
            np.expand_dims(img1_avg, axis=0)
        ])[0][0]
        score_initial = model.predict(
            [np.expand_dims(img0, axis=0),
             np.expand_dims(img1, axis=0)])[0][0]
        # Find the best option among the CoM motions
        best_score = max([
            score_initial, score_0 * (1 - int(is_collision_0)),
            score_1 * (1 - int(is_collision_1)),
            score_avg * (1 - int(is_collision_avg))
        ])

        if best_score == score_0:
            out_dict['images'].append(img0_0)
            out_dict['images2'].append(img1_0)
            out_dict['delta_pos'].append(delta_pos_0)
            print('Moving to the CoM of Gelsight 0')
        elif best_score == score_1:
            out_dict['images'].append(img0_1)
            out_dict['images2'].append(img1_1)
            out_dict['delta_pos'].append(delta_pos_1)
            print('Moving to the CoM of Gelsight 1')
        elif best_score == score_avg:
            out_dict['images'].append(img0_avg)
            out_dict['images2'].append(img1_avg)
            out_dict['delta_pos'].append(delta_pos_avg)
            print('Moving to the CoM of the average')
        else:  # Do nothing
            # TODO: add the patch detection if needed !!!
            # img0/img1 were already preprocessed above, so append them as-is
            out_dict['images'].append(img0)
            out_dict['images2'].append(img1)
            out_dict['delta_pos'].append([0, 0, 0])
        return out_dict
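
translate_image shifts a tactile image so that the chosen center of mass lands on the image center. It is not defined in these snippets; a minimal sketch via an affine warp, assuming (dx, dy) pixel offsets:

import cv2
import numpy as np

def translate_image(img, dx, dy):
    # 2x3 affine matrix encoding a pure translation of (dx, dy) pixels.
    M = np.float32([[1, 0, dx],
                    [0, 1, dy]])
    h, w = img.shape[:2]
    # Out-of-frame regions are filled with black by default (borderValue=0).
    return cv2.warpAffine(img, M, (w, h))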
Example No. 4
    def use_center_patch(self, model, image_list, out_dict):
        # Crop and Resize
        img0 = scipy.misc.imresize(image_list[0], (224, 224, 3))
        img1 = scipy.misc.imresize(image_list[1], (224, 224, 3))
        # img0 = scipy.misc.imresize(crop_gelsight(img0, bottom_edge = 40, top_edge = 0, left_edge = 15, right_edge = 18), (224,224,3))
        # img1 = scipy.misc.imresize(crop_gelsight(img1,bottom_edge = 25, top_edge = 10, left_edge = 37, right_edge = 25), (224,224,3))
        pos_pixel_img0 = get_center_of_mass(img0)
        pos_pixel_img1 = get_center_of_mass(img1)
        pos_pixel_avg = np.mean(np.array([pos_pixel_img0, pos_pixel_img1]),
                                axis=0)  #TODO: check axis

        #x in pixel frame -> y in hand frame
        #y in pixel frame -> -z in hand frame
        # Compute actual motion for the robot and check if there is collision
        pos_world_0 = convert_image2world([225 / 2 - pos_pixel_img0[1],
                                           0])  #np.array([y,-z])
        delta_pos_0 = np.array([0, -pos_world_0[0], 0])

        pos_world_1 = convert_image2world([225 / 2 - pos_pixel_img1[1],
                                           0])  #np.array([y,-z])
        delta_pos_1 = np.array([0, -pos_world_1[0], 0])

        pos_world_avg = convert_image2world([225 / 2 - pos_pixel_avg[1],
                                             0])  #np.array([y,-z])
        delta_pos_avg = np.array([0, -pos_world_avg[0], 0])
        print('Pixel positions')
        print(pos_pixel_img0)
        print(pos_pixel_img1)
        print(pos_pixel_avg)
        print('World positions')
        print(pos_world_avg)

        out_dict['images_initial'].append(img0)
        out_dict['images2_initial'].append(img1)
        # Translate in all the possible options
        img0_0 = translate_image(img0, 225 / 2 - pos_pixel_img0[1],
                                 0)  #, borderValue=0#img0.mean(1).mean(0))
        img1_0 = translate_image(img1, 225 / 2 - pos_pixel_img0[1],
                                 0)  #, borderValue=0#img1.mean(1).mean(0))
        img0_1 = translate_image(img0, 225 / 2 - pos_pixel_img1[1],
                                 0)  #, borderValue=0#img0.mean(1).mean(0))
        img1_1 = translate_image(img1, 225 / 2 - pos_pixel_img1[1],
                                 0)  #, borderValue=0#img1.mean(1).mean(0))
        img0_avg = translate_image(img0, 225 / 2 - pos_pixel_avg[1],
                                   0)  #, borderValue=0#img0.mean(1).mean(0))
        img1_avg = translate_image(img1, 225 / 2 - pos_pixel_avg[1],
                                   0)  #, borderValue=0#img1.mean(1).mean(0))

        out_dict['images_before'].append(img0_avg)
        out_dict['images2_before'].append(img1_avg)

        # Preprocess all the images
        img0_0 = preprocess_image(img0_0)
        img1_0 = preprocess_image(img1_0)
        img0_1 = preprocess_image(img0_1)
        img1_1 = preprocess_image(img1_1)
        img0_avg = preprocess_image(img0_avg)
        img1_avg = preprocess_image(img1_avg)
        img0 = preprocess_image(img0)
        img1 = preprocess_image(img1)

        out_dict['images'].append(img0_avg)
        out_dict['images2'].append(img1_avg)
        out_dict['delta_pos'].append(delta_pos_avg)
        print('Moving to the CoM of the average')

        best_score = 0
        '''
        # Evaluate the score of each image
        score_0 = model.predict([np.expand_dims(img0_0, axis=0), np.expand_dims(img1_0, axis=0)])[0][0]
        score_1 = model.predict([np.expand_dims(img0_1, axis=0), np.expand_dims(img1_1, axis=0)])[0][0]
        score_avg = model.predict([np.expand_dims(img0_avg, axis=0), np.expand_dims(img1_avg, axis=0)])[0][0]
        score_initial = model.predict([np.expand_dims(img0, axis=0), np.expand_dims(img1, axis=0)])[0][0]
        # Find best way use CoM motion
        best_score = score_avg #max([score_initial, score_0,score_1,score_avg])

        if best_score == score_0:
            out_dict['images'].append(img0_0)
            out_dict['images2'].append(img1_0)
            out_dict['delta_pos'].append(delta_pos_0)
            print('Moving to the CoM of Gelsight 0')
        elif best_score == score_1:
            out_dict['images'].append(img0_1)
            out_dict['images2'].append(img1_1)
            out_dict['delta_pos'].append(delta_pos_1)
            print('Moving to the CoM of Gelsight 1')
        elif best_score == score_avg:
            out_dict['images'].append(img0_avg)
            out_dict['images2'].append(img1_avg)
            out_dict['delta_pos'].append(delta_pos_avg)
            print('Moving to the CoM of the average')
        else:  #Do nothing
            # TODO: add the patch detection if needed !!!
            img0 = preprocess_image(img0)
            img1 = preprocess_image(img1)
            out_dict['images'].append(img0)
            out_dict['images2'].append(img1)
            out_dict['delta_pos'].append([0, 0, 0])
        '''
        return out_dict, best_score
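
convert_image2world maps a pixel offset on the 224/225 px GelSight image to a metric offset in the hand frame (x pixel -> y hand, y pixel -> -z hand, per the comments above). Its calibration is not shown; a linear sketch with a made-up meters-per-pixel scale:

import numpy as np

# Assumed calibration: the ~225 px image spans roughly 5 cm of gel.
METERS_PER_PIXEL = 0.05 / 225.0

def convert_image2world(pixel_offset):
    # pixel x -> hand-frame y, pixel y -> hand-frame -z
    px = np.asarray(pixel_offset, dtype=float)
    return np.array([px[0] * METERS_PER_PIXEL,
                     -px[1] * METERS_PER_PIXEL])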
Example No. 5
    def planned_place(self):
        if len(self.get_objects()) == 1:
            fixed_container = [self.tote_ID]
        else:
            fixed_container = [1 - self.tote_ID]  # TODO_M: planner only accepts bins 1,2,3 and names them as 0,1,2

        if self.PlacingPlanner.visionType == 'real':  # Update the height map
            self.PlacingPlanner.update_real_height_map(fixed_container[0])

        # Get object dimensions:
        try:
            asking_for = '/obj/' + self.obj_ID
            aux_obj = rospy.get_param(asking_for)
            obj_dim = aux_obj['dimensions']
        except (KeyError, rospy.ROSException):
            print('Could not get the object dimensions; using a default size')
            obj_dim = [0.12, 0.12, 0.12]
        drop_pose = self.PlacingPlanner.place_object_local_best(
            obj_dim=obj_dim, containers=fixed_container
        )  #TODO_M : change placing to return drop_pose
        print('drop_pose', drop_pose)

        #~frank hack: drop pose
        drop_pose = get_params_yaml('bin' + str(fixed_container[0]) + '_pose')
        drop_pose[1] = drop_pose[1] + 0.03

        ## TODO: add here a correction depending on the CoM of the contact patch of the object?
        image_list = self.capture_images()
        # crop_contact returns (patch, initial_img, contact); keep only the patch
        # (use_COM is assumed to be a module-level flag defined elsewhere)
        image_list[0], _, _ = crop_contact(self.background_images[0],
                                           image_list[0],
                                           gel_id=1,
                                           is_zeros=use_COM)
        image_list[1], _, _ = crop_contact(self.background_images[1],
                                           image_list[1],
                                           gel_id=2,
                                           is_zeros=use_COM)
        img0 = scipy.misc.imresize(image_list[0], (224, 224, 3))
        img1 = scipy.misc.imresize(image_list[1], (224, 224, 3))

        pos_pixel_img0 = get_center_of_mass(img0)
        pos_pixel_img1 = get_center_of_mass(img1)
        pos_pixel_avg = np.mean(np.array([pos_pixel_img0, pos_pixel_img1]),
                                axis=0)  #TODO: check axis

        #x in pixel frame -> y in hand frame
        #y in pixel frame -> -z in hand frame
        # Compute actual motion for the robot and check if there is collision

        pos_world_avg = convert_image2world([225 / 2 - pos_pixel_avg[1],
                                             0])  #np.array([y,-z])
        delta_pos_avg = np.array([0, -pos_world_avg[0], 0])
        print('Pixel positions: ', pos_pixel_avg, '. World positions: ',
              pos_world_avg)
        drop_pose[1] = drop_pose[1] + pos_world_avg[0]

        # Place object using grasping
        self.rel_pose, self.BoxBody = vision_transform_precise_placing_with_visualization(
            self.bbox_info, viz_pub=self.viz_array_pub, listener=self.listener)
        place(listener=self.listener,
              br=self.br,
              isExecute=self.isExecute,
              binId=fixed_container[0],
              withPause=self.withPause,
              rel_pose=self.rel_pose,
              BoxBody=self.BoxBody,
              place_pose=drop_pose,
              viz_pub=self.viz_array_pub,
              is_drop=False,
              recorder=self.gdr)
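
The lateral correction applied in planned_place reduces to simple pose arithmetic. A worked illustration with made-up numbers (the YAML pose layout is assumed to start with [x, y, z, ...]):

# Suppose the bin pose from the YAML starts with [x, y, z] = [1.0, 0.20, 0.5]
drop_pose = [1.0, 0.20, 0.5]
drop_pose[1] += 0.03               # fixed 3 cm offset applied in every example
pos_world_avg = [0.012, 0.0]       # CoM offset recovered from the GelSight images
drop_pose[1] += pos_world_avg[0]   # shift the place pose so the CoM lands centered
print(drop_pose)                   # [1.0, 0.242, 0.5]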