class GraspActionServer:
    def __init__(self):
        self.server = actionlib.SimpleActionServer('object_pick', pickAction, self.execute, False)
        self.result = pickResult()
        self.moveit = MoveIt()
        # Dimensions of the known objects, passed to MoveIt when clearing/picking them
        self.object_size = {
            "evergreen":  [0.215, 0.05,  0.095],
            "tomatosoup": [0.1,   0.03,  0.13],
            "basetech":   [0.16,  0.045, 0.17],
            "eraserbox":  [0.155, 0.05,  0.055],
            "usbhub":     [0.13,  0.07,  0.075]
        }
        self.server.start()

    def execute(self, goal):
        print('+++++++++++++++++++++++++++++++++++++++++++++++++')
        rospy.loginfo('Picking request received')
        self.moveit.close_fingers()
        x, y, z = goal.request.position
        z_max = goal.request.z_max
        rospy.loginfo("Received z_max from behav: {}".format(z_max))
        rospy.loginfo("Received z from behav: {}".format(z))
        success = self.moveit.clear_object(
            x, y, z, z_max, goal.request.yaw,
            self.object_size[goal.request.object_label])
        self.result = pickResult()  # Reset object array
        if success:
            self.server.set_succeeded(self.result)
        else:
            # Picking failed: return the arm to base before reporting failure
            self.moveit.rtb()
            self.server.set_aborted(self.result)
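For context, a pick request can be sent to this server with a standard actionlib client. The sketch below is an assumption-laden illustration, not code from the repository: the message package name (grasp_msgs) and the goal values are placeholders, and goal.request.position is assumed to be a float64[3] array because execute() unpacks it as x, y, z.

#!/usr/bin/env python
# Hypothetical client for the 'object_pick' action server defined above.
import rospy
import actionlib
from actionlib_msgs.msg import GoalStatus
from grasp_msgs.msg import pickAction, pickGoal   # placeholder package name

rospy.init_node('pick_client')
client = actionlib.SimpleActionClient('object_pick', pickAction)
client.wait_for_server()

goal = pickGoal()
goal.request.position = [0.45, -0.10, 0.05]   # assumed x, y, z of the object
goal.request.z_max = 0.15                     # assumed top of the object
goal.request.yaw = 0.3                        # assumed orientation in radians
goal.request.object_label = "tomatosoup"      # must be a key of object_size

client.send_goal(goal)
client.wait_for_result()
if client.get_state() == GoalStatus.SUCCEEDED:
    rospy.loginfo("Pick succeeded")
else:
    rospy.loginfo("Pick aborted, arm returned to base")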
print "Deg: ", degrees(prediction) if np.array_equal(zero_image, image_depth) or np.array_equal(one_image,image_depth): print 'zero image' # uncomment to show binary depth image cv2.imshow("binary depth image" , image_depth[0]) else: #uncomment to show binary depth image cv2.imshow("binary depth image", image_depth[0]) pass image_depth = image_depth.reshape(1, 28, 28, 1) predictions.append(prediction) image_color = np.asarray([image_color]) output_classes = model_classification.predict([image_color])[0] class_index = np.argmax(output_classes) #uncomment to show images cv2.imshow("color image", image_color[0]) cv2.waitKey(1000) print "With orientation:", str(degrees(np.mean(predictions))) print "Box class:", class_index # Call the grasp function # moveit.grasp(x, y, z, prediction, object_size[class_index]) moveit.clear_object(x, y, z, z_max, prediction, object_size[class_index]) moveit.rtb()