def addInfoText(self, bodyPos, bodyEuler, linearVel, angularVel):
    """Draw per-step debug overlays (distance, attitude, velocities) at the body.

    Fix for the old "memleak" TODO: the two cross-hair lines attached to the
    robot body were created on every call but never tracked, so they piled up
    in the visualizer forever.  They are now stored in newDebugInfo and removed
    together with the other items on the next call.

    :param bodyPos: world position of the body (x, y, z)
    :param bodyEuler: body orientation as Euler angles (radians)
    :param linearVel: linear velocity (x, y, z)
    :param angularVel: angular velocity (x, y, z)
    """
    if not self.debug:
        return
    # Compose the three text overlays.
    text = "Distance: {:.1f}m".format(
        math.sqrt(bodyPos[0]**2 + bodyPos[1]**2))
    text2 = "Roll/Pitch: {:.1f} / {:.1f}".format(
        math.degrees(bodyEuler[0]), math.degrees(bodyEuler[1]))
    text3 = "Vl: {:.1f} / {:.1f} / {:.1f} Va: {:.1f} / {:.1f} / {:.1f}".format(
        linearVel[0], linearVel[1], linearVel[2],
        angularVel[0], angularVel[1], angularVel[2])
    x, y = bodyPos[0], bodyPos[1]
    newDebugInfo = [
        p.addUserDebugLine([x, y, 0], [x, y, 1], [0, 1, 0]),
        p.addUserDebugText(text, [x + 0.03, y, 0.6],
                           textColorRGB=[1, 1, 1], textSize=1.0),
        p.addUserDebugText(text2, [x + 0.03, y, 0.5],
                           textColorRGB=[1, 1, 1], textSize=1.0),
        p.addUserDebugText(text3, [x + 0.03, y, 0.4],
                           textColorRGB=[1, 1, 1], textSize=1.0),
        # Body-frame cross hair; previously leaked because it was never tracked.
        p.addUserDebugLine([-0.3, 0, 0], [0.3, 0, 0], [0, 1, 0],
                           parentObjectUniqueId=self.quadruped,
                           parentLinkIndex=1),
        p.addUserDebugLine([0, -0.2, 0], [0, 0.2, 0], [0, 1, 0],
                           parentObjectUniqueId=self.quadruped,
                           parentLinkIndex=1),
    ]
    # Drop the previous frame's items only after the new ones exist,
    # so the overlay never flickers to empty.
    for old_id in self.oldDebugInfo:
        p.removeUserDebugItem(old_id)
    self.oldDebugInfo = newDebugInfo
def resetBody(self):
    """Clear leftover debug drawings and put the robot back at its start pose."""
    for debug_item in self.oldDebugInfo:
        p.removeUserDebugItem(debug_item)
    # Identity orientation quaternion, zero twist.
    p.resetBasePositionAndOrientation(self.quadruped, self.init_position,
                                      [0, 0, 0, 1])
    p.resetBaseVelocity(self.quadruped, [0, 0, 0], [0, 0, 0])
def go_to_target(self, end_position, end_orientation, precision=0.01):
    """Drive the robot toward end_position using the yaw/position PID loops.

    Blocks until the planar offset to the target is below `precision`
    (NOTE(review): the comparison is on the SQUARED distance — no sqrt —
    so `precision` is in m^2; confirm that is intended) or ROS shuts down.

    :param end_position: target position, broadcast as the 'target' TF frame
    :param end_orientation: target orientation quaternion for that frame
    :param precision: squared-distance stop threshold
    """
    rate = rospy.Rate(100)  # control loop at 100 Hz
    # Refresh the 'Target' marker in the visualizer.
    p.removeUserDebugItem(self.text_handler)
    self.text_handler = p.addUserDebugText('Target', end_position,
                                           textColorRGB=[1, 0, 1])
    while not rospy.is_shutdown():
        self.pub_msg()
        # Re-broadcast the target frame each cycle so the lookup stays fresh.
        self.br.sendTransform(end_position, end_orientation, rospy.Time.now(),
                              'target', 'world')
        try:
            (trans, rot) = self.ls.lookupTransform('robot', 'target',
                                                   rospy.Time(0))
        except (tf.LookupException, tf.ConnectivityException,
                tf.ExtrapolationException):
            # TF tree not ready yet; retry on the next cycle.
            continue
        # Heading error toward the target (robot frame), fed to the yaw PID.
        yaw_feedback = np.arctan2(trans[1], trans[0])
        yaw_out = self.yaw_pid.step(feedback=yaw_feedback, expect=0)
        x_out = self.pos_pid.step(feedback=trans[0], expect=0)
        if ((end_position[0] - self.pos_msg.pose.position.x)**2 +
                (end_position[1] - self.pos_msg.pose.position.y)**2) < precision:
            break  # close enough — stop commanding
        elif np.abs(x_out) < 5:
            # Small forward command: suppress rotation (presumably to avoid
            # yaw chatter near the target — TODO confirm threshold of 5).
            self.velocity_rotate = 0
        else:
            self.velocity_rotate = yaw_out
        self.velocity_x = -x_out
        self.set_motor()
        rate.sleep()
def remove_user_debug(itemId, physicsClientId):
    """Delete one debug item from the given physics server.

    Args:
        itemId : unique id for debug object
        physicsClientId: unique id for physics server.
    """
    pb.removeUserDebugItem(itemUniqueId=itemId,
                           physicsClientId=physicsClientId)
def remove_debug(self, name):
    """Delete the debug item registered under `name`.

    Returns True when the item existed and was removed, False otherwise.
    """
    if name not in self.debug_names:
        return False
    uid = self.debug_names[name]
    p.removeUserDebugItem(itemUniqueId=uid, physicsClientId=self.sim.id)
    del self.debug_names[name]
    return True
def _resetDebugLine(self):
    """
    INTERNAL METHOD, remove all debug lines.

    Idiom fix: iterate the stored ids directly instead of indexing via
    range(len(...)); behavior is unchanged and the list is still cleared.
    """
    for ray_id in self.ray_ids:
        pybullet.removeUserDebugItem(ray_id, self.physics_client)
    self.ray_ids = []
def flush():
    """Process and clear both pending-removal queues.

    Bug fix: the original reset only remove_user_item_indices, so every
    subsequent flush() re-issued p.removeBody() for bodies that were
    already deleted.  Both queues are now cleared after draining.
    """
    global remove_user_item_indices
    global remove_body_indices
    for idx in remove_user_item_indices:
        p.removeUserDebugItem(idx)
    for idx in remove_body_indices:
        p.removeBody(idx)
    remove_user_item_indices = []
    remove_body_indices = []
def _reward(self):
    """Compute the shaped reward for the current control step.

    The reward is a weight-normalized mix of five terms: gait-mimic
    similarity, body height, upright orientation, heading toward the
    target, and speed toward the target.  Reaching the current target
    (< 0.4 m) advances to the next commanded waypoint.  Termination
    overrides everything with a flat -10.
    """
    self._t += 1  # step counter; also drives the reference gait phase below
    current_base_position = self.aida.GetBasePosition()
    distToTarget = self.aida.distToTarget()
    # Waypoint reached: cycle to the next command and redraw its marker.
    if(np.linalg.norm(distToTarget) < 0.4):
        self._currentObjective = (self._currentObjective+1)%len(self._commands)
        self.aida.setTarget(self._commands[self._currentObjective])
        if(self._is_render):
            pybullet.removeUserDebugItem(self._shapeID)
            self._shapeID = pybullet.addUserDebugLine(
                self._commands[self._currentObjective]+[0],
                self._commands[self._currentObjective]+[1],
                lineColorRGB=[0,1,0.5], lineWidth=10)
    #if(self._is_render):
    #  self.drawTarget(self.aida._targetPoint)
    # Gaussian-shaped reward around a nominal 0.7 m body height.
    height_reward = np.exp(-((current_base_position[2]-0.7)**2)/self._height_forgiveness)
    orientation = self.aida.GetBaseOrientation()
    rot_mat = self._pybullet_client.getMatrixFromQuaternion(orientation)
    # Last row of the flattened 3x3 matrix = body z-axis expressed in world.
    local_up = rot_mat[6:]
    orientation_reward = np.exp(-((np.dot(np.asarray([0, 0, 1]), np.asarray(local_up))-1)**2)/self._orientation_forgiveness)
    # Unit vector toward the target.  NOTE(review): the in-place /= assumes
    # distToTarget is an ndarray (a tuple would raise) — confirm upstream type.
    dirTo = distToTarget
    dirTo /= np.linalg.norm(dirTo)
    # NOTE(review): rot_mat[:2] slices the flattened matrix; /= below likewise
    # assumes an array-like, not a plain tuple — confirm.
    actualDir = rot_mat[:2]
    actualDir /= np.linalg.norm(actualDir)
    direction_reward = np.exp(-((np.dot(actualDir, -dirTo)-1)**2)/self._direction_forgiveness)
    # Planar speed projected onto the target direction, squashed to (-1, 1).
    x = np.dot(np.array(self.aida.GetBaseLinearVelocity()[0:2]), dirTo)
    speed_reward = np.arctan(self._speed_forgiveness*x)/np.pi*2
    # Reference gait: two phase-offset 3-value leg trajectories, tiled over 4 legs.
    t = self._t
    speed = self._ideal_speed
    offset = np.pi
    actions_unsync = [np.sin(t * speed + offset-np.pi)*0.3,
                      -0.65 + np.cos(t * speed + offset)*0.2,
                      0.5]
    offset = 0
    actions = [np.sin(t * speed + offset-np.pi)*0.4,
               -0.75 + np.cos(t * speed + offset)*0.2,
               0.5]
    targetPos = actions_unsync + actions_unsync + actions + actions
    # First 12 observation entries — presumably the 12 joint angles; verify.
    realPos = self._get_observation()[0:12]
    diff = np.linalg.norm(np.array(realPos)-np.array(targetPos))
    #rewardDiff = np.exp(-self._mimic_forgiveness*diff)
    #rewardDiff = 1-diff/self._mimic_forgiveness
    rewardDiff = (1-np.power((diff/2), 0.4))*self._mimic_forgiveness
    # Normalize the weighted sum by the total weight.
    rewardDivider = self._mimic_weight + self._height_weight + self._orientation_weight + self._direction_weight + self._speed_weight
    reward = (self._mimic_weight/rewardDivider * rewardDiff +
              self._height_weight/rewardDivider * height_reward +
              self._orientation_weight/rewardDivider * orientation_reward +
              self._direction_weight/rewardDivider * direction_reward +
              self._speed_weight/rewardDivider * speed_reward)
    if(self._termination()):
        reward = -10  # episode-ending state overrides the shaped reward
    #pybullet.removeUserDebugItem(self._rewardLineID)
    #self._rewardLineID = pybullet.addUserDebugLine([0,0,0],[0,0,reward],lineColorRGB=[0.5,1,0], lineWidth=10)
    self._logRewardTab += [[height_reward, orientation_reward, direction_reward, speed_reward, rewardDiff, reward]]
    return reward
def pile_scenario(n, vis, output, debug):
    """Run `n` pile-grasping trials: build a 5-object pile, then repeatedly
    predict and execute grasps until the pile is cleared or 3 straight failures.

    Fixes: the inner grasp loop reused the trial counter `i` (shadowing),
    and `straight_fails` was initialized twice per trial.

    :param n: number of trials
    :param vis: draw the predicted grasp line in the GUI
    :param output: show the grasp network's output visualization
    :param debug: pass debug flag to the Environment
    """
    objects = YcbObjects('objects/ycb_objects',
                         mod_orn=['ChipsCan', 'MustardBottle', 'TomatoSoupCan'],
                         mod_stiffness=['Strawberry'],
                         exclude=['CrackerBox', 'Hammer'])
    center_x, center_y = 0.05, -0.52
    network_path = 'network/trained-models/cornell-randsplit-rgbd-grconvnet3-drop1-ch32/epoch_19_iou_0.98'
    camera = Camera((center_x, center_y, 1.9), (center_x, center_y, 0.785),
                    0.2, 2.0, (224, 224), 40)
    env = Environment(camera, vis=vis, debug=debug, finger_length=0.06)
    generator = GraspGenerator(network_path, camera, 5)
    for trial in range(n):
        print(f'Trial {trial}')
        objects.shuffle_objects()
        env.move_away_arm()
        info = objects.get_n_first_obj_info(5)
        env.create_pile(info)
        straight_fails = 0
        while len(env.obj_ids) != 0 and straight_fails < 3:
            env.move_away_arm()
            env.reset_all_obj()
            rgb, depth, _ = camera.get_cam_img()
            grasps, save_name = generator.predict_grasp(
                rgb, depth, n_grasps=3, show_output=output)
            for grasp_idx, grasp in enumerate(grasps):
                x, y, z, roll, opening_len, obj_height = grasp
                if vis:
                    debugID = p.addUserDebugLine(
                        [x, y, z], [x, y, 1.2], [0, 0, 1], lineWidth=3)
                succes_grasp, succes_target = env.grasp(
                    (x, y, z), roll, opening_len, obj_height)
                if vis:
                    p.removeUserDebugItem(debugID)
                if succes_target:
                    straight_fails = 0
                    if save_name is not None:
                        os.rename(save_name + '.png',
                                  save_name + f'_SUCCESS_grasp{grasp_idx}.png')
                    break
                else:
                    straight_fails += 1
                if straight_fails == 3 or len(env.obj_ids) == 0:
                    break
            env.reset_all_obj()
        env.remove_all_obj()
def clear_visualization_after(self, prefix, index):
    """Remove all debug items stored under the consecutive keys
    prefix+index, prefix+(index+1), ... until a key is missing.

    Idiom fixes: isinstance() instead of `type(...) is int`, and the loop
    variable no longer shadows the builtin `id`.
    """
    name = "{}{}".format(prefix, index)
    while name in self._debug_ids:
        uids = self._debug_ids.pop(name)
        if isinstance(uids, int):
            uids = [uids]  # normalize a single id to a list
        for uid in uids:
            p.removeUserDebugItem(uid)
        index += 1
        name = "{}{}".format(prefix, index)
def isolated_obj_scenario(n, vis, output, debug):
    """For every YCB object, run `n` isolated grasp attempts and record results."""
    ycb = YcbObjects(
        'objects/ycb_objects',
        mod_orn=['ChipsCan', 'MustardBottle', 'TomatoSoupCan'],
        mod_stiffness=['Strawberry'])
    recorder = IsolatedObjData(ycb.obj_names, n, 'results')
    center_x, center_y = 0.05, -0.52
    network_path = 'network/trained-models/cornell-randsplit-rgbd-grconvnet3-drop1-ch32/epoch_19_iou_0.98'
    cam = Camera((center_x, center_y, 1.9), (center_x, center_y, 0.785),
                 0.2, 2.0, (224, 224), 40)
    workspace = Environment(cam, vis=vis, debug=debug)
    planner = GraspGenerator(network_path, cam, 5)
    for obj_name in ycb.obj_names:
        print(obj_name)
        for _ in range(n):
            path, mod_orn, mod_stiffness = ycb.get_obj_info(obj_name)
            workspace.load_isolated_obj(path, mod_orn, mod_stiffness)
            workspace.move_away_arm()
            rgb, depth, _ = cam.get_cam_img()
            grasps, save_name = planner.predict_grasp(
                rgb, depth, n_grasps=3, show_output=output)
            for rank, grasp in enumerate(grasps):
                recorder.add_try(obj_name)
                x, y, z, roll, opening_len, obj_height = grasp
                if vis:
                    line_id = p.addUserDebugLine([x, y, z], [x, y, 1.2],
                                                 [0, 0, 1])
                grasped, delivered = workspace.grasp(
                    (x, y, z), roll, opening_len, obj_height)
                if vis:
                    p.removeUserDebugItem(line_id)
                if grasped:
                    recorder.add_succes_grasp(obj_name)
                if delivered:
                    recorder.add_succes_target(obj_name)
                    if save_name is not None:
                        os.rename(save_name + '.png',
                                  save_name + f'_SUCCESS_grasp{rank}.png')
                    break
            workspace.reset_all_obj()
            workspace.remove_all_obj()
    recorder.write_json()
    summarize(recorder.save_dir, n)
def _reset(self):
    """Reset the environment and return the first (noisy) observation.

    With hard reset enabled the physics server is wiped and the world and
    robot are rebuilt from scratch; otherwise the existing robot is reset
    in place.  The robot is then settled, the command sequence restarted,
    and an initial stance driven for 50 steps.
    """
    if self._hard_reset:
        # Full teardown: wipe the server, reload the arena URDF and robot.
        self._pybullet_client.resetSimulation()
        self._pybullet_client.setPhysicsEngineParameter(
            numSolverIterations=int(self._num_bullet_solver_iterations))
        self._pybullet_client.setTimeStep(self._time_step)
        path = aida.getDataPath() + "/urdf/" + self._area + ".urdf"
        self._pybullet_client.loadURDF(path)
        #self._pybullet_client.loadURDF("%s/plane.urdf" % self._urdf_root)
        self._pybullet_client.setGravity(0, 0, -10)
        acc_motor = self._accurate_motor_model_enabled
        motor_protect = self._motor_overheat_protection
        self.aida = (aida.Aida(
            pybullet_client=self._pybullet_client,
            time_step=self._time_step,
            motor_velocity_limit=self._motor_velocity_limit,
            pd_control_enabled=self._pd_control_enabled,
            accurate_motor_model_enabled=acc_motor,
            motor_kp=self._motor_kp,
            motor_kd=self._motor_kd,
            torque_control_enabled=self._torque_control_enabled,
            motor_overheat_protection=motor_protect,
            on_rack=self._on_rack,
            kd_for_pd_controllers=self._kd_for_pd_controllers))
    else:
        # Soft reset: reuse the already-loaded URDF.
        self.aida.Reset(reload_urdf=False)
    #if self._env_randomizer is not None:
    #  self._env_randomizer.randomize_env(self)
    self._env_step_counter = 0
    self._last_base_position = [0, 0, 0]
    self._pybullet_client.resetDebugVisualizerCamera(
        self._cam_dist, self._cam_yaw, self._cam_pitch, [0, 0, 0])
    if not self._torque_control_enabled:
        # Let the PD / motor model settle before the episode starts.
        for _ in range(100):
            if self._pd_control_enabled or self._accurate_motor_model_enabled:
                self.aida.ApplyAction(-0.5 * np.ones(12))
            self._pybullet_client.stepSimulation()
    ret = self._noisy_observation()
    # Restart the command sequence and redraw the target marker.
    self._currentObjective = 0
    self.aida.setTarget(self._commands[self._currentObjective])
    if(self._is_render):
        pybullet.removeUserDebugItem(self._shapeID)
        self._shapeID = pybullet.addUserDebugLine(
            self._commands[self._currentObjective]+[0],
            self._commands[self._currentObjective]+[1],
            lineColorRGB=[0,1,0.5], lineWidth=10)
    # Drive toward the initial stance pose for 50 steps.  NOTE(review):
    # `ret` was captured above, before these steps — confirm that is intended.
    for i in range(50):
        self._step([0.0, -0.85, 0.5, 0.0, -0.85, 0.5, 0, -0.45, 0.5, 0, -0.45, 0.5])
    # init
    self._t = 0
    self._logRewardTab = []
    return ret
def show_text_over_head(self, text):
    """Show some text over robot's head"""
    camera_pos, _, _, _ = self.get_camera_target_positions()
    # Offset the label slightly behind and above the camera target point.
    head_pos = list(camera_pos)
    head_pos[1] -= 0.1
    head_pos[2] += 0.2
    if self.text_id >= 0:
        # Drop the previous label before drawing the new one.
        p.removeUserDebugItem(self.text_id)
        self.text_id = -1
    self.text_id = p.addUserDebugText(text, head_pos,
                                      textColorRGB=[0, 0, 0.8],
                                      textSize=1, lifeTime=0)
def removeDebugItems(*args):
    '''
    Remove one or multiple debug items.

    :param args: int, list, tuple — id(s) of the items to be removed;
        nested lists/tuples are flattened recursively.  With no
        arguments, ALL debug items are removed.

    Idiom fix: the two chained isinstance checks are collapsed into the
    tuple form isinstance(arg, (list, tuple)).
    '''
    if len(args) == 0:
        pybullet.removeAllUserDebugItems()
    else:
        for arg in args:
            if isinstance(arg, int):
                pybullet.removeUserDebugItem(arg)
            elif isinstance(arg, (list, tuple)):
                for item in arg:
                    SimEnv.removeDebugItems(item)
def step_simulation(self):
    """
    Hook p.stepSimulation(): advance physics one step; when visualizing,
    refresh the end-effector debug line and pace the loop.
    """
    p.stepSimulation()
    if self.vis:
        if self.debug:
            if self.eef_debug_lineID is not None:
                p.removeUserDebugItem(self.eef_debug_lineID)
            eef_xyz = p.getLinkState(self.robot_id, self.eef_id)[0:1]
            tip = np.array(eef_xyz[0])
            below = tip.copy()
            below[2] -= 0.5  # draw a half-meter line straight down from the EEF
            self.eef_debug_lineID = p.addUserDebugLine(tip, below, [1, 0, 0])
        time.sleep(self.SIMULATION_STEP_DELAY)
def reset(self):
    """Restore the hopper to its saved home state and return a fresh observation.

    Fix: the original opened with an unused getBasePositionAndOrientation
    call whose results (robot_position, robot_orientation) were never read;
    the pure query is removed.
    """
    self.jointIds = []
    self.paramIds = []
    p.removeAllUserParameters()  # Must remove and replace
    p.restoreState(self.stateId)
    self.defineHomePosition()
    # Clear the foot/body trace markers drawn during the last episode.
    for i in range(len(self.foot_points)):
        p.removeUserDebugItem(self.foot_points[i])
        p.removeUserDebugItem(self.body_points[i])
    return self.computeObservation()
def run_generate_scene(self):
    """Interactively pose the next body with GUI sliders.

    Returns True when the user pressed 'q' (stop entirely), False when
    Enter was pressed (advance to the next body).  Space toggles physics
    simulation of the posed body.

    Fixes: the Enter-key constant was the Python 2 long literal 65309L
    (a SyntaxError on Python 3), and `.next()` (Python 2 iterator
    protocol) is replaced with the version-agnostic next() builtin.
    """
    body_id = next(self.bodies)
    pos, ori = self._get_pos_and_ori(body_id)
    eulers = p.getEulerFromQuaternion(ori)
    sliders = [
        p.addUserDebugParameter('x', -2, 2, pos[0]),
        p.addUserDebugParameter('y', -2, 2, pos[1]),
        p.addUserDebugParameter('z', -2, 2, pos[2]),
        p.addUserDebugParameter('roll', -180, 180, np.rad2deg(eulers[0])),
        p.addUserDebugParameter('pitch', -180, 180, np.rad2deg(eulers[1])),
        p.addUserDebugParameter('yaw', -180, 180, np.rad2deg(eulers[2]))
    ]
    qKey = ord('q')
    pKey = ord(' ')
    nextKey = 65309  # Enter key code; was the Python-2-only literal 65309L
    simulate = False
    stop = False
    while True:
        keys = p.getKeyboardEvents()
        if qKey in keys and keys[qKey] & p.KEY_WAS_TRIGGERED:
            stop = True
            break
        if nextKey in keys and keys[nextKey] & p.KEY_WAS_TRIGGERED:
            break
        if pKey in keys and keys[pKey] & p.KEY_WAS_TRIGGERED:
            pos, ori = self._get_pos_and_ori(body_id)
            simulate = not simulate
        self.step()
        if not simulate:
            # While paused, keep snapping the body to the slider values.
            targets_values = [
                p.readUserDebugParameter(s_id) for s_id in sliders
            ]
            p.resetBasePositionAndOrientation(
                body_id, targets_values[:3],
                p.getQuaternionFromEuler(np.deg2rad(targets_values[3:6])))
    p.removeAllUserDebugItems()
    for slider in sliders:
        # NOTE(review): removeAllUserDebugItems above should already cover
        # these; the extra removals are kept for parity with the original.
        p.removeUserDebugItem(slider)
    return stop
def remove_marker(marker_id):
    """Delete the debug marker with the given id from the visualizer."""
    p.removeUserDebugItem(itemUniqueId=marker_id)
def _debug_links_lines_remove(self):
    """Remove every drawn link debug line and forget its id.

    Fix: the id list is now cleared afterwards, so a repeated call does
    not try to remove already-deleted items (consistent with the
    _resetDebugLine pattern used elsewhere in this codebase).
    """
    for line_id in self._debug_lines_ids:
        pybullet.removeUserDebugItem(line_id)
    self._debug_lines_ids = []
def clear_transitions(self):
    """Erase all transition ('t') debug lines and reset their id list."""
    key = 't'
    if key not in self._debug_ids:
        return
    for uid in self._debug_ids[key]:
        p.removeUserDebugItem(uid)
    self._debug_ids[key] = []
minitaur = p.loadURDF("quadruped/minitaur_single_motor.urdf", useFixedBase=True)
print(p.getNumJoints(minitaur))
p.resetDebugVisualizerCamera(cameraDistance=1,
                             cameraYaw=23.2,
                             cameraPitch=-6.6,
                             cameraTargetPosition=[-0.064, .621, -0.2])
motorJointId = 1
# Release the motor (force=0) so the joint spins freely; give it an
# initial velocity of 1 rad/s and let damping/friction slow it down.
p.setJointMotorControl2(minitaur, motorJointId, p.VELOCITY_CONTROL,
                        targetVelocity=100000, force=0)
p.resetJointState(minitaur, motorJointId, targetValue=0, targetVelocity=1)
angularDampingSlider = p.addUserDebugParameter("angularDamping", 0, 1, 0)
jointFrictionForceSlider = p.addUserDebugParameter("jointFrictionForce", 0, 0.1, 0)
textId = p.addUserDebugText("jointVelocity=0", [0, 0, -0.2])
p.setRealTimeSimulation(1)
while True:
    # Read the GUI sliders and re-apply them as friction torque and damping.
    friction = p.readUserDebugParameter(jointFrictionForceSlider)
    damping = p.readUserDebugParameter(angularDampingSlider)
    p.setJointMotorControl2(minitaur, motorJointId, p.VELOCITY_CONTROL,
                            targetVelocity=0, force=friction)
    p.changeDynamics(minitaur, motorJointId,
                     linearDamping=0, angularDamping=damping)
    time.sleep(0.01)
    # Swap the velocity read-out: add the fresh label, then drop the stale one.
    stale = textId
    textId = p.addUserDebugText(
        "jointVelocity=" + str(p.getJointState(minitaur, motorJointId)[1]),
        [0, 0, -0.2])
    p.removeUserDebugItem(stale)
def removeUserDebugItem(itemUniqueId: int):
    """Thin wrapper: forward the given debug-item id to pybullet for removal."""
    p.removeUserDebugItem(itemUniqueId)
def apply_push(iters, orn):
    """
    Applies a push to the cube with the corresponding parameters.

    :param iters: the number of iterations the push will last
    :param orn: the orientation of the gripper in Euler angles, only x and y
        coordinates specified

    Uses the module-level globals t, grip_id, cube_id, constraint_id,
    num_joints, DIST, PI, and the helpers eachIter() / push_result()
    (NOTE(review): all defined elsewhere in this file — their exact
    semantics are assumed, not visible here).  Appends the push parameters
    and result to test_output.txt.
    """
    # Reset block every 100 pushes (t is a module-level push counter).
    if (t % 100 == 0):
        p.resetBasePositionAndOrientation(grip_id, [0, 0, 4],
                                          p.getQuaternionFromEuler([0, 0, 0]))
        p.resetBasePositionAndOrientation(
            cube_id, [2, 2, 1],
            p.getQuaternionFromEuler([0, 0, random.uniform(0, 2 * PI)]))
    cube_pos, cube_orn = p.getBasePositionAndOrientation(cube_id)
    # Unit displacement along the cube's yaw heading.
    x_disp = math.cos(p.getEulerFromQuaternion(cube_orn)[2])
    y_disp = math.sin(p.getEulerFromQuaternion(cube_orn)[2])
    # Pre push position for gripper: DIST away from the cube along its heading.
    new_grip_x, new_grip_y = cube_pos[0] + DIST * x_disp, cube_pos[1] + DIST * y_disp
    # Gripper height derived from the pitch component of orn.
    height = orn[1] * (2 / PI) + 0.5
    grip_new_pos = [new_grip_x, new_grip_y, height]
    # Calculate new position/orientation of the gripper.
    vec = np.array([cube_pos[0] - grip_new_pos[0],
                    cube_pos[1] - grip_new_pos[1], 0])
    vec = vec / np.linalg.norm(vec)
    # Points gripper at cube (z orientation).
    new_orn = [orn[0], orn[1], (math.atan2(vec[0], -vec[1]) - PI / 2) % (2 * PI)]
    # Move gripper to new position; eachIter() steps the simulation once.
    for i in range(100):
        eachIter()
    p.changeConstraint(constraint_id, grip_new_pos,
                       p.getQuaternionFromEuler(new_orn))
    for i in range(100):
        eachIter()
    # Close the gripper joints before pushing.
    p.setJointMotorControlArray(grip_id, range(num_joints),
                                p.POSITION_CONTROL, [0.0] * num_joints)
    eachIter()
    # Draw line of desired trajectory.
    start_x = grip_new_pos[0]
    start_y = grip_new_pos[1]
    end_x = grip_new_pos[0] - 10 * DIST * x_disp
    end_y = grip_new_pos[1] - 10 * DIST * y_disp
    line = p.addUserDebugLine(grip_new_pos, [end_x, end_y, 0],
                              lineColorRGB=(1, 0, 0))  # addUserDebugText
    # Execute push: move the constraint target toward the cube over `iters` steps.
    for i in range(iters):
        new_grip_pos = [cube_pos[0] + (iters - i) / (iters / DIST) * x_disp,
                        cube_pos[1] + (iters - i) / (iters / DIST) * y_disp,
                        height]
        p.changeConstraint(constraint_id, new_grip_pos,
                           p.getQuaternionFromEuler(new_orn))
        eachIter()
    # Let the cube settle after the push.
    for i in range(400):
        eachIter()
    # Get result.
    new_cube_pos, new_cube_orn = p.getBasePositionAndOrientation(cube_id)
    result = push_result([start_x, start_y], [end_x, end_y],
                         [new_cube_pos[0], new_cube_pos[1]])
    # print("****************")
    # print("Result: ", result)
    p.removeUserDebugItem(line)
    # Data collection
    # TODO when actually training, change test_output.txt to real data collection file
    with open("test_output.txt", "a") as f:
        f.write(str(iters) + " " + str(list(orn)) + "\n" + str(result) + "\n")
    # Back up gripper so no collisions.
    for i in range(100):
        new_grip_pos = [cube_pos[0] + (i) / (100 / DIST) * x_disp,
                        cube_pos[1] + (i) / (100 / DIST) * y_disp,
                        height]
        p.changeConstraint(constraint_id, new_grip_pos,
                           p.getQuaternionFromEuler(new_orn))
        eachIter()
    for i in range(100):
        eachIter()
p.POSITION_CONTROL, targetPosition=pos) p.stepSimulation() img = p.getCameraImage(cam_width, cam_height, cam_view, cam_proj, renderer=p.ER_BULLET_HARDWARE_OPENGL, flags=p.ER_NO_SEGMENTATION_MASK) dist = dbo.query() if text is not None: p.removeUserDebugItem(text) if dist > 0.007: dist_txt = f"distance: {dist}" else: dist_txt = f"YAY! {dist}" coll = get_collision(cube) if coll > 0: dist_txt = "HIT - " + dist_txt text = p.addUserDebugText(dist_txt, [0, 0, 0.02], textColorRGB=[1, 1, 0], textSize=2) time.sleep(1. / frequency)
def remove_markers(marker_ids):
    """Delete every debug marker whose id appears in marker_ids."""
    for marker_id in marker_ids:
        p.removeUserDebugItem(marker_id)
def set_text(self, text=None):
    """Replace the on-screen debug text; None or "" only clears the old text.

    Fix: the original used `text is not ""` — an identity comparison with
    a string literal, which is implementation-dependent and a
    SyntaxWarning on CPython >= 3.8.  Replaced with a truthiness check,
    which matches the evident intent (skip None and empty strings).
    """
    if self.debug_text is not None:
        p.removeUserDebugItem(self.debug_text)
    if text:  # non-None and non-empty
        self.debug_text = p.addUserDebugText(
            text, [.1, -.1, .12], textColorRGB=[1, 0, 0], textSize=6)
def erase_pos(line_ids):
    """Remove each previously drawn debug line in line_ids."""
    for uid in line_ids:
        p.removeUserDebugItem(uid)
def remove(self):
    """Delete each axis debug line of this marker (no-op when none exist)."""
    # `or ()` covers both an empty list and a None/unset axes attribute,
    # matching the original early-return guard.
    for axis_uid in (self.axes or ()):
        p.removeUserDebugItem(axis_uid)
# Once the video is recorded, you can extract all individual frames using ffmpeg:
#   mkdir frames
#   ffmpeg -i test.mp4 "frames/out-%03d.png"
# By default, PyBullet runs at 240 Hz.
p.connect(p.GUI, options="--width=1920 --height=1080 --mp4=\"test.mp4\" --mp4fps=240")
p.setAdditionalSearchPath(pybullet_data.getDataPath())
p.configureDebugVisualizer(p.COV_ENABLE_GUI, 0)
p.configureDebugVisualizer(p.COV_ENABLE_SINGLE_STEP_RENDERING, 1)
p.loadURDF("plane.urdf")
# In 3 seconds, the object travels about 0.5*g*t^2 meter ~ 45 meter.
r2d2 = p.loadURDF("r2d2.urdf", [0, 0, 45])
p.changeDynamics(r2d2, -1, linearDamping=0)  # disable linear damping
p.setGravity(0, 0, -10)
for frame in range(3 * 240):
    label = p.addUserDebugText("frame " + str(frame), [0, 1, 0])
    p.stepSimulation()
    # Synchronize the visualizer (rendering frames for the video mp4)
    # with stepSimulation.
    p.configureDebugVisualizer(p.COV_ENABLE_SINGLE_STEP_RENDERING, 1)
    #print("r2d2 vel=", p.getBaseVelocity(r2d2)[0][2])
    p.removeUserDebugItem(label)
p.disconnect()
# Launch the two background workers: debug-slider updates and object tracking.
t1 = threading.Thread(target=update_debug_parameters)
t1.start()
t2 = threading.Thread(target=pepper_track_object)
t2.start()

color_debug = {"accuracy": 0, "class": None, "overlay_id": None}
while p.isConnected():
    # Grab a frame, classify it, and remember the best class + confidence.
    X = engine.screenshot()
    predicate = classifier.predict([np.array([entry]) for entry in X])
    index = np.argmax(predicate)
    color_debug["accuracy"] = predicate.T[index][0]
    color_debug["class"] = str(list(Colors)[index]).split('.')[1]
    # Replace the previous overlay text, if any, before drawing the new one.
    if color_debug["overlay_id"] is not None:
        p.removeUserDebugItem(color_debug["overlay_id"])
    color_debug["overlay_id"] = p.addUserDebugText(
        f"{color_debug['class']} @ {color_debug['accuracy']*100:.1f}%",
        (0, -0.7, 1.5),
        textSize=0.3,
        textColorRGB=(0, 0, 0),
        textOrientation=p.getQuaternionFromEuler((np.pi / 2, 0, np.pi / 2)),
        parentObjectUniqueId=engine._agent.getRobotModel())
    print(list(Colors)[index])
t1.join()
t2.join()
engine.stop()