def manual_control(args):
    """
    Read control orders from the command line to control the robot arm,
    including pose, delta position, delta orientation, home, etc.
    You can also try yumi/sim/IKC.py.

    Args:
        args: parsed command line arguments.
    """
    open_flag = False
    robot_control_type = 'ur5e' if args.robot_arm == 'franka' else args.robot_arm
    pos, quat, rot, euler = dispatch_control_order(robot_control_type + ':get_pose')
    ar.log_info('Right arm end effector pose: ' + str(pos) + ' ' + str(euler))
    while True:
        mov = input()  # read a control order
        if mov == ' ':  # open or close the gripper
            open_flag = not open_flag
            if open_flag:
                dispatch_control_order(robot_control_type + ':open')
            else:
                dispatch_control_order(robot_control_type + ':close')
        elif mov == 'pose':
            # read the position and orientation, then move the robot to the target pose
            temp = input().strip()  # example input: 0 0 0
            print(temp + '\n')
            center = np.array([float(p) for p in temp.split()])
            temp = input().strip()
            print(temp + '\n')
            quaternion = [float(p) for p in temp.split()]
            print('center: {}, ori: {}'.format(center, quaternion))
            dispatch_control_order(robot_control_type + ':set_pose', center, quaternion)
        elif mov == 'home':
            dispatch_control_order(robot_control_type + ':home')
        elif mov == 'dp':
            # read the position deltas, then move the robot
            dx = float(input().strip())
            dy = float(input().strip())
            dz = float(input().strip())
            target_pos = [pos[0] + dx, pos[1] + dy, pos[2] + dz]
            print('target pos: ', target_pos)
            dispatch_control_order(robot_control_type + ':set_pose', target_pos)
        elif mov == 'do':
            # read the orientation deltas in euler angles, then move the robot
            dx = float(input().strip())
            dy = float(input().strip())
            dz = float(input().strip())
            target_ori = [euler[0] + dx, euler[1] + dy, euler[2] + dz]
            print('target ori: ', target_ori)
            dispatch_control_order(robot_control_type + ':set_pose', pos=None, ori=target_ori)
        else:
            pass
        pos, quat, rot, euler = dispatch_control_order(robot_control_type + ':get_pose')
        ar.log_info('Right arm end effector pose: ' + str(pos) + ' ' +
                    str(euler) + ' ' + str(quat))
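# NOTE: dispatch_control_order is not defined in this file. The sketch below
# is a hypothetical minimal implementation, inferred from the calls above and
# assuming the standard airobot.Robot API; the caching dict and the exact
# command names are illustrative assumptions, not the project's actual code.
from airobot import Robot

_robots = {}


def dispatch_control_order(order, pos=None, ori=None):
    """Route an order of the form '<robot_type>:<command>' to a Robot."""
    robot_type, command = order.split(':')
    if robot_type not in _robots:
        _robots[robot_type] = Robot(robot_type)
    robot = _robots[robot_type]
    if command == 'get_pose':
        return robot.arm.get_ee_pose()  # (pos, quat, rot_mat, euler)
    if command == 'home':
        return robot.arm.go_home()
    if command == 'open':
        return robot.arm.eetool.open()
    if command == 'close':
        return robot.arm.eetool.close()
    if command == 'set_pose':
        return robot.arm.set_ee_pose(pos=pos, ori=ori)
    raise ValueError('unknown command: %s' % command)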
def __init__(self, connection_mode=None, realtime=False,
             opengl_render=True, options=''):
    self._in_realtime_mode = realtime
    self.opengl_render = opengl_render
    self._realtime_lock = threading.RLock()
    if connection_mode is None:
        self._client = p.connect(p.SHARED_MEMORY, options=options)
        if self._client >= 0:
            return
        else:
            connection_mode = p.DIRECT
    self._client = p.connect(connection_mode, options=options)
    is_linux = platform.system() == 'Linux'
    if connection_mode == p.DIRECT and is_linux and opengl_render:
        airobot.log_info('Using hardware OpenGL acceleration (eglRendererPlugin)!')
        # use the eglRendererPlugin (hardware OpenGL acceleration)
        egl = pkgutil.get_loader('eglRenderer')
        if egl:
            p.loadPlugin(egl.get_filename(), "_eglRendererPlugin",
                         physicsClientId=self._client)
        else:
            p.loadPlugin("eglRendererPlugin",
                         physicsClientId=self._client)
    self._gui_mode = connection_mode == p.GUI
    p.setGravity(0, 0, GRAVITY_CONST, physicsClientId=self._client)
    self.set_step_sim(not self._in_realtime_mode)
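# A minimal usage sketch of the constructor above, assuming it belongs to a
# BulletClient-style wrapper class (the class name BulletClient is an
# assumption; only the constructor is shown in this file).
import pybullet as p

# GUI mode with manual stepping (set_step_sim(True) is called internally
# because realtime=False).
client = BulletClient(connection_mode=p.GUI, realtime=False)
# Headless mode; on Linux this loads the EGL plugin for hardware rendering.
offscreen = BulletClient(connection_mode=p.DIRECT, opengl_render=True)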
def main():
    """
    This function demonstrates how to properly save depth images in Python.
    """
    robot = Robot('ur5e', pb_cfg={'gui': False,
                                  'opengl_render': False})
    focus_pt = [0, 0, 1]
    robot.cam.setup_camera(focus_pt=focus_pt, dist=3, yaw=90, pitch=0, roll=0)
    robot.arm.go_home()
    rgb, depth, seg = robot.cam.get_images(get_rgb=True, get_depth=True,
                                           get_seg=True)
    log_info(f'Depth image min: {depth.min()} m, max: {depth.max()} m.')
    # We first convert the depth image into CV_16U format.
    # The maximum value for CV_16U is 65535.
    # Since png files only keep integer values, we need to scale the
    # depth image to keep enough resolution.
    scale = 1000.0
    sdepth = depth * scale
    img_name = 'depth.png'
    # By converting the image data to CV_16U, the resolution of the
    # depth image data becomes 1 / scale m (0.001 m in this case).
    cv2.imwrite(img_name, sdepth.astype(np.uint16))
    re_depth = cv2.imread(img_name, cv2.IMREAD_UNCHANGED)
    sre_depth = re_depth / scale
    log_info(f'Saved depth image min: {sre_depth.min()} m, '
             f'max: {sre_depth.max()} m.')
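# The uint16 round trip loses at most 1 / scale meters per pixel, because
# astype(np.uint16) truncates rather than rounds. A self-contained sketch to
# verify that bound (the helper name is illustrative):
import numpy as np


def check_depth_roundtrip(depth, scale=1000.0):
    """Verify the quantization error bound of the uint16 encoding above.

    Assumes all depth values are below 65535 / scale meters, so nothing
    overflows the uint16 range.
    """
    sdepth = (depth * scale).astype(np.uint16)
    re_depth = sdepth / scale
    err = depth - re_depth  # truncation means re_depth <= depth
    # small slack for float rounding in depth * scale
    assert err.min() >= -1e-6 and err.max() < 1.0 / scale + 1e-6
    return err.max()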
def main():
    """
    This function demonstrates how to set up the camera and get rgb/depth
    images and the segmentation mask.
    """
    robot = Robot('ur5e', pb_cfg={'gui': True,
                                  'opengl_render': False})
    focus_pt = [0, 0, 1]  # ([x, y, z])
    robot.cam.setup_camera(focus_pt=focus_pt, dist=3, yaw=90, pitch=0, roll=0)
    robot.arm.go_home()
    rgb, depth, seg = robot.cam.get_images(get_rgb=True, get_depth=True,
                                           get_seg=True)
    plt.figure()
    plt.imshow(rgb)
    plt.figure()
    plt.imshow(depth * 25, cmap='gray', vmin=0, vmax=255)
    log_info('Maximum Depth (m): %f' % np.max(depth))
    log_info('Minimum Depth (m): %f' % np.min(depth))
    plt.figure()
    plt.imshow(seg)
    plt.show()
def poke(self, save_dir='data/image'):
    # set up directories
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    save_depth_dir = save_dir + '_depth'
    if not os.path.exists(save_depth_dir):
        os.makedirs(save_depth_dir)
    # poke loop
    self.go_home()
    self.reset_box()
    poke_index = 0
    while True:
        obj_pos, obj_quat, _, lin_vel = self.get_box_pose()
        obj_x, obj_y, obj_z = obj_pos
        # check that the cube is on the table and still
        if obj_z < self.table_surface_height or lin_vel[0] > 1e-3:
            print("object height: ", obj_z,
                  "object x linear velocity: ", lin_vel[0])
            self.reset_box()
            continue
        # log images
        rgb, dep = self.get_img()  # rgb 0-255 uint8; dep float32
        img = self.resize_rgb(rgb)  # img 0-255 float32
        cv2.imwrite(save_dir + '/' + str(poke_index) + '.png',
                    cv2.cvtColor(img, cv2.COLOR_RGB2BGR))  # shape (100, 200, 3)
        np.save(save_dir + '_depth/' + str(poke_index), dep)  # shape (480, 640)
        # generate a random poke through the box center
        poke_ang, poke_len, start_x, start_y, end_x, end_y = \
            self.sample_poke(obj_x, obj_y)
        # compute poke and object locations in image pixel space
        start_r, start_c = self.xyz2pixel(start_x, start_y)
        end_r, end_c = self.xyz2pixel(end_x, end_y)
        obj_r, obj_c = self.xyz2pixel(obj_x, obj_y)
        # execute the poke
        start_jpos, end_jpos = self.execute_poke(start_x, start_y, end_x, end_y)
        js1, js2, js3, js4, js5, js6 = start_jpos
        je1, je2, je3, je4, je5, je6 = end_jpos
        # log the poke
        with open(save_dir + '.txt', 'a') as file:
            file.write('%d %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f '
                       '%f %f %f %f %f %f %f %f %f %f %f %f %f %f %f\n' %
                       (poke_index, start_x, start_y, end_x, end_y,
                        obj_x, obj_y,
                        obj_quat[0], obj_quat[1], obj_quat[2], obj_quat[3],
                        js1, js2, js3, js4, js5, js6,
                        je1, je2, je3, je4, je5, je6,
                        start_r, start_c, end_r, end_c, obj_r, obj_c,
                        poke_ang, poke_len))
        # log the number of pokes
        if poke_index % 1000 == 0:
            log_info('number of pokes: %dk' % (poke_index // 1000))
        poke_index += 1
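# The log written above has one row per poke with 31 whitespace-separated
# columns. A hedged reader sketch (the helper name and dict keys are
# illustrative; the column order is taken from the write call above):
import numpy as np


def load_poke_log(log_path):
    """Load a poke log into named arrays."""
    data = np.atleast_2d(np.loadtxt(log_path))  # handle single-line logs
    return {
        'index': data[:, 0].astype(int),
        'start_xy': data[:, 1:3],
        'end_xy': data[:, 3:5],
        'obj_xy': data[:, 5:7],
        'obj_quat': data[:, 7:11],
        'start_jpos': data[:, 11:17],
        'end_jpos': data[:, 17:23],
        'start_px': data[:, 23:25],
        'end_px': data[:, 25:27],
        'obj_px': data[:, 27:29],
        'poke_ang': data[:, 29],
        'poke_len': data[:, 30],
    }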
def main():
    """
    Move the robot end effector to the desired pose.
    """
    robot = Robot('ur5e')
    robot.arm.go_home()
    tgt_pos = [0.45, 0.2, 1.3]
    robot.arm.set_ee_pose(tgt_pos)
    # the sleep statement is not necessary
    time.sleep(1)
    tgt_pos = [0.6, -0.15, 1.2]
    tgt_euler = [1.57, 0.0, 0.0]
    robot.arm.set_ee_pose(tgt_pos, tgt_euler)
    time.sleep(1)
    pos, quat, rot, euler = robot.arm.get_ee_pose()
    log_info('End effector pose:')
    log_info('Position:')
    log_info(pos)
    log_info('Euler angles:')
    log_info(euler)
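# A hedged sanity check one might append at the end of main() above: compare
# the commanded target with the achieved pose. The helper name and the 5 mm
# tolerance are illustrative choices, not properties of the controller.
import numpy as np


def check_ee_position(robot, tgt_pos, tol=5e-3):
    """Assert that the end effector reached tgt_pos to within tol meters."""
    pos, quat, rot, euler = robot.arm.get_ee_pose()
    pos_err = np.linalg.norm(np.array(pos) - np.array(tgt_pos))
    assert pos_err < tol, f'position error {pos_err:.4f} m exceeds {tol} m'
    return pos_err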
def main():
    env = URRobotGym(gui=False)
    ob = env.reset()
    log_info('\nThe action is a 5-dim vector A. \n'
             'The range of each element is [-1, 1].\n'
             'A[0] is dx, A[1] is dy, A[2] is dz;\n'
             'A[3] is the delta of the gripper orientation;\n'
             'A[4] is the gripper opening angle, \n'
             '-1 means opening the gripper, \n'
             '1 means closing the gripper.\n')
    image = plt.imshow(ob, interpolation='none', animated=True, label="cam")
    ax = plt.gca()
    for j in range(2):
        for i in range(10):
            ob, reward, done, info = env.step([1, 0, 0, 0, -1])
            image.set_data(ob)
            ax.plot([0])
            plt.pause(0.05)
        for i in range(10):
            ob, reward, done, info = env.step([-1, 0, 0, 0, -1])
            image.set_data(ob)
            ax.plot([0])
            plt.pause(0.05)
def poke(save_dir='data/image'):
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    save_depth_dir = save_dir + '_depth'
    if not os.path.exists(save_depth_dir):
        os.makedirs(save_depth_dir)
    index = 0
    curr_img, curr_dep = get_img()
    while True:
        obj_pos, obj_quat, lin_vel, _ = get_body_state(box_id)
        obj_ang = quat2euler(obj_quat)[2]  # -pi ~ pi
        obj_x, obj_y, obj_z = obj_pos
        # check that the cube is on the table
        if obj_z < box_z / 2.0 or lin_vel[0] > 1e-3:  # important that the box is still
            print(obj_z, lin_vel[0])
            reset_body(box_id, box_pos)
            continue
        while True:
            # choose a random poke point on the object
            poke_x = np.random.random() * box_size + obj_x - box_size / 2.0
            poke_y = np.random.random() * box_size + obj_y - box_size / 2.0
            # choose the poke angle about the z axis
            poke_ang = np.random.random() * np.pi * 2 - np.pi
            # choose the poke length
            poke_len = np.random.random() * poke_len_std + poke_len_mean
            # compute the starting and ending poke locations
            start_x = poke_x - poke_len * np.cos(poke_ang)
            start_y = poke_y - poke_len * np.sin(poke_ang)
            end_x = poke_x + poke_len * np.cos(poke_ang)
            end_y = poke_y + poke_len * np.sin(poke_ang)
            if end_x > workspace_min_x and end_x < workspace_max_x \
                    and end_y > workspace_min_y and end_y < workspace_max_y:
                break
        robot.arm.move_ee_xyz([start_x - home[0], start_y - home[1], 0], 0.015)
        robot.arm.move_ee_xyz([0, 0, min_height - rest_height], 0.015)
        js1, js2, js3, js4, js5, js6 = robot.arm.get_jpos()
        robot.arm.move_ee_xyz([end_x - start_x, end_y - start_y, 0], 0.015)
        je1, je2, je3, je4, je5, je6 = robot.arm.get_jpos()
        # important that we use move_ee_xyz, as set_ee_pose can throw the object in motion
        robot.arm.move_ee_xyz([0, 0, rest_height - min_height], 0.015)
        # move the arm away from the camera view
        go_home()  # important to have one set_ee_pose every loop to reset accumulated errors
        next_img, next_dep = get_img()
        # compute poke and object locations in image pixel space
        start_r, start_c = get_pixel(start_x, start_y)
        end_r, end_c = get_pixel(end_x, end_y)
        obj_r, obj_c = get_pixel(obj_x, obj_y)
        with open(save_dir + '.txt', 'a') as file:
            file.write('%d %f %f %f %f %f %f %f %f %f %f %f %f %f %f '
                       '%f %f %f %f %f %f %f %f %f %f %f %f %f %f\n' %
                       (index, start_x, start_y, end_x, end_y,
                        obj_x, obj_y,
                        obj_quat[0], obj_quat[1], obj_quat[2], obj_quat[3],
                        js1, js2, js3, js4, js5, js6,
                        je1, je2, je3, je4, je5, je6,
                        start_r, start_c, end_r, end_c, obj_r, obj_c))
        cv2.imwrite(save_dir + '/' + str(index) + '.png',
                    cv2.cvtColor(curr_img, cv2.COLOR_RGB2BGR))
        # cv2.imwrite(save_dir + '_depth/' + str(index) + '.png', curr_dep)
        np.save(save_dir + '_depth/' + str(index), curr_dep)
        curr_img = next_img
        curr_dep = next_dep
        if index % 1000 == 0:
            ar.log_info('number of pokes: %dk' % (index // 1000))
        index += 1
def main():
    """
    This function demonstrates how to move the robot arm to the
    desired joint positions.
    """
    robot = ar.Robot('yumi_grippers')
    robot.arm.go_home()
    robot.arm.right_arm.set_jpos([0.5, -1.0, -1.0, -0.0, -0.2, 1.0, -1.57])
    time.sleep(3)
    ar.log_info('Right arm joint positions: ')
    ar.log_info(np.round(robot.arm.right_arm.get_jpos(), 6))
    ar.log_info('Both arm joint positions: ')
    ar.log_info(np.round(robot.arm.get_jpos(), 6))
    ar.log_info('\n')
    robot.arm.right_arm.set_jpos(0.5, 'yumi_joint_3_r')
    time.sleep(3)
    robot.arm.left_arm.eetool.activate()
    robot.arm.left_arm.eetool.open()
    time.sleep(3)
    robot.arm.left_arm.eetool.set_pos(0.005)
    ar.log_info('Left gripper position: ')
    ar.log_info(np.round(robot.arm.left_arm.eetool.get_pos(), 6))
    ar.log_info('\n')
    time.sleep(3)
    robot.arm.left_arm.eetool.close()
    time.sleep(3)
    ar.log_info('Left gripper position at close: ')
    ar.log_info(np.round(robot.arm.left_arm.eetool.get_pos(), 6))
    ar.log_info('\n')
def main():
    """
    This function demonstrates how to get joint information such as
    joint positions/velocities/torques.
    """
    robot = Robot('ur5e_2f140', pb_cfg={'gui': True})
    robot.arm.go_home()
    log_info('\nJoint positions for all actuated joints:')
    jpos = robot.arm.get_jpos()
    log_info(np.round(jpos, decimals=3))
    joint = 'shoulder_pan_joint'
    log_info('Joint [%s] position: %.3f' %
             (joint, robot.arm.get_jpos(joint)))
    log_info('Joint velocities:')
    jvel = robot.arm.get_jvel()
    log_info(np.round(jvel, decimals=3))
    log_info('Joint torques:')
    jtorq = robot.arm.get_jtorq()
    log_info(np.round(jtorq, decimals=3))
    robot.arm.eetool.close()
    log_info('Gripper position (close): %.3f' % robot.arm.eetool.get_pos())
    robot.arm.eetool.open()
    log_info('Gripper position (open): %.3f' % robot.arm.eetool.get_pos())
def main():
    """
    This function demonstrates how to move the robot arm to the
    desired joint positions.
    """
    robot = ar.Robot('yumi_palms')
    robot.arm.go_home()
    robot.arm.right_arm.set_jpos([0.5, -1.0, -1.0, -0.0, -0.2, 1.0, -1.57])
    time.sleep(3)
    ar.log_info('Right arm joint positions: ')
    ar.log_info(np.round(robot.arm.right_arm.get_jpos(), 6))
    ar.log_info('Both arm joint positions: ')
    ar.log_info(np.round(robot.arm.get_jpos(), 6))
    ar.log_info('\n')
    robot.arm.right_arm.set_jpos(0.5, 'yumi_joint_3_r')
    time.sleep(3)
    robot.arm.set_jpos([0.05, -1.33, 0.87, 0.94, 0.41, 0.26, -2.01],
                       arm='left', ignore_physics=True)
    time.sleep(3)
    ar.log_info('Left arm joint positions: ')
    ar.log_info(np.round(robot.arm.left_arm.get_jpos(), 6))
    ar.log_info('Both arm joint positions: ')
    ar.log_info(np.round(robot.arm.get_jpos(), 6))
    ar.log_info('\n')
    robot.arm.set_jpos(0.5, joint_name='yumi_joint_3_l', arm='left',
                       ignore_physics=True)
    time.sleep(3)
    robot.arm.go_home(ignore_physics=True)
def main():
    """
    Save the whole scene info (rgb, depth, segmentation, point cloud,
    info.npy, info.txt) to a separate folder in the current dir
    (test_grasp). The folder is named data_x automatically, where x is
    the number of existing data folders in the current dir.
    """
    # load the robot and the object(s), and set the camera parameters
    args = make_parser().parse_args()
    robot_type = parse_robot_type(args.robot_arm)
    robot = ar.Robot(robot_type)
    robot.arm.go_home()
    robot.pb_client.load_urdf('plane.urdf')
    if args.multi_obj:
        _, poses = load_multi_object(robot)
    else:
        load_single_object(robot, args)
    time.sleep(1.3)
    robot.cam.setup_camera(focus_pt=args.cam_focus_pt,
                           camera_pos=args.cam_pos,
                           height=args.cam_height,
                           width=args.cam_width)
    # create the data_x folder
    current_dir = os.path.dirname(os.path.abspath(__file__))
    folder_num = len([
        name for name in os.listdir(current_dir)
        if os.path.isdir(os.path.join(current_dir, name))
    ]) - 1
    save_path = os.path.join(current_dir, 'data_' + str(folder_num))
    os.mkdir(save_path)
    # save the camera info to info.npy; in addition, save the object and
    # camera info to info.txt
    info = {
        'cam_intrinsic': robot.cam.cam_int_mat,
        'cam_view': robot.cam.view_matrix,
        'cam_external': robot.cam.cam_ext_mat,
        'cam_pos': args.cam_pos,
        'cam_focus_pt': args.cam_focus_pt,
        'cam_height': args.cam_height,
        'cam_width': args.cam_width
    }
    if args.multi_obj:
        info['object'] = poses
    else:
        info['object'] = [args.object_pos, args.object_ori]
    np.save(os.path.join(save_path, 'info.npy'), info)
    with open(os.path.join(save_path, 'info.txt'), 'w') as ci:
        if args.multi_obj:
            for i, pose in enumerate(poses):
                ci.write('object_{} position:\n'.format(i))
                ci.write(str(pose[0]))
                ci.write('\nobject_{} rotation:\n'.format(i))
                ci.write(str(pose[1]))
        else:
            ci.write('object position: \n')
            ci.write(str(args.object_pos) + '\n')
            ci.write('object rotation: \n')
            ci.write(str(args.object_ori) + '\n')
        ci.write('\ncamera position: \n')
        ci.write(str(args.cam_pos) + '\n')
        ci.write('camera focus point: \n')
        ci.write(str(args.cam_focus_pt) + '\n')
        ci.write('camera resolution: \n')
        ci.write(str(args.cam_height) + ' * ' + str(args.cam_width) + '\n')
        ci.write('camera view matrix: \n')
        ci.write(str(robot.cam.view_matrix) + '\n')
        ci.write('camera intrinsic matrix: \n')
        ci.write(str(robot.cam.cam_int_mat) + '\n')
        ci.write('camera external matrix: \n')
        ci.write(str(robot.cam.cam_ext_mat))
        ci.flush()
    # save rgb, depth, segmentation, and point cloud
    img, depth, seg = robot.cam.get_images(get_rgb=True, get_depth=True,
                                           get_seg=True)
    print('image shape: {}, depth shape: {}'.format(img.shape, depth.shape))
    cv2.imwrite(os.path.join(save_path, 'depth.jpg'),
                encode_depth_to_image(depth))
    b, g, r = cv2.split(img)
    img = cv2.merge([r, g, b])
    cv2.imwrite(os.path.join(save_path, 'rgb.jpg'), img)
    ar.log_info(f'Depth image min: {depth.min()} m, max: {depth.max()} m.')
    np.save(os.path.join(save_path, 'depth.npy'), depth)
    np.save(os.path.join(save_path, 'seg.npy'), seg)  # segmentation
    # get the point cloud data in the world frame
    pts = robot.cam.depth_to_point_cloud(depth, in_world=True)
    ar.log_info('point cloud shape: {}'.format(pts.shape))
    np.save(os.path.join(save_path, 'pc.npy'), pts)
    print('saved %s successfully' % os.path.join(save_path, 'pc.npy'))
    input()
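# info.npy stores a Python dict, so it must be read back with
# allow_pickle=True and unwrapped with .item(). A hedged loader sketch
# (the helper name is illustrative; file names match those written above):
import os

import numpy as np


def load_grasp_data(save_path):
    """Load the files saved by main() above from a data_x folder."""
    info = np.load(os.path.join(save_path, 'info.npy'),
                   allow_pickle=True).item()
    depth = np.load(os.path.join(save_path, 'depth.npy'))
    seg = np.load(os.path.join(save_path, 'seg.npy'))
    pts = np.load(os.path.join(save_path, 'pc.npy'))
    return info, depth, seg, pts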
def main():
    """
    Move the robot end effector to the desired pose via keyboard commands.
    """
    robot = ar.Robot('yumi_grippers')
    robot.arm.go_home()
    robot.pb_client.load_urdf('plane.urdf')
    load_objects(robot)
    robot_z_offset = 0.58
    robot.cam.setup_camera(focus_pt=[0, 0, 0.25 + robot_z_offset],
                           camera_pos=[1.4, 0, 0.3 + robot_z_offset])
    # robot.cam.setup_camera(focus_pt=[0, 0, 0.5], dist=1.5, height=800, width=800)
    root_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
    rgb_img_dir = os.path.join(root_path, 'rgb')
    if not os.path.exists(rgb_img_dir):
        os.makedirs(rgb_img_dir)
    rgb_index = 0
    pos, quat, rot, euler = robot.arm.get_ee_pose(arm='right')
    ar.log_info('Right arm end effector pose: ' + str(pos) + ' ' + str(euler))
    # initial right end effector pose:
    # [0.3196905 -0.53293526 0.27487105] [3.08614804 -0.56711174 0.58926922]
    pos_delta = 0.3
    euler_delta = 0.2
    open_flag = False
    target_pos, target_ori = [], []
    while True:
        mov = input()
        if mov == 'w':
            target_pos = [pos[0] - pos_delta, pos[1], pos[2]]
        elif mov == 'a':
            target_pos = [pos[0], pos[1] - pos_delta, pos[2]]
        elif mov == 's':
            target_pos = [pos[0] + pos_delta, pos[1], pos[2]]
        elif mov == 'd':
            target_pos = [pos[0], pos[1] + pos_delta, pos[2]]
        elif mov == 'r':
            target_pos = [pos[0], pos[1], pos[2] + pos_delta]
        elif mov == 'f':
            target_pos = [pos[0], pos[1], pos[2] - pos_delta]
        elif mov == 'z':
            target_ori = [euler[0] - euler_delta, euler[1], euler[2]]
        elif mov == 'x':
            target_ori = [euler[0] + euler_delta, euler[1], euler[2]]
        elif mov == 't':
            target_ori = [euler[0], euler[1] - euler_delta, euler[2]]
        elif mov == 'g':
            target_ori = [euler[0], euler[1] + euler_delta, euler[2]]
        elif mov == 'c':
            target_ori = [euler[0], euler[1], euler[2] - euler_delta]
        elif mov == 'v':
            target_ori = [euler[0], euler[1], euler[2] + euler_delta]
        elif mov == ' ':
            open_flag = not open_flag
            if open_flag:
                robot.arm.right_arm.eetool.open()
            else:
                robot.arm.right_arm.eetool.close()
            continue
        elif mov == 'q':
            img, depth, seg = robot.cam.get_images(get_rgb=True,
                                                   get_depth=True,
                                                   get_seg=True)
            b, g, r = cv2.split(img)
            img = cv2.merge([r, g, b])
            # scale = 25.
            # sdepth = depth * scale
            img_path = os.path.join(rgb_img_dir,
                                    str(rgb_index).zfill(5) + '_rgb.jpg')
            cv2.imwrite(img_path, img)
            # cv2.imwrite(os.path.join(rgb_img_dir, str(rgb_index).zfill(5) + '_depth.jpg'),
            #             sdepth.astype(np.uint16))
            # cv2.imwrite(os.path.join(rgb_img_dir, str(rgb_index).zfill(5) + '_seg.jpg'), seg)
            # Image.fromarray(seg).save(os.path.join(rgb_img_dir, str(rgb_index).zfill(5) + '_seg.png'))
            ar.log_info('Successfully saved rgb image ' + img_path)
            rgb_index += 1
            continue
        else:
            robot.arm.go_home()
            pos, quat, rot, euler = robot.arm.get_ee_pose(arm='right')
            ar.log_info('Right arm end effector position: ' + str(pos))
            continue
        if mov in ['w', 'a', 's', 'd', 'r', 'f']:
            robot.arm.set_ee_pose(target_pos, arm='right')
        else:
            robot.arm.set_ee_pose(ori=target_ori, arm='right')
        pos, quat, rot, euler = robot.arm.get_ee_pose(arm='right')
        ar.log_info('Right arm end effector pose: ' + str(pos) + ' ' + str(euler))
def main():
    """
    This function demonstrates how to load different kinds of objects
    and get state information of objects.
    """
    np.set_printoptions(precision=3, suppress=True)
    robot = Robot('ur5e_stick')
    robot.arm.go_home()
    ori = euler2quat([0, 0, np.pi / 2])
    robot.pb_client.load_urdf('table/table.urdf',
                              [1, 0, 0.4],
                              ori,
                              scaling=0.9)
    sphere_id = robot.pb_client.load_geom('sphere', size=0.05, mass=1,
                                          base_pos=[1, 0, 1.0],
                                          rgba=[0, 1, 0, 1])
    robot.pb_client.load_geom('box', size=0.05, mass=1,
                              base_pos=[1, 0.12, 1.0],
                              rgba=[1, 0, 0, 1])
    robot.pb_client.load_geom('box', size=[0.06, 0.02, 0.03], mass=1,
                              base_pos=[1.3, 0.12, 1.0],
                              rgba=[0, 0, 1, 1])
    cylinder_id = robot.pb_client.load_geom('cylinder', size=[0.06, 0.08],
                                            mass=1,
                                            base_pos=[0.8, -0.12, 1.0],
                                            rgba=[0, 1, 1, 1])
    duck_id = robot.pb_client.load_geom('mesh', mass=1,
                                        visualfile='duck.obj',
                                        mesh_scale=0.1,
                                        base_pos=[0.9, -0.4, 1.0],
                                        rgba=[0.5, 0.2, 1, 1])
    pos, quat, lin_vel, ang_vel = robot.pb_client.get_body_state(cylinder_id)
    log_info('Cylinder:')
    log_info('    position: %s' % np.array2string(pos, precision=2))
    log_info('    quaternion: %s' % np.array2string(quat, precision=2))
    log_info('    linear vel: %s' % np.array2string(lin_vel, precision=2))
    log_info('    angular vel: %s' % np.array2string(ang_vel, precision=2))
    log_info('Removing sphere')
    robot.pb_client.remove_body(sphere_id)
    time.sleep(2)
    log_info('Reset duck')
    robot.pb_client.reset_body(duck_id,
                               base_pos=[0.9, -0.4, 1.0],
                               base_quat=[0, 0, 0, 1],
                               lin_vel=[0, 2, 0],
                               ang_vel=[0, 0, 2])
    time.sleep(10)