import numpy as np
import pyflex


def random_pick_and_place(pick_num=10, pick_scale=0.01):
    """ Randomly pick a particle up and drop it, repeated pick_num times. """
    curr_pos = pyflex.get_positions().reshape(-1, 4)
    num_particles = curr_pos.shape[0]
    for _ in range(pick_num):
        pick_id = np.random.randint(num_particles)
        pick_dir = np.random.random(3) * 2 - 1
        pick_dir[1] = (pick_dir[1] + 1)  # Bias the y-component upwards so the particle is lifted
        pick_dir *= pick_scale
        original_inv_mass = curr_pos[pick_id, 3]
        for _ in range(60):
            curr_pos = pyflex.get_positions().reshape(-1, 4)
            curr_pos[pick_id, :3] += pick_dir
            curr_pos[pick_id, 3] = 0  # Zero inverse mass: the picked particle ignores simulation forces
            pyflex.set_positions(curr_pos.flatten())
            pyflex.step()

        # Revert mass
        curr_pos = pyflex.get_positions().reshape(-1, 4)
        curr_pos[pick_id, 3] = original_inv_mass
        pyflex.set_positions(curr_pos.flatten())
        pyflex.step()

        # Wait to stabilize (compare speeds, not signed velocities)
        for _ in range(100):
            pyflex.step()
            curr_vel = pyflex.get_velocities()
            if np.all(np.abs(curr_vel) < 0.01):
                break

    # Final settling phase after all picks
    for _ in range(500):
        pyflex.step()
        curr_vel = pyflex.get_velocities()
        if np.all(np.abs(curr_vel) < 0.01):
            break
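# The stabilization loop above (step the simulation until every particle's speed
# falls below a threshold, up to a step budget) recurs in several of the snippets
# below with different budgets and thresholds. A minimal sketch factoring it out;
# `wait_until_stable` is a hypothetical helper name, not part of pyflex:


def wait_until_stable(max_wait_step=500, stable_vel_threshold=0.01):
    """ Step the simulation until all particle speeds drop below the threshold.
    Returns True if the scene stabilized within the step budget. """
    for _ in range(max_wait_step):
        pyflex.step()
        curr_vel = pyflex.get_velocities()
        if np.all(np.abs(curr_vel) < stable_vel_threshold):
            return True
    return False  # Did not stabilize within the budget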
def generate_env_variation(self, num_variations=1, vary_cloth_size=True):
    """ Generate initial states. Note: This will also change the current states! """
    max_wait_step = 500  # Maximum number of steps waiting for the cloth to stabilize
    stable_vel_threshold = 0.1  # Cloth is stable when all particle velocities are smaller than this
    generated_configs, generated_states = [], []
    default_config = self.get_default_config()

    for i in range(num_variations):
        config = deepcopy(default_config)
        self.update_camera(config['camera_name'], config['camera_params'][config['camera_name']])
        if vary_cloth_size:
            cloth_dimx, cloth_dimy = self._sample_cloth_size()
            config['ClothSize'] = [cloth_dimx, cloth_dimy]
        else:
            cloth_dimx, cloth_dimy = config['ClothSize']
        self.set_scene(config)
        self.action_tool.reset([0., -1., 0.])

        pickpoints = self._get_drop_point_idx()[:2]  # Pick two corners of the cloth and wait until it stabilizes

        config['target_pos'] = self._get_flat_pos()
        self._set_to_vertical(x_low=np.random.random() * 0.2 - 0.1,
                              height_low=np.random.random() * 0.1 + 0.1)

        # Get height of the cloth without gravity. With gravity, it will be longer
        p1, _, p2, _ = self._get_key_point_idx()

        curr_pos = pyflex.get_positions().reshape(-1, 4)
        curr_pos[0] += np.random.random() * 0.001  # Add small jittering
        original_inv_mass = curr_pos[pickpoints, 3]
        curr_pos[pickpoints, 3] = 0  # Set mass of the pickup points to infinity so they can hold the rest of the cloth
        pickpoint_pos = curr_pos[pickpoints, :3]
        pyflex.set_positions(curr_pos.flatten())

        picker_radius = self.action_tool.picker_radius
        self.action_tool.update_picker_boundary([-0.3, 0.05, -0.5], [0.5, 2, 0.5])
        self.action_tool.set_picker_pos(picker_pos=pickpoint_pos + np.array([0., picker_radius, 0.]))

        # Pick up the cloth and wait for it to stabilize
        for j in range(0, max_wait_step):
            pyflex.step()
            curr_pos = pyflex.get_positions().reshape((-1, 4))
            curr_vel = pyflex.get_velocities().reshape((-1, 3))
            if np.all(np.abs(curr_vel) < stable_vel_threshold) and j > 300:
                break
            curr_pos[pickpoints, :3] = pickpoint_pos
            pyflex.set_positions(curr_pos)

        curr_pos = pyflex.get_positions().reshape((-1, 4))
        curr_pos[pickpoints, 3] = original_inv_mass
        pyflex.set_positions(curr_pos.flatten())

        generated_configs.append(deepcopy(config))
        print('config {}: {}'.format(i, config['camera_params']))
        generated_states.append(deepcopy(self.get_state()))

    return generated_configs, generated_states
def get_state(self):
    pos = pyflex.get_positions()
    vel = pyflex.get_velocities()
    shape_pos = pyflex.get_shape_states()
    phase = pyflex.get_phases()
    camera_params = copy.deepcopy(self.camera_params)
    return {'particle_pos': pos, 'particle_vel': vel, 'shape_pos': shape_pos, 'phase': phase,
            'camera_params': camera_params, 'config_id': self.current_config_id}
def get_state(self):
    ''' Get the position and velocity of the FleX particles, and the positions of the FleX shapes. '''
    particle_pos = pyflex.get_positions()
    particle_vel = pyflex.get_velocities()
    shape_position = pyflex.get_shape_states()
    return {'particle_pos': particle_pos, 'particle_vel': particle_vel, 'shape_pos': shape_position,
            'box_x': self.box_x, 'box_states': self.box_states, 'box_params': self.box_params,
            'config_id': self.current_config_id}
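# The get_state variants in this section only read simulation state. Restoring
# a saved state uses the matching pyflex setters that appear elsewhere in these
# snippets. Below is a minimal sketch of the counterpart, assuming the same dict
# layout as above; env-specific entries (box, glass, camera params) would be
# restored as instance attributes alongside it:


def set_state(self, state_dict):
    ''' Restore particle positions/velocities and shape states saved by get_state (sketch). '''
    pyflex.set_positions(state_dict['particle_pos'])
    pyflex.set_velocities(state_dict['particle_vel'])
    pyflex.set_shape_states(state_dict['shape_pos'])
    pyflex.step()  # Step once so the simulator and renderer pick up the restored state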
def generate_env_variation(self, num_variations=2, vary_cloth_size=True):
    """ Generate initial states. Note: This will also change the current states! """
    max_wait_step = 1000  # Maximum number of steps waiting for the cloth to stabilize
    stable_vel_threshold = 0.2  # Cloth is stable when all particle velocities are smaller than this
    generated_configs, generated_states = [], []
    default_config = self.get_default_config()
    default_config['flip_mesh'] = 1

    for i in range(num_variations):
        config = deepcopy(default_config)
        self.update_camera(config['camera_name'], config['camera_params'][config['camera_name']])
        if vary_cloth_size:
            cloth_dimx, cloth_dimy = self._sample_cloth_size()
            config['ClothSize'] = [cloth_dimx, cloth_dimy]
        else:
            cloth_dimx, cloth_dimy = config['ClothSize']
        self.set_scene(config)
        self.action_tool.reset([0., -1., 0.])

        pos = pyflex.get_positions().reshape(-1, 4)
        pos[:, :3] -= np.mean(pos, axis=0)[:3]
        if self.action_mode in ['sawyer', 'franka']:  # Take care of the table in the robot case
            pos[:, 1] = 0.57
        else:
            pos[:, 1] = 0.005
        pos[:, 3] = 1
        pyflex.set_positions(pos.flatten())
        pyflex.set_velocities(np.zeros((pos.shape[0], 3)))  # Velocities are per-particle 3-vectors
        for _ in range(5):  # In case the cloth starts in the air
            pyflex.step()

        for wait_i in range(max_wait_step):
            pyflex.step()
            curr_vel = pyflex.get_velocities()
            if np.all(np.abs(curr_vel) < stable_vel_threshold):
                break

        center_object()
        angle = (np.random.random() - 0.5) * np.pi / 2
        self.rotate_particles(angle)

        generated_configs.append(deepcopy(config))
        print('config {}: {}'.format(i, config['camera_params']))
        generated_states.append(deepcopy(self.get_state()))

    return generated_configs, generated_states
def get_state(self):
    ''' Get the position and velocity of the FleX particles, and the positions of the FleX shapes. '''
    particle_pos = pyflex.get_positions()
    particle_vel = pyflex.get_velocities()
    shape_position = pyflex.get_shape_states()
    return {
        'particle_pos': particle_pos,
        'particle_vel': particle_vel,
        'shape_pos': shape_position,
        'glass_x': self.glass_x,
        'glass_y': self.glass_y,
        'glass_rotation': self.glass_rotation,
        'glass_states': self.glass_states,
        'poured_glass_states': self.poured_glass_states,
        'glass_params': self.glass_params,
        'config_id': self.current_config_id,
        'line_box_x': self.line_box_x,
        'line_box_y': self.line_box_y
    }
shape_states = np.zeros((time_step, n_shapes, dim_shape_state))

x_box = x_center
v_box = 0

# Simulation
for i in range(time_step):
    x_box_last = x_box
    x_box += v_box * dt
    v_box += rand_float(-0.15, 0.15) - x_box * 0.1  # Random forcing plus a restoring term toward x = 0
    shape_states_ = calc_shape_states(x_box, x_box_last, scene_params[-2:])
    pyflex.set_shape_states(shape_states_)

    positions[i] = pyflex.get_positions().reshape(-1, dim_position)
    velocities[i] = pyflex.get_velocities().reshape(-1, dim_velocity)
    shape_states[i] = pyflex.get_shape_states().reshape(-1, dim_shape_state)

    if i == 0:
        print(np.min(positions[i], 0), np.max(positions[i], 0))
        print(x_box, box_dis_x, box_dis_z)

    pyflex.step()

# Playback (fragment: the loop body below is truncated in the original)
pyflex.set_scene(6, scene_params, 0)
for i in range(len(boxes) - 1):
    halfEdge = boxes[i][0]
    center = boxes[i][1]
    quat = boxes[i][2]
def generate_env_variation(self, num_variations=1, vary_cloth_size=True):
    """ Generate initial states. Note: This will also change the current states! """
    max_wait_step = 300  # Maximum number of steps waiting for the cloth to stabilize
    stable_vel_threshold = 0.01  # Cloth is stable when all particle velocities are smaller than this
    generated_configs, generated_states = [], []
    default_config = self.get_default_config()

    for i in range(num_variations):
        config = deepcopy(default_config)
        self.update_camera(config['camera_name'], config['camera_params'][config['camera_name']])
        if vary_cloth_size:
            cloth_dimx, cloth_dimy = self._sample_cloth_size()
            config['ClothSize'] = [cloth_dimx, cloth_dimy]
        else:
            cloth_dimx, cloth_dimy = config['ClothSize']
        self.set_scene(config)
        self.action_tool.reset([0., -1., 0.])
        pos = pyflex.get_positions().reshape(-1, 4)
        pos[:, :3] -= np.mean(pos, axis=0)[:3]
        if self.action_mode in ['sawyer', 'franka']:  # Take care of the table in the robot case
            pos[:, 1] = 0.57
        else:
            pos[:, 1] = 0.005
        pos[:, 3] = 1
        pyflex.set_positions(pos.flatten())
        pyflex.set_velocities(np.zeros((pos.shape[0], 3)))  # Velocities are per-particle 3-vectors
        pyflex.step()

        num_particle = cloth_dimx * cloth_dimy
        pickpoint = random.randint(0, num_particle - 1)
        curr_pos = pyflex.get_positions()
        original_inv_mass = curr_pos[pickpoint * 4 + 3]
        curr_pos[pickpoint * 4 + 3] = 0  # Set the mass of the pickup point to infinity so it can hold the rest of the cloth
        pickpoint_pos = curr_pos[pickpoint * 4:pickpoint * 4 + 3].copy()  # Pos of the pickup point is fixed to this point
        pickpoint_pos[1] += np.random.random(1) * 0.5 + 0.5
        pyflex.set_positions(curr_pos)

        # Pick up the cloth and wait for it to stabilize
        for j in range(0, max_wait_step):
            curr_pos = pyflex.get_positions()
            curr_vel = pyflex.get_velocities()
            curr_pos[pickpoint * 4:pickpoint * 4 + 3] = pickpoint_pos
            curr_vel[pickpoint * 3:pickpoint * 3 + 3] = [0, 0, 0]
            pyflex.set_positions(curr_pos)
            pyflex.set_velocities(curr_vel)
            pyflex.step()
            if np.all(np.abs(curr_vel) < stable_vel_threshold) and j > 5:
                break

        # Drop the cloth and wait for it to stabilize
        curr_pos = pyflex.get_positions()
        curr_pos[pickpoint * 4 + 3] = original_inv_mass
        pyflex.set_positions(curr_pos)
        for _ in range(max_wait_step):
            pyflex.step()
            curr_vel = pyflex.get_velocities()
            if np.all(np.abs(curr_vel) < stable_vel_threshold):
                break

        center_object()

        if self.action_mode == 'sphere' or self.action_mode.startswith('picker'):
            curr_pos = pyflex.get_positions()
            self.action_tool.reset(curr_pos[pickpoint * 4:pickpoint * 4 + 3] + [0., 0.2, 0.])
        generated_configs.append(deepcopy(config))
        generated_states.append(deepcopy(self.get_state()))
        self.current_config = config  # Needed in the _set_to_flatten function
        generated_configs[-1]['flatten_area'] = self._set_to_flatten()  # Record the maximum flatten area
        print('config {}: camera params {}, flatten area: {}'.format(
            i, config['camera_params'], generated_configs[-1]['flatten_area']))

    return generated_configs, generated_states
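# A typical caller of these generate_env_variation methods (sketch): generate a
# handful of variations once and cache them for later resets. Assumes `env` is an
# environment instance exposing generate_env_variation as above; the cache path
# is illustrative only:

import pickle

generated_configs, generated_states = env.generate_env_variation(num_variations=10)
with open('./cached_initial_states/cloth_flatten.pkl', 'wb') as f:
    pickle.dump((generated_configs, generated_states), f)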
def main():
    parser = argparse.ArgumentParser(description='Generate environment variations and detect rope crossings.')
    # ['PassWater', 'PourWater', 'PourWaterAmount', 'RopeFlatten', 'ClothFold', 'ClothFlatten',
    #  'ClothDrop', 'ClothFoldCrumpled', 'ClothFoldDrop', 'RopeConfiguration']
    parser.add_argument('--env_name', type=str, default='RopeFlatten')
    parser.add_argument('--headless', type=int, default=0,
                        help='Whether to run the environment with headless rendering')
    parser.add_argument('--num_variations', type=int, default=1,
                        help='Number of environment variations to be generated')
    parser.add_argument('--save_video_dir', type=str, default='./data/',
                        help='Path to the saved video')
    parser.add_argument('--img_size', type=int, default=256,
                        help='Size of the recorded videos')
    args = parser.parse_args()

    env_kwargs = env_arg_dict[args.env_name]
    # Generate and save the initial states when running this environment for the first time
    env_kwargs['use_cached_states'] = False
    env_kwargs['save_cached_states'] = False
    env_kwargs['num_variations'] = args.num_variations
    env_kwargs['render'] = True
    env_kwargs['headless'] = args.headless

    if not env_kwargs['use_cached_states']:
        print('Waiting to generate environment variations. May take 1 minute for each variation...')
    env = SOFTGYM_ENVS[args.env_name](**env_kwargs)
    env.reset()
    # frames = [env.get_image(args.img_size, args.img_size)]

    # Generate an init state
    count = 0
    while True:
        count += 1
        # The pickers should pick/unpick a segment and move randomly
        picker_pos, particle_pos = sa.action_space.Picker._get_pos()

        # Visualize particles: choose two distinct particles at random
        num_particles = len(particle_pos)
        pick_id_1 = np.random.randint(num_particles)
        picked_particle_1 = particle_pos[pick_id_1, :3]
        while True:
            pick_id_2 = np.random.randint(num_particles)
            if pick_id_2 != pick_id_1:
                break
        picked_particle_2 = particle_pos[pick_id_2, :3]
        new_picker_pos = np.vstack((picked_particle_1, picked_particle_2))
        sa.action_space.Picker._set_pos(new_picker_pos, particle_pos)

        action = env.action_space.sample()
        _, _, _, info = env.step(action, record_continuous_video=True, img_size=args.img_size)

        # Move the pickers to the boundary to avoid picker occlusion in frames
        action = np.array([[2, 2, 2, 0], [2, 2, 2, 0]])
        _, _, _, info = env.step(action, record_continuous_video=True, img_size=args.img_size)

        # Wait for the rope to become stable
        for _ in range(100):
            pyflex.step()
            curr_vel = pyflex.get_velocities()
            if np.all(np.abs(curr_vel) < 0.01):
                break

        z_frame = env.get_image()
        gc = get_crossing.get_crossing(z_frame)
        crossings = gc.find_crossing(img_idx=0, visual=False)
        if crossings is not None:
            if len(crossings) >= 2:
                break
        if count > 500:
            break

    # Change the camera config to capture images from different viewpoints,
    # later used for crossing-height calculation (this part is not used)
    camera_para = env.get_current_config()['camera_params']['default_camera']
    z_frame = Image.fromarray(env.get_image())
    z_frame.save("data/diff_view/z.png")

    camera_para = env.get_current_config()['camera_params']['default_camera']
    camera_para['pos'] = np.array([-0.85, 0, 0])
    camera_para['angle'] = np.array([-90 / 180. * np.pi, 0 * np.pi, 0])
    env.update_camera('default_camera', camera_para)
    x_frame = Image.fromarray(env.get_image())
    x_frame.save("data/diff_view/x.png")

    camera_para['pos'] = np.array([0, 0, 0.85])
    camera_para['angle'] = np.array([0 * np.pi, 0 * np.pi, -90 / 180. * np.pi])
    env.update_camera('default_camera', camera_para)
    y_frame = Image.fromarray(env.get_image())
    y_frame.save("data/diff_view/y.png")

    camera_para['pos'] = np.array([0, 0.85, 0])
    camera_para['angle'] = np.array([0 * np.pi, -90 / 180. * np.pi, 0 * np.pi])
    env.update_camera('default_camera', camera_para)
    z_frame = env.get_image()
    z_frame = cv2.cvtColor(z_frame, cv2.COLOR_RGB2BGR)
    cv2.imwrite('my_test/presentation/orig_img.png', z_frame)

    # Get crossings and map them to world space
    start_time = time.time()
    PIC = Picker()
    gc = get_crossing.get_crossing(z_frame, 'my_test/presentation/orig_img.png')
    crossings = gc.find_crossing(img_idx=0, visual=False)
    print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~", crossings)
    # cv2.imshow("crossing", cv2.imread("./my_test/new_interaction/pre_000.png"))

    world_coors = []
    if crossings is not None:
        for i in range(len(crossings)):
            PQ = PickerQPG((z_frame.shape[0], z_frame.shape[1]), camera_para['pos'], camera_para['angle'])
            cro_world_coor = PQ._get_world_coor_from_image(crossings[i][0][1], crossings[i][0][0])
            world_coors.append([cro_world_coor[0], 0.0245, cro_world_coor[2]])
    # cv2.imshow("x_frame_binary", x_frame_binary)

    picker_pos, particle_pos = sa.action_space.Picker._get_pos()
    world_coors = np.array(world_coors)
    # FleX is y-up; swap the axes for plotting
    particle_x = particle_pos[:, 0]
    particle_z = particle_pos[:, 1]
    particle_y = particle_pos[:, 2]
    fig = plt.figure()
    ax2 = fig.add_subplot(projection='3d')  # Axes3D(fig) no longer auto-adds the axes in recent matplotlib
    ax2.scatter3D(particle_x, particle_y, particle_z, cmap='Blues')
    ax2.plot3D(particle_x, particle_y, particle_z, 'gray')
    if world_coors.size > 0:  # Comparing an ndarray to [] is ambiguous; check the size instead
        ax2.scatter3D(world_coors[:, 0], world_coors[:, 2], world_coors[:, 1], cmap='Reds')
    # plt.show()

    PQ = PickerQPG((z_frame.shape[0], z_frame.shape[1]), camera_para['pos'], camera_para['angle'])  # Init the PickerQPG class
    prev_picker_pos, prev_particle_pos = sa.action_space.Picker._get_pos()
    interact_cros = []
    for c in range(len(crossings)):  # Assumes crossings is not None at this point
        top_branches, bottom_branches = my_utils.relative_location(
            env, PQ, PIC, crossings[c], args.img_size, c, prev_picker_pos, prev_particle_pos)
        interact_cros.append([crossings[c][0], (top_branches, bottom_branches)])
    scored_cros = my_utils.opt_crossings(interact_cros, gc)

    after_interact_img = env.get_image()
    after_interact_img = cv2.cvtColor(after_interact_img, cv2.COLOR_RGB2BGR)
    for i in range(len(scored_cros)):
        cv2.circle(after_interact_img, (scored_cros[i][0][0][1], scored_cros[i][0][0][0]), 3, (255, 0, 0), -1)
        top_branches = scored_cros[i][0][1][0]
        bottom_branches = scored_cros[i][0][1][1]
        if top_branches:
            for top_j in range(len(top_branches)):
                cv2.circle(after_interact_img, (top_branches[top_j][1], top_branches[top_j][0]), 3, (0, 0, 255), -1)
        if bottom_branches:
            for bottom_j in range(len(bottom_branches)):
                cv2.circle(after_interact_img, (bottom_branches[bottom_j][1], bottom_branches[bottom_j][0]), 3, (0, 255, 0), -1)
    # cv2.imwrite("my_test/new_interaction/after_000.png", after_interact_img)
    cv2.imwrite('my_test/presentation/after_interact_000.png', after_interact_img)

    end_time = time.time()
    print("total perception and interaction time: ", end_time - start_time)