Example #1
def classify_pedestrians(annotation_file, viscinity):
    '''
    Reads the annotation file, replays each subject, and classifies the
    subjects into easy/medium/hard groups based on the average number of
    pedestrians within the given vicinity per frame.
    '''

    tick_speed = 30
    #initialize world
    env = GridWorldDrone(display=False, is_onehot=False, 
                        seed=10, obstacles=None,
                        show_trail=False,
                        is_random=False,
                        show_orientation=True,
                        annotation_file=annotation_file,
                        subject=None,
                        external_control=False,
                        replace_subject=True,      
                        tick_speed=tick_speed,                  
                        rows=576, cols=720,
                        width=10)    
    
    subject_set = extract_subjects_from_file(annotation_file)
    avg_ped_per_subject = []
    for subject in subject_set:
        print(' Subject :', subject)
        state = env.reset_and_replace(ped=subject)

        nearby_peds_in_frame = 0
        total_frames = env.final_frame - env.current_frame
        while env.current_frame < env.final_frame:

            state, _, _, _ = env.step()
            
            nearby_peds_in_frame += get_pedestrians_in_viscinity(state, viscinity)
        
        avg_peds_per_frame = nearby_peds_in_frame/total_frames
        avg_ped_per_subject.append(avg_peds_per_frame)
        print('Avg peds nearby :', avg_peds_per_frame)

    subject_array = np.asarray(list(subject_set))
    avg_peds_per_subject_arr = np.asarray(avg_ped_per_subject)
    subject_array = subject_array[avg_peds_per_subject_arr.argsort()]
    avg_peds_per_subject_arr.sort()
    easy_arr = subject_array[0:200]
    medium_arr = subject_array[200:380]
    hard_arr = subject_array[380:]

    return easy_arr, medium_arr, hard_arr
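
A minimal usage sketch for the function above. The annotation file path is the one used in the other examples on this page; the vicinity radius of 30 is an illustrative assumption, not a value from the original code.

# Hypothetical call site; the vicinity radius is an assumed value.
annotation_file = ('../envs/expert_datasets/university_students'
                   '/annotation/processed/frame_skip_1/students003_processed_corrected.txt')
easy_subjects, medium_subjects, hard_subjects = classify_pedestrians(annotation_file, 30)
print('easy:', len(easy_subjects),
      'medium:', len(medium_subjects),
      'hard:', len(hard_subjects))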
Example #2
    def play_regression_policy(self,
                    num_runs,
                    max_episode_length,
                    feat_extractor):
        '''
        Loads the environment and runs the learned policy for num_runs
        episodes of at most max_episode_length steps to check the
        performance of the agent.
        '''
        #initialize variables needed for the run 

        agent_width = 10
        obs_width = 10
        step_size = 2
        grid_size = 10
        
        #load up the environment
        annotation_file = ("../envs/expert_datasets/university_students"
                           "/annotation/processed/frame_skip_1/"
                           "students003_processed_corrected.txt")
        env = GridWorldDrone(
                            display=True,
                            is_onehot=False,
                            seed=0,
                            obstacles=None,
                            show_trail=False,
                            is_random=False,
                            annotation_file=annotation_file,
                            subject=None,
                            tick_speed=60,
                            obs_width=obs_width,
                            step_size=step_size,
                            agent_width=agent_width,
                            replace_subject=True,
                            segment_size=None,
                            external_control=True,
                            step_reward=0.001,
                            show_comparison=True,
                            consider_heading=True,
                            show_orientation=True,
                            continuous_action=False,
                            # rows=200, cols=200, width=grid_size)
                            rows=576,
                            cols=720,
                            width=grid_size,
                        )
        #initialize the feature extractor

        feat_ext = None
        if feat_extractor == "DroneFeatureRisk_speedv2":

            feat_ext = DroneFeatureRisk_speedv2(
                agent_width=agent_width,
                obs_width=obs_width,
                step_size=step_size,
                grid_size=grid_size,
                show_agent_persp=False,
                return_tensor=False,
                thresh1=18,
                thresh2=30,
            )
        else:
            raise ValueError(
                'Unsupported feature extractor: {}'.format(feat_extractor))

        #play the environment 

        for _ in range(num_runs):
 
            state = env.reset()
            state_features = feat_ext.extract_features(state)
            state_features = torch.from_numpy(state_features).type(torch.FloatTensor).to(self.device)
            done = False
            t = 0
            while t < max_episode_length:

                action = self.policy.eval_action(state_features)

                state, _, done, _ = env.step(action)
                state_features = feat_ext.extract_features(state)
                state_features = torch.from_numpy(state_features).type(torch.FloatTensor).to(self.device)
                t+=1
                if done:
                    break
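
A hedged usage sketch for the method above: it assumes the surrounding class exposes policy and device attributes (as the method body implies) and that an instance called agent has already been constructed; the run count and episode length are illustrative.

# Hypothetical call site; the agent instance and the argument values are assumptions.
agent.play_regression_policy(num_runs=10,
                             max_episode_length=500,
                             feat_extractor='DroneFeatureRisk_speedv2')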
Example #3
def extract_trajectory(annotation_file,
                       folder_to_save,
                       feature_extractor=None,
                       display=False,
                       extract_action=False,
                       show_states=False, subject=None,
                       trajectory_length_limit=None):
    '''
    Replays the pedestrians in the annotation file and saves their
    trajectories (and, optionally, the corresponding expert actions) to
    folder_to_save, splitting each trajectory into segments when
    trajectory_length_limit is set.
    '''

    if not os.path.exists(folder_to_save):
        os.makedirs(folder_to_save)

    lag_val = 8
    
    tick_speed = 60
    subject_list = extract_subjects_from_file(annotation_file)
    print(subject_list)
    disp = display
    total_path_len = 0

    if show_states:
        tick_speed = 5
        disp = True
            
    #initialize world
    world = GridWorldDrone(display=disp, is_onehot=False, 
                        seed=10, obstacles=None, 
                        show_trail=False,
                        is_random=False,
                        show_orientation=True,
                        annotation_file=annotation_file,
                        subject=None,
                        external_control=False,
                        replace_subject=True,      
                        tick_speed=tick_speed,                  
                        rows=576, cols=720,
                        width=10)


    #default action: the middle entry of the speed array paired with the middle
    #entry of the orientation array (assuming speed-major action indexing)
    default_action = int(len(world.speed_array)/2)*len(world.orientation_array) + \
                     int(len(world.orientation_array)/2)
    default_action = torch.tensor(default_action)

    if subject is not None:
        #an explicit iterable of subject ids overrides the list from the file
        subject_list = subject
    for sub in subject_list:
        print('Starting for subject :', sub)
        trajectory_info = []

        if extract_action:
            action_info = []
        step_counter_segment = 0

        segment_counter = 1
        world.subject = sub
        old_state = world.reset()
        cur_lag = 0
        print('Path length :', world.final_frame - world.current_frame)
        path_len = world.final_frame - world.current_frame
        cur_subject_final_frame = world.final_frame
        total_path_len += world.final_frame - world.current_frame
        print('Total trajectory information :\nStarting frame: {},final frame: {}'.format(world.current_frame, cur_subject_final_frame))
        print('Total path length :', path_len)                                                                               
        if trajectory_length_limit is not None:

            traj_seg_length = min(trajectory_length_limit, path_len)
            #change the goal position
            world.goal_state = copy.deepcopy(world.return_position(world.cur_ped, world.current_frame + traj_seg_length)['position'])        
            world.state['goal_state'] = copy.deepcopy(world.goal_state) 
    
        print('Segment 1: Start frame :', world.current_frame)    
        while world.current_frame < cur_subject_final_frame:
            state, _, _, _ = world.step()
            step_counter_segment += 1
            #step_counter_trajectory += 1 
            #if disp:
            #    feature_extractor.overlay_bins(state)

            if extract_action:
                
                if cur_lag == lag_val:
                    
                    action = extract_expert_action(state, old_state, 
                                            world.orient_quantization,
                                            len(world.orientation_array),
                                            world.speed_quantization,
                                                len(world.speed_array))
                    '''
                    action = extract_expert_speed_orientation(state)
                    '''
                    old_state = copy.deepcopy(state)
                    action = torch.tensor(action)
                    action_info.append(action)
                    for i in range(cur_lag):
                        action_info.append(default_action)
                    cur_lag = 0
                    #pdb.set_trace()

                else:
                    cur_lag += 1
            if feature_extractor is not None:
                state = feature_extractor.extract_features(state)
                state = torch.tensor(state)
            trajectory_info.append(copy.deepcopy(state))
            if trajectory_length_limit is not None:

                if step_counter_segment%traj_seg_length == 0:
                    print('Segment {} final frame : {}'.format(segment_counter, world.current_frame))
                    path_len = cur_subject_final_frame - world.current_frame
                    traj_seg_length = min(trajectory_length_limit, path_len)
                    print('Length of next path :', traj_seg_length)

                    #change the goal position
                    world.goal_state = copy.deepcopy(world.return_position(world.cur_ped, world.current_frame + traj_seg_length)['position'])        
                    world.state['goal_state'] = copy.deepcopy(world.goal_state) 
                    print('Trajectory length : ', len(trajectory_info))

                    if feature_extractor is not None:
                        state_tensors = torch.stack(trajectory_info)
                        torch.save(state_tensors, 
                                os.path.join(folder_to_save, 
                                        'traj_of_sub_{}_segment{}.states'.format(str(sub), 
                                        str(segment_counter))))
                    else:
                        with open('traj_of_sub_{}_segment{}.states'.format(str(sub), 
                                  str(segment_counter)), 'w') as fout:
                            json.dump(trajectory_info, fout)
                    if extract_action:

                        action_tensors = torch.stack(action_info)
                        torch.save(action_tensors,
                                os.path.join(folder_to_save, 
                                        'action_of_sub_{}_segment{}.actions'.format(str(sub),
                                        str(segment_counter))))
                    segment_counter += 1 
                    #pdb.set_trace()
                    step_counter_segment = 0 
                    trajectory_info = []
                    print('Segment {}: Start frame : {}'.format(segment_counter, 
                                                                world.current_frame))    

        #add the default action for any remaining lag steps
        if extract_action:
            for i in range(cur_lag):
                action_info.append(default_action)

        if trajectory_length_limit is None:

            if feature_extractor is not None:
                state_tensors = torch.stack(trajectory_info)
                torch.save(state_tensors, os.path.join(folder_to_save, 'traj_of_sub_{}_segment{}.states'.format(str(sub), str(segment_counter))))
            
                if extract_action:
                    #pdb.set_trace()
                    action_tensors = torch.stack(action_info)
                    torch.save(action_tensors,
                            os.path.join(folder_to_save, 
                                    'action_of_sub_{}_segment{}.actions'.format(str(sub),
                                    str(segment_counter))))
            else:
                '''
                with open('traj_of_sub_{}_segment{}.states'.format(str(sub), 
                            str(segment_counter)), 'w') as fout:
                    pdb.set_trace()
                    json.dump(trajectory_info, fout)
                '''
                np.save(os.path.join(folder_to_save, 'traj_of_sub_{}_segment{}.states'.format(str(sub), 
                            str(segment_counter))), trajectory_info)
                
                if extract_action:

                    action_tensors = torch.stack(action_info)
                    torch.save(action_tensors,
                            os.path.join(folder_to_save, 
                                    'action_of_sub_{}_segment{}.actions'.format(str(sub),
                                    str(segment_counter))))
        
    #if feature_extractor.debug_mode:
    #    feature_extractor.print_info()


    print('The average path length :', total_path_len/len(subject_list))
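
A minimal invocation sketch for the function above. The annotation path is the one used elsewhere on this page; the output folder name is an illustrative assumption. With feature_extractor=None and no length limit, the raw states are saved with np.save, as in the last branch of the function.

# Hypothetical invocation; the output folder is an assumed name.
annotation_file = ('../envs/expert_datasets/university_students'
                   '/annotation/processed/frame_skip_1/students003_processed_corrected.txt')
extract_trajectory(annotation_file,
                   folder_to_save='./expert_trajectories',
                   feature_extractor=None,
                   display=False,
                   extract_action=False)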
Example #4
def main():
    '''
    Sets up the environment, the feature extractor and the pretrained
    agents, then runs each agent over the pedestrians in the annotation
    file and computes trajectory metrics for every run.
    '''
    #**************************************************
    #parameters for the feature extractors
    thresh1 = 10
    thresh2 = 15

    step_size = 2
    agent_width = 10
    obs_width = 10
    grid_size = 3

    #**************************************************
    #for bookkeeping purposes

    ts = time.time()
    st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')

    args = parser.parse_args()

    #checks if all the parameters are in order
    check_parameters(args)

    if args.on_server:

        matplotlib.use('Agg')
        os.environ['SDL_VIDEODRIVER'] = 'dummy'

    #*************************************************
    #initialize environment
    from envs.gridworld_drone import GridWorldDrone

    consider_heading = True
    np.random.seed(0)
    env = GridWorldDrone(display=args.render,
                         is_onehot=False,
                         seed=0,
                         obstacles=None,
                         show_trail=True,
                         is_random=False,
                         subject=None,
                         annotation_file=args.annotation_file,
                         tick_speed=60,
                         obs_width=10,
                         step_size=step_size,
                         agent_width=agent_width,
                         external_control=True,
                         replace_subject=args.run_exact,
                         show_comparison=True,
                         consider_heading=consider_heading,
                         show_orientation=True,
                         rows=576,
                         cols=720,
                         width=grid_size)

    print('Environment initialized successfully.')

    #*************************************************
    #initialize the feature extractor
    from featureExtractor.drone_feature_extractor import DroneFeatureRisk, DroneFeatureRisk_v2
    from featureExtractor.drone_feature_extractor import DroneFeatureRisk_speed, DroneFeatureRisk_speedv2

    if args.feat_extractor == 'DroneFeatureRisk':

        feat_ext = DroneFeatureRisk(agent_width=agent_width,
                                    obs_width=obs_width,
                                    step_size=step_size,
                                    grid_size=grid_size,
                                    show_agent_persp=True,
                                    thresh1=thresh1,
                                    thresh2=thresh2)

    if args.feat_extractor == 'DroneFeatureRisk_v2':

        feat_ext = DroneFeatureRisk_v2(agent_width=agent_width,
                                       obs_width=obs_width,
                                       step_size=step_size,
                                       grid_size=grid_size,
                                       show_agent_persp=False,
                                       thresh1=thresh1,
                                       thresh2=thresh2)

    if args.feat_extractor == 'DroneFeatureRisk_speed':

        feat_ext = DroneFeatureRisk_speed(agent_width=agent_width,
                                          obs_width=obs_width,
                                          step_size=step_size,
                                          grid_size=grid_size,
                                          show_agent_persp=True,
                                          thresh1=thresh1,
                                          thresh2=thresh2)

    if args.feat_extractor == 'DroneFeatureRisk_speedv2':

        feat_ext = DroneFeatureRisk_speedv2(agent_width=agent_width,
                                            obs_width=obs_width,
                                            step_size=step_size,
                                            grid_size=grid_size,
                                            thresh1=18,
                                            thresh2=30)

    #*************************************************
    #initialize the agents
    agent_list = []  #list containing the paths to the agents
    agent_type_list = []  #list containing the type of the agents

    #for potential field agent
    attr_mag = 3
    rep_mag = 2

    #agent = PFController()
    ######################
    #for social forces agent

    ######################

    #for network based agents
    agent_file_list = [
        '/home/abhisek/Study/Robotics/deepirl/experiments/results/Beluga/IRL Runs/Variable-speed-hit-full-run-suppressed-local-updated-features2019-12-14_16:38:00-policy_net-256--reward_net-256--reg-0.001-seed-9-lr-0.0005/saved-models/28.pt'
    ]
    agent_file_list.append(
        '/home/abhisek/Study/Robotics/deepirl/experiments/results/Quadra/RL Runs/Possible_strawman2019-12-16 12:22:05DroneFeatureRisk_speedv2-seed-789-policy_net-256--reward_net-128--total-ep-8000-max-ep-len-500/policy-models/0.pt'
    )

    #initialize agents based on the agent files
    for agent_file in agent_file_list:

        agent_temp = Policy(feat_ext.state_rep_size,
                            env.action_space.n,
                            hidden_dims=args.policy_net_hidden_dims)

        agent_temp.load(agent_file)
        agent_list.append(agent_temp)
        agent_type_list.append('Policy_network')

    #####################

    for i in range(len(agent_list)):

        while env.cur_ped != env.last_pedestrian:

            state = env.reset()
            done = False
            t = 0
            traj = [copy.deepcopy(state)]
            while not done and t < args.max_ep_length:

                #policy-network agents act on extracted features
                if agent_type_list[i] == 'Policy_network':

                    feat = feat_ext.extract_features(state)
                    feat = torch.from_numpy(feat).type(
                        torch.FloatTensor).to(DEVICE)

                action = agent_list[i].eval_action(feat)
                state, _, done, _ = env.step(action)
                traj.append(copy.deepcopy(state))
                t += 1

                if done:
                    break

            total_smoothness, avg_smoothness = compute_trajectory_smoothness(
                traj)
            ratio = compute_distance_displacement_ratio(traj)

            proxemic_intrusions(traj, 10)
            anisotropic_intrusions(traj, 30)
            pdb.set_trace()  #pause after each run to inspect the metrics
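
The per-run metrics above are left in local variables and inspected from the debugger. Below is a hedged sketch of how the pdb.set_trace() call at the end of the loop could instead report the metrics; the print formatting is my own, while the metric functions are the ones already called above.

# Sketch: report the metrics instead of dropping into pdb
# (this would sit inside the per-run loop, in place of pdb.set_trace()).
print('smoothness total/avg: {:.3f} / {:.3f}'.format(total_smoothness, avg_smoothness))
print('distance/displacement ratio: {:.3f}'.format(ratio))
print('proxemic intrusions:', proxemic_intrusions(traj, 10))
print('anisotropic intrusions:', anisotropic_intrusions(traj, 30))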