Example #1
0
    def execute_action(self):
        """Run every currently selected action against the collected arguments.

        Does nothing unless at least one action is selected and there is at
        least one argument available; failures in individual actions are
        logged and do not stop the remaining ones.
        """
        selections = self.model_action.get_selected_results_with_index()
        if not (selections and self.args_for_action):
            return
        for _label, _, position in selections:
            try:
                chosen = self.actions[position]
                if chosen:
                    # First element of each args_for_action tuple is the value.
                    chosen.act([value for value, _, _ in self.args_for_action],
                               self)
            except Exception as exc:
                debug.log("execute_action", exc)
Example #2
0
    def execute_action(self):
        """Invoke each selected action with the current argument values.

        A selection is a (name, _, index) triple; the index picks the action
        out of ``self.actions``. Per-action errors are logged, never raised.
        """
        picked = self.model_action.get_selected_results_with_index()
        if picked and self.args_for_action:
            for _name, _, idx in picked:
                try:
                    target = self.actions[idx]
                    if not target:
                        continue
                    target.act(
                        [head for head, _, _ in self.args_for_action],
                        self,
                    )
                except Exception as err:
                    debug.log("execute_action", err)
Example #3
0
 def step(self, action_dict):
     """Advance the simulation by one step.

     Applies ``action_dict`` to the ego vehicle, records the resulting
     observation/vehicle/action history, and returns the usual RL 4-tuple.

     Returns:
         (obs_dict, reward_list, env_state, info) where ``info`` is the
         action dictionary actually applied this step.

     Raises:
         traci.FatalTraCIError / traci.TraCIException: re-raised after the
         environment state is marked as ERROR.
     """
     assert self.env_state == EnvState.NORMAL, "env.env_state is not EnvState.NORMAL"
     try:
         self.env_state = act(self, self.EGO_VEH_ID, action_dict)
         if self.env_state == EnvState.DONE:
             # Episode finished: reuse the most recent recorded state instead
             # of querying the (now finished) simulation again.
             obs_dict = self.obs_dict_hist.get(-1)
             veh_dict = self.veh_dict_hist.get(-1)
         else:
             obs_dict = get_obs_dict(self)
             veh_dict = get_veh_dict(self)
         # Was `self.agt_ctrl == False` (PEP 8 / E712); when the agent is not
         # in control, recover the action the simulator actually took.
         if not self.agt_ctrl:
             action_dict = infer_action(self)
         self.action_dict_hist.add(action_dict)
         self.veh_dict_hist.add(veh_dict)
         self.obs_dict_hist.add(obs_dict)
     except (traci.FatalTraCIError, traci.TraCIException):
         self.env_state = EnvState.ERROR
         raise
     info = action_dict
     return obs_dict, get_reward_list(self), self.env_state, info
Example #4
0
    def step(self, action_dict=None):
        """Advance the environment by one simulation step.

        Applies ``action_dict`` (may be None when the agent is not in
        control), updates the observation/vehicle/action histories, and
        returns the usual RL 4-tuple.

        Returns:
            (obs_dict, reward_list, env_state, info) where ``info`` is the
            action dictionary actually applied this step.

        Raises:
            traci.FatalTraCIError / traci.TraCIException: re-raised after the
            environment state is marked as ERROR.
        """
        assert self.env_state == EnvState.NORMAL, "env.env_state is not EnvState.NORMAL"
        try:
            self.env_state = act(self, self.EGO_VEH_ID, action_dict)

            # If ego reaches the end of an incorrect (turning) lane, the
            # episode is considered DONE.
            if self.env_state == EnvState.NORMAL and \
               self.obs_dict_hist[-1]["ego_dist_to_end_of_lane"] < 0.01 and \
               self.obs_dict_hist[-1]["ego_correct_lane_gap"] != 0:
                self.env_state = EnvState.DONE

            if self.env_state == EnvState.DONE:
                # Episode over: carry the last recorded state forward. Deep
                # copies so later history mutation cannot corrupt the result.
                obs_dict = deepcopy(self.obs_dict_hist[-1])
                veh_dict = deepcopy(self.veh_dict_hist[-1])
            else:
                obs_dict = get_obs_dict(self)
                veh_dict = get_veh_dict(self)
            self.veh_dict_hist.append(veh_dict)
            self.obs_dict_hist.append(obs_dict)
            # Was `self.agt_ctrl == False` (PEP 8 / E712); when the agent is
            # not in control, recover the action the simulator actually took.
            if not self.agt_ctrl:
                action_dict = infer_action(self)
            self.action_dict_hist.append(action_dict)
            info = action_dict
            # (Removed a dead triple-quoted block of commented-out debug
            # prints that executed as a no-op string statement.)
            return obs_dict, get_reward_list(self), self.env_state, info
        except (traci.FatalTraCIError, traci.TraCIException):
            self.env_state = EnvState.ERROR
            raise
Example #5
0
def like(request):
    """Handle a "like" request by delegating to the shared ``act`` helper."""
    handler = db_adapter.like
    return act(request, handler)
Example #6
0
def dislike(request):
    """Handle a "dislike" request via the shared ``act`` helper."""
    reaction = db_adapter.dislike
    return act(request, reaction)