def _get_task_on_completion_added_conditions(
            self) -> Dict[int, List[Distribution]]:
        # Map each task id to the distributions of condition elements that can
        # be added when that task completes.
        completion_conditions_dict = {}

        completion_conditions_dict[2] = [
            DiscreteDistribution([
                (ConditionElementsExample.PROBLEM_OPERATION_2, 0.1),
                (ConditionElementsExample.OK, 0.9),
            ])
        ]
        completion_conditions_dict[3] = [
            DiscreteDistribution([
                (ConditionElementsExample.PROBLEM_OPERATION_3, 0.9),
                (ConditionElementsExample.OK, 0.1),
            ])
        ]

        # Deterministic alternative: always add the problem condition on
        # completion.
        # completion_conditions_dict[2] = [
        #     DiscreteDistribution(
        #         [(ConditionElementsExample.PROBLEM_OPERATION_2, 1)])
        # ]
        # completion_conditions_dict[3] = [
        #     DiscreteDistribution(
        #         [(ConditionElementsExample.PROBLEM_OPERATION_3, 1)])
        # ]

        return completion_conditions_dict
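A minimal usage sketch of the mapping returned above; `domain` is a hypothetical instance of the enclosing class, and DiscreteDistribution.sample() is assumed to behave as in scikit-decide:

# Hypothetical usage: `domain` stands in for an instance of the enclosing
# class; DiscreteDistribution.sample() is assumed as in scikit-decide.
conditions = domain._get_task_on_completion_added_conditions()
for task_id, distributions in conditions.items():
    for distribution in distributions:
        # Draw one condition element added when this task completes.
        print(task_id, distribution.sample())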
Example 2
def build_stochastic_from_deterministic(rcpsp: MRCPSP,
                                        task_to_noise: Optional[Set[int]] = None):
    if task_to_noise is None:
        task_to_noise = set(rcpsp.get_tasks_ids())
    duration_distribution = {}
    for task_id in rcpsp.get_tasks_ids():
        duration_distribution[task_id] = {}
        for mode in rcpsp.get_task_modes(task_id=task_id):
            duration = rcpsp.get_task_duration(task=task_id, mode=mode)
            if duration == 0 or task_id not in task_to_noise:
                distrib = DiscreteDistribution(values=[(duration, 1)])
            else:
                # Uniform noise window of +/- n steps around the nominal
                # duration, clipped below at 1.
                n = 10
                distrib = DiscreteDistribution(
                    values=[(max(1, duration + i), 1 / (2 * n + 1))
                            for i in range(-n, n + 1)])
            duration_distribution[task_id][mode] = distrib

    return Stochastic_RCPSP(
        resource_names=rcpsp.get_resource_types_names(),
        task_ids=rcpsp.get_tasks_ids(),
        tasks_mode=rcpsp.get_tasks_modes(),  # resource
        duration_distribution=duration_distribution,
        successors=rcpsp.successors,
        max_horizon=rcpsp.max_horizon * 2,
        resource_availability=rcpsp.resource_availability,
        resource_renewable=rcpsp.resource_renewable,
    )
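The ±n noise window built above can also be checked in isolation; a minimal sketch, assuming DiscreteDistribution is importable from skdecide.core and exposes get_values():

from skdecide.core import DiscreteDistribution

# Rebuild the noise window used above for a nominal duration of 5 and n = 10:
# each value max(1, duration + i), i in [-n, n], gets weight 1 / (2 * n + 1).
n = 10
duration = 5
distrib = DiscreteDistribution(
    values=[(max(1, duration + i), 1 / (2 * n + 1)) for i in range(-n, n + 1)])
assert abs(sum(p for _, p in distrib.get_values()) - 1.0) < 1e-9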
 def _get_task_duration_distribution(self, task: int, mode: Optional[int] = 1,
                                     progress_from: Optional[float] = 0.,
                                     multivariate_settings: Optional[Dict[str, int]] = None) -> Distribution:
     all_distributions = {}
     all_distributions[1] = DiscreteDistribution([(0, 1.)])
     all_distributions[2] = DiscreteDistribution([(4, 0.25), (5, 0.5), (6, 0.25)])
     all_distributions[3] = DiscreteDistribution([(5, 0.25), (6, 0.5), (7, 0.25)])
     all_distributions[4] = DiscreteDistribution([(3, 0.5), (4, 0.5)])
     all_distributions[5] = DiscreteDistribution([(0, 1.)])
     return all_distributions[task]
    def _get_next_state_distribution(self, state: TrafficLightState, action: TrafficLightAction) -> DiscreteDistribution[TrafficLightState]:
        '''Return the distribution over the states that applying the specified
        action in the specified state can lead to, together with their
        respective probabilities.
        '''
        ans = []

        tau_north = tau_1
        tau_east = tau_2

        east_light = state.east_light.next_state(state.north_light, action)
        north_light = state.north_light.next_state(state.east_light, action)

        north_distribution = state.north_light.next_cars_queueing(
            state.cars_queueing_north, tau_north)
        east_distribution = state.east_light.next_cars_queueing(
            state.cars_queueing_east, tau_east)

        # Combine the two independent queue distributions into a joint
        # distribution over full traffic-light states.
        for (north_int, prob_north) in north_distribution:
            for (east_int, prob_east) in east_distribution:
                traffic_light_state = TrafficLightState(
                    north_int, east_int, north_light, east_light)
                ans.append((traffic_light_state, prob_north * prob_east))
        return DiscreteDistribution(ans)
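The method above combines two independent queue-length distributions into a joint one by multiplying probabilities; a standalone sketch of the same technique on plain (value, probability) lists:

# Combine two independent discrete distributions into a joint distribution by
# multiplying the probabilities of every pair of outcomes, as done above for
# the north and east queues. Plain lists are used here for illustration.
north = [(0, 0.5), (1, 0.5)]
east = [(0, 0.25), (1, 0.75)]
joint = [((n, e), pn * pe) for (n, pn) in north for (e, pe) in east]
assert abs(sum(p for _, p in joint) - 1.0) < 1e-9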
Example 5
 def decode(dd):
     return DiscreteDistribution(
         [
             (GridShmProxy.StateProxy.decode(o[0]), o[1].value)
             for o in dd
             if o[1].value > -0.5
         ]
     )
 def _get_next_state_distribution(self, state: GridState, action: GridAction) -> DiscreteDistribution[State]:
     unnormalised = [(self.next_state(state, delta), prob)
                     for (delta, prob) in action._deltas]
     factor = sum(prob for (next_state, prob) in unnormalised
                  if next_state is not None)
     if factor == 0:
         raise ValueError(
             "Action {} not applicable in state {}".format(action, state))
     result = DiscreteDistribution(
         [(next_state, prob / factor)
          for (next_state, prob) in unnormalised if next_state is not None])
     return result
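A standalone sketch of the renormalization step used above: outcomes with no valid successor are dropped and the remaining probabilities are rescaled so they sum to 1.

# Drop outcomes with no valid successor and renormalize the rest, mirroring
# the handling of `unnormalised` above. Plain strings stand in for states.
unnormalised = [("stay", 0.6), (None, 0.3), ("move", 0.1)]
factor = sum(prob for state, prob in unnormalised if state is not None)
normalised = [(state, prob / factor)
              for state, prob in unnormalised if state is not None]
assert abs(sum(prob for _, prob in normalised) - 1.0) < 1e-9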
 def _get_task_duration_distribution(
     self,
     task: int,
     mode: Optional[int] = 1,
     progress_from: Optional[float] = 0.0,
     multivariate_settings: Optional[Dict[str, int]] = None,
 ) -> Distribution:
     all_distributions = {}
     t = None
     if multivariate_settings is not None:
         if "t" in multivariate_settings:
             t = multivariate_settings["t"]
     all_distributions[1] = DiscreteDistribution([(0, 1.0)])
     all_distributions[2] = DiscreteDistribution([(4, 0.25), (5, 0.5),
                                                  (6, 0.25)])
     if t is not None:
         if t == 1:
             all_distributions[2] = DiscreteDistribution([(0, 0.25),
                                                          (1, 0.75)
                                                          ])  # Faster
     all_distributions[3] = DiscreteDistribution([(5, 0.25), (6, 0.5),
                                                  (7, 0.25)])
     all_distributions[4] = DiscreteDistribution([(3, 0.5), (4, 0.5)])
     all_distributions[5] = DiscreteDistribution([(0, 1.0)])
     return all_distributions[task]
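A minimal usage sketch of the time-dependent variant above; `domain` is a hypothetical instance of the enclosing class, and only the call signature shown above is assumed:

# Hypothetical usage: `domain` stands in for an instance of the enclosing class.
default = domain._get_task_duration_distribution(task=2)
faster = domain._get_task_duration_distribution(
    task=2, multivariate_settings={"t": 1})
# With t == 1 the duration distribution for task 2 switches from
# (4, 0.25), (5, 0.5), (6, 0.25) to the faster (0, 0.25), (1, 0.75).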
Example 8
    def _get_next_state_distribution(
        self,
        memory: D.T_memory[D.T_state],
        action: D.T_agent[D.T_concurrency[D.T_event]],
    ) -> DiscreteDistribution[D.T_state]:

        if action == Action.left:
            next_state_1 = State(max(memory.x - 1, 0), memory.y)
            next_state_2 = State(max(memory.x - 1, 0), max(memory.y - 1, 0))
            next_state_3 = State(
                max(memory.x - 1, 0), min(memory.y + 1, self.num_rows - 1)
            )
        elif action == Action.right:
            next_state_1 = State(min(memory.x + 1, self.num_cols - 1), memory.y)
            next_state_2 = State(
                min(memory.x + 1, self.num_cols - 1),
                min(memory.y + 1, self.num_rows - 1),
            )
            next_state_3 = State(
                min(memory.x + 1, self.num_cols - 1), max(memory.y - 1, 0)
            )
        elif action == Action.up:
            next_state_1 = State(memory.x, max(memory.y - 1, 0))
            next_state_2 = State(max(memory.x - 1, 0), max(memory.y - 1, 0))
            next_state_3 = State(
                min(memory.x + 1, self.num_cols - 1), max(memory.y - 1, 0)
            )
        elif action == Action.down:
            next_state_1 = State(memory.x, min(memory.y + 1, self.num_rows - 1))
            next_state_2 = State(
                min(memory.x + 1, self.num_cols - 1),
                min(memory.y + 1, self.num_rows - 1),
            )
            next_state_3 = State(
                max(memory.x - 1, 0), min(memory.y + 1, self.num_rows - 1)
            )
        else:
            raise ValueError(f"Unknown action: {action}")

        # 40% chance of the intended move, 20% for each diagonal slip and 20%
        # of staying in place.
        return DiscreteDistribution(
            [
                (memory, 0.2),
                (next_state_1, 0.4),
                (next_state_2, 0.2),
                (next_state_3, 0.2),
            ]
        )
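The slip model above always mixes the intended move (0.4) with staying put (0.2) and two diagonal slips (0.2 each); a quick check sketch, with `domain` as a hypothetical instance of the maze domain above and get_values() assumed as in scikit-decide:

# Hypothetical check: `domain` stands in for an instance of the domain above,
# with a grid larger than 4x4 so no clipping occurs at (2, 2).
distribution = domain._get_next_state_distribution(State(2, 2), Action.left)
probabilities = sorted(prob for _, prob in distribution.get_values())
expected = [0.2, 0.2, 0.2, 0.4]
assert all(abs(p - q) < 1e-9 for p, q in zip(probabilities, expected))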
Example 9
    def __init__(self,
                 transition_function: List[SMTransition],
                 initial_state: str):
        self._name_to_state = {}
        self._name_to_action = {}
        # state -> action -> (cost, DiscreteDistribution over successor states)
        self._state_to_action_to_output = {}

        trans: SMTransition
        for trans in transition_function:
            origin = self.state(trans.origin)
            action = self.action(trans.action)
            distrib = DiscreteDistribution([
                (self.state(target_name), prob)
                for (target_name, prob) in trans.prob_distribution
            ])
            self._state_to_action_to_output[origin][action] = (trans.cost,
                                                               distrib)

        self._initial_state = self.state(initial_state)
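After construction, the nested mapping built above goes state -> action -> (cost, successor distribution); a minimal lookup sketch, where `machine`, `origin` and `action` are hypothetical placeholders and sample() is assumed as in scikit-decide:

# Hypothetical lookup: `machine` stands in for an instance built as above.
cost, successor_distribution = machine._state_to_action_to_output[origin][action]
next_state = successor_distribution.sample()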
Example 10
 def _get_initial_state_distribution_(self) -> Distribution[D.T_state]:
     # Return a uniform distribution over all initial states
     n = len(self._h_solutions)
     return DiscreteDistribution(
         [(State(solution=s, score=Score(0, 0)), 1 / n)
          for s in self._h_solutions])
    def _get_next_state_distribution(
        self,
        memory: D.T_memory[D.T_state],
        action: D.T_agent[D.T_concurrency[D.T_event]],
    ) -> DiscreteDistribution[D.T_state]:
        if memory.t == 0:
            next_state_distribution = DiscreteDistribution([
                (MyState(-1, -1, 0), 1.0)
            ])

        if action == MyActions.left_slow:
            next_state_distribution = DiscreteDistribution([
                (MyState(max(memory.x - 1, 0), memory.y, memory.t - 1), 0.8),
                (MyState(-1, -1, 0), 0.2),
            ])
        elif action == MyActions.left_fast:
            next_state_distribution = DiscreteDistribution([
                (MyState(max(memory.x - 1, 0), memory.y, memory.t - 1), 0.9),
                (MyState(-1, -1, 0), 0.1),
            ])
        elif action == MyActions.right_slow:
            next_state_distribution = DiscreteDistribution([
                (
                    MyState(min(memory.x + 1, self.num_cols - 1), memory.y,
                            memory.t - 1),
                    0.8,
                ),
                (MyState(-1, -1, 0), 0.2),
            ])
        elif action == MyActions.right_fast:
            next_state_distribution = DiscreteDistribution([
                (
                    MyState(min(memory.x + 1, self.num_cols - 1), memory.y,
                            memory.t - 1),
                    0.9,
                ),
                (MyState(-1, -1, 0), 0.1),
            ])
        elif action == MyActions.up_slow:
            next_state_distribution = DiscreteDistribution([
                (MyState(memory.x, max(memory.y - 1, 0), memory.t - 1), 0.8),
                (MyState(-1, -1, 0), 0.2),
            ])
        elif action == MyActions.up_fast:
            next_state_distribution = DiscreteDistribution([
                (MyState(memory.x, max(memory.y - 1, 0), memory.t - 1), 0.9),
                (MyState(-1, -1, 0), 0.1),
            ])
        elif action == MyActions.down_slow:
            next_state_distribution = DiscreteDistribution([
                (
                    MyState(memory.x, min(memory.y + 1, self.num_rows - 1),
                            memory.t - 1),
                    0.8,
                ),
                (MyState(-1, -1, 0), 0.2),
            ])
        elif action == MyActions.down_fast:
            next_state_distribution = DiscreteDistribution([
                (
                    MyState(memory.x, min(memory.y + 1, self.num_rows - 1),
                            memory.t - 1),
                    0.9,
                ),
                (MyState(-1, -1, 0), 0.1),
            ])

        return next_state_distribution
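Every action branch above puts 0.8 or 0.9 on the intended move and the rest on the sink state MyState(-1, -1, 0); a quick check sketch, with `domain` as a hypothetical instance of the enclosing class and get_values() assumed as in scikit-decide:

# Hypothetical check: `domain` stands in for an instance of the enclosing class.
state = MyState(2, 2, 5)
for action, sink_probability in [(MyActions.left_slow, 0.2),
                                 (MyActions.left_fast, 0.1)]:
    distribution = domain._get_next_state_distribution(state, action)
    probabilities = [prob for _, prob in distribution.get_values()]
    # The smaller mass is the probability of falling into the sink state.
    assert abs(min(probabilities) - sink_probability) < 1e-9
    assert abs(sum(probabilities) - 1.0) < 1e-9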