def create_do_from_sk():
    rcpsp_domain = MyExampleRCPSPDomain()
    do_domain = build_do_domain(rcpsp_domain)
    print(do_domain.__class__)
    rcpsp_domain = MyExampleMRCPSPDomain_WithCost()
    do_domain = build_do_domain(rcpsp_domain)
    print(do_domain.__class__)
    from skdecide.hub.domain.rcpsp.rcpsp_sk_parser import load_domain, load_multiskill_domain
    rcpsp_domain = load_multiskill_domain()
    do_domain = build_do_domain(rcpsp_domain)
    print(do_domain.__class__)
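A minimal driver for the helper above, assuming the example domain classes and build_do_domain are already imported at module level as in the scikit-decide examples (this is a sketch, not part of the original snippet):

# Sketch only: relies on the imports used by the snippet above being in scope.
if __name__ == "__main__":
    create_do_from_sk()  # prints the discrete-optimization class produced for each domain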
    def _solve_domain(self, domain_factory: Callable[[], D]) -> None:
        self.domain = domain_factory()
        self.do_domain = build_do_domain(self.domain)
        solvers = build_solver(solving_method=self.method,
                               do_domain=self.do_domain)
        solver_class = solvers[0]
        key, params = solvers[1]
        for k in params:
            if k not in self.dict_params:
                self.dict_params[k] = params[k]
        self.solver = solver_class(self.do_domain, **self.dict_params)
        try:
            self.solver.init_model(**self.dict_params)
        except Exception:
            # Some solvers do not expose init_model; ignore and solve directly.
            pass
        result_storage = self.solver.solve(**self.dict_params)
        best_solution: RCPSPSolution = result_storage.get_best_solution()
        fits = self.do_domain.evaluate(best_solution)
        print("Best solution fitness found: ", fits)
        self.best_solution = best_solution
        print("Satisfiable: ", self.do_domain.satisfy(self.best_solution))
        self.policy_object = from_solution_to_policy(
            solution=best_solution,
            domain=self.domain,
            policy_method_params=self.policy_method_params)
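The same build-then-solve pattern can be sketched as a standalone function, using only the calls that appear above; the function name and default-parameter handling are assumptions, and build_do_domain/build_solver are expected to be imported as in the snippet:

# Sketch of the underlying pattern, assuming `domain` is a scikit-decide scheduling
# domain and `method` one of the solving methods accepted by build_solver.
def solve_once(domain, method, params=None):
    params = dict(params or {})
    do_domain = build_do_domain(domain)
    solver_class, (key, default_params) = build_solver(solving_method=method,
                                                       do_domain=do_domain)
    for k, v in default_params.items():
        params.setdefault(k, v)
    solver = solver_class(do_domain, **params)
    result_storage = solver.solve(**params)
    best = result_storage.get_best_solution()
    print("fitness:", do_domain.evaluate(best), "satisfied:", do_domain.satisfy(best))
    return best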
Example #3
    def initialize_cpm_data_for_training(self):
        self.cpm_data = {}
        for domain in self.training_domains:
            do_model = build_do_domain(domain)
            cpm, cpm_esd = compute_cpm(do_model)
            self.cpm_data[domain] = {"cpm": cpm, "cpm_esd": cpm_esd}
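A hedged sketch of how the cached critical-path data is read back later (the helper name is illustrative; the lookups mirror those in the policy examples below):

# Sketch: fetch the cached CPM data for a given domain.
def get_cpm_for(cpm_data, domain):
    entry = cpm_data[domain]
    return entry["cpm"], entry["cpm_esd"]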
Example #4
    def evaluate_heuristic(self, individual, domains) -> float:
        vals = []
        func_heuristic = self.toolbox.compile(expr=individual)
        # print('individual', individual)
        for domain in domains:

            initial_state = domain.get_initial_state()

            do_model = build_do_domain(domain)
            # Mode of each task in the initial state; skip the first and last
            # tasks (the dummy source and sink).
            modes = [
                initial_state.tasks_mode.get(j, 1)
                for j in sorted(domain.get_tasks_ids())
            ]
            modes = modes[1:-1]

            cpm = self.cpm_data[domain]["cpm"]
            cpm_esd = self.cpm_data[domain]["cpm_esd"]

            raw_values = []
            for task_id in domain.get_available_tasks(initial_state):
                input_features = [
                    feature_function_map[lf](
                        domain=domain,
                        cpm=cpm,
                        cpm_esd=cpm_esd,
                        task_id=task_id,
                        state=initial_state,
                    )
                    for lf in self.list_feature
                ]
                output_value = func_heuristic(*input_features)
                raw_values.append(output_value)

            normalized_values = [
                x + 1
                for x in sorted(
                    range(len(raw_values)), key=lambda k: raw_values[k], reverse=False
                )
            ]
            normalized_values_for_do = [
                normalized_values[i] - 2
                for i in range(len(normalized_values))
                if normalized_values[i] not in {1, len(normalized_values)}
            ]

            solution = RCPSPSolution(
                problem=do_model,
                rcpsp_permutation=normalized_values_for_do,
                rcpsp_modes=modes,
            )
            last_activity = max(list(solution.rcpsp_schedule.keys()))
            do_makespan = solution.rcpsp_schedule[last_activity]["end_time"]
            vals.append(do_makespan)

        fitness = [np.mean(vals)]
        # fitness = [np.max(vals)]
        return fitness
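The ranking step used here (and in the policies below) is a plain argsort of the heuristic scores, shifted to 1-based positions; the entries pointing at the dummy source and sink are then dropped and re-indexed to build the discrete-optimization permutation. A self-contained toy illustration:

# Toy illustration of the ranking trick above (pure Python, no skdecide needed).
raw_values = [0.7, 0.1, 0.9, 0.4]           # one heuristic score per task (incl. source/sink)
order = sorted(range(len(raw_values)), key=lambda k: raw_values[k])
normalized_values = [x + 1 for x in order]  # 1-based argsort: [2, 4, 1, 3]
# Drop the entries pointing at the dummy source (value 1) and sink (value len(...)),
# then shift to a 0-based permutation over the real tasks only.
normalized_values_for_do = [v - 2 for v in normalized_values
                            if v not in {1, len(normalized_values)}]
print(normalized_values, normalized_values_for_do)  # [2, 4, 1, 3] [0, 1]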
Example #5
def create_do_from_sk():
    rcpsp_domain = MyExampleRCPSPDomain()
    do_domain = build_do_domain(rcpsp_domain)
    print("Loading rcpsp domain :resulting class in DO : ",
          do_domain.__class__)
    rcpsp_domain = MyExampleMRCPSPDomain_WithCost()
    do_domain = build_do_domain(rcpsp_domain)
    print("Loading multimode-rcpsp domain : resulting class in DO : ",
          do_domain.__class__)
    from examples.discrete_optimization.rcpsp_multiskill_parser_example import (
        get_data_available_ms,
    )
    from skdecide.hub.domain.rcpsp.rcpsp_sk_parser import load_multiskill_domain

    rcpsp_domain = load_multiskill_domain(get_data_available_ms()[0])
    do_domain = build_do_domain(rcpsp_domain)
    print(
        "Loading multiskill-rcpsp domain: resulting class in DO: ",
        do_domain.__class__,
    )
Example #6
    def evaluate_heuristic_permutation(self, individual, domains) -> float:
        vals = []
        func_heuristic = self.toolbox.compile(expr=individual)
        # print('individual', individual)

        for domain in domains:

            raw_values = []
            initial_state = domain.get_initial_state()

            regenerate_cpm = False
            if regenerate_cpm:
                do_model = build_do_domain(domain)
                cpm, cpm_esd = compute_cpm(do_model)
            else:
                cpm = self.cpm_data[domain]['cpm']
                cpm_esd = self.cpm_data[domain]['cpm_esd']

            for task_id in domain.get_available_tasks(state=initial_state):

                input_features = [
                    feature_function_map[lf](
                        domain=domain,
                        cpm=cpm,
                        cpm_esd=cpm_esd,
                        task_id=task_id,
                        state=initial_state,
                    )
                    for lf in self.list_feature
                ]
                output_value = func_heuristic(*input_features)
                raw_values.append(output_value)

            most_common_raw_val = max(raw_values, key=raw_values.count)
            most_common_count = raw_values.count(most_common_raw_val)

            heuristic_permutation = [
                x + 1
                for x in sorted(
                    range(len(raw_values)), key=lambda k: raw_values[k], reverse=False
                )
            ]

            if self.permutation_distance == PermutationDistance.KTD:
                dist, p_value = stats.kendalltau(
                    heuristic_permutation, self.reference_permutations[domain]
                )
                dist = -dist
            elif self.permutation_distance == PermutationDistance.HAMMING:
                dist = distance.hamming(
                    heuristic_permutation, self.reference_permutations[domain]
                )
            elif self.permutation_distance == PermutationDistance.KTD_HAMMING:
                ktd, _ = stats.kendalltau(
                    heuristic_permutation, self.reference_permutations[domain]
                )
                dist = -ktd + distance.hamming(
                    heuristic_permutation, self.reference_permutations[domain]
                )

            penalty = most_common_count / len(raw_values)
            # penalty = 0.
            penalized_distance = dist + penalty
            vals.append(penalized_distance)
        fitness = [np.mean(vals)]
        # fitness = [np.max(vals)]
        return fitness
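For reference, the two permutation distances above come straight from SciPy; a small self-contained check on toy permutations, following the same sign convention as the code (lower is better):

# Toy check of the permutation distances used as fitness above.
from scipy import stats
from scipy.spatial import distance

reference = [1, 2, 3, 4, 5]
candidate = [2, 1, 3, 5, 4]
ktd, _ = stats.kendalltau(candidate, reference)   # correlation in [-1, 1]
ham = distance.hamming(candidate, reference)      # fraction of mismatching positions
print(-ktd, ham, -ktd + ham)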
Example #7
    def get_available_methods(self, domain: SchedulingDomain):
        do_domain = build_do_domain(domain)
        if isinstance(do_domain, MS_RCPSPModel):
            from skdecide.builders.discrete_optimization.rcpsp_multiskill.rcpsp_multiskill_solvers import (
                look_for_solver,
                solvers_map,
            )
            available = look_for_solver(do_domain)
        elif isinstance(
            do_domain, (SingleModeRCPSPModel, RCPSPModel, MultiModeRCPSPModel)
        ):
            from skdecide.builders.discrete_optimization.rcpsp.rcpsp_solvers import (
                look_for_solver,
                solvers_map,
            )
            available = look_for_solver(do_domain)
        smap = [(av, solvers_map[av]) for av in available]
        print("available solvers:", smap)
        return smap
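A hedged usage sketch, assuming the method above is bound to some object exposing it; `helper` and `scheduling_domain` are illustrative placeholders, not names from the snippet:

# Sketch only: `helper` stands for whatever object defines get_available_methods,
# and `scheduling_domain` for any skdecide SchedulingDomain instance.
for entry in helper.get_available_methods(scheduling_domain):
    solver_class, solver_info = entry   # (solver class, its entry in solvers_map)
    print(solver_class, solver_info)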
Example #8
    def _get_next_action(
        self, observation: D.T_agent[D.T_observation]
    ) -> D.T_agent[D.T_concurrency[D.T_event]]:
        run_sgs = True
        cheat_mode = False

        do_model = build_do_domain(self.domain_model)
        modes = [
            observation.tasks_mode.get(j, 1)
            for j in sorted(self.domain.get_tasks_ids())
        ]
        modes = modes[1:-1]

        if run_sgs:
            scheduled_tasks_start_times = {}
            for j in observation.tasks_details.keys():
                if observation.tasks_details[j].start is not None:
                    scheduled_tasks_start_times[j] = observation.tasks_details[j].start
                    do_model.mode_details[j][1]["duration"] = observation.tasks_details[
                        j
                    ].sampled_duration

        # do_model = build_do_domain(self.domain)
        # modes = [observation.tasks_mode.get(j, 1) for j in sorted(self.domain.get_tasks_ids())]
        # modes = modes[1:-1]
        #
        # if run_sgs:
        #     scheduled_tasks_start_times = {}
        #     for j in observation.tasks_details.keys():
        #         if observation.tasks_details[j].start is not None:
        #             scheduled_tasks_start_times[j] = observation.tasks_details[j].start
        #         else:
        #             if not cheat_mode:
        #                 do_model.mode_details[j][1]['duration'] = self.domain_model.sample_task_duration(j, 1, 0.)

        if self.recompute_cpm:
            cpm, cpm_esd = compute_cpm(do_model)
        else:
            cpm = self.cpm_data[self.domain]["cpm"]
            cpm_esd = self.cpm_data[self.domain]["cpm_esd"]

        t = observation.t
        raw_values = []
        for task_id in self.domain.get_available_tasks(observation):
            input_features = [
                feature_function_map[lf](
                    domain=self.domain,
                    cpm=cpm,
                    cpm_esd=cpm_esd,
                    task_id=task_id,
                    state=observation,
                )
                for lf in self.list_feature
            ]
            output_value = self.func_heuristic(*input_features)
            raw_values.append(output_value)

        normalized_values = [
            x + 1
            for x in sorted(
                range(len(raw_values)), key=lambda k: raw_values[k], reverse=False
            )
        ]
        normalized_values_for_do = [
            normalized_values[i] - 2
            for i in range(len(normalized_values))
            if normalized_values[i] not in {1, len(normalized_values)}
        ]

        # print(t, ': ', normalized_values)
        # print('normalized_values_for_do: ', normalized_values_for_do)

        modes_dictionnary = {}
        for i in range(len(normalized_values)):
            modes_dictionnary[i + 1] = 1

        if run_sgs:

            solution = RCPSPSolution(
                problem=do_model,
                rcpsp_permutation=normalized_values_for_do,
                rcpsp_modes=modes,
            )

            solution.generate_schedule_from_permutation_serial_sgs_2(
                current_t=t,
                completed_tasks={
                    j: observation.tasks_details[j] for j in observation.tasks_complete
                },
                scheduled_tasks_start_times=scheduled_tasks_start_times,
            )

            schedule = solution.rcpsp_schedule
        else:
            schedule = None

        sgs_policy = PolicyRCPSP(
            domain=self.domain,
            schedule=schedule,
            policy_method_params=PolicyMethodParams(
                # base_policy_method=BasePolicyMethod.SGS_PRECEDENCE,
                # base_policy_method=BasePolicyMethod.SGS_READY,
                base_policy_method=self.params_gphh.base_policy_method,
                delta_index_freedom=self.params_gphh.delta_index_freedom,
                delta_time_freedom=self.params_gphh.delta_time_freedom,
            ),
            permutation_task=normalized_values,
            modes_dictionnary=modes_dictionnary,
        )
        action: SchedulingAction = sgs_policy.sample_action(observation)
        # print('action_2: ', action.action)
        return action
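The policy above rebuilds a full schedule at every step: it scores the currently available tasks, turns the scores into a priority permutation, and re-runs the serial SGS from the current time while freezing tasks that have already started. A condensed sketch of that rescheduling loop, keeping only the calls that appear above (the function name and parameters are illustrative):

# Condensed sketch of the per-step rescheduling done above (names other than the
# skdecide/discrete-optimization calls are illustrative).
def reschedule_from(observation, do_model, modes, priority_permutation):
    # Freeze tasks that already have a start time in the current observation.
    scheduled_tasks_start_times = {
        j: details.start
        for j, details in observation.tasks_details.items()
        if details.start is not None
    }
    solution = RCPSPSolution(
        problem=do_model,
        rcpsp_permutation=priority_permutation,
        rcpsp_modes=modes,
    )
    solution.generate_schedule_from_permutation_serial_sgs_2(
        current_t=observation.t,
        completed_tasks={j: observation.tasks_details[j] for j in observation.tasks_complete},
        scheduled_tasks_start_times=scheduled_tasks_start_times,
    )
    return solution.rcpsp_schedule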
Example #9
    def _get_next_action(
        self, observation: D.T_agent[D.T_observation]
    ) -> D.T_agent[D.T_concurrency[D.T_event]]:
        run_sgs = True
        cheat_mode = False

        do_model = build_do_domain(self.domain_model)
        modes = [
            observation.tasks_mode.get(j, 1)
            for j in sorted(self.domain.get_tasks_ids())
        ]
        modes = modes[1:-1]

        if run_sgs:
            scheduled_tasks_start_times = {}
            for j in observation.tasks_details.keys():
                if observation.tasks_details[j].start is not None:
                    scheduled_tasks_start_times[j] = observation.tasks_details[j].start
                    do_model.mode_details[j][1]["duration"] = observation.tasks_details[
                        j
                    ].sampled_duration

        # do_model = build_do_domain(self.domain)
        # modes = [observation.tasks_mode.get(j, 1) for j in sorted(self.domain.get_tasks_ids())]
        # modes = modes[1:-1]
        #
        # if run_sgs:
        #     scheduled_tasks_start_times = {}
        #     for j in observation.tasks_details.keys():
        #         # schedule[j] = {}
        #         if observation.tasks_details[j].start is not None:
        #             # schedule[j]["start_time"] = observation.tasks_details[j].start
        #             scheduled_tasks_start_times[j] = observation.tasks_details[j].start
        #         # if observation.tasks_details[j].end is not None:
        #         #     schedule[j]["end_time"] = observation.tasks_details[j].end
        #         else:
        #             if not cheat_mode:
        #                 # print('do_model: ', do_model)
        #                 do_model.mode_details[j][1]['duration'] = self.domain_model.sample_task_duration(j, 1, 0.)

        normalized_values = self.fixed_perm

        normalized_values_for_do = [
            normalized_values[i] - 2
            for i in range(len(normalized_values))
            if normalized_values[i] not in {1, len(normalized_values)}
        ]

        # print('normalized_values: ', normalized_values)
        # print('normalized_values_for_do: ', normalized_values_for_do)
        t = observation.t

        modes_dictionnary = {}
        for i in range(len(normalized_values)):
            modes_dictionnary[i + 1] = 1

        if run_sgs:

            solution = RCPSPSolution(
                problem=do_model,
                rcpsp_permutation=normalized_values_for_do,
                rcpsp_modes=modes,
            )

            solution.generate_schedule_from_permutation_serial_sgs_2(
                current_t=t,
                completed_tasks={
                    j: observation.tasks_details[j] for j in observation.tasks_complete
                },
                scheduled_tasks_start_times=scheduled_tasks_start_times,
            )

            schedule = solution.rcpsp_schedule
        else:
            schedule = None

        sgs_policy = PolicyRCPSP(
            domain=self.domain,
            schedule=schedule,
            policy_method_params=PolicyMethodParams(
                # base_policy_method=BasePolicyMethod.SGS_PRECEDENCE,
                # base_policy_method=BasePolicyMethod.SGS_READY,
                base_policy_method=BasePolicyMethod.FOLLOW_GANTT,
                # delta_index_freedom=self.params_gphh.delta_index_freedom,
                # delta_time_freedom=self.params_gphh.delta_time_freedom
            ),
            permutation_task=normalized_values,
            modes_dictionnary=modes_dictionnary,
        )
        action: SchedulingAction = sgs_policy.sample_action(observation)
        # print('action_2: ', action.action)
        return action
Example #10
    def _get_next_action(
        self, observation: D.T_agent[D.T_observation]
    ) -> D.T_agent[D.T_concurrency[D.T_event]]:

        run_sgs = True
        cheat_mode = False
        regenerate_cpm = True

        do_model = build_do_domain(self.domain_model)
        modes = [
            observation.tasks_mode.get(j, 1)
            for j in sorted(self.domain.get_tasks_ids())
        ]
        modes = modes[1:-1]

        if run_sgs:
            scheduled_tasks_start_times = {}
            for j in observation.tasks_details.keys():
                if observation.tasks_details[j].start is not None:
                    scheduled_tasks_start_times[j] = observation.tasks_details[j].start
                    do_model.mode_details[j][1]["duration"] = observation.tasks_details[
                        j
                    ].sampled_duration

        # do_model = build_do_domain(self.domain)
        # modes = [observation.tasks_mode.get(j, 1) for j in sorted(self.domain.get_tasks_ids())]
        # modes = modes[1:-1]
        #
        # if run_sgs:
        #     scheduled_tasks_start_times = {}
        #     for j in observation.tasks_details.keys():
        #         # schedule[j] = {}
        #         if observation.tasks_details[j].start is not None:
        #             # schedule[j]["start_time"] = observation.tasks_details[j].start
        #             scheduled_tasks_start_times[j] = observation.tasks_details[j].start
        #         # if observation.tasks_details[j].end is not None:
        #         #     schedule[j]["end_time"] = observation.tasks_details[j].end
        #         else:
        #             if not cheat_mode:
        #                 do_model.mode_details[j][1]['duration'] = self.domain_model.sample_task_duration(j, 1, 0.)

        if regenerate_cpm:
            cpm, cpm_esd = compute_cpm(do_model)

        t = observation.t
        raw_values = []
        for task_id in self.domain.get_available_tasks(observation):
            input_features = [
                feature_function_map[lf](
                    domain=self.domain,
                    cpm=cpm,
                    cpm_esd=cpm_esd,
                    task_id=task_id,
                    state=observation,
                )
                for lf in self.list_feature
            ]
            output_values = []
            for f in self.func_heuristics:
                output_value = f(*input_features)
                output_values.append(output_value)

            # print('output_values: ', output_values)
            if self.remove_extremes_values > 0:
                the_median = float(np.median(output_values))
                tmp = {}
                for i in range(len(output_values)):
                    tmp[i] = abs(output_values[i] - the_median)
                tmp = sorted(tmp.items(), key=lambda x: x[1], reverse=True)
                to_remove = [tmp[i][0] for i in range(self.remove_extremes_values)]
                output_values = list(np.delete(output_values, to_remove))

            # print('output_values filtered: ', output_values)
            if self.pool_aggregation_method == PoolAggregationMethod.MEAN:
                agg_value = np.mean(output_values)
            elif self.pool_aggregation_method == PoolAggregationMethod.MEDIAN:
                agg_value = np.median(output_values)
            elif self.pool_aggregation_method == PoolAggregationMethod.RANDOM:
                # random.randint needs both bounds (inclusive) with the stdlib module.
                index = random.randint(0, len(output_values) - 1)
                agg_value = output_values[index]

            # print('agg_value: ', agg_value)
            raw_values.append(agg_value)

        normalized_values = [
            x + 1
            for x in sorted(
                range(len(raw_values)), key=lambda k: raw_values[k], reverse=False
            )
        ]
        normalized_values_for_do = [
            normalized_values[i] - 2
            for i in range(len(normalized_values))
            if normalized_values[i] not in {1, len(normalized_values)}
        ]

        # print('normalized_values: ', normalized_values)
        # print('normalized_values_for_do: ', normalized_values_for_do)

        modes_dictionnary = {}
        for i in range(len(normalized_values)):
            modes_dictionnary[i + 1] = 1

        if run_sgs:

            solution = RCPSPSolution(
                problem=do_model,
                rcpsp_permutation=normalized_values_for_do,
                rcpsp_modes=modes,
            )

            solution.generate_schedule_from_permutation_serial_sgs_2(
                current_t=t,
                completed_tasks={
                    j: observation.tasks_details[j] for j in observation.tasks_complete
                },
                scheduled_tasks_start_times=scheduled_tasks_start_times,
            )

            schedule = solution.rcpsp_schedule
        else:
            schedule = None

        sgs_policy = PolicyRCPSP(
            domain=self.domain,
            schedule=schedule,
            policy_method_params=PolicyMethodParams(
                # base_policy_method=BasePolicyMethod.SGS_PRECEDENCE,
                # base_policy_method=BasePolicyMethod.SGS_READY,
                base_policy_method=self.params_gphh.base_policy_method,
                delta_index_freedom=self.params_gphh.delta_index_freedom,
                delta_time_freedom=self.params_gphh.delta_time_freedom,
            ),
            permutation_task=normalized_values,
            modes_dictionnary=modes_dictionnary,
        )
        action: SchedulingAction = sgs_policy.sample_action(observation)
        # print('action_2: ', action.action)
        return action
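Example #10 aggregates a pool of evolved heuristics per task; the outlier-removal step keeps the values closest to the median before aggregating. A standalone illustration of that filter, mirroring the logic above with toy numbers:

# Toy illustration of the "remove extremes, then aggregate" step above.
import numpy as np

output_values = [3.0, 3.2, 2.9, 10.0, 3.1]   # one value per heuristic in the pool
remove_extremes_values = 1                    # how many outliers to drop

the_median = float(np.median(output_values))
deviations = sorted(
    ((i, abs(v - the_median)) for i, v in enumerate(output_values)),
    key=lambda x: x[1],
    reverse=True,
)
to_remove = [i for i, _ in deviations[:remove_extremes_values]]
filtered = list(np.delete(output_values, to_remove))
print(filtered, np.mean(filtered), np.median(filtered))  # drops the 10.0 outlier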