Example #1
def start_repair(org_code, team_id):
    """Repair the current plan with the PPO RL agent:
    for each unplanned job, predict an action and commit it to the env.
    """

    log = logging.getLogger("rl_env")
    log.setLevel(logging.ERROR)

    klog = logging.getLogger("kafka.conn")
    klog.setLevel(logging.ERROR)

    log = logging.getLogger("cli_repair")
    log.setLevel(logging.INFO)

    log.info(f"Acquiring Env for team_id={team_id} ...")

    planner = get_default_active_planner(org_code=org_code, team_id=team_id)
    rl_env = planner["planner_env"]
    rl_agent = planner["planner_agent"]
    rl_agent.config["nbr_of_actions"] = 2
    pprint(rl_env.get_planner_score_stats())

    log.info(f"Starting repair for team_id={team_id},  use Ctrl-C to exit ...")

    for job_code in rl_env.jobs_dict.keys():
        if (rl_env.jobs_dict[job_code].job_type == JobType.JOB) and (
                rl_env.jobs_dict[job_code].planning_status == JobPlanningStatus.UNPLANNED):
            res = rl_agent.predict_action_dict_list(job_code=job_code)
            if len(res) < 1:
                log.warning(f"Failed to predict for job_code = {job_code}")
                continue
            one_job_action_dict = ActionDict(
                is_forced_action=True,
                job_code=job_code,
                # I assume that only in-planning jobs can appear here...
                action_type=ActionType.FLOATING,
                scheduled_worker_codes=res[0].scheduled_worker_codes,
                scheduled_start_minutes=res[0].scheduled_start_minutes,
                scheduled_duration_minutes=res[0].scheduled_duration_minutes,
            )
            internal_result_info = rl_env.mutate_update_job_by_action_dict(
                a_dict=one_job_action_dict, post_changes_flag=True)

            if internal_result_info.status_code != ActionScoringResultType.OK:
                log.warning(
                    f"JOB:{job_code}: Failed to act on job={job_code}. "  # {internal_result_info}
                )
            else:
                log.info(
                    f"JOB:{job_code}: Successfully Planned job, action={res[0]}. "
                )
    log.info("Repair Done, printing new scores...")
    pprint(rl_env.get_planner_score_stats())
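A minimal way to invoke this repair loop directly; the org code and team id below are placeholder values, not identifiers from the project:

if __name__ == "__main__":
    # Placeholder arguments; in the real project they presumably come from the CLI layer.
    # start_repair() mutates the planner env in place via mutate_update_job_by_action_dict(),
    # so there is nothing to capture from the call.
    start_repair(org_code="0", team_id=1)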
Example #2
def save_solution(agent, manager, routing, solution):
    """Prints solution on console."""
    max_route_distance = 0
    for vehicle_id in range(len(agent.worker_slots)):
        index = routing.Start(vehicle_id)
        # First one should be depot
        previous_index = index
        index = solution.Value(routing.NextVar(index))
        plan_output = "Route for vehicle {}:\n".format(vehicle_id)
        route_distance = 0
        # worker/vehicle starts at 0
        scheduled_worker_codes = [agent.worker_slots[vehicle_id].worker_id]

        next_start_minutes = agent.worker_slots[vehicle_id].start_minutes
        prev_location = agent.worker_slots[vehicle_id].end_location

        while not routing.IsEnd(index):
            plan_output += " {} -> ".format(manager.IndexToNode(index))
            # job starts at 0
            job = agent.env.jobs[index - 1]
            one_job_action_dict = ActionDict(
                job_code=job.job_code,
                scheduled_worker_codes=scheduled_worker_codes,
                scheduled_start_minutes=next_start_minutes,
                scheduled_duration_minutes=job.requested_duration_minutes,
                action_type=ActionType.FLOATING,
                is_forced_action=False,
            )
            internal_result_info = agent.env.mutate_update_job_by_action_dict(
                a_dict=one_job_action_dict, post_changes_flag=True)
            if internal_result_info.status_code != ActionScoringResultType.OK:
                print(
                    f"{one_job_action_dict.job_code}: Failed to commit change, error: {str(internal_result_info)} "
                )
            else:
                print(
                    f"job({one_job_action_dict.job_code}) is planned successfully ..."
                )

            travel_time = agent._get_travel_time_2locations(
                prev_location, job.location)

            route_distance = routing.GetArcCostForVehicle(
                previous_index, index, vehicle_id)
            next_start_minutes += job.requested_duration_minutes + route_distance

            prev_location = job.location
            previous_index = index
            index = solution.Value(routing.NextVar(index))
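For context, save_solution follows the usual OR-Tools routing flow: it is handed the manager, routing model and solution produced by routing.SolveWithParameters(). A self-contained sketch of that wiring, assuming a precomputed travel-time matrix; the matrix, depot index and agent object here are stand-ins, not taken from the snippet above:

from ortools.constraint_solver import pywrapcp, routing_enums_pb2


def solve_and_save(agent, travel_minutes, num_vehicles, depot=0):
    # travel_minutes is assumed to be a square matrix of integer travel costs.
    manager = pywrapcp.RoutingIndexManager(len(travel_minutes), num_vehicles, depot)
    routing = pywrapcp.RoutingModel(manager)

    def transit_cb(from_index, to_index):
        # Routing indices must be converted back to node indices for the lookup.
        return travel_minutes[manager.IndexToNode(from_index)][manager.IndexToNode(to_index)]

    transit_idx = routing.RegisterTransitCallback(transit_cb)
    routing.SetArcCostEvaluatorOfAllVehicles(transit_idx)

    params = pywrapcp.DefaultRoutingSearchParameters()
    params.first_solution_strategy = routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC

    solution = routing.SolveWithParameters(params)
    if solution is not None:
        save_solution(agent, manager, routing, solution)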
Example #3
def generic_job_commit(
    request_in: GenericJobCommitActionInput,
    current_user: DispatchUser = Depends(get_current_user),
    db_session: Session = Depends(get_db),
):

    org_code = current_user.org_code  # "0"  #

    team_id = request_in.team_id

    planner = get_default_active_planner(org_code=org_code, team_id=team_id)
    rl_env = planner["planner_env"]

    if request_in.job_code not in rl_env.jobs_dict.keys():
        return GenericJobCommitOutput(
            errorNumber=PLANNER_ERROR_MESSAGES["JOB_NOT_EXIST_IN_ENV"][0],
            errorDescription=f"The JOB (id={request_in.job_code}) is in the system but does not exist in env with team_id={team_id}.",
        )

    scheduled_start_minutes = rl_env.env_encode_from_datetime_to_minutes(
        request_in.scheduled_start_datetime.replace(tzinfo=None)
    )

    one_job_action_dict = ActionDict(
        is_forced_action=False,
        job_code=request_in.job_code,
        action_type=ActionType.UNPLAN if request_in.planning_status == 'U' else ActionType.FLOATING,
        scheduled_worker_codes=request_in.scheduled_worker_codes,
        scheduled_start_minutes=scheduled_start_minutes,
        scheduled_duration_minutes=request_in.scheduled_duration_minutes,
    )

    internal_result_info = rl_env.mutate_update_job_by_action_dict(
        a_dict=one_job_action_dict, post_changes_flag=True
    )

    if internal_result_info.status_code != ActionScoringResultType.OK:
        errorNumber = 40001
        errorDescription = str(internal_result_info)
    else:
        errorNumber = 0
        errorDescription = "OK"

    return GenericJobCommitOutput(
        errorNumber=errorNumber, errorDescription=errorDescription
    )
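The handler above only reads a handful of fields from GenericJobCommitActionInput. A hedged sketch of a matching request payload; the values are hypothetical, the real model may contain more fields, and pydantic would parse the datetime string into a datetime object:

example_request = {
    "team_id": 1,
    "job_code": "JOB-0001",            # must already exist in rl_env.jobs_dict
    "planning_status": "I",            # anything other than 'U' maps to ActionType.FLOATING; 'U' maps to UNPLAN
    "scheduled_worker_codes": ["WORKER-01"],
    "scheduled_start_datetime": "2021-01-04T09:00:00",
    "scheduled_duration_minutes": 60,
}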
Example #4
    def dispatch_jobs(self, env, rl_agent=None):
        # Real batch; rl_agent is skipped.
        self.kandbox_env = env
        GENERATOR_START_DATE = datetime.strptime(
            self.kandbox_env.config["env_start_day"],
            config.KANDBOX_DATE_FORMAT)
        GENERATOR_END_DATE = GENERATOR_START_DATE + timedelta(
            days=self.kandbox_env.config["nbr_of_days_planning_window"])
        current_date = GENERATOR_START_DATE

        # day_seq = int(self.kandbox_env.env_encode_from_datetime_to_minutes( self.kandbox_env.data_start_datetime) / 1440) - 1

        while current_date < GENERATOR_END_DATE:
            """
            visit_cust  = self.kandbox_env.load_job_status_df(
                planner_code = config.ORIGINAL_PLANNER,
                start_date = start_date,
                end_date = end_date
                )
            """
            print("current_start_day", current_date, "GENERATOR_END_DATE",
                  GENERATOR_END_DATE)
            current_start_day = datetime.strftime(current_date,
                                                  config.KANDBOX_DATE_FORMAT)

            day_seq = int(
                self.kandbox_env.env_encode_from_datetime_to_minutes(
                    current_date) / 1440)

            # purge job status for this planner and this day.

            workers = self.kandbox_env.workers  # start_day=current_start_day
            # TODO, do it per day

            jobs_orig = []
            for j in self.kandbox_env.jobs:
                if (j.requested_start_minutes >=
                        day_seq * 1440) and (j.requested_start_minutes <
                                             (day_seq + 1) * 1440):
                    jobs_orig.append(j)
            if len(jobs_orig) < 1:
                print("it is empty, nothing to dispatch!")
                return

            current_shifts = jobs_orig
            current_workers = workers  # .T.to_dict().values()

            print({
                "loaded day": current_date,
                "job count": len(current_shifts),
                "current_workers count": len(current_workers),
            })

            worker_day = self.dispatch_jobs_1day(
                jobs=current_shifts[0:53],
                workers=current_workers)  # [0:20]  [:70]
            # worker_day is the dispatched result, now we save it into DB.

            if len(worker_day) < 1:
                print(
                    "no data returned from opti1day! I will move on to next day!"
                )
                current_date = current_date + timedelta(days=1)
                continue
            # pprint(worker_day)
            job_list = []
            for w_i in range(len(worker_day)):
                pre_job_code = "__HOME"
                for task in worker_day[w_i][:-1]:
                    task_id = task[0]  # + 1
                    worker_code = current_workers[w_i].worker_code

                    # updated_order =  {} # latest_order_dict[id]
                    job_list.append({
                        "job_code": current_shifts[task_id].job_code,
                        "job_schedule_type": current_shifts[task_id].job_schedule_type,
                        "planning_status": JobPlanningStatus.IN_PLANNING,
                        "scheduled_primary_worker_id": worker_code,
                        "scheduled_start_day": datetime.strftime(current_date, config.KANDBOX_DATE_FORMAT),
                        "requested_start_day": datetime.strftime(current_date, config.KANDBOX_DATE_FORMAT),
                        "scheduled_start_minutes": task[1] + (day_seq * 1440),
                        "scheduled_duration_minutes": current_shifts[task_id].requested_duration_minutes,
                        "scheduled_travel_minutes_before": task[2],
                        "scheduled_travel_prev_code": pre_job_code,
                        # "location_code": current_shifts[task_id]["location_code"],
                        "geo_longitude": current_shifts[task_id].location[0],
                        "geo_latitude": current_shifts[task_id].location[1],
                        "conflict_level": 0,
                        "scheduled_secondary_worker_ids": "[]",
                        "scheduled_share_status": "N",
                        "error_message": "",
                    })
                    pre_job_code = current_shifts[task_id].job_code
            """
            import pprint

            pprint.pprint(job_list)
            return
            """
            # TODO: No needs of 2 rounds.
            # fmt:off
            self.kandbox_env.kp_data_adapter.reload_data_from_db()
            for job in job_list:
                # self.kandbox_env.jobs_dict[job["job_code"]] .planning_status = job["planning_status"]
                # self.kandbox_env.jobs_dict[job["job_code"]] .scheduled_worker_codes = job ["scheduled_primary_worker_id"] + job ["scheduled_secondary_worker_ids"]
                # self.kandbox_env.jobs_dict[job["job_code"]] .scheduled_start_minutes = job["scheduled_start_minutes"]
                # self.kandbox_env.jobs_dict[job["job_code"]].is_changed = True

                # Duration does not change
                # self.kandbox_env.jobs_dict[job.job_code] .scheduled_start_minutes = job.["scheduled_start_minutes"] \
                #      + 24*60* date_util.days_between_2_day_string(start_day=self.kandbox_env.config["data_start_day"], end_day=job["scheduled_start_day"])

                one_job_action_dict = ActionDict(
                    is_forced_action=False,
                    job_code=job["job_code"],
                    # I assume that only in-planning jobs can appear here...
                    action_type=ActionType.FLOATING,
                    scheduled_worker_codes=[
                        job["scheduled_primary_worker_id"]
                    ],
                    scheduled_start_minutes=job["scheduled_start_minutes"],
                    scheduled_duration_minutes=job[
                        "scheduled_duration_minutes"],
                )
                internal_result_info = self.kandbox_env.mutate_update_job_by_action_dict(
                    a_dict=one_job_action_dict, post_changes_flag=False)
                if internal_result_info.status_code != ActionScoringResultType.OK:
                    print(
                        f"{one_job_action_dict.job_code}: Failed to commit change, error: {str(internal_result_info)} "
                    )

                # job_to_create = copy.deepcopy(self.kandbox_env.kp_data_adapter.jobs_db_dict[job["job_code"]])
                job_to_update = JobPlanningInfoUpdate(
                    code=job["job_code"],
                    planning_status=job["planning_status"],
                    scheduled_start_datetime=self.kandbox_env.
                    env_decode_from_minutes_to_datetime(
                        job["scheduled_start_minutes"]),
                    scheduled_duration_minutes=job[
                        "scheduled_duration_minutes"],
                    scheduled_primary_worker_code=job[
                        "scheduled_primary_worker_id"],
                )
                job_service.update_planning_info(
                    db_session=self.kandbox_env.kp_data_adapter.db_session,
                    job_in=job_to_update)

                # self.kandbox_env.kp_data_adapter.db_session.add(db_job)
                # self.kandbox_env.kp_data_adapter.db_session.commit()
                # job_to_create= copy.deepcopy(job_orig)

                print(
                    f"job({job_to_update.code}) is updated with new planning info"
                )

            # fmt:on

            # self.kandbox_env.commit_changed_jobs()

            current_date = current_date + timedelta(days=1)
Example #5
    def dispatch_jobs(self, env, rl_agent=None):
        # rl_agent will not be used.
        # , start_date="20191101", end_date="20191230"
        self.kandbox_env = env
        GENERATOR_START_DATE = datetime.strptime(
            self.kandbox_env.config["env_start_day"],
            config.KANDBOX_DATE_FORMAT)
        GENERATOR_END_DATE = GENERATOR_START_DATE + timedelta(
            days=self.kandbox_env.config["nbr_of_days_planning_window"])
        current_date = GENERATOR_START_DATE
        print("current_start_day", current_date, "GENERATOR_END_DATE",
              GENERATOR_END_DATE)

        slot_keys = list(self.kandbox_env.slot_server.time_slot_dict.keys())
        worker_slots = [
            self.kandbox_env.slot_server.time_slot_dict[key]
            for key in slot_keys
        ]
        # TODO, do it per day

        jobs_orig = []
        for j in self.kandbox_env.jobs:
            if j.planning_status in (JobPlanningStatus.UNPLANNED,
                                     JobPlanningStatus.IN_PLANNING):
                jobs_orig.append(j)
        if len(jobs_orig) < 1:
            print("it is empty, nothing to dispatch!")
            return

        current_shifts = jobs_orig

        print({
            "loaded day": GENERATOR_START_DATE,
            "job count": len(current_shifts),
            "worker_slots count": len(worker_slots),
        })

        worker_day = self.dispatch_jobs_to_slots(
            jobs=current_shifts, worker_slots=worker_slots)  # [0:20]  [:70]

        if len(worker_day) < 1:
            print("no data returned from dispataching!")
            return

        # pprint(worker_day)

        job_list = []
        for w_i in range(len(worker_day)):
            # worker_day is the dispatched result, now we save it into DB.
            pre_job_code = "__HOME"
            for task in worker_day[w_i][:-1]:
                task_id = task[0]  # + 1
                worker_code = worker_slots[w_i].worker_id

                # updated_order =  {} # latest_order_dict[id]
                job_list.append({
                    "job_code": current_shifts[task_id].job_code,
                    "job_schedule_type": current_shifts[task_id].job_schedule_type,
                    "planning_status": JobPlanningStatus.IN_PLANNING,
                    "scheduled_primary_worker_id": worker_code,
                    "scheduled_start_minutes": task[1] + worker_slots[w_i].start_minutes,
                    "scheduled_duration_minutes": current_shifts[task_id].requested_duration_minutes,
                    "scheduled_travel_minutes_before": task[2],
                    "scheduled_travel_prev_code": pre_job_code,
                    "geo_longitude": current_shifts[task_id].location[0],
                    "geo_latitude": current_shifts[task_id].location[1],
                    "conflict_level": 0,
                    "scheduled_secondary_worker_ids": "[]",
                    "scheduled_share_status": "N",
                    "error_message": "",
                })
                pre_job_code = current_shifts[task_id].job_code
        """
        import pprint

        pprint.pprint(job_list)
        return
        """
        # TODO: No needs of 2 rounds.
        # fmt:off
        self.kandbox_env.kp_data_adapter.reload_data_from_db()
        for job in job_list:
            one_job_action_dict = ActionDict(
                is_forced_action=False,
                job_code=job["job_code"],
                # I assume that only in-planning jobs can appear here...
                action_type=ActionType.FLOATING,
                scheduled_worker_codes=[job["scheduled_primary_worker_id"]],
                scheduled_start_minutes=job["scheduled_start_minutes"],
                scheduled_duration_minutes=job["scheduled_duration_minutes"],
            )
            internal_result_info = self.kandbox_env.mutate_update_job_by_action_dict(
                a_dict=one_job_action_dict, post_changes_flag=False)
            if internal_result_info.status_code != ActionScoringResultType.OK:
                print(
                    f"{one_job_action_dict.job_code}: Failed to commit change, error: {str(internal_result_info)} "
                )

            job_to_update = JobPlanningInfoUpdate(
                code=job["job_code"],
                job_type=JobType.JOB,
                planning_status=job["planning_status"],
                scheduled_start_datetime=self.kandbox_env.
                env_decode_from_minutes_to_datetime(
                    job["scheduled_start_minutes"]),
                scheduled_duration_minutes=job["scheduled_duration_minutes"],
                scheduled_primary_worker_code=job[
                    "scheduled_primary_worker_id"],
            )
            job_service.update_planning_info(
                db_session=self.kandbox_env.kp_data_adapter.db_session,
                job_in=job_to_update)

            print(
                f"job({job_to_update.code}) is updated with new planning info")
Example #6
    def dispatch_jobs_in_slots(self, working_time_slots: list):
        """Assign jobs to workers."""
        num_slots = len(working_time_slots)

        # All durations are in minutes.
        all_start_minutes = [
            s.start_minutes - s.start_overtime_minutes
            for s in working_time_slots
        ]
        all_end_minutes = [
            s.end_minutes + s.end_overtime_minutes for s in working_time_slots
        ]

        # MAX_MINUTE = 24 * 60  # 24 hours in one day.
        SCHEDULED_WORKER_CODES = [s.worker_id for s in working_time_slots[:-2]]
        CURRENT_JOB_CODE = working_time_slots[-1].assigned_job_codes[0]

        all_job_durations = {
            CURRENT_JOB_CODE:
            self.env.get_encode_shared_duration_by_planning_efficiency_factor(
                self.env.jobs_dict[CURRENT_JOB_CODE].
                requested_duration_minutes, len(SCHEDULED_WORKER_CODES))
        }

        for a_slot in working_time_slots:
            for a_jc in a_slot.assigned_job_codes:
                if a_jc not in all_job_durations.keys():
                    if self.env.jobs_dict[a_jc].planning_status == 'U':
                        log.error(
                            f"dispatch_jobs_in_slots: self.env.jobs_dict[a_jc].planning_status == 'U', job_code = {a_jc}"
                        )
                    all_job_durations[a_jc] = self.env.jobs_dict[
                        a_jc].scheduled_duration_minutes

        MIN_MINUTE = max(all_start_minutes)  # latest common start across the slots
        MAX_MINUTE = min(all_end_minutes)  # earliest common end across the slots

        MAX_TRAVEL_MINUTES = MAX_MINUTE - MIN_MINUTE

        # TODO: Missing shared ===
        # model.NewIntVar(min_num_workers, min_num_workers * 5, 'num_slots')
        # Computed data.

        # We are going to build a flow from the start of the day to the end
        # of the day.
        #
        # Along the path, we will accumulate travel time

        model = cp_model.CpModel()

        # Per node info

        #incoming_literals = collections.defaultdict(list)
        #outgoing_literals = collections.defaultdict(list)
        outgoing_other_job_index = []

        workers_assigned2_job_literals = collections.defaultdict(
            list)  # emp_job_literals [job] = [worker_n_lit, ....]
        travel_time_per_emp_sum_dict = {}  # TODO

        # incoming_sink_literals = []
        # outgoing_source_literals = []

        # new_start_time = []
        # Duan
        # Create all the shift variables before iterating on the transitions
        # between these shifts.
        total_travel_until_emp = {}

        shift_start_time_dict = {}
        travel_time_until_job = {}
        source_lit_dict = {}
        sink_lit_dict = {}
        total_travel_until_emp[-1] = model.NewIntVar(
            0, 0, "total_travel until emp {}".format("init"))

        all_worker_codes_for_each_job_dict = {}
        all_jobs_in_slots = []
        for slot_i in range(num_slots):
            all_jobs_in_slots.append([
                self.env.jobs_dict[jc]
                for jc in working_time_slots[slot_i].assigned_job_codes
            ])
            for ajob in all_jobs_in_slots[slot_i]:
                if ajob.job_code not in all_worker_codes_for_each_job_dict.keys():
                    all_worker_codes_for_each_job_dict[ajob.job_code] = [
                        working_time_slots[slot_i].worker_id
                    ]
                else:
                    all_worker_codes_for_each_job_dict[ajob.job_code].append(
                        working_time_slots[slot_i].worker_id)

        for slot_i in range(num_slots):
            incoming_literals_per_slot = {}
            outgoing_literals_per_slot = {}
            shift_start_time_dict[slot_i] = {}
            source_lit_dict[slot_i] = {}
            sink_lit_dict[slot_i] = {}
            travel_time_per_emp_sum_dict[slot_i] = model.NewIntVar(
                0, MAX_TRAVEL_MINUTES,
                "total_travel time for emp {}".format(slot_i))
            total_travel_until_emp[slot_i] = model.NewIntVar(
                0, MAX_TRAVEL_MINUTES * (slot_i + 1),
                "total_travel for emp {}".format(slot_i))

            # To chain and  accumulate all travel times for each employee
            model.Add(total_travel_until_emp[slot_i] ==
                      total_travel_until_emp[slot_i - 1] +
                      travel_time_per_emp_sum_dict[slot_i])
        # The total travel (travel_time_final_total) is the accumulated value of the last slot.
        travel_time_final_total = model.NewIntVar(
            0, num_slots * int(MAX_TRAVEL_MINUTES),
            "total_travel for  - {}".format("all"))
        model.Add(travel_time_final_total == total_travel_until_emp[num_slots - 1])

        # Note: sum(travel_time_per_emp_sum_dict) would sum the dict's keys, not the IntVars
        # (that needs .values()), which is why the explicit chaining above is used instead.
        """
        model.Add(travel_time_final_total == travel_time_per_emp_sum_dict[0] + travel_time_per_emp_sum_dict[1] \
                + travel_time_per_emp_sum_dict[2] + travel_time_per_emp_sum_dict[3]     \
                + travel_time_per_emp_sum_dict[4] + travel_time_per_emp_sum_dict[5]     \
                )
        """
        shared_job_lits = {}

        job_edge_dict = {}  # only for tracking

        # other_start_time_dict = {}
        for slot_i in range(num_slots):

            outgoing_other_job_index.append([])
            num_jobs = len(working_time_slots[slot_i].assigned_job_codes)
            incoming_literals_per_slot[slot_i] = collections.defaultdict(list)
            outgoing_literals_per_slot[slot_i] = collections.defaultdict(list)
            # fmt: off
            for shift in range(num_jobs):
                shift_start_time_dict[slot_i][shift] = model.NewIntVar(
                    MIN_MINUTE, MAX_MINUTE, "start_time_shift_%i" % shift)
                travel_time_until_job[shift] = model.NewIntVar(
                    0, MAX_TRAVEL_MINUTES,
                    "travel_time_until_shift_%i" % shift)

                job_code = working_time_slots[slot_i].assigned_job_codes[shift]
                if job_code in shared_job_lits.keys():
                    for existing_lits in shared_job_lits[job_code]:
                        if existing_lits[0] == slot_i:
                            log.error(
                                f"Duplicated job ({job_code}) code in same slot({working_time_slots[slot_i]})"
                            )

                            res_dict = {
                                "status": OptimizerSolutionStatus.INFEASIBLE
                            }
                            return res_dict
                            # raise ValueError(f"Error, duplicated job ({job_code}) code in same slot")
                        # Not necessary for M*N
                        # model.Add( existing_lits[1] == shift_start_time_dict[slot_i][shift] )

                    # Link only the last slot_i literal to this new literal in new slot, for same job
                    last_lit = shared_job_lits[job_code][-1][1]
                    model.Add(last_lit == shift_start_time_dict[slot_i][shift])

                    shared_job_lits[job_code].append(
                        (slot_i, shift_start_time_dict[slot_i][shift]))
                else:
                    shared_job_lits[job_code] = [
                        (slot_i, shift_start_time_dict[slot_i][shift])
                    ]

            for shift in range(num_jobs):
                #
                job_code = working_time_slots[slot_i].assigned_job_codes[shift]
                #
                # job_duration =  self.env. get_encode_shared_duration_by_planning_efficiency_factor (
                #     self.env.jobs_dict[job_code].requested_duration_minutes,
                #     NBR_OF_WORKERS)
                """
                if working_time_slots[slot_i].assigned_job_codes[shift]["mandatory_minutes_minmax_flag"] == 1:
                    shift_start_time_dict[slot_i][shift] = model.NewIntVar(
                        working_time_slots[slot_i].assigned_job_codes[shift]["requested_start_min_minutes"],
                        working_time_slots[slot_i].assigned_job_codes[shift]["requested_start_max_minutes"],
                        "start_time_shift_%i" % shift,
                    )
                else:
                """
                source_lit_dict[slot_i][shift] = model.NewBoolVar(
                    "Source Emp {} to job {}".format(slot_i, shift))
                # Arc from source worker to this job
                incoming_literals_per_slot[slot_i][shift].append(
                    source_lit_dict[slot_i][shift])

                # If this job[shift] is first job for worker[slot_i], travel time on this job is from home to job.
                model.Add(travel_time_until_job[shift] ==
                          self._get_travel_time_from_location_to_job(
                              working_time_slots[slot_i].start_location,
                              working_time_slots[slot_i].
                              assigned_job_codes[shift])).OnlyEnforceIf(
                                  source_lit_dict[slot_i][shift])

                sink_lit_dict[slot_i][shift] = model.NewBoolVar(
                    "Sink job {} to emp {} ".format(shift, slot_i))
                # Arc from job to sinking_worker.
                outgoing_literals_per_slot[slot_i][shift].append(
                    sink_lit_dict[slot_i][shift])

                this_job = self.env.jobs_dict[job_code]
                # If this job[shift] is the last job for worker[slot_i], the worker's total travel
                # (travel_time_per_emp_sum_dict) is the travel until this job plus the trip from the job back home.
                model.Add(
                    travel_time_per_emp_sum_dict[slot_i] ==
                    travel_time_until_job[shift] +
                    self._get_travel_time_from_location_to_job(
                        working_time_slots[slot_i].end_location,
                        working_time_slots[slot_i].assigned_job_codes[shift])
                ).OnlyEnforceIf(sink_lit_dict[slot_i][shift])  # from sink_

                # job must obey Start time for worker, if assigned to this worker
                # TODO, only 1 day for now, [0]
                try:
                    model.Add(shift_start_time_dict[slot_i][shift] >= int(
                        working_time_slots[slot_i].start_minutes -
                        working_time_slots[slot_i].start_overtime_minutes +
                        self._get_travel_time_from_location_to_job(
                            working_time_slots[slot_i].start_location,
                            working_time_slots[slot_i].
                            assigned_job_codes[shift]))).OnlyEnforceIf(
                                source_lit_dict[slot_i][shift])
                except TypeError:
                    log.error("internal - int vs float?")

                #
                # job must obey end time for worker, if assigned to this worker
                model.Add(shift_start_time_dict[slot_i][shift] <= int(
                    working_time_slots[slot_i].end_minutes +
                    working_time_slots[slot_i].end_overtime_minutes -
                    all_job_durations[job_code] -
                    self._get_travel_time_from_location_to_job(
                        working_time_slots[slot_i].end_location,
                        working_time_slots[slot_i].assigned_job_codes[shift]))
                          ).OnlyEnforceIf(sink_lit_dict[slot_i][shift])

                for other in range(num_jobs):
                    if shift == other:
                        continue
                    other_job_code = working_time_slots[
                        slot_i].assigned_job_codes[other]
                    other_duration = self.env.jobs_dict[
                        other_job_code].requested_duration_minutes

                    lit = model.NewBoolVar("job path from %i to %i" %
                                           (shift, other))
                    job_edge_dict[(slot_i, shift, other)] = lit

                    # constraint for start time by duan 2019-10-09 16:58:42  #### + working_time_slots[slot_i].assigned_job_codes[shift]['requested_duration_minutes'] + min_delay_between_shifts

                    model.Add(shift_start_time_dict[slot_i][shift] + int(
                        all_job_durations[job_code]
                    ) + self._get_travel_time_2_sites(
                        working_time_slots[slot_i].assigned_job_codes[shift],
                        working_time_slots[slot_i].assigned_job_codes[other]) <
                              shift_start_time_dict[slot_i][other]
                              ).OnlyEnforceIf(lit)

                    # Increase travel time
                    model.Add(
                        travel_time_until_job[other] ==
                        travel_time_until_job[shift] +
                        self._get_travel_time_2_sites(
                            working_time_slots[slot_i].
                            assigned_job_codes[shift],
                            working_time_slots[slot_i].
                            assigned_job_codes[other])).OnlyEnforceIf(lit)

                    # Add arc
                    outgoing_literals_per_slot[slot_i][shift].append(lit)
                    incoming_literals_per_slot[slot_i][other].append(lit)
            """
            model.Add(sum(  (  outgoing_literals_per_slot[slot_i][s_i] for s_i in range(num_jobs) )
                ) == 1)
            model.Add(sum( [incoming_literals_per_slot[slot_i][s_i] for s_i in range(num_jobs) ]
            ) == 1)
            """

        # fmt: on
        # Create dag constraint.
        for slot_i in range(num_slots):
            num_jobs = len(working_time_slots[slot_i].assigned_job_codes)
            model.Add(
                sum((sink_lit_dict[slot_i][s_i]
                     for s_i in range(num_jobs))) == 1)
            model.Add(
                sum((source_lit_dict[slot_i][s_i]
                     for s_i in range(num_jobs))) == 1)

            for shift in range(num_jobs):
                model.Add(
                    sum((outgoing_literals_per_slot[slot_i][shift][s_i]
                         for s_i in range(num_jobs))) == 1)
                model.Add(
                    sum((incoming_literals_per_slot[slot_i][shift][s_i]
                         for s_i in range(num_jobs))) == 1)
        """
        """
        # model.Add(sum(incoming_sink_literals) == num_slots)
        # model.Add(sum(outgoing_source_literals) == num_slots)

        model.Minimize(travel_time_final_total)

        # Solve model.
        solver = cp_model.CpSolver()
        solver.parameters.log_search_progress = self.config[
            "log_search_progress"]
        # solver.parameters.num_search_workers = 4
        # https://developers.google.com/optimization/cp/cp_tasks
        solver.parameters.max_time_in_seconds = self.config[
            "max_exec_time"]  # two minutes
        status = solver.Solve(model)

        if status != cp_model.OPTIMAL and status != cp_model.FEASIBLE:  #
            log.debug(
                f"Solver Status = {solver.StatusName(status)}, Not FEASIBLE, failed!"
            )
            res_dict = {"status": OptimizerSolutionStatus.INFEASIBLE}
            return res_dict

        optimal_travel_time = int(solver.ObjectiveValue())
        log.debug(
            f"Solver Status = {solver.StatusName(status)}, optimal_travel_time = {optimal_travel_time} minutes"
        )
        # s_printer = OneSolutionPrinter(__variables,solver)
        # return s_printer.print_solution()
        # return optimal_num_workers
        all_following_tasks = {}
        emp_following_tasks = {}
        """
        for slot_i in range(num_slots):
            print("w_{}_{}: {}, hasta {}".format(
                slot_i,
                workers[slot_i]['worker_code'],
                solver.Value( travel_time_per_emp_sum_dict[slot_i]),
                solver.Value( total_travel_until_emp[slot_i])
                )
            )
        for slot_i in range(num_jobs):
            print("j_{} : {},  travel {}, start: {}".format(
                slot_i,
                working_time_slots[slot_i].assigned_job_codes[slot_i]['requested_duration_minutes'],
                solver.Value(travel_time_until_job[slot_i] ) ,
                solver.Value( shift_start_time_dict[slot_i] [slot_i])
                )
            )
        """

        # j_file.write('')
        to_print_json_list = []
        final_result = {
            "status": OptimizerSolutionStatus.SUCCESS,
            "changed_action_dict_by_job_code": {},
            "not_changed_job_codes": []
        }
        for slot_i in range(num_slots):
            to_print_json_list.append([])
            num_jobs = len(working_time_slots[slot_i].assigned_job_codes)

            # for LI in range(num_jobs):
            #     for LJ in range(num_jobs):
            #         if LI != LJ:
            #             print(f"slot-i = {slot_i}, edge: {LI}-{LJ} == {solver.BooleanValue(job_edge_dict[(slot_i,LI,LJ)])}")
            # TODO , to align jobs to job_side, or beginning
            # TODO , add lunch break as objective.
            for shift in range(num_jobs):
                changed_flag = (
                    all_jobs_in_slots[slot_i][shift].scheduled_start_minutes !=
                    solver.Value(shift_start_time_dict[slot_i][shift]))
                if changed_flag:
                    this_job_code = all_jobs_in_slots[slot_i][shift].job_code

                    one_job_action_dict = ActionDict(
                        is_forced_action=False,
                        job_code=this_job_code,
                        action_type=ActionType.JOB_FIXED,
                        # self._tmp_get_worker_code_list_by_id_n_ids(primary_id = )
                        scheduled_worker_codes=all_worker_codes_for_each_job_dict[this_job_code],
                        scheduled_start_minutes=solver.Value(shift_start_time_dict[slot_i][shift]),
                        scheduled_duration_minutes=all_jobs_in_slots[slot_i][shift].scheduled_duration_minutes,
                        # slot_code_list =
                    )
                    final_result["changed_action_dict_by_job_code"][this_job_code] = one_job_action_dict
                else:
                    final_result["not_changed_job_codes"].append(
                        all_jobs_in_slots[slot_i][shift].job_code)

                one_job_result = [
                    solver.Value(shift_start_time_dict[slot_i][shift]),
                    all_jobs_in_slots[slot_i][shift].job_code, shift,
                    changed_flag
                ]
                to_print_json_list[slot_i].append(one_job_result)
                # final_result["changed_action_dict_by_job_code"][all_jobs_in_slots [slot_i][shift].job_code] = one_job_result
        res_slots = []
        for job_list in to_print_json_list:
            res_slots.append(
                sorted(
                    job_list,
                    key=lambda item: item[0],
                    reverse=False,
                ))
        final_result["slots"] = res_slots
        log.debug(final_result)

        return final_result
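A stripped-down sketch of the CP-SAT pattern used above: boolean arc literals combined with OnlyEnforceIf order the jobs within a slot and accumulate travel time. The two-job instance, durations and travel matrix below are made up purely to illustrate the modelling idea; this is not the project's actual model:

from ortools.sat.python import cp_model


def toy_two_job_sequence():
    model = cp_model.CpModel()
    durations = [30, 45]           # two jobs in a single working slot, minutes
    travel = [[0, 10], [10, 0]]    # made-up travel minutes between the two jobs

    # Start times constrained to a 08:00-17:00 slot, expressed in minutes of the day.
    start = [model.NewIntVar(480, 1020, f"start_{j}") for j in range(2)]
    # arc[i][j] is true when job j directly follows job i.
    arc = [[model.NewBoolVar(f"arc_{i}_{j}") for j in range(2)] for i in range(2)]
    source = [model.NewBoolVar(f"source_{j}") for j in range(2)]  # job j is first in the slot
    sink = [model.NewBoolVar(f"sink_{j}") for j in range(2)]      # job j is last in the slot

    for i in range(2):
        for j in range(2):
            if i == j:
                continue
            # If j follows i, it can only start after i finishes plus the travel time.
            model.Add(start[j] >= start[i] + durations[i] + travel[i][j]).OnlyEnforceIf(arc[i][j])

    # Exactly one first and one last job; every job has one incoming and one outgoing arc.
    model.Add(sum(source) == 1)
    model.Add(sum(sink) == 1)
    for j in range(2):
        model.Add(source[j] + sum(arc[i][j] for i in range(2) if i != j) == 1)
        model.Add(sink[j] + sum(arc[j][i] for i in range(2) if i != j) == 1)

    total_travel = model.NewIntVar(0, 1000, "total_travel")
    model.Add(total_travel == sum(
        arc[i][j] * travel[i][j] for i in range(2) for j in range(2) if i != j))
    model.Minimize(total_travel)

    solver = cp_model.CpSolver()
    status = solver.Solve(model)
    if status in (cp_model.OPTIMAL, cp_model.FEASIBLE):
        return [solver.Value(s) for s in start], solver.Value(total_travel)
    return None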
Example #7
    def dispatch_jobs_in_slots(self, working_time_slots: list = None, last_job_count=1):
        """Assign jobs to workers."""
        if working_time_slots is None:
            working_time_slots = []
        num_slots = len(working_time_slots)  # - 2
        num_workers = len(working_time_slots) - 2
        if num_workers < 1:
            num_workers = 1

        if num_slots < 1:
            return JobsInSlotsDispatchResult(
                status=OptimizerSolutionStatus.INFEASIBLE,
                changed_action_dict_by_job_code={},
                all_assigned_job_codes=[],
                planned_job_sequence=[],
            )

        is_ok_start = True
        is_ok_end = True
        job_code = working_time_slots[0].assigned_job_codes[-1]
        job_duration_minutes = self.env.get_encode_shared_duration_by_planning_efficiency_factor(
            requested_duration_minutes=self.env.jobs_dict[job_code].requested_duration_minutes,
            nbr_workers=num_workers,
        )

        # Common earliest start of all slots
        current_min_start = max([s.start_minutes for s in working_time_slots])
        # Common latest start of all slots
        current_max_last_start = min(
            [s.end_minutes - job_duration_minutes for s in working_time_slots]
        )

        # The latest start when aligning from the beginning; try to align to the first job rather than the morning start.
        max_start_from_begin = current_max_last_start

        # NO need to track last start.
        # min_start_from_end = current_min_start

        for slot_i in range(num_slots):
            slot = working_time_slots[slot_i]

            sorted_slot_assigned_job_codes = sorted(
                slot.assigned_job_codes[:-1], key=lambda x: self.env.jobs_dict[x].scheduled_start_minutes)

            all_jobs_to_begin = [slot.assigned_job_codes[-1]] + sorted_slot_assigned_job_codes
            (
                prev_travel,
                next_travel,
                inside_travel,
            ) = self.env.get_travel_time_jobs_in_slot(slot, all_jobs_to_begin)
            
            if len(slot.assigned_job_codes) <= 1:
                # The job itself is the only one under consideration.
                if (
                    current_min_start + prev_travel + job_duration_minutes + next_travel >
                    slot.end_minutes
                ):
                    is_ok_start = False
                    is_ok_end = False
                    break
                else:
                    if current_min_start < slot.start_minutes:
                        current_min_start = slot.start_minutes

                    if current_max_last_start > (
                        slot.end_minutes - next_travel - job_duration_minutes
                    ):
                        current_max_last_start = (
                            slot.end_minutes - next_travel - job_duration_minutes
                        )
                    continue

            if current_min_start < slot.start_minutes:
                log.error("Should not happend: current_min_start < slot.start_minutes")
                current_min_start = slot.start_minutes

            first_job = self.env.jobs_dict[all_jobs_to_begin[1]]
            if (
                current_min_start + prev_travel + job_duration_minutes + inside_travel[0] >
                first_job.scheduled_start_minutes
            ):
                is_ok_start = False
            else:
                _new_start = (
                    first_job.scheduled_start_minutes - inside_travel[0] - job_duration_minutes
                )
                if max_start_from_begin > _new_start:
                    max_start_from_begin = _new_start
            # slot.assigned_job_codes
            all_jobs_to_end = sorted_slot_assigned_job_codes + \
                [slot.assigned_job_codes[-1]]
            (
                prev_travel,
                next_travel,
                inside_travel,
            ) = self.env.get_travel_time_jobs_in_slot(slot, all_jobs_to_end)
            last_job = self.env.jobs_dict[all_jobs_to_end[-2]]

            if (
                last_job.scheduled_start_minutes +
                last_job.scheduled_duration_minutes +
                inside_travel[-1] +
                job_duration_minutes +
                next_travel
            ) > slot.end_minutes:
                is_ok_end = False

            if (
                last_job.scheduled_start_minutes +
                last_job.scheduled_duration_minutes +
                inside_travel[-1]
            ) < current_max_last_start:
                current_max_last_start = (
                    last_job.scheduled_start_minutes +
                    last_job.scheduled_duration_minutes +
                    inside_travel[-1]
                )

        final_result = JobsInSlotsDispatchResult(
            status=OptimizerSolutionStatus.INFEASIBLE,
            changed_action_dict_by_job_code={},
            all_assigned_job_codes=[],
            planned_job_sequence=[],
        )
        if is_ok_start:
            # job_start_minutes = max_start_from_begin
            job_start_minutes = current_min_start
        elif is_ok_end:
            job_start_minutes = current_max_last_start
        else:
            return final_result
        final_result.status = OptimizerSolutionStatus.SUCCESS

        all_worker_codes = [s.worker_id for s in working_time_slots]
        one_job_action_dict = ActionDict(
            is_forced_action=False,
            job_code=job_code,
            action_type=ActionType.FLOATING,
            scheduled_worker_codes=all_worker_codes,
            scheduled_start_minutes=job_start_minutes,
            scheduled_duration_minutes=job_duration_minutes,
            # slot_code_list =
        )
        final_result.changed_action_dict_by_job_code[job_code] = one_job_action_dict

        planned_job_sequence = []
        seq = 0
        for s in working_time_slots:
            planned_job_sequence.append([])
            if is_ok_start:
                planned_job_sequence[-1].append([job_start_minutes, job_code, seq, True])
                seq = len(planned_job_sequence[-1])
            for jcode in s.assigned_job_codes[:-1]:
                planned_job_sequence[-1].append([
                    self.env.jobs_dict[jcode].scheduled_start_minutes, 
                    jcode, seq, False])
                seq = len(planned_job_sequence[-1])
            if not is_ok_start:
                planned_job_sequence[-1].append([job_start_minutes, job_code, seq, True])
                seq = len(planned_job_sequence[-1])

        final_result.planned_job_sequence = planned_job_sequence
        return final_result
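Consuming the result follows the same commit pattern used elsewhere in these snippets; a hedged sketch, assuming planner is the object owning this method and slots is an already-prepared list of working time slots:

result = planner.dispatch_jobs_in_slots(working_time_slots=slots)
if result.status == OptimizerSolutionStatus.SUCCESS:
    for a_job_code, action_dict in result.changed_action_dict_by_job_code.items():
        info = planner.env.mutate_update_job_by_action_dict(
            a_dict=action_dict, post_changes_flag=True)
        if info.status_code != ActionScoringResultType.OK:
            print(f"{a_job_code}: failed to commit the recommended action: {info}")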
Example #8
    def save_solution(self, manager, routing, solution):
        """Prints solution on console."""
        max_route_distance = 0
        for vehicle_id in range(len(self.worker_slots)):
            index = routing.Start(vehicle_id)
            # First one should be depot
            previous_index = index
            index = solution.Value(routing.NextVar(index))
            plan_output = "Route for vehicle {}:\n".format(vehicle_id)
            route_distance = 0
            # worker/vehicle starts at 0
            scheduled_worker_codes = [self.worker_slots[vehicle_id].worker_id]

            next_start_minutes = self.worker_slots[vehicle_id].start_minutes
            prev_location = self.worker_slots[vehicle_id].end_location

            while not routing.IsEnd(index):
                plan_output += " {} -> ".format(manager.IndexToNode(index))
                # job starts at 0
                job_code = self.cluster_list[index][0]  # - 1
                job = self.env.jobs_dict[job_code]
                one_job_action_dict = ActionDict(
                    job_code=job.job_code,
                    scheduled_worker_codes=scheduled_worker_codes,
                    scheduled_start_minutes=next_start_minutes,
                    scheduled_duration_minutes=job.requested_duration_minutes,
                    action_type=ActionType.FLOATING,
                    is_forced_action=False,
                )
                internal_result_info = self.env.mutate_update_job_by_action_dict(
                    a_dict=one_job_action_dict, post_changes_flag=True)
                if internal_result_info.status_code != ActionScoringResultType.OK:
                    print(
                        f"{one_job_action_dict.job_code}: Failed to commit change, error: {str(internal_result_info)} "
                    )
                else:
                    print(
                        f"job({one_job_action_dict.job_code}) is planned successfully ..."
                    )

                db_job = job_service.get_by_code(
                    db_session=self.env.kp_data_adapter.db_session,
                    code=job.job_code)
                db_worker = worker_service.get_by_code(
                    db_session=self.env.kp_data_adapter.db_session,
                    code=scheduled_worker_codes[0])

                db_job.requested_primary_worker = db_worker

                self.env.kp_data_adapter.db_session.add(db_job)
                self.env.kp_data_adapter.db_session.commit()

                print(
                    f"job({job.job_code}) is updated to new requested_primary_worker = {scheduled_worker_codes[0]} "
                )

                prev_location = job.location
                previous_index = index
                index = solution.Value(routing.NextVar(index))

                # not used.
                travel_time = self._get_travel_time_2locations(
                    prev_location, job.location)

                route_distance = routing.GetArcCostForVehicle(
                    previous_index, index, vehicle_id)
                next_start_minutes += job.requested_duration_minutes + route_distance
Example #9
def single_job_drop_check(
    request_in: SingleJobDropCheckInput,  #
    current_user: DispatchUser = Depends(get_current_user),
):
    # team_code = current_user.team_id
    # There is no team_id in user

    # if not request_in.start_day:
    #     if request_in.end_day:
    #         raise HTTPException(
    #             status_code=400, detail=f"The start_day and end_day must pair.",
    #         )
    #     request_in.start_day = DATA_START_DAY
    #     request_in.end_day = "20201024"
    # else:
    #     if not request_in.end_day:
    #         raise HTTPException(
    #             status_code=400, detail=f"The start_day and end_day must pair.",
    #         )

    org_code = current_user.org_code

    # if len(request_in.scheduled_primary_worker_id) < 1:
    #     raise HTTPException(
    #         status_code=400, detail=f"Empty request_in.scheduled_primary_worker_id is not allowed.",
    #     )

    # TODO verify team exist in org
    planner = get_default_active_planner(org_code=org_code, team_id=request_in.team_id)
    rl_env = planner["planner_env"]

    if request_in.job_code not in rl_env.jobs_dict.keys():
        raise HTTPException(
            status_code=400,
            detail=f"The job (with job_code ={request_in.job_code}) does not exists in env.",
        )

    scheduled_worker_codes = [request_in.scheduled_primary_worker_id] + \
        request_in.scheduled_secondary_worker_ids
    for worker_code in scheduled_worker_codes:
        if worker_code not in rl_env.workers_dict.keys():
            raise HTTPException(
                status_code=400, detail=f"The worker_code ({worker_code}) does not exists.",
            )

    scheduled_start_minutes = rl_env.env_encode_from_datetime_to_minutes(
        request_in.scheduled_start_datetime.replace(tzinfo=None)
    )

    one_job_action_dict = ActionDict(
        is_forced_action=False,
        job_code=request_in.job_code,
        action_type=ActionType.JOB_FIXED,
        scheduled_worker_codes=scheduled_worker_codes,
        scheduled_start_minutes=scheduled_start_minutes,
        scheduled_duration_minutes=request_in.scheduled_duration_minutes,
        # slot_code_list =
    )

    result_info = SingleJobDropCheckOutput(
        status_code=ActionScoringResultType.OK, score=0, travel_time=15, messages=[],
    )

    for rule in rl_env.rule_set:
        rule_checked = rule.evalute_action_normal(env=rl_env, action_dict=one_job_action_dict)

        rule_checked.score_type = rule.title

        # rule_checked_dict = dataclasses.asdict(rule_checked)
        result_info.messages.append(rule_checked)  # rule_checked_dict

        if (rule_checked.score < 1) and (result_info.status_code == ActionScoringResultType.OK):
            result_info.status_code = ActionScoringResultType.WARNING
        if rule_checked.score == -1:
            result_info.status_code = ActionScoringResultType.ERROR
    return result_info
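The loop above treats every entry of rl_env.rule_set as a duck-typed plugin with a title attribute and an evalute_action_normal(env, action_dict) method (spelling as in the call site) whose returned score drives the overall status. A hedged stub showing that shape; the result class and the check itself are invented stand-ins, not project code:

from dataclasses import dataclass, field
from typing import List


@dataclass
class StubRuleResult:
    # Stand-in for the project's real rule-result type; only the attributes
    # the handler above touches are modelled here.
    score: float = 1
    score_type: str = ""
    messages: List[str] = field(default_factory=list)


class WithinSlotRuleStub:
    title = "within_slot_stub"

    def evalute_action_normal(self, env, action_dict):
        result = StubRuleResult()
        # Purely illustrative check: a negative start counts as a hard error,
        # which the endpoint above maps to ActionScoringResultType.ERROR.
        if action_dict.scheduled_start_minutes < 0:
            result.score = -1
            result.messages.append("scheduled start is before the planning window")
        return result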
Example #10
    def dispatch_jobs(self, env, rl_agent=None):
        if rl_agent is None:
            raise ValueError("rl_agent cannot be None!")
        self.kandbox_env = env
        rl_agent.config["nbr_of_actions"] = 2
        pprint(env.get_planner_score_stats())

        log.info(
            f"Starting Batch planning for env={self.kandbox_env.env_inst_code}, ..."
        )

        for job_code in env.jobs_dict.keys():
            if (env.jobs_dict[job_code].job_type == JobType.JOB) and (
                    env.jobs_dict[job_code].planning_status == JobPlanningStatus.UNPLANNED):
                res = rl_agent.predict_action_dict_list(job_code=job_code)
                if len(res) < 1:
                    log.warning(f"Failed to predict for job_code = {job_code}")
                    continue
                one_job_action_dict = ActionDict(
                    is_forced_action=True,
                    job_code=job_code,
                    # I assume that only in-planning jobs can appear here...
                    action_type=ActionType.FLOATING,
                    scheduled_worker_codes=res[0].scheduled_worker_codes,
                    scheduled_start_minutes=res[0].scheduled_start_minutes,
                    scheduled_duration_minutes=res[0].scheduled_duration_minutes,
                )
                internal_result_info = env.mutate_update_job_by_action_dict(
                    a_dict=one_job_action_dict, post_changes_flag=True)

                if internal_result_info.status_code != ActionScoringResultType.OK:
                    log.warning(
                        # {internal_result_info}
                        f"JOB:{job_code}: Failed to act on job={job_code}. ")
                else:
                    log.info(
                        f"JOB:{job_code}: Successfully Planned job, action={res[0]}. "
                    )

        log.info("Batch planning Done, printing new scores...")
        pprint(env.get_planner_score_stats())

        return
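        # NOTE: everything below this point is unreachable because of the return above;
        # job_list is not defined in this method, so it looks like leftover code from the
        # batch variant shown earlier.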

        for job in job_list:
            one_job_action_dict = ActionDict(
                is_forced_action=False,
                job_code=job["job_code"],
                # I assume that only in-planning jobs can appear here...
                action_type=ActionType.FLOATING,
                scheduled_worker_codes=[job["scheduled_primary_worker_id"]],
                scheduled_start_minutes=job["scheduled_start_minutes"],
                scheduled_duration_minutes=job["scheduled_duration_minutes"],
            )
            internal_result_info = self.kandbox_env.mutate_update_job_by_action_dict(
                a_dict=one_job_action_dict, post_changes_flag=False)
            if internal_result_info.status_code != ActionScoringResultType.OK:
                print(
                    f"{one_job_action_dict.job_code}: Failed to commit change, error: {str(internal_result_info)} "
                )

            job_to_update = JobPlanningInfoUpdate(
                code=job["job_code"],
                planning_status=job["planning_status"],
                scheduled_start_datetime=self.kandbox_env.
                env_decode_from_minutes_to_datetime(
                    job["scheduled_start_minutes"]),
                scheduled_duration_minutes=job["scheduled_duration_minutes"],
                scheduled_primary_worker_code=job[
                    "scheduled_primary_worker_id"],
            )
            job_service.update_planning_info(
                db_session=self.kandbox_env.kp_data_adapter.db_session,
                job_in=job_to_update)

            print(
                f"job({job_to_update.code}) is updated with new planning info")
Example #11
    def search_action_dict_on_workers(
        self,
        a_worker_code_list: List[str],
        curr_job: BaseJob,
        max_number_of_matching: int = 9,
        attempt_unplan_jobs=False,
        allow_overtime=False,
    ) -> List[RecommendedAction]:
        # if "job_gps" not in curr_job.keys():
        #    curr_job["job_gps"] = ([curr_job.location[0], curr_job.location[1]],)
        if curr_job.job_code in APPOINTMENT_DEBUG_LIST:
            log.info(
                f"appt={curr_job.job_code}, worker_codes:{a_worker_code_list} started searching...")
        if (curr_job.requested_duration_minutes is None) or (curr_job.requested_duration_minutes < 1):
            log.error(
                f"appt={curr_job.job_code}, requested_duration_minutes = {curr_job.requested_duration_minutes}, no recommendation is possible, quitting...")
            return []
        scheduled_duration_minutes = self.env.get_encode_shared_duration_by_planning_efficiency_factor(
            requested_duration_minutes=curr_job.requested_duration_minutes,
            nbr_workers=len(a_worker_code_list),
        )
        # if len(a_worker_code_list) > 1:
        # else:
        #     scheduled_duration_minutes = curr_job.requested_duration_minutes

        curr_job.scheduled_duration_minutes = scheduled_duration_minutes
        # self.env.jobs_dict[
        #     curr_job.job_code
        # ].scheduled_duration_minutes = scheduled_duration_minutes

        result_slot = []
        time_slot_list = []

        original_travel_minutes_difference = 0
        if curr_job.planning_status != JobPlanningStatus.UNPLANNED:
            for worker_code in curr_job.scheduled_worker_codes:
                a_slot_group = self.env.slot_server.get_overlapped_slots(
                    worker_id=worker_code,
                    start_minutes=curr_job.scheduled_start_minutes,
                    end_minutes=curr_job.scheduled_start_minutes + curr_job.scheduled_duration_minutes,
                )
                for a_slot in a_slot_group:
                    try:
                        original_travel_minutes_difference += (self.calc_travel_minutes_difference_for_1_job(
                            env=self.env, a_slot=a_slot, curr_job=curr_job))
                    except ValueError as err:
                        log.debug(
                            f"JOB:{curr_job.job_code}:ERROR:{err}, This job has lost its worker time slot. For now, no more recommendations, so I set original_travel_minutes_difference = 0")
                        original_travel_minutes_difference = 0
                        # For now, no more recommendations, TODO @duan
                        # return []

        for curr_worker_code in a_worker_code_list:
            if curr_worker_code not in self.env.workers_dict.keys():
                log.error(
                    f"WORKER:{curr_worker_code}: Worker is requested to be searched but not found in env.")
                return []
            log.debug(
                f"Querying slots, worker_id={curr_worker_code}, start_minutes={curr_job.requested_start_min_minutes}, end_minutes={curr_job.requested_start_max_minutes}")
            curr_overlapped_slot_list = self.env.slot_server.get_overlapped_slots(
                worker_id=curr_worker_code,
                start_minutes=curr_job.requested_start_min_minutes,
                end_minutes=curr_job.requested_start_max_minutes,
            )

            curr_free_slot_list = []
            for slot in curr_overlapped_slot_list:

                log.debug(f"Checking slot: {self.env.slot_server.get_time_slot_key(slot)}")
                if slot.slot_type == TimeSlotType.FLOATING:
                    if allow_overtime:
                        # If attempt_unplan_jobs is set to true, this is already the secondary round.
                        # I will also consider the overtime minutes for each worker.
                        #
                        new_overtime_minutes = self.env.get_worker_available_overtime_minutes(
                            slot.worker_id, day_seq=int(slot.start_minutes / 1440))
                        if slot.prev_slot_code is None:
                            slot.start_overtime_minutes += new_overtime_minutes
                        if slot.next_slot_code is None:
                            slot.end_overtime_minutes += new_overtime_minutes

                    if (slot.end_minutes - slot.start_minutes + slot.start_overtime_minutes + slot.end_overtime_minutes) > scheduled_duration_minutes:
                        # This may screen out some slots that clearly do not fit, without considering travel time.

                        curr_free_slot_list.append(slot)
                        log.debug(
                            f"worker_codes:{a_worker_code_list}:worker:{curr_worker_code}: identified one free slot ({slot.start_minutes}->{slot.end_minutes}).")

            if len(curr_free_slot_list) < 1:
                log.debug(
                    f"appt={curr_job.job_code}, worker_codes:{a_worker_code_list}:worker:{curr_worker_code}: Worker has no free slots left by tolerance ({curr_job.requested_start_min_minutes}->{curr_job.requested_start_max_minutes}) ({self.env.env_decode_from_minutes_to_datetime(curr_job.requested_start_min_minutes)}->{self.env.env_decode_from_minutes_to_datetime(curr_job.requested_start_max_minutes)})."
                )
                return []
            # if curr_job.job_code in APPOINTMENT_DEBUG_LIST:
            # log.debug(
            #     "All free slots identified: "
            #     + str(
            #         [
            #             f"worker_id = {w.worker_id}, start_minutes= {w.start_minutes}, end_minutes= {w.end_minutes}, assigned_job_codes= {w.assigned_job_codes}"
            #             for w in curr_free_slot_list
            #         ]
            #     )
            # )
            time_slot_list.append(sorted(
                curr_free_slot_list,
                key=lambda item: item.start_minutes,
            ))

        if len(time_slot_list) < 1:
            log.warn("should not happend: len(time_slot_list) < 1")
            return []

        # This appends the customer's available slots as the last timeslot list, to screen against all workers' timeslots.
        # I assume that the customer does not want the current time slot, so it is removed from the available slots.
        # TODO, @duan, 2020-12-17 19:23:53 Why is available_slots on location? Different jobs may differ.

        orig_avail = copy.deepcopy(curr_job.available_slots)
        if len(orig_avail) < 1:
            log.info(
                f"appt={curr_job.job_code}, job_available_slots:{orig_avail}: No available slots per customer availability.")
            return []
        else:
            # if curr_job.job_code in APPOINTMENT_DEBUG_LIST:
            log.debug(
                f"appt={curr_job.job_code}, len(orig_avail) ={len(orig_avail)}: Found available slots per customer availability.")

        # This fake_appt_time_slot will be used to hold the slot intersection result, especially after applying customer availability
        fake_appt_time_slot = copy.deepcopy(orig_avail[0])
        fake_appt_time_slot.assigned_job_codes = [curr_job.job_code]

        # This is to exclude those available slots that overlap with the current scheduled time.
        # Make sure that the customer does not get the same time slot as the current one (meaningless, even if from other technicians).
        net_cust_avail_slots = []
        slot_to_exclude = (
            curr_job.scheduled_start_minutes - 60,
            curr_job.scheduled_start_minutes + 90,
        )
        for ti in range(len(orig_avail)):
            slot_ti = (orig_avail[ti].start_minutes, orig_avail[ti].end_minutes)
            clip = date_util.clip_time_period(slot_ti, slot_to_exclude)
            if len(clip) < 1:
                net_cust_avail_slots.append(orig_avail[ti])
            else:
                if clip[0] > orig_avail[ti].start_minutes:
                    a_slot = copy.copy(orig_avail[ti])
                    a_slot.end_minutes = clip[0]
                    net_cust_avail_slots.append(a_slot)
                if clip[1] < orig_avail[ti].end_minutes:
                    a_slot = copy.copy(orig_avail[ti])
                    a_slot.start_minutes = clip[1]
                    net_cust_avail_slots.append(a_slot)

        # To include the customer available time slots in the search
        time_slot_list.append(net_cust_avail_slots)
        # = {net_cust_avail_slots}
        log.debug(f"Final net_cust_avail_slots len={len(net_cust_avail_slots)}")

        available_slot_groups = self.env.intersect_geo_time_slots(
            time_slot_list,
            curr_job,
            duration_minutes=scheduled_duration_minutes,
            max_number_of_matching=max_number_of_matching,
        )

        # if ( free_slot.start_minutes <= start_minutes - travel_minutes ) &  ( end_minutes <= free_slot[1]):
        # if len(available_slot_groups) < 1:
        # no hit in this day_i
        #    continue
        log.debug(
            f"after env.intersect_geo_time_slots, available_slot_groups len={len(available_slot_groups)}.")
        for avail_slot_group in available_slot_groups:
            scoped_slot_list = avail_slot_group[2]
            shared_time_slots_temp = []
            for sc_slot in scoped_slot_list[:-1]:
                try:
                    # TODO: merge this with the first filtering, along with overtime-minutes processing. 2021-01-01 10:18:06
                    sc = self.env.slot_server.get_time_slot_key(sc_slot)
                    temp_slot_ = self.env.slot_server.get_slot(
                        redis_handler=self.env.slot_server.r, slot_code=sc)
                    temp_slot_.start_overtime_minutes = sc_slot.start_overtime_minutes
                    temp_slot_.end_overtime_minutes = sc_slot.end_overtime_minutes
                    shared_time_slots_temp.append(temp_slot_)
                except Exception as mse:
                    log.error(f"failed to read slot {str(sc)}, error {str(mse)}")
            if len(shared_time_slots_temp) < 1:
                # After checking redis, there is no valid slot in the list
                log.info(
                    f"appt={curr_job.job_code}, After checking redis, there is no valid slot in the list, No available slots.")
                continue

            shared_time_slots_temp.append(scoped_slot_list[-1])

            shared_time_slots = shared_time_slots_temp.copy()
            # shared_time_slots_optimized may be changed/mutated during optimization process. Deep copy it.
            shared_time_slots_optimized = copy.deepcopy(shared_time_slots)

            for one_working_slot in shared_time_slots_optimized:
                one_working_slot.assigned_job_codes = sorted(
                    one_working_slot.assigned_job_codes,
                    key=lambda x: self.env.jobs_dict[x].scheduled_start_minutes
                )

                one_working_slot.assigned_job_codes.append(curr_job.job_code)

            # TODO, then append other related shared code slots... Right now only this one.

            # This is the last attached fake time slot, based on the availability search
            # TODO: Why two availabilities?
            fake_appt_time_slot.start_minutes = avail_slot_group[0]
            fake_appt_time_slot.end_minutes = avail_slot_group[1]
            shared_time_slots_optimized.append(fake_appt_time_slot)

            if self.use_naive_search_for_speed:
                dispatch_jobs_in_slots_func = self.env.inner_slot_heur.dispatch_jobs_in_slots
            else:
                dispatch_jobs_in_slots_func = self.env.inner_slot_opti.dispatch_jobs_in_slots

            unplanned_job_codes = []
            # res = {"status":OptimizerSolutionStatus.INFEASIBLE}
            res = dispatch_jobs_in_slots_func(shared_time_slots_optimized)
            while attempt_unplan_jobs & (res.status != OptimizerSolutionStatus.SUCCESS):
                is_shrinked = False
                for ts in shared_time_slots_optimized:
                    if len(ts.assigned_job_codes) > 1:
                        j_i = len(ts.assigned_job_codes) - 2
                        while j_i >= 0:
                            job_code_to_unplan = ts.assigned_job_codes[j_i]
                            if self.env.jobs_dict[job_code_to_unplan].priority < curr_job.priority:
                                unplanned_job_codes.append(job_code_to_unplan)
                                ts.assigned_job_codes = ts.assigned_job_codes[0:j_i] + \
                                    ts.assigned_job_codes[j_i + 1:]
                                is_shrinked = True

                                log.info(
                                    f"job={curr_job.job_code}, attempting search after unplanning job {job_code_to_unplan}, to {ts.assigned_job_codes}, priority { self.env.jobs_dict[job_code_to_unplan].priority , curr_job.priority} ...")
                                break
                            j_i -= 1
                    if is_shrinked:
                        # only unplan one job in each attempt
                        break
                if not is_shrinked:
                    # No job can be unplanned.
                    break
                res = dispatch_jobs_in_slots_func(shared_time_slots_optimized)
                if res.status == OptimizerSolutionStatus.SUCCESS:
                    break

            if res.status != OptimizerSolutionStatus.SUCCESS:
                log.info(f"appt={curr_job.job_code}, failed to find solution on slots: " + str(
                    [f"(worker_id = {w.worker_id}, start_datetime= {self.env.env_decode_from_minutes_to_datetime(w.start_minutes)}, end_datetime= {self.env.env_decode_from_minutes_to_datetime(w.end_minutes)}, assigned_job_codes= {w.assigned_job_codes}) " for w in shared_time_slots_optimized]))
                continue
            try:

                new_start_mintues = res.changed_action_dict_by_job_code[curr_job.job_code].scheduled_start_minutes
            except KeyError:
                log.error(
                    f"{curr_job.job_code} is not in changed_action_dict_by_job_code. I should have already excluded it from search.")
                continue

            new_action_dict = ActionDict(
                is_forced_action=False,
                job_code=curr_job.job_code,
                action_type=ActionType.JOB_FIXED,
                scheduled_worker_codes=a_worker_code_list,
                scheduled_start_minutes=new_start_mintues,
                scheduled_duration_minutes=curr_job.scheduled_duration_minutes,
            )
            rule_check_result = self.env._check_action_on_rule_set(
                a_dict=new_action_dict, unplanned_job_codes=unplanned_job_codes)
            # TODO, track Failed number
            if rule_check_result.status_code == ActionScoringResultType.ERROR:
                log.info(
                    f"Failed on rule set check after acquiring recommendation. Skipped. appt = {curr_job.job_code}, workers = {a_worker_code_list}, start = {new_start_mintues}, messages = {['{}---{}---{}'.format(m.score,m.score_type, m.message) for m in rule_check_result.messages]} "
                )

                continue
            # This may not be necessary, because I have executed optimize_slot and the slot contains only possible jobs
            #
            # cut_off_success_flag, message_dict = self.env.slot_server.cut_off_time_slots(
            #     action_dict=new_action_dict, probe_only=True
            # )
            # if not cut_off_success_flag:
            #     log.info(
            #         f"After rule set, failed on probing cutting off slots. appt = {curr_job.job_code}, workers = {a_worker_code_list}, start = {new_start_mintues}, messages = {str(message_dict)} "
            #     )
            #     continue

            log.info(
                f"appt = {curr_job.job_code}, workers = {a_worker_code_list}, start = {new_start_mintues}, The solution passed all rules.")

            #  Now I should trim the last time slot attached as customer availability
            scoped_slot_code_list_no_cust = [self.env.slot_server.get_time_slot_key(
                s) for s in scoped_slot_list[0:len(a_worker_code_list)]]
            # Here I should also assert that len(scoped_slot_code_list) == worker_length + 1

            new_travel_minutes_difference = 0
            for a_slot in shared_time_slots:
                (
                    prev_travel,
                    next_travel,
                    inside_travel,
                ) = self.env.get_travel_time_jobs_in_slot(a_slot, a_slot.assigned_job_codes)
                new_travel_minutes_difference -= prev_travel + next_travel + sum(inside_travel)

            for a_slot in shared_time_slots_optimized:
                (
                    prev_travel,
                    next_travel,
                    inside_travel,
                ) = self.env.get_travel_time_jobs_in_slot(a_slot, a_slot.assigned_job_codes)
                new_travel_minutes_difference += prev_travel + next_travel + sum(inside_travel)

            total_score = ((self.env.config["scoring_factor_standard_travel_minutes"] - new_travel_minutes_difference + original_travel_minutes_difference) /
                           self.env.config["scoring_factor_standard_travel_minutes"]) - (len(unplanned_job_codes) * 0.5)

            if allow_overtime:
                total_score -= 2

            travel_message = f"original minutes difference:{original_travel_minutes_difference}, new minutes difference: {new_travel_minutes_difference}"
            metrics_detail = {
                "original_minutes_difference": original_travel_minutes_difference,
                "new_minutes_difference": new_travel_minutes_difference,
            }
            travel_score_obj = ActionEvaluationScore(
                score=total_score,
                score_type="travel_difference",
                message=travel_message,
                metrics_detail=metrics_detail,
            )
            # if curr_job.job_code == "04856415-b6ae-4aeb-9e6d-ff399f00de0d":
            #     log.debug("04856415-b6ae-4aeb-9e6d-ff399f00de0d")

            a_rec = RecommendedAction(
                job_code=curr_job.job_code,
                # action_type=ActionType.JOB_FIXED,
                # JobType = JOB, which can be acquired from self.jobs_dict[job_code]
                scheduled_worker_codes=a_worker_code_list,
                scheduled_start_minutes=new_start_mintues,
                scheduled_duration_minutes=curr_job.scheduled_duration_minutes,
                score=total_score,
                score_detail=[travel_score_obj],
                scoped_slot_code_list=scoped_slot_code_list_no_cust,
                job_plan_in_scoped_slots=res.planned_job_sequence[0:len(a_worker_code_list)],
                unplanned_job_codes=unplanned_job_codes,
            )

            log.info(f"Found solution on slots {a_rec.scoped_slot_code_list}")
            # TODO for @Xingtong, calculate KPI about recommendation

            result_slot.append(a_rec)
            if len(result_slot) >= max_number_of_matching:
                return result_slot
        # log.info(f"Partial solutions returned")
        return result_slot
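
A minimal usage sketch for the search above (not from the original source): it assumes `recommender` is an instance of the class shown in Example #11 and `env` is its attached planner environment; the job code and worker codes are purely illustrative.

# Hedged usage sketch; `recommender` and `env` are assumed objects, all codes are illustrative.
job = env.jobs_dict["JOB-0001"]                      # hypothetical job code
candidate_workers = ["worker-01", "worker-02"]       # hypothetical worker codes

recommendations = recommender.search_action_dict_on_workers(
    a_worker_code_list=candidate_workers,
    curr_job=job,
    max_number_of_matching=3,
    attempt_unplan_jobs=False,
    allow_overtime=False,
)
for rec in recommendations:
    # Each RecommendedAction carries the proposed workers, start time and score.
    print(rec.job_code, rec.scheduled_worker_codes, rec.scheduled_start_minutes, rec.score)
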
Example #12
0
    def dispatch_jobs_in_slots(self,
                               working_time_slots: list = [],
                               last_job_count=1):
        """Assign jobs to workers.
        
        Note: THis works only for single worker, not shared.!
        """
        num_slots = len(working_time_slots)  # - 2
        num_workers = len(working_time_slots) - 2
        if num_workers < 1:
            num_workers = 1
        final_result = JobsInSlotsDispatchResult(
            status=OptimizerSolutionStatus.INFEASIBLE,
            changed_action_dict_by_job_code={},
            all_assigned_job_codes=[],
            planned_job_sequence=[])
        #     {
        #     "status": OptimizerSolutionStatus.INFEASIBLE,
        #     "changed_action_dict_by_job_code": {},
        #     "all_assigned_job_codes": [],
        #     "travel_minutes_difference": 0,
        # }
        if (num_slots < 1) or (len(working_time_slots[0].assigned_job_codes) >
                               self.env.config["MAX_NBR_JOBS_IN_SLOT"]):
            log.debug(
                f"Plan rejected, trying to plan {len(working_time_slots[0].assigned_job_codes)} jobs ..."
            )
            return final_result

        slot = working_time_slots[0]
        horizon_start = self.env.get_env_planning_horizon_start_minutes()
        if horizon_start < slot.start_minutes:
            horizon_start = slot.start_minutes
        # orig_slot_job_codes = slot.assigned_job_codes[: 0 - last_job_count]

        if len(slot.assigned_job_codes) < 3:
            if len(slot.assigned_job_codes) < 2:
                log.error("Not enough jobs to route")
            assert slot.assigned_job_codes[0].split("-")[-1] == "pick", "first job must be a pick when there are at most two jobs"
            solution_index = [0, 1, 2]
        else:
            pre_job = [None for _ in range(len(slot.assigned_job_codes))]
            for ji, jc in enumerate(slot.assigned_job_codes):
                if jc.split("-")[-1] == "drop":
                    pick_code = jc[:-4] + "pick"  # jc.split("-")[1] + "-pick"
                    if pick_code in slot.assigned_job_codes:
                        pre_job[ji] = [
                            slot.assigned_job_codes.index(pick_code) + 1
                        ]

            locations = [
                self.env.jobs_dict[jc].location[0:2] +
                (0.2 if jc.split("-")[-1] == "pick" else 1, pre_job[ji])
                for ji, jc in enumerate(slot.assigned_job_codes)
            ]
            locations = [slot.start_location[0:2] + (1, None)] + locations

            solution_index = self.solve(locations)
            if solution_index is None:
                final_result.status = OptimizerSolutionStatus.INFEASIBLE
                return final_result

        job_list = [slot.assigned_job_codes[i - 1] for i in solution_index[1:]]
        final_result.status = OptimizerSolutionStatus.SUCCESS

        # is_ok_end = True
        job_1_code = working_time_slots[0].assigned_job_codes[0 - last_job_count]
        job_1 = self.env.jobs_dict[job_1_code]
        # I assume that job_1 and job_2 have the same duration. 2021-07-06 07:16:06
        job_duration_minutes = self.env.get_encode_shared_duration_by_planning_efficiency_factor(
            requested_duration_minutes=job_1.requested_duration_minutes,
            nbr_workers=num_workers,
        )

        job_2_code = working_time_slots[0].assigned_job_codes[-1]
        job_2 = self.env.jobs_dict[job_2_code]

        prev_start_time = horizon_start
        if prev_start_time < slot.start_minutes:
            prev_start_time = slot.start_minutes

        prev_loc = slot.start_location

        all_worker_codes = [s.worker_id for s in working_time_slots]

        final_result.all_assigned_job_codes = [job_list]
        current_start = prev_start_time
        for job_i in list(range(0, len(job_list))):
            j_code = job_list[job_i]
            curr_job = self.env.jobs_dict[j_code]
            current_start += self.env.travel_router.get_travel_minutes_2locations(
                prev_loc, curr_job.location)
            _action_dict = ActionDict(
                is_forced_action=False,
                job_code=j_code,
                action_type=ActionType.FLOATING,
                scheduled_worker_codes=all_worker_codes,
                scheduled_start_minutes=current_start,
                scheduled_duration_minutes=curr_job.requested_duration_minutes,
            )
            final_result.changed_action_dict_by_job_code[j_code] = _action_dict
            prev_loc = curr_job.location
        # (len(job_list) > 2) and (job_list[0].split("-")[-1]  == "drop")
        return final_result
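
The pick/drop precedence encoding built above can be illustrated in isolation. The sketch below is not from the original source; the job codes, coordinates and slot start location are made up, and real `location` objects carry more fields than the plain (lon, lat) pairs used here.

# Sketch of the (lon, lat, service_weight, predecessor) tuples assembled in Example #12,
# assuming job codes end in "-pick" / "-drop" and locations are plain (lon, lat) pairs.
assigned_job_codes = ["o1-pick", "o1-drop", "o2-pick"]   # hypothetical codes
job_locations = {
    "o1-pick": (114.05, 22.54),
    "o1-drop": (114.10, 22.57),
    "o2-pick": (114.02, 22.52),
}

pre_job = [None] * len(assigned_job_codes)
for ji, jc in enumerate(assigned_job_codes):
    if jc.split("-")[-1] == "drop":
        pick_code = jc[:-4] + "pick"
        if pick_code in assigned_job_codes:
            # +1 because index 0 is reserved for the slot's start location
            pre_job[ji] = [assigned_job_codes.index(pick_code) + 1]

locations = [
    job_locations[jc] + (0.2 if jc.split("-")[-1] == "pick" else 1, pre_job[ji])
    for ji, jc in enumerate(assigned_job_codes)
]
locations = [(114.00, 22.50) + (1, None)] + locations  # slot start location goes first
print(locations)
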
Example #13
0
    def dispatch_jobs(self, env):
        # , start_date="20191101", end_date="20191230"
        assert env.config["nbr_of_days_planning_window"] == 1
        if len(env.jobs) < 1:
            print("it is empty, nothing to dispatch!")
            return
        env.workers = env.workers  # [0:50]
        env.jobs = env.jobs  # [0:500]
        self.env = env
        begin_time = datetime.now()

        avg_long = sum([j.location.geo_longitude
                        for j in self.env.jobs]) / len(self.env.jobs)
        avg_lat = sum([j.location.geo_latitude
                       for j in self.env.jobs]) / len(self.env.jobs)

        AVG_JOB_LOCATION = JobLocationBase(
            geo_longitude=avg_long,
            geo_latitude=avg_lat,
            location_type=LocationType.HOME,
            location_code="depot",
        )
        self.job_locations = [AVG_JOB_LOCATION]

        for j in self.env.jobs:
            self.job_locations.append(j.location)

        # Create and register a transit callback.
        def distance_callback(from_index, to_index):
            # Convert from routing variable Index to distance matrix NodeIndex.
            from_node = manager.IndexToNode(from_index)  # - 1
            to_node = manager.IndexToNode(to_index)  # - 1

            if from_node == to_node:
                return 0
            return self._get_travel_time_2locations(
                self.job_locations[from_node], self.job_locations[to_node])

        # Create the routing index manager.
        manager = pywrapcp.RoutingIndexManager(len(self.job_locations),
                                               len(self.env.workers), 0)

        # Create Routing Model.
        routing = pywrapcp.RoutingModel(manager)

        transit_callback_index = routing.RegisterTransitCallback(
            distance_callback)

        # Define cost of each arc.
        routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)

        # Add Distance constraint.
        dimension_name = "Distance"
        routing.AddDimension(
            transit_callback_index,
            0,  # no slack
            300,  # vehicle maximum travel distance
            True,  # start cumul to zero
            dimension_name,
        )
        distance_dimension = routing.GetDimensionOrDie(dimension_name)
        distance_dimension.SetGlobalSpanCostCoefficient(100)

        # Setting first solution heuristic.
        search_parameters = pywrapcp.DefaultRoutingSearchParameters()
        search_parameters.first_solution_strategy = (
            routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC)

        # Solve the problem.
        solution = routing.SolveWithParameters(search_parameters)

        # Print solution on console.
        if solution:
            print_solution(self, manager, routing, solution)

        total_time = datetime.now() - begin_time
        # Date: {begin_time},
        print(
            f"nbr workers: {len(self.env.workers)}, nbr jobs: {len(self.job_locations)}, Elapsed: {total_time}"
        )

        return
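        # NOTE: everything below is unreachable because of the return above;
        # it is kept as a reference for the per-day dispatch-and-commit flow.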

        while current_date < GENERATOR_END_DATE:

            print("current_start_day", current_date, "GENERATOR_END_DATE",
                  GENERATOR_END_DATE)
            current_start_day = datetime.strftime(current_date,
                                                  config.KANDBOX_DATE_FORMAT)

            day_seq = int(
                self.env.env_encode_from_datetime_to_minutes(current_date) /
                1440)

            # purge job status for this planner and this day.

            workers = self.env.workers  # start_day=current_start_day
            # TODO, do it per day

            jobs_orig = []
            for j in self.env.jobs:
                if (j.requested_start_minutes >=
                        day_seq * 1440) and (j.requested_start_minutes <
                                             (day_seq + 1) * 1440):
                    jobs_orig.append(j)
            if len(jobs_orig) < 1:
                print("it is empty, nothing to dispatch!")
                return

            current_shifts = jobs_orig
            current_workers = workers  # .T.to_dict().values()

            print({
                "loaded day": current_date,
                "job count": len(current_shifts),
                "current_workers count": len(current_workers),
            })

            worker_day = self.dispatch_jobs_1day(
                jobs=current_shifts[0:53],
                workers=current_workers)  # [0:20]  [:70]
            # worker_day is the dispatched result, now we save it into DB.

            if len(worker_day) < 1:
                print(
                    "no data returned from opti1day! I will move on to the next day!"
                )
                current_date = current_date + timedelta(days=1)
                continue
            # pprint(worker_day)
            job_list = []
            for w_i in range(len(worker_day)):
                pre_job_code = "__HOME"
                for task in worker_day[w_i][:-1]:
                    task_id = task[0]  # + 1
                    worker_code = current_workers[w_i].worker_code

                    # updated_order =  {} # latest_order_dict[id]
                    job_list.append({
                        "job_code": current_shifts[task_id].job_code,
                        "job_schedule_type": current_shifts[task_id].job_schedule_type,
                        "planning_status": JobPlanningStatus.IN_PLANNING,
                        "scheduled_primary_worker_id": worker_code,
                        "scheduled_start_day": datetime.strftime(current_date, config.KANDBOX_DATE_FORMAT),
                        "requested_start_day": datetime.strftime(current_date, config.KANDBOX_DATE_FORMAT),
                        "scheduled_start_minutes": task[1] + (day_seq * 1440),
                        "scheduled_duration_minutes": current_shifts[task_id].requested_duration_minutes,
                        "scheduled_travel_minutes_before": task[2],
                        "scheduled_travel_prev_code": pre_job_code,
                        # "location_code": current_shifts[task_id]["location_code"],
                        "geo_longitude": current_shifts[task_id].location[0],
                        "geo_latitude": current_shifts[task_id].location[1],
                        "conflict_level": 0,
                        "scheduled_secondary_worker_ids": "[]",
                        "scheduled_share_status": "N",
                        "error_message": "",
                    })
                    pre_job_code = current_shifts[task_id].job_code
            """
            import pprint

            pprint.pprint(job_list)
            return
            """
            # TODO: No needs of 2 rounds.
            # fmt:off
            self.env.kp_data_adapter.reload_data_from_db()
            for job in job_list:
                one_job_action_dict = ActionDict(
                    is_forced_action=False,
                    job_code=job["job_code"],
                    # I assume that only in-planning jobs can appear here...
                    action_type=ActionType.FLOATING,
                    scheduled_worker_codes=[job["scheduled_primary_worker_id"]],
                    scheduled_start_minutes=job["scheduled_start_minutes"],
                    scheduled_duration_minutes=job["scheduled_duration_minutes"],
                )
                internal_result_info = self.env.mutate_update_job_by_action_dict(
                    a_dict=one_job_action_dict, post_changes_flag=False)
                if internal_result_info.status_code != ActionScoringResultType.OK:
                    print(
                        f"{one_job_action_dict.job_code}: Failed to commit change, error: {str(internal_result_info)} "
                    )

                # job_to_create = copy.deepcopy(self.env.kp_data_adapter.jobs_db_dict[job["job_code"]])
                job_to_update = JobPlanningInfoUpdate(
                    code=job["job_code"],
                    planning_status=job["planning_status"],
                    scheduled_start_datetime=self.env.env_decode_from_minutes_to_datetime(
                        job["scheduled_start_minutes"]),
                    scheduled_duration_minutes=job["scheduled_duration_minutes"],
                    scheduled_primary_worker_code=job["scheduled_primary_worker_id"],
                )
                job_service.update_planning_info(
                    db_session=self.env.kp_data_adapter.db_session,
                    job_in=job_to_update)

                # self.env.kp_data_adapter.db_session.add(db_job)
                # self.env.kp_data_adapter.db_session.commit()
                # job_to_create= copy.deepcopy(job_orig)

                print(
                    f"job({job_to_update.code}) is updated with new planning info"
                )

            # fmt:on

            current_date = current_date + timedelta(days=1)
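
For reference, here is a minimal, self-contained sketch of the OR-Tools routing pattern used in Example #13 (distance callback, distance dimension, PATH_CHEAPEST_ARC first-solution strategy). The 4x4 distance matrix and the vehicle count are illustrative values, not taken from the original code.

# Minimal OR-Tools VRP sketch mirroring the setup in Example #13; assumes `ortools` is installed.
from ortools.constraint_solver import pywrapcp, routing_enums_pb2

distance_matrix = [
    [0, 10, 15, 20],
    [10, 0, 12, 18],
    [15, 12, 0, 9],
    [20, 18, 9, 0],
]
num_vehicles = 2
depot = 0

manager = pywrapcp.RoutingIndexManager(len(distance_matrix), num_vehicles, depot)
routing = pywrapcp.RoutingModel(manager)

def distance_callback(from_index, to_index):
    # Convert routing variable indices to distance-matrix node indices.
    return distance_matrix[manager.IndexToNode(from_index)][manager.IndexToNode(to_index)]

transit_index = routing.RegisterTransitCallback(distance_callback)
routing.SetArcCostEvaluatorOfAllVehicles(transit_index)

# Distance dimension: no slack, 300 max travel per vehicle, cumul starts at zero.
routing.AddDimension(transit_index, 0, 300, True, "Distance")
routing.GetDimensionOrDie("Distance").SetGlobalSpanCostCoefficient(100)

params = pywrapcp.DefaultRoutingSearchParameters()
params.first_solution_strategy = routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC

solution = routing.SolveWithParameters(params)
if solution:
    for v in range(num_vehicles):
        index = routing.Start(v)
        route = []
        while not routing.IsEnd(index):
            route.append(manager.IndexToNode(index))
            index = solution.Value(routing.NextVar(index))
        print(f"vehicle {v}: {route}")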