def get_customers(jsonschema_dict):
    # master table
    customers = _index(jsonschema_dict["customers"])
    # customers, time windows and orders
    tw_customers = index_list(
        jsonschema_dict["customersTimeWindows"], "id_customer", ["start", "end"]
    )
    ord_customers = index_list(
        jsonschema_dict["orders"],
        "id_customer",
        ["earliestTime", "latestTime", "orderQuantityFlexibility", "Quantity"],
    )
    loc_trailers = TupList(jsonschema_dict["allowedTrailers"]).to_dict(
        result_col="id_trailer", indices=["id_location"]
    )
    assign_list_prop(customers, "timewindows", tw_customers)
    assign_list_prop(customers, "orders", ord_customers)
    assign_list_prop(customers, "allowedTrailers", loc_trailers)
    # customer forecasts
    # we assume forecasts do not have a value for every period
    forecasts = (
        TupList(jsonschema_dict["forecasts"])
        .take(["id_customer", "time", "forecast"])
        .vfilter(lambda v: v[2] > 0)
    )
    horizon = jsonschema_dict["parameters"]["horizon"]
    for v in customers.values():
        v["Forecast"] = TupList(np.zeros(horizon))
    for _id, time, quantity in forecasts:
        # some forecasts reference periods beyond the horizon; skip them
        if time >= len(customers[_id]["Forecast"]):
            continue
        customers[_id]["Forecast"][time] += quantity
    return customers
def to_tsplib95(self):
    arcs = TupList(self.data["arcs"])
    nodes = (arcs.take("n1") + arcs.take("n2")).unique()
    pos = {k: v for v, k in enumerate(nodes)}
    arc_dict = arcs.to_dict(
        result_col="w", indices=["n1", "n2"], is_list=False
    ).to_dictdict()
    arc_weights = [[]] * len(nodes)
    for n1, n2dict in arc_dict.items():
        n1list = arc_weights[pos[n1]] = [0] * len(n2dict)
        for n2, w in n2dict.items():
            n1list[pos[n2]] = w
    if len(nodes) ** 2 == len(arcs):
        edge_weight_format = "FULL_MATRIX"
    elif abs(len(nodes) ** 2 - len(arcs) * 2) <= 2:
        edge_weight_format = "LOWER_DIAG_ROW"
    else:
        # TODO: can there be another possibility?
        edge_weight_format = "LOWER_DIAG_ROW"
    dict_data = dict(
        name="TSP",
        type="TSP",
        comment="",
        dimension=len(nodes),
        edge_weight_type="EXPLICIT",
        edge_weight_format=edge_weight_format,
        edge_weights=arc_weights,
    )
    return tsp.models.StandardProblem(**dict_data)
def from_dict(cls, data: dict) -> "Instance":
    tables = ["employees", "shifts", "contracts"]
    data_p = {el: {v["id"]: v for v in data[el]} for el in tables}
    data_p["demand"] = {(el["day"], el["hour"]): el for el in data["demand"]}
    # pickle round-trip: a fast deep copy of the parameters dict
    data_p["parameters"] = pickle.loads(pickle.dumps(data["parameters"], -1))
    if data.get("skill_demand"):
        data_p["skill_demand"] = {
            (el["day"], el["hour"], el["id_skill"]): el
            for el in data["skill_demand"]
        }
    else:
        data_p["skill_demand"] = {}
    if data.get("skills"):
        data_p["skills"] = {el["id"]: el for el in data["skills"]}
    else:
        data_p["skills"] = {}
    if data.get("skills_employees"):
        data_p["skills_employees"] = TupList(data["skills_employees"]).to_dict(
            result_col=["id_employee"], is_list=True, indices=["id_skill"]
        )
    else:
        data_p["skills_employees"] = TupList()
    return cls(data_p)
def check_second_dose(self):
    nb_doses = self.instance.get_nb_doses()
    clients = self.instance.get_clients()
    first_doses = (
        TupList(self.solution.data["flows"])
        .vfilter(
            lambda v: v["day"] == "Day 1"
            and v["destination"] in clients
            and nb_doses[v["product"]] > 1
        )
        .take(["destination", "product", "flow"])
        .to_dict(result_col=2, is_list=True)
        .vapply(lambda v: sum(v))
    )
    second_doses = (
        TupList(self.solution.data["flows"])
        .vfilter(
            lambda v: v["day"] == "Day 2"
            and v["destination"] in clients
            and nb_doses[v["product"]] > 1
        )
        .take(["destination", "product", "flow"])
        .to_dict(result_col=2, is_list=True)
        .vapply(lambda v: sum(v))
    )
    return second_doses.kvapply(lambda k, v: v - first_doses.get(k, 0)).kvfilter(
        lambda k, v: v < 0
    )
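# A minimal standalone sketch of the take / to_dict / vapply(sum) aggregation
# pattern used by the checks above, assuming only that pytups is installed.
# The flow records below are invented for illustration, not taken from any instance.
from pytups import TupList

_example_flows = [
    {"destination": "c1", "product": "p1", "flow": 10},
    {"destination": "c1", "product": "p1", "flow": 5},
    {"destination": "c2", "product": "p1", "flow": 7},
]
_totals = (
    TupList(_example_flows)
    .take(["destination", "product", "flow"])  # -> tuples (dest, product, flow)
    .to_dict(result_col=2, is_list=True)       # -> {(dest, product): [flows]}
    .vapply(sum)                               # -> {(dest, product): total flow}
)
# expected: _totals == {("c1", "p1"): 15, ("c2", "p1"): 7}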
def __init__(self, instance, solution=None):
    super().__init__(instance, solution)
    # Sets and parameters
    self.employee_ts_availability = TupList()
    self.ts_employees = SuperDict()
    self.ts_managers = SuperDict()
    self.ts_open = TupList()
    self.max_working_ts_week = SuperDict()
    self.workable_ts_week = SuperDict()
    self.max_working_ts_day = SuperDict()
    self.min_working_ts_day = SuperDict()
    self.workable_ts_day = SuperDict()
    self.ts_ts_employee = SuperDict()
    self.max_working_days = SuperDict()
    self.managers = TupList()
    self.incompatible_ts_employee = TupList()
    self.first_ts_day_employee = SuperDict()
    self.demand = SuperDict()
    self.ts_demand_employee_skill = SuperDict()
    # Variables
    self.works = SuperDict()
    self.starts = SuperDict()
    self.initialize()
def solve(self, options: dict):
    distance = (
        self.instance.get_arcs()
        .to_dict(result_col=["w"], indices=["n1", "n2"], is_list=False)
        .kfilter(lambda k: k[0] != k[1])
    )
    model = cp_model.CpModel()
    create_literal = lambda i, j: model.NewBoolVar("%i follows %i" % (j, i))
    literals = distance.kapply(lambda k: create_literal(*k))
    arcs = literals.to_tuplist()
    model.AddCircuit(arcs)
    model.Minimize(sum((literals * distance).values()))
    solver = cp_model.CpSolver()
    if options.get("msg", False):
        solver.parameters.log_search_progress = True
    # To benefit from the linearization of the circuit constraint.
    solver.parameters.linearization_level = 2
    solver.parameters.max_time_in_seconds = options.get("timeLimit", 10)
    if "threads" in options:
        solver.parameters.num_search_workers = options["threads"]
    status = solver.Solve(model)
    if options.get("msg", False):
        print(solver.ResponseStats())
    status_conv = {
        cp_model.OPTIMAL: STATUS_OPTIMAL,
        cp_model.INFEASIBLE: STATUS_INFEASIBLE,
        cp_model.UNKNOWN: STATUS_UNDEFINED,
        cp_model.MODEL_INVALID: STATUS_UNDEFINED,
    }
    if status not in [cp_model.OPTIMAL, cp_model.FEASIBLE]:
        return dict(
            status=status_conv.get(status), status_sol=SOLUTION_STATUS_INFEASIBLE
        )
    # successor of each node in the tour, according to the chosen literals
    next_node = (
        literals.vapply(solver.BooleanValue)
        .vfilter(lambda v: v)
        .keys_tl()
        .to_dict(1, is_list=False)
    )
    first = next_node.keys_tl()[0]
    current_node = first
    solution = TupList([first])
    while True:
        current_node = next_node[current_node]
        if current_node == first:
            break
        solution.append(current_node)
    nodes = solution.kvapply(lambda k, v: SuperDict(pos=k, node=v))
    self.solution = Solution(dict(route=nodes))
    return dict(status=status_conv.get(status), status_sol=SOLUTION_STATUS_FEASIBLE)
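# A minimal, self-contained sketch of the CP-SAT AddCircuit pattern used above,
# assuming only that ortools is installed. The three-node distance matrix is
# made up for illustration; AddCircuit takes (tail, head, literal) arcs with
# integer node ids, exactly like the tuples produced by literals.to_tuplist().
from ortools.sat.python import cp_model as _cp

_dist = {(0, 1): 3, (1, 0): 3, (0, 2): 4, (2, 0): 4, (1, 2): 5, (2, 1): 5}
_model = _cp.CpModel()
# one literal per arc: True if the tour goes from i straight to j
_lits = {(i, j): _model.NewBoolVar(f"{i}->{j}") for (i, j) in _dist}
_model.AddCircuit([(i, j, lit) for (i, j), lit in _lits.items()])
_model.Minimize(sum(_dist[a] * _lits[a] for a in _dist))
_solver = _cp.CpSolver()
if _solver.Solve(_model) in (_cp.OPTIMAL, _cp.FEASIBLE):
    # successor map of the optimal tour, e.g. {0: 1, 1: 2, 2: 0}
    _tour = {i: j for (i, j), lit in _lits.items() if _solver.BooleanValue(lit)}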
def _get_incompatible_slots(self) -> TupList:
    """
    Returns a TupList of pairs of time slots on consecutive days such that,
    given the minimum resting time, an employee who works the first slot
    cannot work the second one.
    For example: [("2021-09-06T20:00", "2021-09-07T07:00"),
        ("2021-09-06T21:00", "2021-09-07T07:00"), ...]
    """
    if (
        24 - (self._get_ending_hour() - self._get_starting_hour())
        >= self._get_min_resting_hours()
    ):
        return TupList()

    nb_incompatible = self._hour_to_slot(
        int(
            self._get_min_resting_hours()
            - (24 - (self._get_ending_hour() - self._get_starting_hour()))
        )
    )

    time_slots_wo_last_day = self.time_slots.vfilter(
        lambda v: self._get_date_string_from_ts(v)
        != get_date_string_from_ts(self._get_end_date())
    )

    def check_same_day(ts, ts2):
        return ts.date() == ts2.date()

    def check_one_day_apart(ts, ts2):
        return (ts2 - ts).days <= 1

    return (
        TupList(
            [
                (val, self.time_slots[pos + i])
                for pos, val in enumerate(time_slots_wo_last_day)
                for i in range(1, nb_incompatible + 1)
            ]
        )
        .vfilter(lambda v: not check_same_day(v[0], v[1]))
        .vfilter(lambda v: check_one_day_apart(v[0], v[1]))
        .vapply(
            lambda v: (
                self._get_time_slot_string(v[0]),
                self._get_time_slot_string(v[1]),
            )
        )
    )
def calculate_inventories(self):
    """
    Calculates the inventory of each customer at each hour of the time horizon

    :return: A dictionary whose keys are the indexes of the customers and whose
        values are dictionaries containing two elements:
        - a list 'tank_quantity' containing the value of the inventory at each hour
        - an integer 'location' corresponding to the index of the customer
    For example:
        {2: {'tank_quantity': [15000, 14000, 13000, 16000, ..., 14000, 13000], 'location': 2},
         3: {'tank_quantity': [4000, 4000, 4000, 1000, ..., 3000, 3000], 'location': 3}}
    """
    customers = self.instance.get_id_customers()
    _get_customer = lambda c, p: self.instance.get_location_property(c, p)
    # we need three things: consumption, initial stock and arrivals.
    # we store each as a dictionary where each customer has a numpy array of length = horizon
    # 1. we get consumptions from forecasts
    consumption = (
        customers.to_dict(None)
        .vapply(_get_customer, "Forecast")
        .vapply(lambda v: -np.array(v))
        .vapply(lambda v: v[0 : self.horizon])
    )
    # 2. we get initial tanks and assign them to the first period
    initial_tank = consumption.vapply(lambda v: np.zeros(self.horizon))
    for k, v in initial_tank.items():
        v[0] = _get_customer(k, "InitialTankQuantity")
    # 3. we now use the routes in the solution to get the arrivals.
    shifts = self.solution.get_all_shifts()
    all_operations = TupList(
        operation for route_list in shifts.take("route") for operation in route_list
    )
    # for each stop we take the location, the time and the quantity.
    # we keep customers only, round the time down to a period and make the quantity positive
    arrivals_tup = (
        all_operations.take(["location", "arrival", "quantity"])
        .vfilter(lambda v: self.is_customer(v[0]))
        .vapply(lambda v: (v[0], floor(v[1] / self.unit), -round(v[2], 3)))
    )
    # we initialize at 0 and increase when a truck arrives:
    arrivals = consumption.vapply(lambda v: np.zeros(self.horizon))
    for customer, time, quantity in arrivals_tup:
        arrivals[customer][time] += quantity
    # we take advantage of both pytups' and numpy's element-wise operations,
    # then we accumulate over periods
    stocks = (consumption + arrivals + initial_tank).vapply(np.cumsum)
    site_inventories = SuperDict()
    for _id, quantity_arr in stocks.items():
        site_inventories[_id] = dict(tank_quantity=quantity_arr, location=_id)
    return site_inventories
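# A minimal sketch of the key-wise SuperDict arithmetic plus numpy accumulation
# used above, with made-up data for two customers and a three-period horizon;
# assumes pytups and numpy are installed and that SuperDict "+" adds values key
# by key, as the code above relies on.
import numpy as np
from pytups import SuperDict

_consumption = SuperDict({1: np.array([-5.0, -5.0, -5.0]), 2: np.array([-2.0, -2.0, -2.0])})
_initial = SuperDict({1: np.array([20.0, 0.0, 0.0]), 2: np.array([10.0, 0.0, 0.0])})
_arrivals = SuperDict({1: np.array([0.0, 8.0, 0.0]), 2: np.array([0.0, 0.0, 0.0])})
# add the three arrays per customer, then accumulate over periods
_stocks = (_consumption + _arrivals + _initial).vapply(np.cumsum)
# expected: _stocks == {1: array([15., 18., 13.]), 2: array([8., 6., 4.])}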
def matrix_to_dict(matrix, key, func):
    """Extracts `key` from each record and flattens the resulting matrix into a
    dict indexed by (row, column), applying `func` to each cell."""
    matrix = TupList(matrix).vapply(lambda v: v[key])
    result = SuperDict()
    for row_pos, row in enumerate(matrix):
        for col_pos, col in enumerate(row):
            result[row_pos, col_pos] = func(col)
    return result
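# A hypothetical usage sketch of matrix_to_dict; the records and the
# "distances" key below are invented for illustration only.
_records = [{"distances": ["1.5", "2.0"]}, {"distances": ["3.0", "0.5"]}]
_as_dict = matrix_to_dict(_records, "distances", float)
# expected: _as_dict == {(0, 0): 1.5, (0, 1): 2.0, (1, 0): 3.0, (1, 1): 0.5}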
def index_list(table, index, _list):
    """
    Indexes `table` by the `index` column; each value becomes a list of
    SuperDicts keeping only the `_list` columns as keys.
    """
    return (
        TupList(table)
        .to_dict(result_col=_list, indices=[index])
        .vapply(lambda v: v.vapply(lambda vv: SuperDict(zip(_list, vv))))
    )
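# A hypothetical usage sketch of index_list, with invented time-window records;
# each value ends up as a TupList of SuperDicts keyed by the requested columns.
_time_windows = [
    {"id_customer": 1, "start": 0, "end": 5},
    {"id_customer": 1, "start": 8, "end": 12},
    {"id_customer": 2, "start": 3, "end": 6},
]
_indexed = index_list(_time_windows, "id_customer", ["start", "end"])
# expected: _indexed == {1: [{'start': 0, 'end': 5}, {'start': 8, 'end': 12}],
#                        2: [{'start': 3, 'end': 6}]}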
def get_unique_locations_in(self, route):
    """
    :return: A unique list of all the sources and customers in the given route
    """
    return TupList(
        [step[0] for step in route.visited if not self.is_base(step[0])]
    ).unique()
def read_excel(path: str, param_tables_names: list = None) -> dict:
    """
    Read an entire excel file.

    :param path: path of the excel file
    :param param_tables_names: names of the parameter tables
    :return: a dict with a list of dicts (records format) for each table.
    """
    is_xl_type(path)
    try:
        import openpyxl
    except (ModuleNotFoundError, ImportError) as e:
        raise Exception("You must install the openpyxl package to use this method") from e
    if param_tables_names is None:
        param_tables_names = []
    data = pd.read_excel(path, sheet_name=None)
    data_tables = {
        name: TupList(content.to_dict(orient="records")).vapply(
            lambda v: SuperDict(v).vapply(lambda vv: format_value(vv))
        )
        for name, content in data.items()
        if name not in param_tables_names
    }
    parameters_tables = {
        t: SuperDict(read_param_table(path, t)).vapply(lambda v: format_value(v))
        for t in param_tables_names
    }
    return {**data_tables, **parameters_tables}
def unique_customers_in(self, route):
    """
    :return: A unique list of all the customers in the given route
    """
    return TupList(
        [step[0] for step in route.visited if self.is_customer(step[0])]
    ).unique()
def unique_sources_in(self, route):
    """
    :return: A unique list of all the sources in the given route
    """
    return TupList(
        [step[0] for step in route.visited if self.is_source(step[0])]
    ).unique()
def get_amount_supplied(self):
    return (
        TupList(
            {"origin": v["origin"], "product": v["product"], "flow": v["flow"]}
            for v in self.data["flows"]
        )
        .to_dict(result_col="flow", indices=["origin", "product"])
        .vapply(lambda v: sum(v))
    )
def solve(self, options: dict):
    # we just get an arbitrary but complete list of nodes and we return it
    nodes = (
        TupList(v["n1"] for v in self.instance.get_arcs())
        .unique()
        .kvapply(lambda k, v: dict(pos=k, node=v))
    )
    self.solution = Solution(dict(route=nodes))
    return dict(status_sol=SOLUTION_STATUS_FEASIBLE, status=STATUS_UNDEFINED)
def get_sources(jsonschema_dict):
    # master table
    sources = _index(jsonschema_dict["sources"])
    # locations and their trailers
    loc_trailers = TupList(jsonschema_dict["allowedTrailers"]).to_dict(
        result_col="id_trailer", indices=["id_location"]
    )
    assign_list_prop(sources, "allowedTrailers", loc_trailers)
    return sources
def __init__(self, data: dict):
    super().__init__(data)
    # Stores the starting dates of the weeks, ordered
    self.weeks = TupList()
    # The first object stores the dates in order,
    # the second the properties of each date.
    self.dates = TupList()
    self.dates_properties = SuperDict()
    # The first object stores the time slots in order,
    # the second the properties of each one.
    self.time_slots = TupList()
    self.time_slots_properties = SuperDict()
    self.cache_properties()
def test_cases(self):
    cwd = os.path.dirname(os.path.realpath(__file__))
    _get_file = lambda name: os.path.join(cwd, "data", name)
    _get_instance = lambda fn: Instance.from_json(_get_file(fn)).to_dict()
    _get_solution = lambda fn: Solution.from_json(_get_file(fn)).to_dict()
    return TupList(
        [("example_instance_filtered.json", "example_solution_filtered.json")]
    ).vapply(lambda v: (_get_instance(v[0]), _get_solution(v[1])))
def get_objective(self) -> float:
    # we get the nodes of the route sorted by position
    route = (
        TupList(self.solution.data["route"])
        .sorted(key=lambda v: v["pos"])
        .vapply(lambda v: v["node"])
    )
    weight = {(el["n1"], el["n2"]): el["w"] for el in self.instance.data["arcs"]}
    # we sum the weights of all arcs in the solution, plus the arc closing the tour
    return (
        sum(weight[n1, n2] for n1, n2 in zip(route, route[1:]))
        + weight[route[-1], route[0]]
    )
def check_restricted_flows(self):
    restricted_flows = self.instance.get_restricted_flows()
    return (
        TupList(self.solution.data["flows"])
        .vfilter(lambda v: (v["origin"], v["destination"]) in restricted_flows)
        .take(["origin", "destination", "flow"])
        .to_dict(result_col=2, is_list=True)
        .vapply(lambda v: sum(v))
    )
def setUp(self):
    super().setUp()
    self.full_inst_path = self._get_path("./data/instance.json")
    self.full_inst = SuperDict.from_dict(self.import_schema(self.full_inst_path))
    # Removing parameter tables
    self.full_inst["properties"] = self.full_inst["properties"].vfilter(
        lambda v: v["type"] == "array"
    )
    self.one_tab_inst_path = self._get_path("./data/one_table.json")
    self.one_tab_inst = SuperDict.from_dict(
        self.import_schema(self.one_tab_inst_path)
    )
    self.app_name = "test"
    self.second_app_name = "test_sec"
    self.default_output_path = self._get_path("./data/output")
    self.other_output_path = self._get_path("./data/output_path")
    self.last_path = self.default_output_path
    self.all_methods = TupList(
        ["getOne", "getAll", "deleteOne", "deleteAll", "update", "post"]
    )
def check_consistency_warehouses(self):
    warehouses = self.instance.get_warehouses()
    flows = TupList(self.solution.data["flows"])
    flow_in = (
        flows.vfilter(lambda v: v["destination"] in warehouses)
        .take(["destination", "day", "product", "flow"])
        .to_dict(result_col=3, is_list=True)
        .vapply(lambda v: sum(v))
    )
    flow_out = (
        flows.vfilter(lambda v: v["origin"] in warehouses)
        .take(["origin", "day", "product", "flow"])
        .to_dict(result_col=3, is_list=True)
        .vapply(lambda v: sum(v))
    )
    return (
        flow_out.kvapply(lambda k, v: v - flow_in.get(k, 0))
        .update(flow_in.kvapply(lambda k, v: flow_out.get(k, 0) - v))
        .vfilter(lambda v: v != 0)
    )
def _get_weeks(self) -> TupList:
    """
    Returns a TupList with the starting date of each week in datetime format
    For example: [datetime(2021, 9, 6, 0, 0, 0), datetime(2021, 9, 13, 0, 0, 0), ...]
    """
    return TupList(
        [
            get_one_date(self._get_start_date(), weeks=i)
            for i in range(0, self._get_horizon())
        ]
    ).sorted()
def get_hours_worked_per_week(self) -> SuperDict:
    """
    Returns a SuperDict with the number of time slots worked by each employee
    in each week, indexed by (week, id_employee).
    For example: {(0, 1): 40, ...}
    """
    return (
        TupList(
            {
                "id_employee": id_employee,
                "ts": ts,
                "week": get_week_from_string(ts),
            }
            for (id_employee, ts) in self.data["works"]
        )
        .to_dict(result_col="ts", indices=["week", "id_employee"])
        .vapply(lambda v: len(v))
    )
def check_consistency_suppliers(self):
    suppliers = self.instance.get_suppliers()
    clients = self.instance.get_clients()
    flows = TupList(self.solution.data["flows"])
    sent = (
        flows.vfilter(lambda v: v["origin"] in suppliers)
        .take(["day", "product", "flow"])
        .to_dict(result_col=2, is_list=True)
        .vapply(lambda v: sum(v))
    )
    received = (
        flows.vfilter(lambda v: v["destination"] in clients)
        .take(["day", "product", "flow"])
        .to_dict(result_col=2, is_list=True)
        .vapply(lambda v: sum(v))
    )
    return (
        received.kvapply(lambda k, v: v - sent.get(k, 0))
        .update(sent.kvapply(lambda k, v: received.get(k, 0) - v))
        .vfilter(lambda v: v != 0)
    )
def _get_dates(self) -> TupList:
    """
    Returns a TupList with the dates of the whole horizon in datetime format
    For example: [datetime(2021, 9, 6, 0, 0, 0), datetime(2021, 9, 7, 0, 0, 0), ...]
    """
    return TupList(
        [
            get_one_date(self._get_start_date(), pos, d)
            for d in range(0, self._get_opening_days())
            for pos, value in enumerate(self.weeks)
        ]
    ).sorted()
def check_warehouse_capacity(self):
    warehouses = self.instance.get_warehouses()
    capacities = self.instance.get_capacity()
    return (
        TupList(self.solution.data["flows"])
        .vfilter(lambda v: v["destination"] in warehouses)
        .take(["destination", "day", "flow"])
        .to_dict(result_col=2, is_list=True)
        .vapply(lambda v: sum(v))
        .kvapply(lambda k, v: v - capacities[k[0]])
        .vfilter(lambda v: v > 0)
    )
def get_consecutive_time_slots_employee(self) -> TupList:
    """
    Returns a TupList with a time slot, the next time slot in the same day
    and an employee, according to the employee's availability
    For example: [("2021-09-06T07:00", "2021-09-06T08:00", 1), ...]
    """
    return TupList(
        [
            (ts, ts2, e)
            for (d, e), _time_slots in self.get_employees_time_slots_day().items()
            for ts, ts2 in zip(_time_slots, _time_slots[1:])
        ]
    )
def _get_time_slots(self) -> TupList:
    """
    Returns a TupList with the time slots of the whole horizon in datetime format
    For example: [datetime(2021, 9, 6, 7, 0, 0), datetime(2021, 9, 6, 8, 0, 0), ...]
    """
    nb_hours = self._get_ending_hour() - self._get_starting_hour()
    nb_slots = int(self._hour_to_slot(nb_hours))

    def date_hour_ts(d, s):
        return get_one_date_time(d, self._get_minutes(s))

    return TupList(
        [date_hour_ts(date, s) for date in self.dates for s in range(nb_slots)]
    ).sorted()