def from_mm(cls, path, content=None):
    """Parse a PSPLIB-style multi-mode (.mm) project file into an instance.

    :param path: path to the .mm file; only read when *content* is None.
    :param content: optional pre-read list of lines (as from ``readlines()``),
        so callers/tests can supply in-memory content.
    :return: ``cls(data)`` where *data* holds resources, jobs (successors),
        durations and needs, keyed the way the class expects.

    Fix: the ``re.split`` patterns were plain strings (``'\\s+'``); ``\\s`` is
    an invalid escape sequence in a non-raw string and raises a
    SyntaxWarning/DeprecationWarning on modern CPython. They are now raw.
    """
    if content is None:
        with open(path, 'r') as f:
            content = f.readlines()
    content = pt.TupList(content)
    # The file layout is fixed: locate each section header line verbatim.
    index_prec = content.index('PRECEDENCE RELATIONS:\n')
    index_requests = content.index('REQUESTS/DURATIONS:\n')
    index_avail = content.index('RESOURCEAVAILABILITIES:\n')

    # --- precedence relations ---
    precedence = content[index_prec + 2:index_requests - 1]
    successors = pt.SuperDict()
    for line in precedence:
        # columns: <blank> job #modes #successors successor... <blank>
        _, job, modes, num_succ, *jobs, _ = re.split(r'\s+', line)
        successors[int(job)] = pt.TupList(jobs).vapply(int)
    successors = successors.kvapply(lambda k, v: dict(successors=v, id=k))

    # --- requests / durations ---
    requests = content[index_requests + 3:index_avail - 1]
    # resource column names, e.g. 'R 1', 'N 2', taken from the header row
    resources = re.findall(r'[RN] \d', content[index_requests + 1])
    needs = pt.SuperDict()
    durations = pt.SuperDict()
    last_job = ''
    for line in requests:
        # Continuation rows (extra modes of the same job) leave the job
        # column blank, so reuse the last seen job id.
        if line[2] == ' ':
            job = last_job
            _, mode, duration, *consumption, _ = re.split(r'\s+', line)
        else:
            _, job, mode, duration, *consumption, _ = re.split(r'\s+', line)
            last_job = job
        key = int(job), int(mode)
        needs[key] = \
            {v: int(consumption[k]) for k, v in enumerate(resources)}
        needs[key] = pt.SuperDict(needs[key])
        durations[key] = int(duration)

    # --- resource availabilities ---
    line = content[index_avail + 2]
    _, *avail, _ = re.split(r'\s+', line)
    availability = {k: int(avail[i]) for i, k in enumerate(resources)}
    availability = pt.SuperDict(availability).kvapply(
        lambda k, v: dict(available=v, id=k))

    data = dict(
        resources=availability,
        jobs=successors,
        durations=durations.to_dictdict(),
        needs=needs.to_dictdict(),
    )
    return cls(data)
def download_backup_static(self):
    """Fetch fresh static data for every station and store one timestamped JSON snapshot."""
    fresh_stations = self.get_stations(cache=False)
    snapshot = pt.TupList(fresh_stations).apply(self.get_static)
    stamp = self.get_timestamp(format="%Y-%m-%dT%H%M%S")
    cache_key = self.all_stations + '/static/' + stamp
    self.set_cache(cache_key, snapshot, ext='.json')
def to_dict(self):
    """Flatten the solution routes into a list of {route, pos, node} records."""
    indexed = pt.SuperDict()
    for route_id, node_list in self.data["routes"].items():
        # pair each node with its position along the route
        indexed[route_id] = pt.TupList(node_list).kvapply(
            lambda pos, node: (pos, node))
    records = indexed.to_tuplist().vapply(
        lambda tup: dict(route=tup[0], pos=tup[1], node=tup[2]))
    return pt.SuperDict(routes=records)
def from_dict(cls, data) -> "Instance":
    """Build an Instance from its plain-dict form.

    Demand entries get re-indexed by their node id "n"; each arc record's
    values become a (first, second) -> third mapping (i.e. pair -> weight).
    """
    demand_by_node = pt.SuperDict(
        {entry["n"]: entry for entry in data["demand"]})
    arc_weights = (
        pt.TupList(data["arcs"])
        .vapply(lambda arc: list(arc.values()))
        .to_dict(result_col=2, is_list=False)
    )
    merged = {**data, "demand": demand_by_node, "arcs": arc_weights}
    return cls(merged)
def read_file(filePath):
    """Parse a graph file: skip the header line, then read each remaining
    line as two space-separated node ids, returned as edge dicts."""
    with open(filePath, "r") as handle:
        lines = handle.read().splitlines()
    edges = (
        pt.TupList(lines[1:])
        .vapply(lambda row: row.split(" "))
        .vapply(lambda cols: dict(n1=int(cols[0]), n2=int(cols[1])))
    )
    return dict(pairs=edges)
def solve(self, options: dict):
    """Solve the graph-coloring instance with OR-Tools CP-SAT.

    Builds one integer variable per node (its color), forbids equal colors
    on every edge, and minimizes the maximum color index used.

    :param options: solver options; "timeLimit" (seconds, default 10) is read.
    :return: dict with "status" and "status_sol" keys; on success also
        stores the assignment in ``self.solution``.
    """
    model = cp_model.CpModel()
    input_data = pt.SuperDict.from_dict(self.instance.data)
    pairs = input_data["pairs"]
    # collect every node id appearing on either side of an edge
    n1s = pt.TupList(pairs).vapply(lambda v: v["n1"])
    n2s = pt.TupList(pairs).vapply(lambda v: v["n2"])
    nodes = (n1s + n2s).unique2()
    # n nodes never need more than n colors, so color ids live in [0, n-1]
    max_colors = len(nodes) - 1
    # variable declaration:
    color = pt.SuperDict({
        node: model.NewIntVar(0, max_colors, "color_{}".format(node))
        for node in nodes
    })
    # TODO: identify maximum cliques and apply constraint on the cliques instead of on pairs
    for pair in pairs:
        model.Add(color[pair["n1"]] != color[pair["n2"]])
    # objective: minimize the largest color index actually used
    obj_var = model.NewIntVar(0, max_colors, "total_colors")
    model.AddMaxEquality(obj_var, color.values())
    model.Minimize(obj_var)
    solver = cp_model.CpSolver()
    solver.parameters.max_time_in_seconds = options.get("timeLimit", 10)
    status = solver.Solve(model)
    # NOTE(review): cp_model.FEASIBLE is accepted by the guard below but has
    # no entry here, so a feasible-but-not-optimal solve returns status=None.
    # Consider mapping it to the project's feasible-status constant — confirm
    # what the file imports before changing.
    status_conv = {
        cp_model.OPTIMAL: STATUS_OPTIMAL,
        cp_model.INFEASIBLE: STATUS_INFEASIBLE,
        cp_model.UNKNOWN: STATUS_UNDEFINED,
        cp_model.MODEL_INVALID: STATUS_UNDEFINED,
    }
    if status not in [cp_model.OPTIMAL, cp_model.FEASIBLE]:
        # no usable assignment: report solver status, no solution
        return dict(status=status_conv.get(status),
                    status_sol=SOLUTION_STATUS_INFEASIBLE)
    # extract the color chosen for each node and store it as the solution
    color_sol = color.vapply(solver.Value)
    assign_list = color_sol.items_tl().vapply(
        lambda v: dict(node=v[0], color=v[1]))
    self.solution = Solution(dict(assignment=assign_list))
    return dict(status=status_conv.get(status),
                status_sol=SOLUTION_STATUS_FEASIBLE)
def get_distance_dict(complete_graph, max_dist_km_walk):
    """Return {stop_id_x: {stop_id_y: distance}} for stop pairs whose
    haversine distance (rounded to 4 decimals) is below *max_dist_km_walk*.

    Note: writes a 'distance' column onto the dataframe passed in.
    """
    complete_graph['distance'] = haversine_np(
        complete_graph.stop_lon_x, complete_graph.stop_lat_x,
        complete_graph.stop_lon_y, complete_graph.stop_lat_y)
    complete_graph['distance'] = complete_graph['distance'].round(4)
    walkable = complete_graph[complete_graph.distance < max_dist_km_walk]
    records = walkable.filter(
        ['stop_id_x', 'stop_id_y', 'distance']).to_records(index=False)
    neighbors = pt.TupList(records).to_dict(result_col=2, is_list=False)
    return neighbors.to_dictdict()
def test_cases(self) -> List[Dict]:
    """Load every graph-coloring data file (gc_*) bundled next to this module."""

    def _parse(path):
        # first line is a header; the rest are "n1 n2" edge rows
        with open(path, "r") as f:
            rows = f.read().splitlines()
        edges = (
            pt.TupList(rows[1:])
            .vapply(lambda row: row.split(" "))
            .vapply(lambda cols: dict(n1=int(cols[0]), n2=int(cols[1])))
        )
        return dict(pairs=edges)

    data_dir = os.path.join(os.path.dirname(__file__), "data")
    candidates = pt.TupList(os.listdir(data_dir)).vfilter(
        lambda name: name.startswith("gc_"))
    return [_parse(os.path.join(data_dir, name)) for name in candidates]
def generate_timetable():
    """Read every stop-schedule JSON file and flatten all departures
    into one tuple list."""
    data_dir = 'data_tisseo/stops_schedules/'

    def _stem(filename):
        return os.path.splitext(filename)[0]

    # map each file stem to its parsed JSON content
    schedules = (
        pt.TupList(os.listdir(data_dir))
        .to_dict(None)
        .vapply(_stem)
        .reverse()
        .vapply(lambda name: data_dir + name)
        .vapply(read_json)
    )
    # extract per-stop (line, time) info, then flatten everything to tuples
    return (
        schedules
        .vapply(_treat_stop_area)
        .to_dictup()
        .to_tuplist()
    )
def get_lats_longs(arcs, info):
    """Build a dataframe with one row per node appearing in *arcs*.

    Each row carries lat/long, stop name, route short name, trip id, stop
    sequence and formatted departure time, looked up in the *info* tables.

    :param arcs: {node: iterable of neighbor nodes}; nodes carry .stop,
        .route, .trip, .seq and .time attributes (assumed — confirm caller).
    :param info: dict of GTFS-like tables with at least 'stops' and 'routes'.
    :return: pandas DataFrame, one row per unique node, default integer index.
    """
    # Idiom fix: build the node set with a union + comprehension instead of
    # nested loops of .add() calls.
    nodes = set(arcs) | {n2 for neighbors in arcs.values() for n2 in neighbors}

    def describe(v):
        # one lookup per node; key order fixes the DataFrame column order
        stop = info['stops'][v.stop]
        return dict(
            lat=float(stop['stop_lat']),
            long=float(stop['stop_lon']),
            time=v.time.strftime('%H:%M'),
            route=info['routes'][v.route]['route_short_name'],
            name=stop['stop_name'],
            trip=v.trip,
            seq=v.seq,
        )

    return (pt.TupList(nodes).to_dict(None).vapply(describe)
            .to_df(orient='index').reset_index(drop=True))
def get_tables(directory = 'data_tisseo/tisseo_gtfs/'):
    """Load the core GTFS tables from *directory*, keyed by table name."""
    table_names = ['stop_times', 'trips', 'routes', 'stops', 'calendar']
    tables = pt.TupList(table_names).to_dict(None)
    return tables.vapply(read_table, directory=directory)
def _treat_stop_area(one_stop):
    """Group a stop's departures by line short-name, each mapped to its
    sorted departure times."""

    def _line_and_time(dep):
        return dep['line']['shortName'], dep['dateTime']

    departures = pt.TupList(one_stop['departures']['departure'])
    by_line = departures.apply(_line_and_time).to_dict(1)
    return by_line.vapply(sorted)
def get_relevant_networks(self):
    """Read the cached list of relevant network ids, one id per line."""
    path = os.path.join(self.cache_dir, 'v2/relevant_networks.txt')
    with open(path, 'r') as handle:
        raw_lines = handle.readlines()
    return pt.TupList(raw_lines).apply(str.strip)
import pytups as pt

# Example dataset: one record per person.
data = [
    dict(name="Alex", birthyear=1980, sex="M", height=175),
    dict(name="Bernard", birthyear=1955, sex="M", height=164),
    dict(name="Chloe", birthyear=1995, sex="F", height=178),
    dict(name="Daniel", birthyear=2010, sex="M", height=131),
    dict(name="Ellen", birthyear=1968, sex="F", height=158),
]
data_tl = pt.TupList(data)

# males who are adults in 2021 (born 2003 or earlier)
adults_M = data_tl.vfilter(
    lambda v: v["sex"] == "M" and v["birthyear"] <= 2003)
print("adults_M:", adults_M)

# project each record down to name and birthyear
adults_M_names_BY = adults_M.take(["name", "birthyear"])
print("adults_M_names_BY:", adults_M_names_BY)

# project down to the name alone
adults_M_names = adults_M.take("name")
print("adults_M_names:", adults_M_names)

# get everyone age (in 2021)
def get_pairs(self):
    """Return the instance's edges as a TupList of (n1, n2) tuples."""
    raw_pairs = self.data["pairs"]
    return pt.TupList([(item["n1"], item["n2"]) for item in raw_pairs])
14: 72, 15: 111, 16: 111, 17: 117, 18: 69, 19: 115, 20: 68,
}
# Derive intermediate parameters and sets.
# Horizon: upper bound on the makespan = run every task back to back.
C_max = sum(duration.values())
periods = range(C_max)
tasks = duration.keys()
# all legal combinations of task-period assignment
jk_all = pt.TupList((t, p) for t in tasks for p in periods)
# we filter the starts that are too late to be possible:
JK = jk_all.vfilter(lambda x: x[1] + duration[x[0]] <= C_max)
# for each task j, the list of periods k in which it may start
K_j = JK.to_dict(result_col=1)
# all combinations (j, k, k2) such that starting task j
# in period k keeps it active in period k2
jkk2 = pt.TupList(
    (j, k, k2) for j, k in JK for k2 in range(k, k + duration[j]))
# for each period k2, the (task, start) pairs that make it unavailable:
JK_k2 = jkk2.to_dict(result_col=[0, 1])