Example #1
 def loadOriginal(self):
     self.original_orders, self.original_days = {}, {}
     df = pd.read_csv("haikou-experiments/network/combined_0.csv")
     for j in range(df.shape[0]):
         self.original_orders[getID(df["start_ver"][j],
                                    df["end_ver"][j])] = df["num"][j]
         self.original_days[getID(df["start_ver"][j],
                                  df["end_ver"][j])] = df["days"][j]
Example #2
 def getEdges(route):
     '''Get all the edges along a route'''
     edges = []
     for i in range(len(route) - 1):
         combined_id = getID(route[i], route[i + 1])
         edges.append(ALL_EDGES_DIC[combined_id]["id"])
     return edges
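Every example on this page keys its dictionaries on the value returned by getID, imported from basis.assistant in Example #11 below. Its definition is never shown; a minimal sketch consistent with the call sites (two identifiers in, one order-sensitive hashable key out) could look like the following. The concatenation scheme is an assumption, not the library's actual implementation:

    def getID(ver1, ver2):
        # Hypothetical sketch only: combine two identifiers into a single
        # dictionary key; the real basis.assistant.getID may differ.
        return "%s_%s" % (ver1, ver2)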
Example #3
 def possibleOD(self):
     '''Load all origin-destination pairs'''
     ODs_df = pd.read_csv("haikou-experiments/network/ODs_combined.csv")
     self.possible_ODs = {}
     for i in range(ODs_df.shape[0]):
         self.possible_ODs[getID(ODs_df["start_ver"][i],
                                 ODs_df["end_ver"][i])] = i
Example #4
    def compareResult(self):
        '''Compare the prediction results with the simulation statistics'''
        period_index = 0
        prediction_df = pd.read_csv("haikou-experiments/results/PREDICTION_OD_%s_PERIOD_0_SAMPLE_15_TENDENCY_1.00.csv" % (self.OD_num))
        all_P_w, all_l_w, all_e_w = {}, {}, {}
        for i in range(prediction_df.shape[0]):
            combined_id = getID(prediction_df["start_ver"][i], prediction_df["end_ver"][i])
            all_P_w[combined_id] = prediction_df["P_w"][i]
            all_l_w[combined_id] = prediction_df["l_w"][i]
            all_e_w[combined_id] = prediction_df["e_w"][i]

        output_path = "haikou-experiments/results/COMPARISON_SAMPLE_%s_TENDENCY_%.2f.csv" % (self.min_sample, self.tendency)
        with open(output_path, "w") as csvfile:
            writer = csv.writer(csvfile)
            writer.writerow(["start_ver", "end_ver", "distance", "lambda", "original_num", "original_days",
                             "num", "days", "P_w", "l_w", "e_w", "matching_probability", "aver_final_distance",
                             "aver_shared_distance", "P_w_err", "l_w_err", "e_w_err",
                             "P_w_err_ratio", "l_w_err_ratio", "e_w_err_ratio"])

        ODs_df = pd.read_csv("haikou-experiments/network/combined_%s.csv"%(period_index))
        all_lambda = {}
        for i in range(ODs_df.shape[0]):
            if ODs_df["start_ver"][i] == ODs_df["end_ver"][i]: continue
            # Rows appear to be sorted by demand, so stop at the first zero.
            if ODs_df["num"][i] == 0: break
            combined_id = getID(ODs_df["start_ver"][i],ODs_df["end_ver"][i])
            all_lambda[combined_id] = ODs_df["num"][i]/(PERIODS_MINUTES[period_index]*40)

        simulation_df = pd.read_csv("haikou-experiments/results/SIMULATION_STATISTIC.csv")
        # Open the output once in append mode; the header was written above.
        with open(output_path, "a") as csvfile:
            writer = csv.writer(csvfile)
            for i in range(simulation_df.shape[0]):
                new_key = getID(simulation_df["start_ver"][i], simulation_df["end_ver"][i])
                if new_key not in all_P_w: continue
                distance = Schedule.distanceByHistory(simulation_df["start_ver"][i], simulation_df["end_ver"][i])
                l_w_err_ratio = abs(simulation_df["aver_final_distance%s"%period_index][i]-all_l_w[new_key])/simulation_df["aver_final_distance%s"%period_index][i]
                if simulation_df["matching_probability%s"%period_index][i] > 0:
                    P_w_err_ratio = abs(simulation_df["matching_probability%s"%period_index][i]-all_P_w[new_key])/simulation_df["matching_probability%s"%period_index][i]
                    e_w_err_ratio = abs(simulation_df["aver_shared_distance%s"%period_index][i]-all_e_w[new_key])/simulation_df["aver_shared_distance%s"%period_index][i]
                else:
                    P_w_err_ratio, e_w_err_ratio = 0, 0
                writer.writerow([simulation_df["start_ver"][i], simulation_df["end_ver"][i], distance, all_lambda[new_key],
                    simulation_df["original_num"][i], simulation_df["original_days"][i],
                    simulation_df["num%s"%period_index][i], simulation_df["days%s"%period_index][i], all_P_w[new_key],
                    all_l_w[new_key], all_e_w[new_key], simulation_df["matching_probability%s"%period_index][i], simulation_df["aver_final_distance%s"%period_index][i],
                    simulation_df["aver_shared_distance%s"%period_index][i], abs(simulation_df["matching_probability%s"%period_index][i]-all_P_w[new_key]),
                    abs(simulation_df["aver_final_distance%s"%period_index][i]-all_l_w[new_key]), abs(simulation_df["aver_shared_distance%s"%period_index][i]-all_e_w[new_key]),
                    P_w_err_ratio, l_w_err_ratio, e_w_err_ratio])
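compareResult reports plain relative errors against the simulated quantities (falling back to 0 when the simulated matching probability is 0). A worked one-liner with made-up values:

    # Made-up values: simulated matching probability 0.40, predicted 0.30.
    P_w_sim, P_w_pred = 0.40, 0.30
    P_w_err_ratio = abs(P_w_sim - P_w_pred) / P_w_sim  # 0.25, i.e. 25% off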
Example #5
def getperiodsIndex(all_periods):
    '''Map every (hour, minute) pair to the index of the period that contains it'''
    time_dics = {}
    for i,period in enumerate(all_periods):
        for hour in range(period["from"]["hour"],period["to"]["hour"]+1):
            min_minute, max_minute = 0, 60
            if hour == period["from"]["hour"]: min_minute = period["from"]["minute"]
            if hour == period["to"]["hour"]: max_minute = period["to"]["minute"]
            for minute in range(min_minute,max_minute):
                time_dics[getID(hour,minute)] = i
    return time_dics
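A hypothetical call showing the input shape getperiodsIndex expects; the single 07:30-09:00 period is made up:

    # One made-up period from 07:30 to 09:00.
    periods = [{"from": {"hour": 7, "minute": 30},
                "to": {"hour": 9, "minute": 0}}]
    time_dics = getperiodsIndex(periods)
    # Every minute from 07:30 through 08:59 now maps to period index 0,
    # e.g. time_dics[getID(8, 15)] == 0.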
Example #6
    def loadODs(self):
        lambda_df = pd.read_csv(
            "haikou-experiments/network/combined_0.csv")  # demand rates
        ODs_df = pd.read_csv(
            "haikou-experiments/matching_relationship/ODs.csv")  # OD
        self.all_ODs = {}
        bar = progressbar.ProgressBar(widgets=[
            "ODs Loading:",
            progressbar.Percentage(),
            ' (',
            progressbar.SimpleProgress(),
            ') ',
            ' (',
            progressbar.AbsoluteETA(),
            ') ',
        ])
        for j in bar(range(self.max_OD_ID)):
            if j >= lambda_df.shape[0]: break
            # Rows appear to be sorted by days, so stop at the first OD
            # below the sample threshold.
            if lambda_df["days"][j] <= self.min_samples: break
            combined_id = getID(lambda_df["start_ver"][j],
                                lambda_df["end_ver"][j])
            i = self.OD_dic[combined_id]["line_id"]
            self.all_ODs[ODs_df["id"][i]] = {
                "OD_id":
                ODs_df["id"][i],
                "start_ver":
                ODs_df["start_ver"][i],
                "end_ver":
                ODs_df["end_ver"][i],
                "num":
                lambda_df["num"][j],
                "taker_keys":
                json.loads(ODs_df["taker_keys"][i]),
                "seeker_keys":
                json.loads(ODs_df["seeker_keys"][i]),
                "lam_w":
                lambda_df["num"][j] * self.tendency /
                (PERIODS_MINUTES[self.HOUR_INDEX] * 40)
            }

        print("#############Experiments Setting##############")
        print(
            "Experiments Period: %02s:%02s  - %02s:%02s" %
            (PERIODS[self.HOUR_INDEX][0], PERIODS[self.HOUR_INDEX][1],
             PERIODS[self.HOUR_INDEX + 1][0], PERIODS[self.HOUR_INDEX + 1][1]))
        print("Search Distance: %s " % (MAX_SEARCH_LAYERS * 500))
        print("MAX OD ID: %s" % self.max_OD_ID)
        print("Feasible OD: %s" % len(self.all_ODs))
Example #7
 def countNewOD(self, start_ver, end_ver):
     _id = getID(start_ver, end_ver)
     self.count[_id] = {
         "num": 0,
         "start_ver": start_ver,
         "end_ver": end_ver,
         "matching_num": 0,
         "total_shared_distance": 0,
         "total_final_distance": 0,
         "period_num": [0 for _ in range(len(ALL_PERIODS))],
         "period_matching_num": [0 for _ in range(len(ALL_PERIODS))],
         "period_shared_distance": [0 for _ in range(len(ALL_PERIODS))],
         "period_final_distance": [0 for _ in range(len(ALL_PERIODS))]
     }
     self.all_start_ver.append(start_ver)
     self.all_end_ver.append(end_ver)
Example #8
 def generateLine(self, cur_line, history_df):
     '''Process one line of the history csv and spawn a passenger'''
     original_start_ver, original_end_ver = history_df["depature_ver"][
         cur_line], history_df["arrive_ver"][cur_line]
     start_ver, end_ver = history_df["combined_depature_ver"][
         cur_line], history_df["combined_arrive_ver"][cur_line]
     combined_id = getID(start_ver, end_ver)
     if random.random() > self.tendency or (
             start_ver == end_ver and self.COMBINED_OD) or (
                 original_start_ver == original_end_ver
                 and not self.COMBINED_OD) or combined_id not in self.possible_ODs:
         # Skip this order; yield a negligible delay so the generator
         # still produces a simulation event.
         yield self.env.timeout(0.0000000000001)
     else:
         real_start_time = history_df["depature_time"][cur_line]
         self.env.process(
             self.passenger([
                 start_ver, end_ver, original_start_ver, original_end_ver,
                 real_start_time
             ], self.possible_ODs[combined_id]))
Example #9
 def getNeighbor(vertex):
     '''Get the edges adjacent to a vertex'''
     search_layer, neighbor_edges, neighbor_points = 0, [], []
     current_vertex = [vertex]
     while search_layer < MAX_SEARCH_LAYERS:
         temp_current_vertex = []
         for search_ver in current_vertex:
             temp_current_vertex += ALL_VERTEXES[search_ver]["front_ver"]
             for ver in ALL_VERTEXES[search_ver]["front_ver"]:
                 dic_key = getID(ver, search_ver)
                 if dic_key not in ALL_EDGES_DIC: continue
                 neighbor_edges.append(ALL_EDGES_DIC[dic_key]["id"])
                 neighbor_points.append(ver)
         current_vertex = temp_current_vertex
         search_layer += 1
     # print("neighbor_points", vertex, neighbor_points)
     # print("neighbor_edges", vertex, neighbor_edges)
     return Schedule.delExist(neighbor_edges), Schedule.delExist(neighbor_points)
Example #10
 def loadODDic(self):
     df = pd.read_csv("haikou-experiments/matching_relationship/ODs.csv")
     self.OD_dic = {}
     bar = progressbar.ProgressBar(widgets=[
         "OD Dic Loading:",
         progressbar.Percentage(),
         ' (',
         progressbar.SimpleProgress(),
         ') ',
         ' (',
         progressbar.AbsoluteETA(),
         ') ',
     ])
     for i in range(df.shape[0]):
         if i > self.max_OD_ID: break
         combined_id = getID(df["start_ver"][i], df["end_ver"][i])
         self.OD_dic[combined_id] = {
             "line_id": i,
             "start_ver": df["start_ver"][i],
             "end_ver": df["end_ver"][i]
         }
Example #11
import json
import pandas as pd
from basis.assistant import getID

ALL_EDGES, ALL_EDGES_DIC = {}, {}

data = pd.read_csv("haikou-experiments/network/all_edges.csv")
for i in range(data.shape[0]):
    ALL_EDGES[data["id"][i]] = {
        "block": json.loads(data["block"][i]),
        "length": data["length"][i],
        "class": data["class"][i],
        "head_ver": data["head_ver"][i],
        "tail_ver": data["tail_ver"][i],
        "polyline": json.loads(data["polyline"][i])
    }
    combined_id = getID(data["head_ver"][i], data["tail_ver"][i])
    ALL_EDGES_DIC[combined_id] = {
        "id": data["id"][i],
        "block": json.loads(data["block"][i]),
        "length": data["length"][i],
        "class": data["class"][i],
        "head_ver": data["head_ver"][i],
        "tail_ver": data["tail_ver"][i]
    }
print("Load all edges")
Example #12
    def computeOneDay(self, today_data, cur_date):
        self.count = {}
        for i in today_data.index:
            time_id = getID(today_data["real_start_time"][i].hour,
                            today_data["real_start_time"][i].minute)
            if time_id not in TIME_DICS: continue
            period_index = TIME_DICS[time_id]
            if self.combined:
                start_ver, end_ver = today_data["start_ver"][i], today_data["end_ver"][i]
            else:
                # Column names follow the source csv ("orginal_*" spelling included).
                start_ver, end_ver = today_data["orginal_start_ver"][i], today_data["orginal_end_ver"][i]
            _id = getID(start_ver, end_ver)
            if _id not in self.all_ODs:
                self.all_ODs[_id] = [{
                    "start_ver": start_ver,
                    "end_ver": end_ver,
                    "num": [],
                    "matching_num": [],
                    "matching_probability": [],
                    "aver_shared_distance": [],
                    "aver_final_distance": []
                } for _ in range(len(PERIODS_MINUTES))]
            if _id not in self.count:
                self.countNewOD(start_ver, end_ver)
            if today_data["matching_or"][i] == 1:
                self.count[_id]["period_shared_distance"][
                    period_index] += today_data["shared_distance"][i]
                self.count[_id]["period_matching_num"][
                    period_index] = self.count[_id]["period_matching_num"][
                        period_index] + 1
            self.count[_id]["period_final_distance"][
                period_index] += today_data["final_distance"][i]
            self.count[_id]["period_num"][
                period_index] = self.count[_id]["period_num"][period_index] + 1

        sta_day = []
        for period_index in range(len(PERIODS_MINUTES)):
            sta_day.append({})
            for key in self.count:
                period_num = self.count[key]["period_num"][period_index]
                matching_num = self.count[key]["period_matching_num"][period_index]
                sta_day[period_index][key] = {
                    "num": period_num,
                    "matching_num": matching_num
                }
                if period_num == 0:
                    sta_day[period_index][key]["matching_probability"] = 0
                    sta_day[period_index][key]["aver_shared_distance"] = 0
                    sta_day[period_index][key]["aver_final_distance"] = 0
                else:
                    sta_day[period_index][key]["matching_probability"] = matching_num / period_num
                    sta_day[period_index][key]["aver_shared_distance"] = \
                        self.count[key]["period_shared_distance"][period_index] / period_num
                    sta_day[period_index][key]["aver_final_distance"] = \
                        self.count[key]["period_final_distance"][period_index] / period_num

        return sta_day
Example #13
    def computeByDay(self):
        self.loadOriginal()
        exp_res = pd.read_csv(
            "haikou-experiments/results/SIMULATION_RESULTS_ALL_DIDI_CHUXING_HAIKOU.csv"
        )
        exp_res["real_start_time"] = pd.to_datetime(exp_res["real_start_time"])
        self.all_ODs = {}
        bar = progressbar.ProgressBar(widgets=[
            'Days ',
            progressbar.Percentage(),
            ' (',
            progressbar.SimpleProgress(),
            ') ',
            ' (',
            progressbar.AbsoluteETA(),
            ') ',
        ])
        all_days_str = exp_res["real_start_date"].unique()
        all_days = []
        print("共计天数:", len(all_days_str))
        for cur_date in bar(all_days_str):
            # date_week presumably maps a date to its weekday index; skip weekends.
            if self.date_week[cur_date] >= 5: continue
            sta_res = self.computeOneDay(
                exp_res[exp_res["real_start_date"] == cur_date], cur_date)
            all_days.append(sta_res)

        for sta_day in all_days:
            for period_index in range(len(PERIODS_MINUTES)):
                for key in sta_day[period_index].keys():
                    if sta_day[period_index][key]["num"] == 0: continue
                    self.all_ODs[key][period_index]["num"].append(
                        sta_day[period_index][key]["num"])
                    self.all_ODs[key][period_index]["matching_num"].append(
                        sta_day[period_index][key]["matching_num"])
                    self.all_ODs[key][period_index][
                        "matching_probability"].append(
                            sta_day[period_index][key]["matching_probability"])
                    self.all_ODs[key][period_index][
                        "aver_shared_distance"].append(
                            sta_day[period_index][key]["aver_shared_distance"])
                    self.all_ODs[key][period_index][
                        "aver_final_distance"].append(
                            sta_day[period_index][key]["aver_final_distance"])

        with open("haikou-experiments/results/SIMULATION_STATISTIC.csv",
                  "w") as csvfile:
            writer = csv.writer(csvfile)
            row = ["start_ver", "end_ver", "original_num", "original_days"]
            for i in range(len(PERIODS_MINUTES)):
                row += [
                    "num%s" % i,
                    "matching_num%s" % i,
                    "days%s" % i,
                    "matching_probability%s" % i,
                    "aver_shared_distance%s" % i,
                    "aver_final_distance%s" % i
                ]
            writer.writerow(row)
            for key in self.all_ODs:
                combined_id = getID(self.all_ODs[key][0]["start_ver"],
                                    self.all_ODs[key][0]["end_ver"])
                if combined_id not in self.original_days: continue
                detail = [
                    self.all_ODs[key][0]["start_ver"],
                    self.all_ODs[key][0]["end_ver"],
                    self.original_orders[combined_id],
                    self.original_days[combined_id]
                ]
                for j in range(len(PERIODS_MINUTES)):
                    detail += [
                        sum(self.all_ODs[key][j]["num"]),
                        sum(self.all_ODs[key][j]["matching_num"]),
                        len(self.all_ODs[key][j]["num"]),
                        np.mean(self.all_ODs[key][j]["matching_probability"]),
                        np.mean(self.all_ODs[key][j]["aver_shared_distance"]),
                        np.mean(self.all_ODs[key][j]["aver_final_distance"])
                    ]
                writer.writerow(detail)