Code Example #1
File: examples.py Project: rlconsult/python-progress
def eta_types_demonstration():
    widgets = [
        progressbar.Percentage(),
        ' ETA: ',
        progressbar.ETA(),
        ' Adaptive ETA: ',
        progressbar.AdaptiveETA(),
        ' Absolute ETA: ',
        progressbar.AbsoluteETA(),
        ' Transfer Speed: ',
        progressbar.FileTransferSpeed(),
        ' Adaptive Transfer Speed: ',
        progressbar.AdaptiveTransferSpeed(),
        ' ',
        progressbar.Bar(),
    ]
    bar = progressbar.ProgressBar(widgets=widgets, max_value=500)
    bar.start()
    for i in range(500):
        if i < 100:
            time.sleep(0.02)
        elif i > 400:
            time.sleep(0.1)
        else:
            time.sleep(0.01)
        bar.update(i + 1)
    bar.finish()
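
These snippets are shown without their import preamble. A minimal sketch of what they assume: the progressbar2 package (which installs the progressbar module) and the standard library's time module:

import time
import progressbar

Later examples additionally reference pandas (pd), numpy (np), json, csv, and project-specific helpers that are not shown here.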
Code Example #2
def test_all_widgets_small_values(max_value):
    widgets = [
        progressbar.Timer(),
        progressbar.ETA(),
        progressbar.AdaptiveETA(),
        progressbar.AbsoluteETA(),
        progressbar.DataSize(),
        progressbar.FileTransferSpeed(),
        progressbar.AdaptiveTransferSpeed(),
        progressbar.AnimatedMarker(),
        progressbar.Counter(),
        progressbar.Percentage(),
        progressbar.FormatLabel('%(value)d'),
        progressbar.SimpleProgress(),
        progressbar.Bar(),
        progressbar.ReverseBar(),
        progressbar.BouncingBar(),
        progressbar.CurrentTime(),
        progressbar.CurrentTime(microseconds=False),
        progressbar.CurrentTime(microseconds=True),
    ]
    p = progressbar.ProgressBar(widgets=widgets, max_value=max_value)
    for i in range(10):
        time.sleep(1)
        p.update(i + 1)
    p.finish()
Code Example #3
def test_all_widgets_max_width(max_width, term_width):
    widgets = [
        progressbar.Timer(max_width=max_width),
        progressbar.ETA(max_width=max_width),
        progressbar.AdaptiveETA(max_width=max_width),
        progressbar.AbsoluteETA(max_width=max_width),
        progressbar.DataSize(max_width=max_width),
        progressbar.FileTransferSpeed(max_width=max_width),
        progressbar.AdaptiveTransferSpeed(max_width=max_width),
        progressbar.AnimatedMarker(max_width=max_width),
        progressbar.Counter(max_width=max_width),
        progressbar.Percentage(max_width=max_width),
        progressbar.FormatLabel('%(value)d', max_width=max_width),
        progressbar.SimpleProgress(max_width=max_width),
        progressbar.Bar(max_width=max_width),
        progressbar.ReverseBar(max_width=max_width),
        progressbar.BouncingBar(max_width=max_width),
        progressbar.FormatCustomText('Custom %(text)s',
                                     dict(text='text'),
                                     max_width=max_width),
        progressbar.DynamicMessage('custom', max_width=max_width),
        progressbar.CurrentTime(max_width=max_width),
    ]
    p = progressbar.ProgressBar(widgets=widgets, term_width=term_width)
    p.update(0)
    p.update()
    for widget in p._format_widgets():
        if max_width and max_width < term_width:
            assert widget == ''
        else:
            assert widget != ''
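
The assertions above encode the max_width contract: a widget whose output needs more room than its max_width allows appears to render as an empty string. A minimal sketch of the same idea outside the test harness (the widget and widths are illustrative assumptions):

p = progressbar.ProgressBar(
    widgets=[progressbar.Timer(max_width=1)], term_width=80)
p.update(0)
# Timer needs more than one character, so _format_widgets() yields '' for it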
Code Example #4
def test_all_widgets_large_values(max_value):
    widgets = [
        progressbar.Timer(),
        progressbar.ETA(),
        progressbar.AdaptiveETA(),
        progressbar.AbsoluteETA(),
        progressbar.DataSize(),
        progressbar.FileTransferSpeed(),
        progressbar.AdaptiveTransferSpeed(),
        progressbar.AnimatedMarker(),
        progressbar.Counter(),
        progressbar.Percentage(),
        progressbar.FormatLabel('%(value)d/%(max_value)d'),
        progressbar.SimpleProgress(),
        progressbar.Bar(fill=lambda progress, data, width: '#'),
        progressbar.ReverseBar(),
        progressbar.BouncingBar(),
        progressbar.FormatCustomText('Custom %(text)s', dict(text='text')),
    ]
    p = progressbar.ProgressBar(widgets=widgets, max_value=max_value)
    p.update()
    time.sleep(1)
    p.update()

    for i in range(0, 10**6, 10**4):
        time.sleep(1)
        p.update(i)
Code Example #5
def eta():
    widgets = [
        'Test: ', progressbar.Percentage(),
        ' | ETA: ', progressbar.ETA(),
        ' | AbsoluteETA: ', progressbar.AbsoluteETA(),
        ' | AdaptiveETA: ', progressbar.AdaptiveETA(),
    ]
    bar = progressbar.ProgressBar(widgets=widgets, max_value=50).start()
    for i in range(50):
        time.sleep(0.1)
        bar.update(i + 1)
    bar.finish()
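
For reference when reading these comparisons: ETA extrapolates the remaining time from the average rate since the start, AdaptiveETA weights recent progress more heavily, and AbsoluteETA prints an estimated wall-clock finish time instead of a countdown. AbsoluteETA also accepts format strings, as Code Examples #14 and #19 below show; a minimal sketch:

widgets = [
    progressbar.AbsoluteETA(format='Finishes: %(eta)s',
                            format_finished='Finished at %(eta)s'),
]
for i in progressbar.ProgressBar(widgets=widgets)(range(100)):
    time.sleep(0.01)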
Code Example #6
    def loadODs(self):
        lambda_df = pd.read_csv(
            "haikou-experiments/network/combined_0.csv")  # demand rates
        ODs_df = pd.read_csv(
            "haikou-experiments/matching_relationship/ODs.csv")  # OD
        self.all_ODs = {}
        bar = progressbar.ProgressBar(widgets=[
            "ODs Loading:",
            progressbar.Percentage(),
            ' (',
            progressbar.SimpleProgress(),
            ') ',
            ' (',
            progressbar.AbsoluteETA(),
            ') ',
        ])
        for j in bar(range(self.max_OD_ID)):
            if j >= lambda_df.shape[0]: break
            if lambda_df["days"][j] <= self.min_samples: break
            combined_id = getID(lambda_df["start_ver"][j],
                                lambda_df["end_ver"][j])
            i = self.OD_dic[combined_id]["line_id"]
            self.all_ODs[ODs_df["id"][i]] = {
                "OD_id": ODs_df["id"][i],
                "start_ver": ODs_df["start_ver"][i],
                "end_ver": ODs_df["end_ver"][i],
                "num": lambda_df["num"][j],
                "taker_keys": json.loads(ODs_df["taker_keys"][i]),
                "seeker_keys": json.loads(ODs_df["seeker_keys"][i]),
                "lam_w": lambda_df["num"][j] * self.tendency /
                         (PERIODS_MINUTES[self.HOUR_INDEX] * 40),
            }

        print("#############Experiments Setting##############")
        print(
            "Experiments Period: %02s:%02s  - %02s:%02s" %
            (PERIODS[self.HOUR_INDEX][0], PERIODS[self.HOUR_INDEX][1],
             PERIODS[self.HOUR_INDEX + 1][0], PERIODS[self.HOUR_INDEX + 1][1]))
        print("Search Distance: %s " % (MAX_SEARCH_LAYERS * 500))
        print("MAX OD ID: %s" % self.max_OD_ID)
        print("Feasible OD: %s" % len(self.all_ODs))
Code Example #7
def test_widgets_large_values(max_value):
    widgets = [
        'Test: ',
        progressbar.Percentage(),
        ' ',
        progressbar.Bar(marker=progressbar.RotatingMarker()),
        ' ',
        progressbar.ETA(),
        ' ',
        progressbar.AbsoluteETA(),
        ' ',
        progressbar.FileTransferSpeed(),
    ]
    p = progressbar.ProgressBar(widgets=widgets, max_value=max_value).start()
    for i in range(0, 10**6, 10**4):
        time.sleep(1)
        p.update(i + 1)
    p.finish()
Code Example #8
def saveResponseContent(response, destination, CHUNK_SIZE):
    with open(destination, "wb") as f:
        bar = progressbar.ProgressBar(max_value=CHUNK_SIZE,
                                      widgets=[
                                          "Datasets Downloading:",
                                          progressbar.Percentage(),
                                          ' (',
                                          progressbar.SimpleProgress(),
                                          ') ',
                                          ' (',
                                          progressbar.AbsoluteETA(),
                                          ') ',
                                      ])
        i = 0
        for chunk in bar(response.iter_content(CHUNK_SIZE)):
            if chunk:
                f.write(chunk)
            # note: wrapping the iterator with bar(...) already advances the
            # bar each iteration, so this manual update is redundant; also,
            # max_value=CHUNK_SIZE above counts bytes per chunk, not chunks
            i += 1
            bar.update(i)
Code Example #9
def test_widgets_small_values():
    widgets = [
        'Test: ',
        progressbar.Percentage(),
        ' ',
        progressbar.Bar(marker=progressbar.RotatingMarker()),
        ' ',
        progressbar.ETA(),
        ' ',
        progressbar.AbsoluteETA(),
        ' ',
        progressbar.FileTransferSpeed(),
    ]
    p = progressbar.ProgressBar(widgets=widgets, max_value=10).start()
    p.update(0)
    for i in range(10):
        time.sleep(0.001)
        p.update(i + 1)
    p.finish()
Code Example #10
    def generateByHistory(self):
        '''Run simulation experiments based on data provided by Didi Chuxing'''
        self.csv_path = "haikou-experiments/results/SIMULATION_RESULTS_ALL_DIDI_CHUXING_HAIKOU.csv"
        with open(self.csv_path, "w") as csvfile:
            writer = csv.writer(csvfile)
            writer.writerow([
                "passenger_id", "real_start_date", "real_start_time",
                "start_time", "end_time", "OD_id", "start_ver", "end_ver",
                "orginal_start_ver", "orginal_end_ver", "original_distance",
                "final_distance", "matching_or", "shared_distance", "detour",
                "gap", "matching_id", "matching_type", "matching_ver", ""
            ])
        history_df = pd.read_csv(
            "haikou-experiments/datasets/DATASETS_DIDI_CHUXING_HAIKOU.csv")
        # "depature_time" (sic) matches the column name in the source CSV
        history_df["depature_time"] = pd.to_datetime(history_df["depature_time"])
        cur_line = 1
        print("The simulation result is stored in "
              "results/SIMULATION_RESULTS_ALL_DIDI_CHUXING_HAIKOU.csv")
        bar = progressbar.ProgressBar(max_value=history_df.shape[0],
                                      widgets=[
                                          "Simulation:",
                                          progressbar.Percentage(),
                                          ' (',
                                          progressbar.SimpleProgress(),
                                          ') ',
                                          ' (',
                                          progressbar.AbsoluteETA(),
                                          ') ',
                                      ])
        while True:
            while (history_df["depature_time"][cur_line].minute ==
                   history_df["depature_time"][cur_line - 1].minute):
                self.env.process(self.generateLine(cur_line, history_df))
                cur_line += 1
                if cur_line >= history_df.shape[0]:
                    break
            if cur_line >= history_df.shape[0]:
                break
            yield self.env.timeout(1)
            self.env.process(self.generateLine(cur_line, history_df))
            cur_line += 1
            bar.update(cur_line)
            if cur_line >= history_df.shape[0]:
                break
Code Example #11
def example29():
    """
    Display a progress bar using progressbar2.

    >>> example29()
    True
    """
    widgets = [
        "Test: ",
        progressbar.Percentage(),
        " | ",
        progressbar.ETA(),
        " | ",
        progressbar.AbsoluteETA(),
    ]
    pbar = progressbar.ProgressBar(widgets=widgets, maxval=500).start()
    for i in range(500):
        sleep(0.01)
        pbar.update(i + 1)
    pbar.finish()
    return True
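
Note that this example passes maxval rather than max_value (Code Examples #17 and #21 do the same). maxval is the original progressbar API; progressbar2 appears to still accept it as a deprecated compatibility alias, but the current spelling would be:

pbar = progressbar.ProgressBar(widgets=widgets, max_value=500).start()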
Code Example #12
    def loadODDic(self):
        df = pd.read_csv("haikou-experiments/matching_relationship/ODs.csv")
        self.OD_dic = {}
        bar = progressbar.ProgressBar(widgets=[
            "OD Dic Loading:",
            progressbar.Percentage(),
            ' (',
            progressbar.SimpleProgress(),
            ') ',
            ' (',
            progressbar.AbsoluteETA(),
            ') ',
        ])
        # wrap the range with bar(...) so the bar actually advances
        # (the original created the bar but never used it)
        for i in bar(range(df.shape[0])):
            if i > self.max_OD_ID:
                break
            combined_id = getID(df["start_ver"][i], df["end_ver"][i])
            self.OD_dic[combined_id] = {
                "line_id": i,
                "start_ver": df["start_ver"][i],
                "end_ver": df["end_ver"][i]
            }
Code Example #13
def test_all_widgets_large_values():
    widgets = [
        progressbar.Timer(),
        progressbar.ETA(),
        progressbar.AdaptiveETA(),
        progressbar.AbsoluteETA(),
        progressbar.FileTransferSpeed(),
        progressbar.AdaptiveTransferSpeed(),
        progressbar.AnimatedMarker(),
        progressbar.Counter(),
        progressbar.Percentage(),
        progressbar.FormatLabel('%(value)d/%(max_value)d'),
        progressbar.SimpleProgress(),
        progressbar.Bar(fill=lambda progress, data, width: '#'),
        progressbar.ReverseBar(),
        progressbar.BouncingBar(),
    ]
    p = progressbar.ProgressBar(widgets=widgets, max_value=10 ** 6)
    for i in range(0, 10 ** 6, 10 ** 4):
        time.sleep(0.001)
        p.update(i + 1)
    p.finish()
Code Example #14
# directory for the resulting dataset
dir_dataset = 'train'

# text file with the class data
file_with_classnames = r'config\voc_annotation.txt'
# image size
image_size = {'height': 1024, 'width': 512}
# matrix size
matrix_size = (4, 4)
# size of a single matrix cell
step = {
    'height': image_size['height'] // matrix_size[0],
    'width': image_size['width'] // matrix_size[1]
}
# directory for the resulting images
dir_pix_img = r'output_data\img'
# directory for the resulting xml annotations
dir_pix_xml = r'output_data\xml'
# directory with the original class images
dir_or = r'input_data\png_boxes_original'
# directory with the converted class images
dir_jpg = r'output_data\png_boxes'
# parameters for configuring the progress bar
widgets = [
    progressbar.Percentage(),
    progressbar.Bar(left='|', marker='█', right='|'),  # progress
    progressbar.AnimatedMarker(),
    progressbar.Timer(format=' Current time: %(elapsed)s '),
    progressbar.AbsoluteETA(format='| Est. finish: %(eta)s')
]
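
The snippet above only configures the widget list. A hypothetical usage sketch (the loop and the item count of 100 are assumptions, not part of the original script):

bar = progressbar.ProgressBar(widgets=widgets)
for i in bar(range(100)):
    pass  # process one image per iteration here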
Code Example #15
    def predictResults(self, final):
        index = self.iterate_time % 2
        starttime = datetime.datetime.now()
        matching_probability = {}
        G_n, all_P_w = {}, {}
        bar = progressbar.ProgressBar(widgets=[
            'Probability: ',
            progressbar.Percentage(),
            ' (',
            progressbar.SimpleProgress(),
            ') ',
            ' (',
            progressbar.AbsoluteETA(),
            ') ',
        ])
        for i in bar(self.all_ODs.keys()):
            start_seeker = self.all_ODs[i]["seeker_keys"][0]
            start_taker = self.ALL_SEEKERS[start_seeker]["sub_taker_key"]
            P_A_w = self.P_seeker[start_seeker][index]
            P_B_w = (1 - self.P_seeker[start_seeker][index]
                     ) * self.P_taker[start_taker][index]

            G_n[start_seeker] = 1
            last_seeker_key = start_seeker
            last_segment_key = self.ALL_SEEKERS[last_seeker_key][
                "sub_taker_key"]
            for j in self.all_ODs[i]["seeker_keys"][1:]:
                G_n[j] = G_n[last_seeker_key] * (
                    1 - self.P_seeker[last_seeker_key][index]) * (
                        1 - self.P_taker[last_segment_key][index])
                last_seeker_key, last_segment_key = j, self.ALL_SEEKERS[j][
                    "sub_taker_key"]
            # note: j is the last seeker key from the loop above; a
            # single-element seeker_keys list leaves j unset (or stale)
            P_w = 1 - G_n[j]
            P_C_w = P_w - P_A_w - P_B_w
            matching_probability[i] = [P_w, P_A_w, P_B_w, P_C_w]
            all_P_w[i] = P_w

        # predict the pooled trip distance and the shared distance
        all_l_w, all_e_w = {}, {}
        bar = progressbar.ProgressBar(widgets=[
            'Distance: ',
            progressbar.Percentage(),
            ' (',
            progressbar.SimpleProgress(),
            ') ',
            ' (',
            progressbar.AbsoluteETA(),
            ') ',
        ])
        for i in bar(self.all_ODs.keys()):
            l_w_0 = Schedule.distanceByHistory(self.all_ODs[i]["start_ver"],
                                               self.all_ODs[i]["end_ver"])
            all_l_n_0, all_e_n_0 = [], []
            for seeker in self.all_ODs[i]["seeker_keys"]:
                if self.ALL_SEEKERS[seeker]["matching_takers"] != []:
                    l_n_0, e_n_0 = 0, 0
                    overall_denominator = 0
                    for j in range(
                            len(self.ALL_SEEKERS[seeker]["matching_takers"])):
                        matching_takers = self.ALL_SEEKERS[seeker][
                            "matching_takers"][j]
                        detour = self.ALL_SEEKERS[seeker]["all_detour"][j]
                        shared_distance = self.ALL_SEEKERS[seeker][
                            "all_shared_distance"][j]
                        l_n_s, e_n_s = l_w_0 + detour, shared_distance

                        multiplier = self.eta_taker_seeker[matching_takers][
                            seeker][index] * self.rho_taker[matching_takers][
                                index]
                        overall_denominator = overall_denominator + multiplier
                        l_n_0, e_n_0 = l_n_0 + multiplier * l_n_s, e_n_0 + multiplier * e_n_s

                    all_l_n_0.append(l_n_0 / overall_denominator)
                    all_e_n_0.append(e_n_0 / overall_denominator)
                else:
                    all_l_n_0.append(l_w_0)
                    all_e_n_0.append(0)

            # distance calculation within road segments
            all_l_s_1, all_e_s_1 = [], []
            for taker in self.all_ODs[i]["taker_keys"]:
                if self.ALL_TAKERS[taker]["matching_seekers"] != []:
                    l_s_1, e_s_1 = 0, 0
                    for j in range(
                            len(self.ALL_TAKERS[taker]["matching_seekers"])):
                        matching_seekers = self.ALL_TAKERS[taker][
                            "matching_seekers"][j]
                        detour = self.ALL_TAKERS[taker]["all_detour"][j]
                        shared_distance = self.ALL_TAKERS[taker][
                            "all_shared_distance"][j]
                        l_n_s, e_n_s = l_w_0 + detour, shared_distance
                        l_s_1 = l_s_1 + self.eta_taker_seeker[taker][
                            matching_seekers][index] * l_n_s
                        e_s_1 = e_s_1 + self.eta_taker_seeker[taker][
                            matching_seekers][index] * e_n_s
                    all_l_s_1.append(l_s_1 / self.eta_taker[taker][index])
                    all_e_s_1.append(e_s_1 / self.eta_taker[taker][index])
                else:
                    all_l_s_1.append(l_w_0)
                    all_e_s_1.append(0)

            # combined expected-value calculation
            l_w = l_w_0 * (1 - all_P_w[i])
            e_w = 0
            for j in range(len(self.all_ODs[i]["seeker_keys"]) - 1):
                seeker_key = self.all_ODs[i]["seeker_keys"][j]
                segment_key = self.ALL_SEEKERS[seeker_key]["sub_taker_key"]
                l_w = l_w + all_l_n_0[j] * G_n[seeker_key] * self.P_seeker[
                    seeker_key][index] + all_l_s_1[j] * G_n[seeker_key] * (
                        1 - self.P_seeker[seeker_key][index]
                    ) * self.P_taker[segment_key][index]
                e_w = e_w + all_e_n_0[j] * G_n[seeker_key] * self.P_seeker[
                    seeker_key][index] + all_e_s_1[j] * G_n[seeker_key] * (
                        1 - self.P_seeker[seeker_key][index]
                    ) * self.P_taker[segment_key][index]
            all_l_w[i] = l_w
            all_e_w[i] = e_w
        endtime = datetime.datetime.now()
        print("Execution Time: %s second" % (endtime - starttime))

        fo = open("haikou-experiments/results/experiments_log.txt", "a+")
        fo.write("Execution Time: %s second\n\n" % (endtime - starttime))
        fo.close()

        with open(
                "haikou-experiments/results/PREDICTION_OD_%s_PERIOD_%s_SAMPLE_%s_TENDENCY_%.2f.csv"
                %
            (self.max_OD_ID, self.HOUR_INDEX, self.min_samples, self.tendency),
                "w") as csvfile:
            writer = csv.writer(csvfile)
            writer.writerow(
                ["OD_id", "start_ver", "end_ver", "num", "P_w", "l_w", "e_w"])
            for i in self.all_ODs.keys():
                writer.writerow([
                    self.all_ODs[i]["OD_id"], self.all_ODs[i]["start_ver"],
                    self.all_ODs[i]["end_ver"], self.all_ODs[i]["num"],
                    all_P_w[i], all_l_w[i], all_e_w[i]
                ])
Code Example #16
    def loadSeekerTaker(self):
        self.ALL_SEEKERS = {}
        self.ALL_TAKERS = {}
        seekers_df = pd.read_csv(
            "haikou-experiments/matching_relationship/seekers.csv")
        takers_df = pd.read_csv(
            "haikou-experiments/matching_relationship/takers.csv")

        bar = progressbar.ProgressBar(widgets=[
            "Seeker Loading:",
            progressbar.Percentage(),
            ' (',
            progressbar.SimpleProgress(),
            ') ',
            ' (',
            progressbar.AbsoluteETA(),
            ') ',
        ])
        self.all_seeker_keys = []
        for i in bar(range(seekers_df.shape[0])):
            if seekers_df["OD_id"][i] not in self.all_ODs: continue
            self.ALL_SEEKERS[i] = {
                "seeker_id": seekers_df["seeker_id"][i],
                "vertex_id": seekers_df["vertex_id"][i],
                "OD_id": seekers_df["OD_id"][i],
                "type": seekers_df["type"][i],
                "sub_taker_key": seekers_df["sub_taker_key"][i],
            }
            self.all_seeker_keys.append(i)
        self.all_seeker_num = len(self.all_seeker_keys)

        bar = progressbar.ProgressBar(widgets=[
            "Taker Loading:",
            progressbar.Percentage(),
            ' (',
            progressbar.SimpleProgress(),
            ') ',
            ' (',
            progressbar.AbsoluteETA(),
            ') ',
        ])
        self.all_taker_keys = []
        for i in bar(range(takers_df.shape[0])):
            if takers_df["OD_id"][i] not in self.all_ODs: continue
            original_matching_seekers = json.loads(
                takers_df["matching_seekers"][i])
            original_shared_distance = json.loads(
                takers_df["shared_distance"][i])
            original_detour = json.loads(takers_df["detour"][i])
            matching_seekers, all_shared_distance, all_detour = self.getFeasibleSeekers(
                original_matching_seekers, original_shared_distance,
                original_detour)
            self.ALL_TAKERS[i] = {
                "taker_id": takers_df["taker_id"][i],
                "edge_id": json.loads(takers_df["edge_id"][i]),
                "OD_id": takers_df["OD_id"][i],
                "type": takers_df["type"][i],
                "length": takers_df["length"][i],
                "matching_seekers": matching_seekers,
                "all_shared_distance": all_shared_distance,
                "all_detour": all_detour,
            }
            self.all_taker_keys.append(i)
        self.all_taker_num = len(self.all_taker_keys)
        for i in self.ALL_SEEKERS.keys():
            matching_takers, all_shared_distance, all_detour = self.getFeasibleTakers(
                json.loads(seekers_df["matching_takers"][i]),
                json.loads(seekers_df["shared_distance"][i]),
                json.loads(seekers_df["detour"][i]))
            self.ALL_SEEKERS[i]["matching_takers"] = matching_takers
            self.ALL_SEEKERS[i]["all_shared_distance"] = all_shared_distance
            self.ALL_SEEKERS[i]["all_detour"] = all_detour

        print("Number of Takers: %s " % len(self.ALL_TAKERS))
        print("Number of Seekers: %s" % len(self.ALL_SEEKERS))
Code Example #17
        for filename in fnmatch.filter(filenames, ext):
            files.append(os.path.join(root, filename))

    print("found " + ext + " %d files in %s" % (len(files), audio_folder))

    collectedFeat_MFCC = []
    collectedFeat_Mel = []
    collectedFeat_STFT = []

    widgets = [
        prog.Counter(), '/',
        str(len(files)), ' | ',
        prog.Percentage(), ' | ',
        prog.Bar(marker='='), ' | ',
        prog.ETA(), ' | ',
        prog.AbsoluteETA()
    ]
    bar = prog.ProgressBar(widgets=widgets, maxval=len(files)).start()

    for i, f in enumerate(files):

        y, sr = librosa.load(f)
        if (np.sum(y) != 0):
            y *= 1.0 / np.max(np.abs(y))

        # Feature Extraction

        feat = get_features(y, sr)

        tail, filename = os.path.split(f)
        feat['fileFolder'] = tail + '/'
Code Example #18
            random.shuffle(all_actions)
        all_actions = all_actions[:10000]

        count = 0
        try:
            pre_states = all_actions[:,:N]
            suc_states = all_actions[:,N:]
            pre_images = ae.decode_binary(pre_states,batch_size=1000)
            suc_images = ae.decode_binary(suc_states,batch_size=1000)

            import progressbar as pb
            bar = pb.ProgressBar(
                max_value=len(all_actions),
                widgets=[
                    # note: the original positional format strings
                    # ("Elap: %(elapsed) ") were incomplete % formats; Timer
                    # interpolates %(elapsed)s and the ETA widgets %(eta)s
                    pb.Timer(format="Elap: %(elapsed)s "),
                    pb.AbsoluteETA(format="Est: %(eta)s "),
                    pb.Bar(),
                ])
            for pre_state, suc_state, pre_image, suc_image in bar(
                    zip(pre_states, suc_states, pre_images, suc_images)):
                generated_transitions = aae.decode([
                    np.repeat([pre_state],128,axis=0),
                    all_labels,
                ],batch_size=1000)
                generated_suc_states = generated_transitions[:,N:]
                generated_suc_images = ae.decode_binary(generated_suc_states,batch_size=1000)

                from latplan.util import bce
                errors = bce(generated_suc_images, np.repeat([suc_image],128,axis=0), axis=(1,2))
                min_error = np.amin(errors)
                if min_error < 0.01:
Code Example #19
bar_format = [
        "Maven build: ",
        get_colour(Fore.YELLOW),
        progressbar.Percentage(),
        get_colour(Fore.RESET),
        " ",
        progressbar.Counter(format='(%(value)d of %(max_value)d)'),
        get_colour(Fore.LIGHTGREEN_EX),
        progressbar.Bar(marker="\u2588"),
        get_colour(Fore.RESET),
        " ",
        progressbar.Timer(),
        " ",
        get_colour(Fore.MAGENTA),
        progressbar.AbsoluteETA(format='Finishes: %(eta)s', format_finished='Finished at %(eta)s')
        if absolute_time else progressbar.AdaptiveETA(),
        get_colour(Fore.RESET)
    ]


def ansi_length(o):
    ansi_occ = re.findall(r'\x1B\[[0-?]*[ -/]*[@-~]', o)
    ansi_len = 0
    for occ in ansi_occ:
        ansi_len += len(occ)
    return len(o) - ansi_len


def match():
    count = 0
Code Example #20
import inspect
import io
import pprint
import traceback

import numpy

import progressbar

from typhon.utils.cache import mutable_cache

my_pb_widget = [
    progressbar.Bar("=", "[", "]"), " ",
    progressbar.Percentage(), " (",
    progressbar.AdaptiveETA(), " -> ",
    progressbar.AbsoluteETA(), ') '
]


class switch(object):
    """Simulate a switch-case statement.

    http://code.activestate.com/recipes/410692/
    """
    def __init__(self, value):
        self.value = value
        self.fall = False

    def __iter__(self):
        """Return the match method once, then stop"""
        yield self.match
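
A minimal usage sketch for the my_pb_widget list defined above (the loop itself is an assumption; the module only defines the widgets):

bar = progressbar.ProgressBar(widgets=my_pb_widget, max_value=50)
bar.start()
for i in range(50):
    bar.update(i + 1)
bar.finish()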
Code Example #21
if not remainng: exit(0)

# remainng = remainng[slice(0,10000,200)]

dir_list = set(map(get_dir_name, remainng))
total_size = len(remainng)
wid = [
    pb.Percentage(), ' | ',
    pb.SimpleProgress(), ' ',
    pb.Bar(marker="#", left="[", right="]"), ' ',
    pb.AnimatedMarker(),
    pb.ETA(), ' | ',
    pb.AdaptiveTransferSpeed(unit='Video'), ' | ',
    pb.AdaptiveETA(), ' | ',
    pb.AbsoluteETA(), ' | ',
    pb.Timer()
]
bar = pb.ProgressBar(widgets=wid, maxval=total_size).start()


def process(paths):
    """
    :param paths: list of pairs of source, destination video path
    :type paths: list of lists(of size 2, str path)
    """
    global done_list
    in_path, out_path = paths
    try:
        out = subprocess.check_call([
            'ffmpeg', '-y', '-hwaccel', 'cuvid', '-c:v', 'h264_cuvid',
Code Example #22
File: filters.py Project: wangjianzju/typhon
    def update_firstline_db(self,
                            satname=None,
                            start_date=None,
                            end_date=None,
                            overwrite=False):
        """Create / update the firstline database

        Create or update the database describing for each granule what the
        first scanline is that doesn't occur in the preceding granule.

        If a granule is entirely contained within the previous one,
        firstline is set to L+1 where L is the number of lines.
        """
        prev_head = prev_line = None
        satname = satname or self.ds.satname
        start_date = start_date or self.ds.start_date
        end_date = end_date or self.ds.end_date
        if end_date > datetime.datetime.now():
            end_date = datetime.datetime.now()
        logger.info("Updating firstline-db {:s} for "
                    "{:%Y-%m-%d}--{:%Y-%m-%d}".format(satname, start_date,
                                                      end_date))
        count_updated = count_all = 0
        with dbm.open(str(self.granules_firstline_file), "c") as gfd:
            try:
                bar = progressbar.ProgressBar(
                    max_value=1,
                    widgets=[
                        progressbar.Bar("=", "[", "]"), " ",
                        progressbar.Percentage(), ' (',
                        progressbar.AdaptiveETA(), " -> ",
                        progressbar.AbsoluteETA(), ') '
                    ])
            except AttributeError:
                dobar = False
                bar = None
                logger.info("If you had the "
                            "progressbar2 module, you would have gotten a "
                            "nice progressbar.")
            else:
                dobar = sys.stdout.isatty()
                if dobar:
                    bar.start()
                    bar.update(0)
            for (g_start, gran) in self.ds.find_granules_sorted(
                    start_date, end_date, return_time=True, satname=satname):
                try:
                    (cur_line, extra) = self.ds.read(gran,
                                                     apply_scale_factors=False,
                                                     calibrate=False)
                    cur_head = extra["header"]
                    cur_time = self.ds._get_time(cur_line)
                except (dataset.InvalidFileError,
                        dataset.InvalidDataError) as exc:
                    logger.error("Could not read {!s}: {!s}".format(gran, exc))
                    continue
                lab = self.ds.get_dataname(cur_head, robust=True)
                if lab in gfd and not overwrite:
                    logger.debug("Already present: {:s}".format(lab))
                elif prev_line is not None:
                    # what if prev_line is None?  We don't want to define any
                    # value for the very first granule we process, as we might
                    # be starting to process in the middle...
                    if cur_time.max() > prev_time.max():
                        # Bugfix 2017-01-16: do not get confused between
                        # the index and the hrs_scnlin field.  So far, I'm using
                        # the index to set firstline but the hrs_scnlin
                        # field to apply it.
                        #first = (cur_time > prev_time[-1]).nonzero()[0][0]
                        # Bugfix 2017-08-21: instead of taking the last
                        # time from the previous granule, take the
                        # maximum; this allows for time sequence errors.
                        # See #139
                        first = cur_line["hrs_scnlin"][
                            cur_time > prev_time.max()].min()
                        logger.debug("{:s}: {:d}".format(lab, first))
                    else:
                        first = cur_line["hrs_scnlin"].max() + 1
                        logger.info("{:s}: Fully contained in {:s}!".format(
                            lab, self.ds.get_dataname(prev_head, robust=True)))
                    gfd[lab] = str(first)
                    count_updated += 1
                prev_line = cur_line.copy()
                prev_head = cur_head.copy()
                prev_time = cur_time.copy()
                if dobar:
                    bar.update(
                        (g_start - start_date) / (end_date - start_date))
                count_all += 1
            if dobar:
                bar.update(1)
                bar.finish()
            logger.info("Updated {:d}/{:d} granules".format(
                count_updated, count_all))
Code Example #23
    def computeByDay(self):
        self.loadOriginal()
        exp_res = pd.read_csv(
            "haikou-experiments/results/SIMULATION_RESULTS_ALL_DIDI_CHUXING_HAIKOU.csv"
        )
        exp_res["real_start_time"] = pd.to_datetime(exp_res["real_start_time"])
        self.all_ODs = {}
        bar = progressbar.ProgressBar(widgets=[
            'Days ',
            progressbar.Percentage(),
            ' (',
            progressbar.SimpleProgress(),
            ') ',
            ' (',
            progressbar.AbsoluteETA(),
            ') ',
        ])
        all_days_str = exp_res["real_start_date"].unique()
        all_days = []
        print("共计天数:", len(all_days_str))
        for cur_date in bar(all_days_str):
            if self.date_week[cur_date] >= 5: continue
            sta_res = self.computeOneDay(
                exp_res[exp_res["real_start_date"] == cur_date], cur_date)
            all_days.append(sta_res)

        for sta_day in all_days:
            for period_index in range(len(PERIODS_MINUTES)):
                for key in sta_day[period_index].keys():
                    if sta_day[period_index][key]["num"] == 0: continue
                    self.all_ODs[key][period_index]["num"].append(
                        sta_day[period_index][key]["num"])
                    self.all_ODs[key][period_index]["matching_num"].append(
                        sta_day[period_index][key]["matching_num"])
                    self.all_ODs[key][period_index][
                        "matching_probability"].append(
                            sta_day[period_index][key]["matching_probability"])
                    self.all_ODs[key][period_index][
                        "aver_shared_distance"].append(
                            sta_day[period_index][key]["aver_shared_distance"])
                    self.all_ODs[key][period_index][
                        "aver_final_distance"].append(
                            sta_day[period_index][key]["aver_final_distance"])

        with open("haikou-experiments/results/SIMULATION_STATISTIC.csv",
                  "w") as csvfile:
            writer = csv.writer(csvfile)
            row = ["start_ver", "end_ver", "original_num", "original_days"]
            for i in range(len(PERIODS_MINUTES)):
                row += [
                    "num%s" % i,
                    "matching_num%s" % i,
                    "days%s" % i,
                    "matching_probability%s" % i,
                    "aver_shared_distance%s" % i,
                    "aver_final_distance%s" % i
                ]
            writer.writerow(row)
            for i, key in enumerate(self.all_ODs.keys()):
                combined_id = getID(self.all_ODs[key][0]["start_ver"],
                                    self.all_ODs[key][0]["end_ver"])
                if combined_id not in self.origianl_days: continue
                detail = [
                    self.all_ODs[key][0]["start_ver"],
                    self.all_ODs[key][0]["end_ver"],
                    self.origianl_orders[combined_id],
                    self.origianl_days[combined_id]
                ]
                for j in range(len(PERIODS_MINUTES)):
                    detail += [
                        sum(self.all_ODs[key][j]["num"]),
                        sum(self.all_ODs[key][j]["matching_num"]),
                        len(self.all_ODs[key][j]["num"]),
                        np.mean(self.all_ODs[key][j]["matching_probability"]),
                        np.mean(self.all_ODs[key][j]["aver_shared_distance"]),
                        np.mean(self.all_ODs[key][j]["aver_final_distance"]),
                    ]
                writer.writerow(detail)