from git import RemoteProgress
from progress.bar import Bar


class ProgressBar(RemoteProgress):  # pragma: no cover
    '''Nice looking progress bar for long running commands'''

    def setup(self, repo_name):
        self.bar = Bar(message='Pulling from {}'.format(repo_name), suffix='')

    def update(self, op_code, cur_count, max_count=100, message=''):
        # log.info("{}, {}, {}, {}".format(op_code, cur_count, max_count, message))
        max_count = int(max_count or 100)
        if max_count != self.bar.max:
            self.bar.max = max_count
        self.bar.goto(int(cur_count))
import git
from progress.bar import Bar


class ProgressBar(git.RemoteProgress):
    def __init__(self):
        super().__init__()
        self.bar = Bar()

    def setup(self, repo_name):
        self.bar = Bar(message='git pull {}'.format(repo_name), suffix='')

    def update(self, op_code, cur_count, max_count=100, message=''):
        max_count = int(max_count or 100)
        if max_count != self.bar.max:
            self.bar.max = max_count
        self.bar.goto(int(cur_count))

    def finish(self):
        self.bar.finish()
def alignChapter(lang, bookid, chapter):
    """Align a chapter of a book.

    Args:
        lang (str): language
        bookid (str): identifier of a book
        chapter (int): the chapter to be aligned

    Returns:
        list of spacy tokens: the tokens with the added audio alignment information
    """
    bar = IncrementalBar('Processing %s [%s] (%s)' % (bookid, lang, chapter), max=100)
    bar.start()
    outfile = os.path.join(book_manager.chaptersPath(lang, bookid), book_manager.mappingFile(chapter))
    audio_file, start_time, stop_time = book_manager.chapterAudio(lang, bookid, chapter)
    wavfile = os.path.join(config.TEMP_DIR, 'chapter%s.wav' % chapter)
    gu.removeFile(wavfile)
    encodeForSphinx(audio_file, start_time, stop_time, wavfile)  # encode audio for speech recognition

    # get spacy models for language processing
    sp = utils.getSpacy(lang)
    text = book_manager.bookChapter(lang, bookid, chapter)
    doc = sp(text)

    # prepare sentences without punctuation
    doc_tokens = [tkn for tkn in doc if tkn.is_alpha and (not tkn.is_punct) and tkn.text.strip()]
    token_count = len(doc_tokens)

    audio_segment = AudioSegment.from_wav(wavfile)  # read the audio
    audio_len = len(audio_segment)

    begin_tkn = 0
    begin_audio = 0
    startm = time2msec(start_time)
    stopm = time2msec(stop_time)
    l = stopm - startm
    while begin_tkn < token_count:
        chunk = doc_tokens[begin_tkn:begin_tkn + 50]
        rel_len = 1.25 * len(chunk) / token_count
        end_audio = begin_audio + int(rel_len * audio_len)
        last_idx, begin_audio = alignChunk(lang, audio_segment=audio_segment,
                                           audio_begin=begin_audio, audio_end=end_audio,
                                           chunk=chunk)
        bar.goto(int(100.0 * begin_audio / l))
        if last_idx == -1:  # could not map anything
            break
        else:
            begin_tkn += last_idx + 1

    gu.removeFile(wavfile)
    saveAudioMapping(doc_tokens, startm, stopm, outfile)
from enum import Enum

from git import RemoteProgress
from progress.bar import Bar


class ProgressBar(RemoteProgress):  # pragma: no cover
    '''Nice looking progress bar for long running commands'''

    class Action(Enum):
        PULL = 1
        PUSH = 2

    def setup(self, repo_name, action=Action.PULL):
        if action == ProgressBar.Action.PULL:
            message = 'Pulling from {}'.format(repo_name)
        elif action == ProgressBar.Action.PUSH:
            message = 'Pushing to {}'.format(repo_name)
        self.bar = Bar(message=message, suffix='')

    def update(self, op_code, cur_count, max_count=100, message=''):
        # log.info("{}, {}, {}, {}".format(op_code, cur_count, max_count, message))
        max_count = int(max_count or 100)
        if max_count != self.bar.max:
            self.bar.max = max_count
        self.bar.goto(int(cur_count))
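# Usage sketch (assumption, not part of the original snippet): hook the
# ProgressBar above into a GitPython pull. The repository path '.' and the
# remote name 'origin' are illustrative only.
from git import Repo

repo = Repo('.')                              # open the current directory as a repo
progress = ProgressBar()
progress.setup('origin', ProgressBar.Action.PULL)
repo.remotes.origin.pull(progress=progress)   # GitPython calls update() with transfer counts
progress.bar.finish()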
from progress.bar import IncrementalBar
from termcolor import colored

# Module-level debug flags (assumed defaults; defined elsewhere in the original module).
DEBUG = False
DEBUG_SCORE = False


class MarbleGame:
    mplain_fmt = lambda n: ' %d' % (n)
    mbold_fmt = lambda n: ' %s' % (colored(n, 'green'))
    round_fmt = lambda n: colored('[%3d]' % (n), 'red')

    score_multipleof = 23
    score_backsteps = 7

    def __init__(self, nplayers, last_marble):
        self.last_marble = last_marble
        self.score = [0, ] * nplayers
        self.board = [0]
        self.current_marble = 1
        self.current_pos = 0
        if DEBUG:
            print(self)
        else:
            self.bar = IncrementalBar(max=self.last_marble)

    def __str__(self):
        s_round = MarbleGame.round_fmt(self.current_marble - 1)
        s_marbles = ''.join([
            MarbleGame.mbold_fmt(m) if i == self.current_pos else MarbleGame.mplain_fmt(m)
            for i, m in enumerate(self.board)
        ])
        return '%s%s' % (s_round, s_marbles)

    @property
    def scoring_player(self):
        if self.current_marble > 0:
            return (self.current_marble - 1) % len(self.score)
        else:
            return None

    def finished(self):
        return self.current_marble > self.last_marble

    def next_round(self):
        if self.finished():
            if not DEBUG:
                self.bar.goto(self.current_marble)
            return None

        if self.current_marble % MarbleGame.score_multipleof != 0:
            self.current_pos = self.current_pos + 2
            if self.current_pos > len(self.board):
                self.current_pos -= len(self.board)
            self.board.insert(self.current_pos, self.current_marble)
            self.current_marble += 1
        else:
            if not DEBUG:
                self.bar.goto(self.current_marble)
            # score current marble
            self.score[self.scoring_player] += self.current_marble
            # remove and score another marble
            remove_pos = (self.current_pos - MarbleGame.score_backsteps)
            if remove_pos < 0:
                remove_pos += len(self.board)
            self.score[self.scoring_player] += self.board.pop(remove_pos)
            # update position/marble/round
            self.current_pos = remove_pos if remove_pos < len(self.board) else 0
            self.current_marble += 1
            if DEBUG_SCORE:
                print(self.score)

        if DEBUG:
            print(self)
        return self.current_marble
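# Usage sketch (assumption): drive MarbleGame round by round with DEBUG off.
# The values match the classic 9-player / 25-marble example, which scores 32.
game = MarbleGame(nplayers=9, last_marble=25)
while not game.finished():
    game.next_round()
game.bar.finish()
print('high score:', max(game.score))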
import random
import time

from progress.bar import (Bar, ChargingBar, FillingSquaresBar,
                          FillingCirclesBar, IncrementalBar, PixelBar,
                          ShadyBar)
from progress.spinner import (Spinner, PieSpinner, MoonSpinner,
                              LineSpinner, PixelSpinner)
from progress.counter import Counter, Countdown, Stack, Pie


def sleep():
    t = 0.01
    t += t * random.uniform(-0.1, 0.1)  # Add some variance
    time.sleep(t)


for bar_cls in (Bar, ChargingBar, FillingSquaresBar, FillingCirclesBar):
    suffix = '%(index)d/%(max)d [%(elapsed)d / %(eta)d / %(eta_td)s]'
    bar = bar_cls(bar_cls.__name__, suffix=suffix)
    for i in bar.iter(range(200)):
        sleep()

for bar_cls in (IncrementalBar, PixelBar, ShadyBar):
    suffix = '%(percent)d%% [%(elapsed_td)s / %(eta)d / %(eta_td)s]'
    with bar_cls(bar_cls.__name__, suffix=suffix, max=200) as bar:
        for i in range(200):
            bar.next()
            sleep()

for spin in (Spinner, PieSpinner, MoonSpinner, LineSpinner, PixelSpinner):
    for i in spin(spin.__name__ + ' ').iter(range(100)):
        sleep()

for singleton in (Counter, Countdown, Stack, Pie):
    for i in singleton(singleton.__name__ + ' ').iter(range(100)):
        sleep()

bar = IncrementalBar('Random', suffix='%(index)d')
for i in range(100):
    bar.goto(random.randint(0, 100))
    sleep()
bar.finish()
class CliUI:
    def __init__(self, player: TrackPlayer, api: Mobileclient, play_all_songs=False):
        self.player = player
        self.api = api
        self.library = self.player.library
        self.play_all_songs = play_all_songs

        self.progress_bar = None
        self.current_song_info = None

    def get_user_selected_playlist_tracks(self):
        self.library.load_playlist_contents()
        playlists = self.library.playlist_meta
        for i, playlist in enumerate(playlists):
            print("[{0}]: {1}".format(i, playlist['name']))
        index = int(input("\nSelect a playlist. "))
        if index >= len(playlists):
            print("No playlist at that index.")
            return None
        playlist = playlists[index]
        return self.library.playlist_contents[playlist['id']]

    def run_player(self):
        self.player.initialize()
        if self.play_all_songs:
            track_ids = list(self.library.songs.keys())
        else:
            track_ids = self.get_user_selected_playlist_tracks()
        self.player.set_tracks_to_play(track_ids)
        self.player.shuffle_tracks()
        self.player.toggle_play()

    def init_progress_bar(self, song_str):
        if self.progress_bar is not None:
            self.progress_bar.finish()
        self.progress_bar = IncrementalBar(song_str, max=100, suffix='%(percent)d%%')
        self.current_song_info = song_str
        self.progress_bar.goto(0)

    def clear_progress_bar(self):
        if self.progress_bar is not None:
            self.progress_bar.finish()
        self.progress_bar = None

    def update_progress_bar(self):
        if self.player is not None and self.progress_bar is not None:
            prog = min(int(self.player.get_position() * 100), 100)
            self.progress_bar.goto(prog)

    def update_ui(self):
        if self.player is None:
            self.clear_progress_bar()
            return
        player_current_song_info = self.player.current_song_info.value
        if player_current_song_info != self.current_song_info:
            # Sleep extra long before switching, since VLC usually prints out
            # some error.
            sleep(2.0)
            self.init_progress_bar(player_current_song_info)
        self.update_progress_bar()

    def run_loop(self):
        t = threading.currentThread()
        while getattr(t, "do_run", True):
            sleep(1.0)
            self.update_ui()
        log.info("CliUI loop exited")

    def exec_(self):
        """Runs the UI thread loop"""
        self.run_player()
        try:
            self.run_loop()
        except KeyboardInterrupt:
            print("\nReceived Ctrl-C")
        self.clear_progress_bar()
def train(net: nn.Module, epoch_count: int, start_epoch: int = 0, use_scheduler: bool = False) -> None:
    criterion = algorithm.get_loss()  # Create loss object
    if use_scheduler:
        # Create optimizer
        optimizer = algorithm.get_optimizer(net, scheduler.params_list[start_epoch])
    else:
        optimizer = algorithm.get_optimizer(net)
    metric = algorithm.get_metric()

    total = len(ds.trainset)  # Total number of imgs in dataset
    bar_step = total // 50    # Progressbar step
    best_acc = 0.0

    for epoch_idx in range(start_epoch, epoch_count):
        net.train()

        if use_scheduler and epoch_idx > 0:
            # Update lr and other params if needed
            algorithm.update_optimizer(optimizer, scheduler.params_list[epoch_idx])

        # Set init values to zero
        average_loss = 0.0
        train_accuracy = 0.0
        curr_iter = 0

        # Progressbar
        iter_bar = IncrementalBar("Current progress", max=total, suffix='%(percent)d%%')

        for _, data in enumerate(ds.trainloader, 0):
            # Compute forward
            inputs, labels = data
            inputs = inputs.cuda()
            labels = labels.cuda()

            optimizer.zero_grad()
            outputs = net(inputs)
            _, predicted = torch.max(outputs, 1)

            # Stats (old)
            train_accuracy += metric(outputs, labels).item() * outputs.shape[0]

            # Compute loss and backward
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()

            # Add batch loss to get average after epoch is finished
            average_loss += loss.item() * outputs.shape[0]

            # Progressbar things
            if curr_iter >= bar_step > 0:
                iter_bar.next(bar_step)
                curr_iter -= bar_step
            curr_iter += ds.batch_size

        iter_bar.goto(total)
        iter_bar.finish()

        # Compute avg train loss and accuracy
        average_loss = average_loss / total
        train_accuracy = 100.0 * train_accuracy / total

        # Compute avg test loss and accuracy
        net.eval()
        test_accuracy, test_loss = float("nan"), float("nan")  # validation.eval(net)

        # Add to log
        log.add(epoch_idx, (train_accuracy, test_accuracy, average_loss, test_loss,
                            scheduler.params_list[epoch_idx][0]))
        # Flush log changes
        log.save()

        # Print useful numbers
        print('[%d, %5d] average loss: %.3f, test loss: %.3f' %
              (epoch_idx, total, average_loss, test_loss))
        print('Train accuracy: %.2f %%' % train_accuracy)
        print('Test accuracy: %.2f %%' % test_accuracy)

        # Save model if it scored better than previous
        if test_accuracy > best_acc:
            PATH = 'model_instances/net_tmp_epoch_%d_acc_%.2f%%.pth' % (epoch_idx, test_accuracy)
            torch.save(net.state_dict(), PATH)
            best_acc = test_accuracy

    # End of training
    print('Complete')
class Converter:
    def __init__(self, color_count_method=None):
        if color_count_method:
            self.color_count_method = color_count_method
        else:
            self.color_count_method = self.color_count_all
        self.__img = None
        self.__output_img = None
        self.__progress = 0
        self.__progress_bar = None

    def set_image(self, img: np.ndarray):
        self.__img = img.copy()

    def quadify_image(self, max_colors):
        self.__progress_bar = IncrementalBar('Render Progress', suffix='%(percent)d%%')
        self.__output_img = self.__img.copy()
        width, height, *_ = self.__output_img.shape
        self._quad(0, 0, width, height, max_colors, 0)
        self.__progress_bar.finish()
        return self.__output_img

    def _update_progress(self, new_progress):
        temp = self.__progress
        self.__progress += new_progress
        self.__progress_bar.goto(self.__progress * 100)
        if temp // 0.05 < self.__progress // 0.05:
            print('#', end='')

    def _quad(self, x, y, nx, ny, max_colors, depth):
        width = nx - x
        height = ny - y
        num_of_colors = self.color_count_method(self.__output_img, x, y, nx, ny, max_colors)
        if num_of_colors <= max_colors:
            # pixel_to_color_ratio = width * height / num_of_colors
            self.__output_img[x: nx, y: ny, :] = np.mean(self.__output_img[x: nx, y: ny, :],
                                                         axis=(0, 1))  # * pixel_to_color_ratio
            self._update_progress(0.25 ** depth)
        else:
            mx, my = width // 2 + x, height // 2 + y
            self._quad(x, y, mx, my, max_colors, depth + 1)
            self._quad(mx, y, nx, my, max_colors, depth + 1)
            self._quad(x, my, mx, ny, max_colors, depth + 1)
            self._quad(mx, my, nx, ny, max_colors, depth + 1)

    @staticmethod
    def color_count_all(img, x, y, nx, ny, max_colors):
        colors = set()
        for i, j in iterate_cartesian(range(x, nx), range(y, ny)):
            colors.add(str(img[i, j, :]))
            if len(colors) > max_colors:
                return len(colors)
        return len(colors)

    @staticmethod
    def color_count_differing(img, x, y, nx, ny, max_colors):
        colors = []
        for i, j in iterate_in_steps(x, y, nx, ny, step=(ny - y) // 8):
            pixel_color = img[i, j, :]
            for color in colors:
                if ((color - pixel_color) ** 2).sum() < 256:
                    break
            else:
                colors.append(pixel_color)
            if len(colors) > max_colors:
                return len(colors)
        return len(colors)
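# Usage sketch (assumption): quadify an RGB image loaded as a NumPy array.
# Requires numpy, progress, and the iterate_cartesian / iterate_in_steps
# helpers from the original module; file names and max_colors are illustrative.
import imageio.v2 as imageio

img = imageio.imread('input.png')            # H x W x 3 uint8 array
converter = Converter()                      # defaults to the exact color_count_all counter
converter.set_image(img)
quadified = converter.quadify_image(max_colors=4)
imageio.imwrite('quadified.png', quadified)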