def test_load_cmap_by_name(self):
    """Looking a colormap up by its registered name returns the registered
    instance; an unknown name raises ColormapNotFound."""
    for name in VALID_CMAPS:
        self.assertIs(load_cmap(name), _registered_colormaps[name])
    with self.assertRaises(ColormapNotFound):
        load_cmap("thiscmapdoesnotexist")
def __init__(
    self,
    device,
    duration=1,
    mode="spec",
    cmap=None,
    **kwargs,
):
    """App for streaming live audio data to terminal

    Listens to the first channel of the specified device

    This is very slow - calculating a spectrogram every loop
    """
    super().__init__(**kwargs)
    self.device = device
    self.duration = duration
    self.data = None
    self.mode = mode
    # Pick the display transform that matches the requested mode
    if mode == "spec":
        self.transform = SpectrogramTransform(
            spec_sampling_rate=500,
            spec_freq_spacing=100,
            min_freq=250,
            max_freq=8000,
        )
    elif mode == "amp":
        self.transform = AmplitudeEnvelopeTwoSidedTransform(gradient=(0.3, 0.7))
    else:
        raise ValueError("mode must be spec or amp")
    self.cmap = load_cmap(cmap)
    self.colorized_char_array = None
async def handle_key(self, ch):
    """Dispatch a single keypress to the matching app action.

    q quits; arrow keys / hjkl move the selection; KEY_RESIZE re-checks the
    terminal size; r/c prompt for a new grid row/column count; m switches
    colormap; p jumps to a page; z cycles through the configured transforms.
    """
    if ch == ord("q"):
        self.close()
    elif ch == curses.KEY_LEFT or ch == ord("h"):
        self.left()
    elif ch == curses.KEY_RIGHT or ch == ord("l"):
        self.right()
    elif ch == curses.KEY_UP or ch == ord("k"):
        self.up()
    elif ch == curses.KEY_DOWN or ch == ord("j"):
        self.down()
    elif ch == curses.KEY_RESIZE:
        self.check_size_reset()
    elif ch == ord("r"):
        rows = self.prompt("Set rows [0-9]: ", int)
        if rows and 0 < rows <= 9:
            await self._rebuild_grid("rows", rows)
    elif ch == ord("c"):
        cols = self.prompt("Set cols [0-9]: ", int)
        if cols and 0 < cols <= 9:
            await self._rebuild_grid("cols", cols)
    elif ch == ord("m"):
        resp = self.prompt(
            "Choose colormap ['greys', 'viridis', 'plasma', ...]: ", str)
        if resp in VALID_CMAPS:
            self.cmap = load_cmap(resp)
            self._mark_all_views_for_redraw()
    elif ch == ord("p"):
        page = self.prompt("Jump to page: ", int)
        if page and 0 < page <= self.paginator.n_pages:
            self.jump_to_page(page - 1)
    elif ch == ord("z"):
        self._selected_transform_idx = (
            (self._selected_transform_idx + 1) % len(self._transforms)
        )
        self._mark_all_views_for_redraw()

def _mark_all_views_for_redraw(self):
    # Flag every view so the refresh loop repaints it on its next pass.
    for view in self.views:
        view.needs_redraw = True

async def _rebuild_grid(self, attr, value):
    # Shared implementation of the "r" (rows) and "c" (cols) commands:
    # wipe the on-screen state, apply the new dimension, re-init the display.
    self.stdscr.clear()
    self.stdscr.refresh()
    self.pad.clear()
    self.windows = []
    setattr(self, attr, value)
    await self.initialize_display()
    self._mark_all_views_for_redraw()
    # This is a hack to wait for the refresh loop to consume stuff
    await asyncio.sleep(self._refresh_interval * 2)
def imshow(
    filename,
    height=None,
    width=None,
    cmap=None,
    vertical=False,
    characters="quarter",
    thumbnail=False,  # NOTE(review): accepted but not used in this body — confirm against callers
):
    """Render an image file to the terminal as colored characters."""
    def _to_cells(requested, available):
        # Floats in (0, 1] are a fraction of the terminal extent; any other
        # truthy value is an absolute cell count. Falsy values pass through.
        if requested and isinstance(requested, float) and 0 < requested <= 1:
            return int(np.round(available * requested))
        if requested:
            return int(requested)
        return requested

    cmap = load_cmap(cmap or var.DEFAULT_CMAP)
    termsize = os.get_terminal_size()
    height = _to_cells(height, termsize.lines)
    width = _to_cells(width, termsize.columns)

    charmap = get_char_map(characters)
    height = height or termsize.lines
    width = width or termsize.columns
    if vertical:
        # Swap extents so the image is laid out sideways
        height, width = width, height
    desired_size = charmap.max_img_shape(height, width)

    data, _ = PILImageReader.read_file(filename)
    img, _ = DEFAULTS["image"]["transform"].convert(
        data,
        output_size=desired_size,
        size_multiple_of=charmap.patch_dimensions,
        rotated=vertical,
    )
    if vertical:
        img = img.T

    colored = StdoutRenderer.apply_cmap_to_char_array(cmap, charmap.to_char_array(img))
    StdoutRenderer.render(colored)
def __init__(
    self,
    device,
    mode="amp",
    chunk_size=1024,
    step_chars=2,  # number of character columns to render in one calculation (overrides duration)
    duration=None,
    channels=1,  # define which channels to listen to
    transform=None,
    cmap=None,
    map=None,
    step_chunks=2,  # number of chunks in one calculation
    **kwargs,
):
    """App for streaming a single live audio data to terminal

    Captures audio from `device` in chunks of `chunk_size` frames and renders
    it with the given transform and colormap. Either `duration` or
    `step_chars` must be provided.
    """
    super().__init__(**kwargs)
    if duration is None and step_chars is None:
        raise ValueError("Either duration or step_chars must be set")
    self.device = device
    self.duration = duration
    # BUGFIX: step_chunks was a commented-out parameter but the body still
    # referenced it, which raised NameError at call time. Restored as a
    # keyword parameter (appended before **kwargs to stay positionally
    # backward-compatible) with its previously documented default of 2.
    self.step_chunks = step_chunks
    self.chunk_size = chunk_size
    self.step_chars = step_chars
    self.channels = channels
    self.data = None
    self.mode = mode
    self.map = map
    self.gain = 0
    # Leave room for axis labels in the left padding
    self._padx = max(self._padx, 4)
    self.state = {
        "current_x": 0,
    }
    self.transform = transform
    self.cmap = load_cmap(cmap)
    self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=2)
def benchmark_render(height, width, repeat):
    """Time char-mapping, colormapping, and rendering for each char map size."""
    import time

    import numpy as np

    from inspec.colormap import load_cmap
    from inspec.maps import (
        FullCharMap,
        HalfCharMap,
        QuarterCharMap,
    )
    from inspec.render import StdoutRenderer

    cmap = load_cmap(None)
    msgs = []

    def _profile(msg, cycles=1):
        # Record elapsed time per cycle since the last checkpoint, then reset it.
        nonlocal _t
        msgs.append("{}: {:.3f}s / loop".format(msg, (time.time() - _t) / cycles))
        _t = time.time()

    for mapper in (FullCharMap, HalfCharMap, QuarterCharMap):
        n_rows = height * mapper.patch_dimensions[0]
        n_cols = width * mapper.patch_dimensions[1]
        random_data = np.random.random((n_rows, n_cols)) * 1000

        _t = time.time()
        for _ in range(repeat):
            char_array = mapper.to_char_array(random_data)
        _profile("Mapped array {} to char array with {}".format(random_data.shape, mapper), repeat)

        for _ in range(repeat):
            cmapped_array = StdoutRenderer.apply_cmap_to_char_array(cmap, char_array)
        _profile("Applied cmap to char array of size {}".format(random_data.shape), repeat)

        for _ in range(repeat):
            StdoutRenderer.render(cmapped_array)
        _profile("Rendered {}".format(random_data.shape), repeat)

    # Print the summary only after all timing loops so output doesn't skew results
    for msg in msgs:
        click.echo(msg)
def run_all_tests(sample_audio_file):
    """Smoke-test every char map x transform x colormap combination.

    Reads one audio file and renders each combination to stdout at half the
    terminal size. Reversed colormaps (names ending in "_r") are skipped.
    """
    data, sampling_rate, metadata = AudioReader.read_file(sample_audio_file)
    all_maps = [
        FullCharMap,
        HalfCharMap,
        QuarterCharMap,
    ]
    all_transforms = [
        SpectrogramTransform(1000, 50, min_freq=250, max_freq=10000),
        AmplitudeEnvelopeTwoSidedTransform(gradient=(0.2, 0.8)),
    ]
    for Map in all_maps:
        for transform in all_transforms:
            for cmap_name in VALID_CMAPS:
                # Skip reversed colormaps BEFORE loading them (the original
                # loaded the cmap first and then discarded it).
                if cmap_name.endswith("_r"):
                    continue
                cmap = load_cmap(cmap_name)
                print("Running {} with {} using cmap {}".format(
                    Map, transform, cmap_name))
                termsize = os.get_terminal_size()
                desired_size = Map.max_img_shape(
                    termsize.lines // 2, termsize.columns // 2)
                # (typo "Attemping" fixed)
                print("Attempting size {}".format(desired_size))
                img, metadata = transform.convert(
                    data, sampling_rate, output_size=desired_size)
                char_array = Map.to_char_array(img)
                char_array = StdoutRenderer.apply_cmap_to_char_array(
                    cmap, char_array)
                StdoutRenderer.render(char_array)
def test_load_cmap_bad_input(self):
    """A value that is neither a name nor a colormap raises ColormapNotFound."""
    self.assertRaises(ColormapNotFound, load_cmap, 1000.0)
def test_load_cmap_by_cmap(self):
    """Passing an existing colormap object through load_cmap returns it as-is."""
    self.assertIs(self.dummy_cmap, load_cmap(self.dummy_cmap))
    for name in VALID_CMAPS:
        registered = _registered_colormaps[name]
        self.assertIs(load_cmap(registered), registered)
def show(
    filename,
    height=None,
    width=None,
    duration=None,
    time_=None,
    channel=None,
    cmap=None,
    show_spec=True,
    show_amp=False,
    min_freq=var.DEFAULT_SPECTROGRAM_MIN_FREQ,
    max_freq=var.DEFAULT_SPECTROGRAM_MAX_FREQ,
    vertical=False,
    characters="quarter",
):
    """Print an audio file to the terminal as a spectrogram and/or amplitude
    envelope rendered with colored characters.

    Removed a dead `is_audio = True` flag that unconditionally guarded the
    whole body, and factored the duplicated transpose/charmap/colorize/render
    tail into one nested helper.
    """
    cmap = load_cmap(cmap or var.DEFAULT_CMAP)
    termsize = os.get_terminal_size()
    # Floats in (0, 1] are a fraction of the terminal extent; other truthy
    # values are absolute cell counts.
    if height and isinstance(height, float) and 0 < height <= 1:
        height = int(np.round(termsize.lines * height))
    elif height:
        height = int(height)
    if width and isinstance(width, float) and 0 < width <= 1:
        width = int(np.round(termsize.columns * width))
    elif width:
        width = int(width)

    charmap = get_char_map(characters)
    height = height or termsize.lines
    width = width or termsize.columns
    if show_spec and show_amp:
        # Each panel gets half the vertical space
        height = height // 2
    if vertical:
        height, width = width, height
    desired_size = charmap.max_img_shape(height, width)

    if channel is None:
        channel = 0
    data, sampling_rate, _ = AudioReader.read_file_by_time(
        filename, duration=duration, time_start=time_, channel=channel
    )

    def _render(img):
        # Shared tail for both panels: orient, map to chars, colorize, print.
        if vertical:
            img = img.T
        char_array = charmap.to_char_array(img)
        char_array = StdoutRenderer.apply_cmap_to_char_array(cmap, char_array)
        StdoutRenderer.render(char_array)

    if show_spec:
        transform = DEFAULTS["audio"]["spec_transform"]
        # NOTE(review): this mutates the shared default transform's frequency
        # bounds for subsequent callers — confirm that is intended.
        transform.min_freq = min_freq
        transform.max_freq = max_freq
        img, _ = transform.convert(data, sampling_rate, output_size=desired_size)
        _render(img)

    if show_amp:
        img, _ = DEFAULTS["audio"]["amp_transform"].convert(
            data, sampling_rate, output_size=desired_size
        )
        _render(img)
def __init__(
    self,
    rows,
    cols,
    files,
    padx=0,  # NOTE(review): accepted but unused in this body — presumably consumed upstream; confirm
    pady=0,
    cmap=None,
    file_reader=None,
    view_class=None,
    transform=None,
    map=None,
    threads=4,
    **kwargs,
):
    """App for viewing files in a grid pattern

    Builds one view per loadable file; files whose view construction fails
    are silently dropped. Rendering work is dispatched to a thread pool.
    """
    super().__init__(**kwargs)
    self.rows = rows
    self.cols = cols
    self.state = {}
    self._slot_to_page = {}
    self._page_to_slot = {}
    self.current_selection = 0
    self.current_page = 0
    self.current_page_slot = 0
    self.cmap = load_cmap(cmap)
    self.map = map
    self.reader = file_reader

    # Normalize transform into a list of InspecTransforms
    if isinstance(transform, InspecTransform):
        self._transforms = [transform]
        self._selected_transform_idx = 0
    elif isinstance(transform, list) and all(
            isinstance(t, InspecTransform) for t in transform):
        self._transforms = transform
        self._selected_transform_idx = 0
    else:
        raise ValueError(
            "transform parameter must be a InspecTransform or a list of InspecTransforms"
        )

    self.views = []
    idx = 0
    for filename in files:
        try:
            self.views.append(
                view_class(self, dict(filename=filename), idx))
        except Exception:
            # Was a bare `except:`, which would also swallow
            # KeyboardInterrupt/SystemExit; deliberately best-effort otherwise.
            # TODO better warning when files dont load right?
            pass
        else:
            idx += 1

    self.windows = []
    self._n_threads = threads
    self._window_idx_to_tasks = defaultdict(list)
    self.executor = concurrent.futures.ThreadPoolExecutor(
        max_workers=self._n_threads,
    )
def view_colormap(stdscr, cmap=None, num=True):
    """Interactively display terminal colors in a curses window.

    With cmap=None, shows the full 256-color terminal palette laid out in
    blocks; with a colormap name, shows that colormap's curses color-pair
    slots. If `num` is True, cells are labeled with their numeric index,
    otherwise they are drawn as solid/patterned character cells.
    Blocks until the user presses "q".
    """
    curses.use_default_colors()
    from inspec.colormap import curses_cmap, load_cmap
    if cmap is None:
        show_full = True
    else:
        show_full = False
    # Each color cell is WIDTH characters wide
    WIDTH = 4
    if show_full:
        # might have a different behavior on windows vs ubuntu
        # Register one curses color pair per palette color (foreground only)
        i = 1
        for color in range(255):
            curses.init_pair(i, color + 1, -1)
            i += 1
        # Partition the 256-color space: 16 system colors, the 6x6x6 color
        # cube split into ranges, and the grayscale ramp at the end
        blocks = [
            range(0, 16),
            range(16, 16 + 36),
            range(16 + 36, 16 + 72),
            range(16 + 72, 16 + 108),
            range(16 + 108, 16 + 144),
            range(160, 160 + 36),
            range(160 + 36, 160 + 72),
            range(160 + 72, 256),
        ]
        tempts = []
        for i, block in enumerate(blocks):
            if i == 0 or i == 7:
                # First (system colors) and last (grayscale) blocks: one
                # color per row in a single column
                for block_idx, color_idx in enumerate(block):
                    color_str = str(color_idx)
                    if num:
                        # Right-align the numeric label in the cell
                        full_str = (WIDTH - len(color_str)) * " " + color_str
                    else:
                        full_str = WIDTH * const.FULL_1
                    col = ((i) // 2) * WIDTH * 6
                    row = block_idx
                    color = curses.color_pair(color_idx)
                    tempts.append((row, col))
                    try:
                        # Drawing past the window edge raises; skip that cell
                        stdscr.addstr(row, col, full_str, color)
                    except curses.error:
                        continue
            else:
                # Middle blocks: 6-row tiles; even-indexed blocks go on the
                # bottom half, odd-indexed on the top
                bottom = bool(i % 2 == 0)
                for block_idx, color_idx in enumerate(block):
                    color_str = str(color_idx)
                    if num:
                        full_str = (WIDTH - len(color_str)) * " " + color_str
                    else:
                        full_str = WIDTH * const.FULL_1
                    row = bottom * 6 + block_idx % 6
                    col = WIDTH + (
                        (i - 1) // 2) * WIDTH * 6 + (block_idx // 6) * WIDTH
                    color = curses.color_pair(color_idx)
                    stdscr.addstr(row, col, full_str, color)
    else:
        # Show every foreground/background pairing of the chosen colormap's
        # curses colors, one column per first color
        cmap = load_cmap(cmap)
        curses_cmap.init_colormap(cmap)
        col_idx = 0
        row_idx = 0
        for color0 in curses_cmap.colors:
            for color1 in curses_cmap.colors:
                slot, inv = curses_cmap.get_slot(color0, color1)
                color_str = str(slot)
                if num:
                    full_str = (WIDTH - len(color_str)) * " " + color_str
                else:
                    # Pick the block character that shows this fg/bg combo;
                    # `inv` means the pair is stored with colors swapped
                    if color0.idx == color1.idx == 0:
                        full_str = WIDTH * const.FULL_0
                    elif color0.idx == color1.idx != 0:
                        full_str = WIDTH * const.FULL_1
                    elif inv:
                        full_str = WIDTH * const.QTR_1001
                    else:
                        full_str = WIDTH * const.QTR_0110
                row_idx += 1
                color = curses.color_pair(slot)
                try:
                    stdscr.addstr(row_idx, col_idx, full_str, color)
                except curses.error:
                    pass
            # Next column of pairings; restart numbering from the top row
            col_idx += WIDTH
            row_idx = 0
    # Block until the user quits with "q"
    while True:
        ch = stdscr.getch()
        if ch == ord("q"):
            break