def __init__(self, buf_size: int):
    self.buf_size: int = buf_size

    # Set up pyaudio and aubio beat detector
    self.p: pyaudio.PyAudio = pyaudio.PyAudio()
    self.samplerate: int = 44100
    self.stream: pyaudio.Stream = self.p.open(
        format=pyaudio.paFloat32,
        channels=1,
        rate=self.samplerate,
        input=True,
        frames_per_buffer=self.buf_size,
        stream_callback=self._pyaudio_callback,
    )
    self.next_plot_time = time.monotonic() + (1 / 60)
    self.spec_x = []
    self.spec_y = []
    self.spectrum = plt.figure()
    fft_size: int = self.buf_size * 2
    # self.tempo: aubio.tempo = aubio.tempo(
    #     "default", fft_size, self.buf_size, self.samplerate
    # )
    self.spinner: BeatPrinter = BeatPrinter()

def energyplot():
    sigma = []
    with open("OUTCAR") as outcar:
        for line in outcar:
            if 'y=' in line:
                sigma.append(float(line.split()[-1]))
    numsteps = len(sigma)
    x = [x for x in range(1, numsteps + 1)]
    minstep = 0 if args.steprange[0] <= 1 else args.steprange[0] - 1
    maxstep = -1 if args.steprange[1] > x[-1] or args.steprange[1] == -1 else args.steprange[1] - 1
    fig2 = tp.figure()
    fig2.plot(x[minstep:maxstep], sigma[minstep:maxstep],
              width=40, height=20, label='Etot', xlabel='ionic step')
    figure2 = []
    for string in fig2.get_string():
        if string == '*':
            figure2.append(Fore.GREEN + string)
        else:
            figure2.append(Fore.WHITE + string)
    print(''.join(figure2))

def test_plot_lim():
    x = numpy.linspace(0, 2 * numpy.pi, 10)
    y = numpy.sin(x)

    fig = tpl.figure()
    fig.plot(
        x,
        y,
        label="data",
        width=50,
        height=15,
        xlim=[-1, 1],
        ylim=[-1, 1],
        xlabel="x vals",
        title="header",
    )
    string = fig.get_string()
    ref = """
                       header

   1 +---------------------------------------+
     |                                ********|
 0.5 |                    ************       |
     |                ****                   |
   0 |             ***                       |
     |                                       |
-0.5 |                                       |
     |                                       |
  -1 +---------------------------------------+
     -1       -0.5       0        0.5       1
                      x vals"""
    assert string == ref

def test_plot_lim():
    x = np.linspace(0, 2 * np.pi, 10)
    y = np.sin(x)

    fig = tpl.figure()
    fig.plot(
        x,
        y,
        label="data",
        width=50,
        height=15,
        xlim=[-1, 1],
        ylim=[-1, 1],
        xlabel="x vals",
        title="header",
    )
    string = fig.get_string()
    # for some reason, this gives a different result locally; perhaps a different
    # gnuplot version
    ref = """
                       header

   1 +---------------------------------------+
     |                                ********|
 0.5 |                    ************       |
     |                ****                   |
   0 |             ***                       |
     |                                       |
-0.5 |                                       |
     |                                       |
  -1 +---------------------------------------+
     -1       -0.5       0        0.5       1
                      x vals"""
    assert string == ref, string

def test_scatter():
    rng = np.random.default_rng(0)
    x = np.arange(0.0, 50.0, 2.0)
    y = x**1.3 + rng.random(x.shape) * 30.0

    fig = tpl.figure()
    fig.plot(x, y, plot_command="plot '-' w points", width=50, height=15)
    # fig.show()
    string = fig.get_string()
    ref = """\
  180 +---------------------------------------+
  160 |                                 A AA  |
  140 |                                       |
      |                             A  A      |
  120 |                        AA  A A        |
  100 |                       A               |
   80 |                 A   A                 |
   60 |              A A  A                   |
      |       A  AA A  A                      |
   40 |      A                                |
   20 | A  A                                  |
    0 +---------------------------------------+
      0   5   10   15   20   25  30   35  40   45  50"""
    assert string == ref, string

def infer():
    trcs = [encode_trace(trc) for trc in TRACES]
    mdp = DYN2
    best, spec2score = spec_mle(
        mdp, trcs, SPEC2MONITORS.values(), parallel=False, psat=0.9
    )

    def normalize(score):
        return int(round(score - spec2score[SPEC2MONITORS[CONST_TRUE]]))

    best_score = normalize(spec2score[best])

    fig = tpl.figure()
    fig.barh(
        fn.lmap(normalize, spec2score.values()),
        labels=SPEC_NAMES,
        force_ascii=False,
        show_vals=True,
    )
    print('\n' + "=" * 80)
    print(' (log likelihood(spec) - log_likelihood(True))'.rjust(40) + '\n')
    print('(higher is better)'.rjust(41))
    print("=" * 80)
    fig.show()
    print(f"\n\nbest score: {abs(best_score)}")
    return best

def analyze_packets(signal_received, frame):
    # insert new line after Ctrl+C
    print()
    collected_data = processor.data
    if config.no_analysis:
        [print(entry) for entry in set(collected_data)]
        exit(0)
    if config.verbose or config.verbose_extra:
        print("Analyzing packets")
    if not collected_data:
        print("No data to show :-)")
    else:
        columns = ["data"]
        df = pd.DataFrame(data=list(collected_data), columns=columns)
        table = df["data"].value_counts()
        labels = list(table.index)
        counts = [int(c) for c in table.to_numpy().tolist()]
        fig = tpl.figure()
        fig.barh(counts, labels, force_ascii=True)
        fig.show()
        if config.show_missed and len(missed) > 0:
            print()
            print("Packets not analyzed: ")
            [print(miss) for miss in missed]
        elif len(missed) > 0:
            miss_count = len(missed)
            print(f"Not showing {miss_count} unknown packets. Run with -m")
    exit(0)

def test_padding_2():
    fig = tpl.figure(padding=(1, 2))
    fig.aprint("abc")
    string = fig.get_string()
    assert (
        string
        == """
  abc
"""
    )

def test_vertical_ascii():
    numpy.random.seed(123)
    sample = numpy.random.normal(size=1000)
    counts, bin_edges = numpy.histogram(sample, bins=40)
    fig = tpl.figure()
    fig.hist(counts, bin_edges, force_ascii=True)
    # fig.show()
    string = fig.get_string()
    assert (
        string
        == """\
                  **
                ****
               ******
              ********  *    *
             ***********
            ***************
           ******************
          **********************
         ************************  *
    ***********************************     *\
"""
    )

def test_vertical():
    numpy.random.seed(123)
    sample = numpy.random.normal(size=1000)
    counts, bin_edges = numpy.histogram(sample, bins=40)
    fig = tpl.figure()
    fig.hist(counts, bin_edges)
    fig.show()
    string = fig.get_string()
    assert (
        string
        == """\
                  ▆█
                ▄▄██
               ▃█████
              ▁██████▃  ▅    ▂
             ████████▇▅█
            ▂█▅████████████
           ▂███████████████▃▂
          ▂▃██████████████████▃▁
         ▁▂██████████████████████  ▂
    ▃▂▄▄█████████████████████████▅▃▁▂▁▁     ▁\
"""
    )

def load_tuning():
    global mb
    voltage_to_frequency = json.load(open("voltage_to_frequency.json", "rb"))
    x = []
    y = []
    y0 = []
    for k in voltage_to_frequency:
        x.append(float(k))
        y0.append(voltage_to_frequency[k])
        y.append(math.log(voltage_to_frequency[k]))
    mb = np.polyfit(y, x, 1)
    fig = tpl.figure()
    print("\n")
    fig.plot(
        x,
        y0,
        plot_command="plot '-' w points",
        width=60,
        height=22,
        xlabel="voltage (v)",
        title="frequency (hz) vs voltage",
        label="freq = exp((volts{:+2.2f})/{:2.2f}) ".format(mb[1], mb[0]),
    )
    fig.show()
    print("\n")
    time.sleep(1)

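# A minimal sketch (the helper name frequency_for_voltage is hypothetical, not
# part of the original module) showing how the fit produced by load_tuning can be
# inverted. np.polyfit(y, x, 1) above fits voltage = mb[0] * log(freq) + mb[1],
# so log(freq) = (volts - mb[1]) / mb[0].
def frequency_for_voltage(volts: float) -> float:
    """Predict oscillator frequency (Hz) for a control voltage using the fitted line."""
    m, b = mb  # slope and intercept from np.polyfit(log_freqs, voltages, 1)
    return math.exp((volts - b) / m)
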
def test_padding_1():
    fig = tpl.figure(padding=1)
    fig.aprint("abc")
    string = fig.get_string()
    assert (
        string
        == """
 abc
"""
    )
    return

def _plot_cities(self, io):
    try:
        cities, counts = list(io.cities.keys()), list(io.cities.values())
        fig = tpl.figure()
        fig.barh(counts, cities, max_width=60)
        fig.show()
    except Exception as ex:
        self.logger.error('Could not plot cities.')

def print_sig(dat, title="data", width=130, height=30, base=0):
    fig = tpl.figure()
    datlen = len(dat)
    # x axis must have the same number of samples as dat
    datx = np.arange(base, base + datlen)
    fig.plot(datx, dat, title=title, width=width, height=height)
    fig.show()
    return dat

def plotOct(octaves):
    oct_range = np.array(
        ['31.5', '63', '125', '250', '500', '1k', '2k', '4k', '8k', '16k'])
    oct_values = np.array([round(x) for x in octaves])
    fig = tpl.figure()
    fig.barh(oct_values, oct_range, force_ascii=True)
    fig.show()
    print('')

def plot_hist(sample, bins=40):
    counts, bin_edges = np.histogram(sample, bins=bins)
    fig = tpl.figure()
    fig.hist(counts, bin_edges, grid=[15, 25],
             orientation="horizontal", force_ascii=False)
    fig.show()

def plot_termPlot(x, y, cmd="plot '-' w points pt 'o'"):
    import termplotlib as tpl
    fig = tpl.figure()
    # pass the caller-supplied gnuplot command through to termplotlib
    fig.plot(x=x, y=y, width=50, height=15, plot_command=cmd)
    fig.show()
    return fig

def showProgress(agent, x, y, y2, meanOfN):
    os.system('clear')
    print('+-------------------------------------+')
    agent.printName()
    print('+-------------------------------------+')
    agent.printParameters()
    print('+-------------------------------------+')
    print('+ Episode ' + str(len(x)) + ' score: ' + str(y[len(y) - 1]))
    print('+ Mean of last ' + str(meanOfN) + ' = ' + str(meanOfLast(x, y, meanOfN)) +
          ' Highest Score: ' + str(np.max(y)))
    print('+-------------------------------------+')
    fig = tpl.figure()
    fig.plot(x, y, width=100, height=30)
    fig.show()
    fig = tpl.figure()
    fig.plot(x, y2, width=100, height=30)
    fig.show()

def test_padding_4():
    fig = tpl.figure(padding=(1, 2, 3, 4))
    fig.aprint("abc")
    string = fig.get_string()
    assert (
        string
        == """
    abc


"""
    )
    fig.show()

def plot(pars, memo_space, sim):  # todo use names in pars instead of signals
    '''plot function'''
    x_block, y_block = pars
    x = x_block.outputs[0].hist
    y = y_block.outputs[0].hist
    fig = tpl.figure()
    fig.plot(x, y, height=15)
    fig.show()
    return None

def _plot_volume(self, i):
    x = range(len(self.iter_tweets[i]))
    x = [x_i * self.update_interval for x_i in x]
    y = self.iter_tweets[i]
    fig = tpl.figure()
    fig.plot(x, y, label="Stream {0} volume".format(i + 1), width=150, height=12)
    fig.show()

def main():
    dest = bytearray(1 << 21)
    for data in DATA:
        xz_path = os.path.join(os.path.dirname(__file__), 'squash-benchmark', f'{data}.xz')
        with lzma.LZMAFile(xz_path) as fp:
            content = fp.read()
        log_bufsizes = range(20)
        bufsizes = [1 << log_bufsize for log_bufsize in log_bufsizes]
        tcs = []
        tus = []
        for bufsize in bufsizes:
            tc, tu = timeit_bo3(content, bufsize, dest)
            tcs.append(len(content) / tc)
            tus.append(len(content) / tu)
        fig = tpl.figure()
        fig.plot(log_bufsizes, tcs, label=f'compress {data}')
        fig.show()
        fig = tpl.figure()
        fig.plot(log_bufsizes, tus, label=f'uncompress {data}')
        fig.show()

def termogram(self) -> Optional[str]:
    """
    Return a text histogram of the vote results.
    """
    choice = list(map(lambda choice: choice[1], self.choices))
    count = list(map(lambda choice: choice[2], self.choices))
    if not count:
        return None
    fig = tpl.figure()
    fig.barh(count, choice)
    return fig.get_string()

def plot_stocks(lst, graph):
    x = []
    y = []
    for _, price in graph:
        x.append(_)
        y.append(price)
    fig = tpl.figure()
    fig.plot(x, y, width=80, height=15)
    for l in fig.get_string().split("\n"):
        lst.append(l)
    lst[-1] = " " * 33 + "14 days prices" + " " * 33

def lr_finder(model, training_loader, optimizer, lr_scheduler, smoothing=0.05, plt_fig=True):
    """Runs training cycles to get a plot of loss vs learning rate.

    Args:
        model: The U-net model.
        training_loader (torch.utils.data.DataLoader): Dataloader with training batches.
        optimizer (torch.optim): Optimizer for updating the U-net parameters.
        lr_scheduler (torch.optim.lr_scheduler): Scheduler to adjust the learning rate.
        smoothing (float, optional): Parameter to adjust smoothing of learning rate vs
            loss curve. Defaults to 0.05.
        plt_fig (bool, optional): If true, displays the plot in the terminal.
            Defaults to True.

    Returns:
        tuple: A list of the loss and a list of the corresponding learning rate.
    """
    lr_find_loss = []
    lr_find_lr = []
    iters = 0
    model.train()
    print(f"Training for {LR_FIND_EPOCHS} epochs to create a learning "
          "rate plot.")
    for i in range(LR_FIND_EPOCHS):
        for batch in tqdm(training_loader, desc=f'Epoch {i + 1}, batch number',
                          bar_format='{l_bar}{bar:30}{r_bar}{bar:-30b}'):
            inputs, targets = prepare_batch(batch, DEVICE_NUM)
            optimizer.zero_grad()
            output = model(inputs)
            if LOSS_CRITERION == 'CrossEntropyLoss':
                loss = loss_criterion(output, torch.argmax(targets, dim=1))
            else:
                loss = loss_criterion(output, targets)
            loss.backward()
            optimizer.step()
            lr_scheduler.step()
            lr_step = optimizer.state_dict()["param_groups"][0]["lr"]
            lr_find_lr.append(lr_step)
            if iters == 0:
                lr_find_loss.append(loss)
            else:
                loss = smoothing * loss + (1 - smoothing) * lr_find_loss[-1]
                lr_find_loss.append(loss)
            if loss > 1 and iters > len(training_loader) // 1.333:
                break
            iters += 1
    if plt_fig:
        fig = tpl.figure()
        fig.plot(np.log10(lr_find_lr), lr_find_loss, width=50, height=30,
                 xlabel='Log10 Learning Rate')
        fig.show()
    return lr_find_loss, lr_find_lr

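# A minimal sketch (not part of the original code) of how the two lists returned
# by lr_finder might be turned into a concrete learning-rate choice. The helper
# name suggest_lr and the divide-by-10 back-off are assumptions: a common
# heuristic is to take the learning rate at the minimum of the smoothed loss
# curve and step back from it rather than using the minimum itself.
def suggest_lr(lr_find_loss, lr_find_lr, divisor=10.0):
    """Return the learning rate at the lowest smoothed loss, backed off by `divisor`."""
    losses = np.array([float(l) for l in lr_find_loss])  # tensors or floats -> floats
    best_idx = int(np.argmin(losses))
    return lr_find_lr[best_idx] / divisor
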
def test_barh_floats():
    fig = tpl.figure()
    fig.barh([0.3, 0.4, 0.6, 0.2], ["Cats", "Dogs", "Cows", "Geese"])
    # fig.show()
    string = fig.get_string()
    assert (
        string
        == """\
Cats   [0.3]  ████████████████████
Dogs   [0.4]  ██████████████████████████▋
Cows   [0.6]  ████████████████████████████████████████
Geese  [0.2]  █████████████▍\
"""
    )
    return

def test_barh():
    fig = tpl.figure()
    fig.barh([3, 10, 5, 2], ["Cats", "Dogs", "Cows", "Geese"])
    # fig.show()
    string = fig.get_string()
    assert (
        string
        == """\
Cats   [ 3]  ████████████
Dogs   [10]  ████████████████████████████████████████
Cows   [ 5]  ████████████████████
Geese  [ 2]  ████████\
"""
    )
    return

def print_histogram(self, values):
    counts, bin_edges = np.histogram(values, bins="doane")
    fig = tpl.figure()
    labels = [
        "[{:#.6g} - {:#.6g})".format(bin_edges[k], bin_edges[k + 1])
        for k in range(len(bin_edges) - 2)
    ]
    labels.append(
        "[{:#.6g} - {:#.6g}]".format(
            bin_edges[len(bin_edges) - 2], bin_edges[len(bin_edges) - 1]
        )
    )
    fig.barh(counts, labels=labels)
    fig.show()

def test_barh_ascii():
    fig = tpl.figure()
    fig.barh([3, 10, 5, 2], ["Cats", "Dogs", "Cows", "Geese"], force_ascii=True)
    # fig.show()
    string = fig.get_string()
    assert (
        string
        == """\
Cats   [ 3]  ************
Dogs   [10]  ****************************************
Cows   [ 5]  ********************
Geese  [ 2]  ********\
"""
    )
    return

def main(type, *args, **kwargs):
    global v1
    try:
        config.load_kube_config()
        version = client.VersionApi().get_code()
        logging.info(f"Connected to {Configuration._default.host} - {version.git_version}")
    except Exception as e:
        logging.error(f"Kubernetes version check failed: {e}")
        sys.exit(1)

    res = client.ApiClient().call_api('/metrics', 'GET',
                                      _return_http_data_only=True,
                                      _preload_content=False)

    operations = {}
    prev_value = 0
    for line in res.readlines():
        match = re.search(r'(?P<metric>.+){(?P<labels>.+)} (?P<value>\d+)', decode(line))
        if match:
            labels = {}
            metric = match.group('metric')
            value = int(match.group('value'))
            if not metric.startswith(METRIC):
                continue
            for part in match.group('labels').split(','):
                k, v = part.split('=')
                labels[k] = v.strip('"')
            if not labels.get('type', '').endswith(type):
                continue
            if labels['operation'] not in operations:
                operations[labels['operation']] = {'counts': [], 'buckets': [], 'type': labels['type']}
                prev_value = 0
            if metric.endswith('_bucket'):
                operations[labels['operation']]['counts'].append(value - prev_value)
                operations[labels['operation']]['buckets'].append(labels['le'])
                prev_value = value
            elif metric.endswith('_sum'):
                operations[labels['operation']]['sum'] = value
            elif metric.endswith('_count'):
                operations[labels['operation']]['count'] = value

    for operation, stats in operations.items():
        print(f"\n{stats['sum'] / stats['count']:.3f} average etcd request duration (seconds): {operation} {stats['type']}")
        fig = tpl.figure()
        fig.barh(stats['counts'], stats['buckets'], max_width=50)
        fig.show()

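# A minimal, self-contained sketch of how the regular expression above decomposes a
# single Prometheus exposition line into metric name, labels, and value. The sample
# line is illustrative only, not output captured from a real apiserver.
import re

sample = 'etcd_request_duration_seconds_bucket{operation="get",type="pods",le="0.1"} 42'
m = re.search(r'(?P<metric>.+){(?P<labels>.+)} (?P<value>\d+)', sample)
assert m is not None
labels = {}
for part in m.group('labels').split(','):
    k, v = part.split('=')
    labels[k] = v.strip('"')
print(m.group('metric'))      # etcd_request_duration_seconds_bucket
print(labels)                 # {'operation': 'get', 'type': 'pods', 'le': '0.1'}
print(int(m.group('value')))  # 42
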