def graph(metric, agent, tag, resolution, period):
    """graphs things"""
    try:
        if not agent and not tag:
            click.echo('Specify an agent or tag to get the metrics')
            sys.exit(1)
        if agent:
            agent_id = None
            agent_details = agents_api.get_agents(**context.settings)
            for a in agent_details:
                if a['name'] == agent:
                    agent_id = a['id']
            for s in series_api.get_agent_series(agent_id=agent_id,
                                                 metric=metric,
                                                 resolution=resolution,
                                                 period=period,
                                                 **context.settings):
                points = []
                for point in s['points']:
                    if point['type'] == 'boolean':
                        if point['all']:
                            points.append(0)
                        else:
                            points.append(2)
                    else:
                        points.append(point['avg'])
                print "Min: %d Max: %d Avg: %d %s " % (
                    min(points), max(points), sum(points) / len(points),
                    sparkline.sparkify(points).encode('utf-8'))
        if tag:
            for s in series_api.get_tag_series(tag=tag,
                                               metric=metric,
                                               resolution=resolution,
                                               period=period,
                                               **context.settings):
                points = []
                click.echo(click.style(s['source']['name'], fg='green'))
                for point in s['points']:
                    if point['type'] == 'boolean':
                        if point['all']:
                            points.append(0)
                        else:
                            points.append(2)
                    else:
                        points.append(point['avg'])
                print "Min: %d Max: %d Avg: %d %s " % (
                    min(points), max(points), sum(points) / len(points),
                    sparkline.sparkify(points).encode('utf-8'))
    except Exception, e:
        print 'Graph failed. %s' % e
        sys.exit(1)
def _print_conn_metrics(
    conn, metrics, height=1, prefix="", auto_yaxis=True, file=sys.stdout
):
    colors = [Fore.RED, Fore.CYAN, Fore.CYAN]
    for name, get_values in metrics.items():
        values = [value for timestamp, value in get_values()]
        if name == "ConnectionState":
            max_yaxis = 1
        elif auto_yaxis:
            max_yaxis = None
        else:
            max_yaxis = bps(conn["bandwidth"])
        if height == 1:
            chart = sparkify(values, minimum=0, maximum=max_yaxis)
            print(prefix, end=" ", file=file)
        elif height > 1:
            opts = {
                "minimum": 0,
                "height": height,
                "format": "{:14,.0f} ",
            }
            # The asciichartpy module is a bit wonky and not really idiomatic
            # Python: it just takes a giant dictionary of options (typical of
            # javascript land). Do not provide a "maximum" key with a value of
            # None; if the user wants auto-scaling, don't provide the key at all.
            if max_yaxis is not None:
                opts["maximum"] = max_yaxis
            chart = plot(values, opts)
        print(_format_metric(name, chart, color=colors.pop()), file=file)
        print(file=file)
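# Hedged usage sketch (not part of the function above): it contrasts the two
# chart styles _print_conn_metrics switches between, using only calls already
# shown there. The sample values are made up.
from sparkline import sparkify
from asciichartpy import plot

values = [0, 3, 5, 2, 8, 6, 7, 1]                   # hypothetical metric samples
print(sparkify(values, minimum=0))                  # height == 1: one-row Unicode sparkline
print(plot(values, {"minimum": 0, "height": 4}))    # height > 1: multi-row ASCII chart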
def refresh(self, data):
    self.win.border(0)
    self.win.addstr(1, 1, 'FR {:<8.2f} []'.format(data[0]))
    self.win.addstr(2, 1, 'FF {:<8.2f} []'.format(data[1]))
    self.win.addstr(3, 1, 'FF {:<8.2f} []'.format(data[2]))
    # self.win.addstr(4, 1, 'FL {:<8.2f} []'.format(data[3]))
    self.win.addstr(4, 1, 'Battery {}'.format(
        sparkify([1.0, 2.0, 3.0, 2.5, 0.2, 1.0, 2.0, 3.0, 2.5, 0.2]).encode('utf-8')))
    self.win.refresh()
def vmStates_update(self):
    # this method performs the API call 'listVirtualMachines'
    # Delete all current content
    self.vmStatesText.delete(1.0, END)
    request = {}
    checkTime = datetime.datetime.utcnow()  # get the current time (UTC = GMT)
    try:
        result = self.api.listVirtualMachines(request)
        testdict = result['virtualmachine'][0]  # this should throw an exception if the dictionary lookup fails
    except:
        # something wrong with the API connection, show error message and exit
        self.vmStatesText.insert(
            'end', "*** Error: VM data not returned by API ***")
        return
    # Only data for running VMs will be added to cpuData
    timenow = datetime.datetime.now()
    self.cpuData.append([[vm['name'], [timenow, self.get_cpuused(vm)]]
                         for vm in result['virtualmachine']
                         if vm['state'] == 'Running'])
    vm_names = map(lambda x: x[0], self.cpuData[0])  # create a list of VM names
    data = sum(self.cpuData, [])  # flattens data by removing some of the list brackets
    self.vmStatesText.insert(
        'end', "%d VMs in the account '%s'\nchecked at %s" %
        (result['count'], result['virtualmachine'][1]['account'],
         checkTime.strftime("%Y-%m-%d %H:%M:%S UTC")))
    vmcounter = 0
    for vm in result['virtualmachine']:
        vmcounter = vmcounter + 1
        if vm['state'] == 'Running':
            data_per_vm = [d[1] for d in data if d[0] == vm['name']]
            self.vmStatesText.insert(
                'end', "\n [%2d] %s |%s|" %
                (vmcounter, vm['name'],
                 sparkline.sparkify([d[1] for d in data_per_vm])),
                ('stateRunning'))
        elif vm['state'] == 'Stopped':
            self.vmStatesText.insert(
                'end', "\n [%2d] %s (%s)" % (vmcounter, vm['name'], vm['state']),
                ('stateStopped'))
        else:
            self.vmStatesText.insert(
                'end', "\n [%2d] %s (%s)" % (vmcounter, vm['name'], vm['state']),
                ('stateOther'))
    # set callback to this method after plot_interval seconds
    self.vmStatesText.after(self.plot_interval * 1000, self.vmStates_update)
def print_results():
    try:
        highest_mem_usage = next(s for s in sorted(
            mem_usage, key=lambda x: x['mem_usage'], reverse=True))
    except StopIteration:
        highest_mem_usage = {"pid": os.getpid(), "timestamp": 0, "mem_usage": 0}
    graph = sparkline.sparkify([m['mem_usage'] for m in mem_usage]).encode("utf-8")
    print("PID: {pid} Highest memory usage: {mem_usage}MB. "
          "Usage over time: {sparkline}".format(sparkline=graph, **highest_mem_usage))
def print_sparkline(r, g, b):
    # prints a sparkline, a small graph which shows the color levels of R, G, and B
    spark = sparkline.sparkify([r, g, b])  # .encode('utf-8')
    r = round(r / (255 / 5.))
    g = round(g / (255 / 5.))
    b = round(b / (255 / 5.))
    color_code = 16 + (r * 36) + (g * 6) + b
    shape = u'\u25CF'
    print colored(spark[0], 'red'), \
        colored(spark[1], 'green'), \
        colored(spark[2], 'blue'), \
        '\x1b[38;5;%dm' % color_code, shape
help="text | json (default: text)", default="text") opts, args = opt_parser.parse_args() profiler = Profiler({"extended": True, "blocks": ["all"]}) profiler.gettweets(opts, args) data = profiler.report() if (opts.output == "json"): print data else: print "Count: " + "{:>9}".format(str(data["count"])) print "Users: " + "{:>9}".format(str(data["usercount"])) print "User percentiles: " + sparkline.sparkify(data["userspercentiles"]) print " " + str(data["userspercentiles"]) print "Has hashtag: " + "{:>9}".format(str( data["hashtagcount"])) + " (" + str("%.2f" % ( float(data["hashtagcount"]) / float(data["count"]) * 100.0)) + "%)" print "Hashtags: " + "{:>9}".format(str(data["hashtags"])) print "Hashtags percentiles: " + sparkline.sparkify( data["hashtagspercentiles"]) print " " + str(data["hashtagspercentiles"]) print "Has URL: " + "{:>9}".format(str( data["urlcount"])) + " (" + str( "%.2f" % (float(data["urlcount"]) / float(data["count"]) * 100.0)) + "%)" print "URLs: " + "{:>9}".format(str(data["urls"])) print "URLs percentiles: " + sparkline.sparkify(data["urlspercentiles"]) print " " + str(data["urlspercentiles"])
def sparkify(difficulty):
    return sparkline.sparkify(difficulty)
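# Illustrative sketch (not from the original project): pysparklines'
# sparkline.sparkify() takes a sequence of numbers and returns a Unicode
# string of block characters, so the thin wrapper above can be exercised
# directly. The difficulty values below are hypothetical sample data.
import sparkline

sample_difficulty = [1, 3, 2, 5, 8, 13, 8, 5]   # made-up difficulty history
print(sparkline.sparkify(sample_difficulty))    # prints one row of bar glyphs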
import fileinput
from operator import itemgetter

try:
    import sparkline
except ImportError:
    print('install https://github.com/RedKrieg/pysparklines')
    sparkline = None  # sparkline output is skipped below when the module is missing


def display_sentence(sentence):
    words = sentence.split()
    if len(words) <= 20:
        return sentence
    fmt = '{} […] {}'
    return fmt.format(' '.join(words[:10]), ' '.join(words[-10:]))


if __name__ == '__main__':  # pylint: disable=C0103
    data = []
    for line in fileinput.input():
        sentence = line.strip()
        data.append((sentence, len(sentence.split())))
    data.sort(key=itemgetter(1), reverse=True)
    separated = False
    for s, l in data:
        if l < 25 and not separated:
            print(78 * '-')
            separated = True
        print('{}{}'.format(str(l).ljust(5), display_sentence(s)))
    lengths = [_[1] for _ in data]
    if sparkline:
        print(sparkline.sparkify(lengths))
    import numpy as np
    print('average: {:.1f}, std: {:.1f}'.format(np.mean(lengths), np.std(lengths)))
def printInfo(self):
    sensors = self.sensors['create']
    imu = self.sensors['imu']
    if sensors is None or imu is None:
        print('No valid sensor info')
        return

    a, m, g = imu
    now = time.time()
    dt = self.last_time - now
    beta = 0.5
    q = self.ahrs.updateAGM(a, m, g, beta, dt)
    r, p, y = quat2euler(q)
    # self.data['r'].push(r)
    # self.data['p'].push(p)
    self.data['y'].push(y)
    self.last_time = now

    ir = []
    for i in [36, 37, 38, 39, 40, 41]:
        ir.append(sensors[i])
    cliff = []
    for i in [20, 21, 22, 23]:
        cliff.append(sensors[i])

    po = [
        '--------------------------------------------------------',
        ' Light Bumper: {:6} {:6} {:6} L| {:6} |R {:6} {:6} {:6}'.format(
            sensors.light_bumper_left,
            sensors.light_bumper_front_left,
            sensors.light_bumper_center_left,
            sparkline.sparkify(ir).encode('utf-8'),  # '',
            sensors.light_bumper_center_right,
            sensors.light_bumper_front_right,
            sensors.light_bumper_right),
        ' Cliff: {:6} {:6} L| {:4} |R {:6} {:6}'.format(
            sensors.cliff_left_signal,
            sensors.cliff_front_left_signal,
            sparkline.sparkify(cliff).encode('utf-8'),  # '',
            sensors.cliff_front_right_signal,
            sensors.cliff_right_signal),
        ' Encoders: {:7} L|R {:7}'.format(sensors.encoder_counts_left,
                                          sensors.encoder_counts_right),
        ' Distance Delta: {:8} mm Total: {:10.1f} m'.format(
            sensors.distance, self.distance),
        # ' Yaw: {:8.1f} {:30} degrees'.format(self.data['y'].get_last(), self.data['y'].spark()),
        '--------------------------------------------------------',
        ' Power: {:6} mAhr [{:3} %]'.format(
            sensors.battery_charge,
            int(100.0 * sensors.battery_charge / sensors.battery_capacity)),
        ' Voltage: {:7.1f} V Current: {:7.1f} A'.format(
            sensors.voltage / 1000, sensors.current / 1000)
    ]
    for s in po:
        print(s)
def spark(self):
    data = self.get_all()
    return sparkline.sparkify(data).encode('utf-8')
        if self.index > 0:
            ret = self._data[self.index:self.size] + self._data[0:self.index]
        else:
            ret = self._data
        return ret

    def get_last(self):
        return self._data[self.index - 1]

    def get_first(self):
        return self._data[self.index]

    def spark(self):
        data = self.get_all()
        return sparkline.sparkify(data).encode('utf-8')


if __name__ == "__main__":
    cb = CircularBuffer(60)
    for i in range(200):
        cb.push(i)
    print(cb.get_all())
    print('get cb[7]', cb[7])
    print('get cb[0]', cb[0])
    print('get last', cb.get_last())
    print('ine', cb.get_last(), sparkline.sparkify(cb.get_all()).encode('utf-8'))
    print(cb.get_first(), cb.spark(), cb.get_last())
print("install https://github.com/RedKrieg/pysparklines") def display_sentence(sentence): words = sentence.split() if len(words) <= 20: return sentence fmt = "{} […] {}" return fmt.format(" ".join(words[:10]), " ".join(words[-10:])) if __name__ == "__main__": # pylint: disable=C0103 data = [] for line in fileinput.input(): sentence = line.strip() data.append((sentence, len(sentence.split()))) data.sort(key=itemgetter(1), reverse=True) separated = False for s, l in data: if l < 25 and not separated: print(78 * "-") separated = True print("{}{}".format(str(l).ljust(5), display_sentence(s))) lengths = [_[1] for _ in data] if sparkline: print(sparkline.sparkify(lengths)) import numpy as np print("average: {:.1f}, std: {:.1f}".format(np.mean(lengths), np.std(lengths)))