def main(start=179, stop=213):
    """Render a half-pie SolidGauge of each beverage's average sugar content,
    expressed as a percentage of the overall per-row sugar average.

    Parameters
    ----------
    start, stop : int
        Half-open CSV row range [start, stop) to analyse.  The defaults
        cover the Frappuccino® Blended Coffee category (see the row index
        in Data/beverage-index).
    """
    raw_data = pd.read_csv('../../Data/starbucks_drinkMenu_expanded.csv')

    # Group the raw sugar readings by beverage name: {beverage: [grams, ...]}.
    beverage = {}
    for i in range(start, stop):
        beverage.setdefault(raw_data['Beverage'][i], []).append(
            raw_data[' Sugars (g)'][i])

    # Mean sugar per CSV row.  BUG FIX: the row count was hard-coded as 34;
    # derive it from the range so a different slice stays correct.
    sugar_avg = sum(sum(vals) for vals in beverage.values()) / (stop - start)

    graph = pg.SolidGauge(show_legend=True, half_pie=True, inner_radius=0.70,
                          style=custom_style, legend_at_bottom=True)
    # Every row inside the slice carries the same category name, so the
    # first row of the slice is a valid title source.
    graph.title = raw_data['Beverage_category'][start]
    graph.value_formatter = lambda x: '{:.4g}%'.format(x)

    # One gauge segment per beverage: its own average as % of the overall mean.
    for name, sugars in beverage.items():
        graph.add(name, (sum(sugars) / len(sugars)) / sugar_avg * 100)
    graph.render_to_file('../../docs/graph/Frappuccino® Blended Coffee.svg')
def _hbar_chart(title, rows):
    """Build a 300x300 horizontal bar chart with one '<label>: <count>' bar
    per (label, count) row and return it rendered as a unicode SVG string."""
    bar_chart = pygal.HorizontalBar(width=300, height=300, truncate_legend=-1)
    bar_chart.title = title
    for label, count in rows:
        bar_chart.add('%s: %s' % (label, count),
                      [{'value': count, 'label': str(label)}])
    return bar_chart.render(is_unicode=True)


def dashboard():
    """Render the SOC dashboard: alert-severity pie, several top-N
    horizontal bar charts and a possible-infections gauge, all built
    from the safenetworking Postgres database.

    Returns the rendered dashboard.html with each chart passed in as an
    inline SVG string.
    """
    # NOTE(review): the credentials here are masked placeholders; they
    # should come from configuration/environment, not source control.
    conn = psycopg2.connect(dbname='safenetworking', user='******',
                            host='127.0.0.1', password='******')
    try:
        cur = conn.cursor()

        # --- Alert severity distribution (pie chart) ---
        cur.execute('''
            SELECT "Severity", count(*) as count
            from sn1dnseventsraw group by "Severity"
        ''')
        pie_chart = pygal.Pie(width=300, height=300, truncate_legend=-1)
        pie_chart.title = 'Alert Severity Distribution'
        for severity, count in cur.fetchall():
            pie_chart.add('%s: %s' % (severity, count),
                          [{'value': count, 'label': severity}])
        chart = pie_chart.render(is_unicode=True)

        # --- Top 10 threat domains ---
        cur.execute('''
            SELECT "Threat/Content Name", count(*) as count
            from sn1dnsthreatnameraw
            where "Threat/Content Name" != '' and "Threat/Content Name" != '""'
            group by "Threat/Content Name" order by count desc limit 10
        ''')
        chart1 = _hbar_chart('Top 10 Domains', cur.fetchall())

        # --- Top threat categories ---
        cur.execute('''
            SELECT "Threat/Content Type", count(*) as count
            from sn1dnseventsraw
            group by "Threat/Content Type" order by count desc limit 4
        ''')
        chart2 = _hbar_chart('Top Threat Categories', cur.fetchall())

        # --- Possible infections: distinct source addresses seen in events ---
        cur.execute('''
            SELECT count(distinct "Source address") as count
            from sn1dnseventsraw
        ''')
        infected = int(cur.fetchall()[0][0])
        gauge = pygal.SolidGauge(
            half_pie=True, inner_radius=0.80, human_readable=True,
            style=pygal.style.styles['default'](
                value_font_size=25, value_label_font_size=25,
                title_font_size=25, label_font_size=25, legend_font_size=25))
        gauge.value_formatter = lambda x: '{:.30g} Source IPs'.format(x)
        gauge.add('Possible Infections',
                  [{'value': infected, 'color': 'red', 'max_value': 1000000}])
        chart12 = gauge.render(is_unicode=True)

        # --- Top 10 source / destination addresses ---
        cur.execute('''
            SELECT "Source address", count(*) as count from sn1dnseventsraw
            group by "Source address" order by count desc limit 10
        ''')
        chart5 = _hbar_chart('Top 10 Source IP Addresses', cur.fetchall())

        cur.execute('''
            SELECT "Destination address", count(*) as count
            from sn1dnseventsraw
            group by "Destination address" order by count desc limit 10
        ''')
        chart6 = _hbar_chart('Top 10 Destination Addresses', cur.fetchall())

        # --- Top malware families (tagged connection reports) ---
        cur.execute('''
            SELECT "tags", count(*) as count from connectionreport2
            where "tags" != '' group by "tags" order by count desc limit 10
        ''')
        chart3 = _hbar_chart('Top Malware Families', cur.fetchall())

        # --- Per-domain hit counts ---
        cur.execute('''
            SELECT "domain", count(*) as count from snuniquedomains
            group by "domain" order by count desc limit 10
        ''')
        chart7 = _hbar_chart('Domain Count', cur.fetchall())
    finally:
        # BUG FIX: the connection was previously never closed (leak).
        conn.close()

    return render_template('dashboard.html', chart=chart, chart2=chart2,
                           chart3=chart3, chart1=chart1, chart5=chart5,
                           chart6=chart6, chart7=chart7, chart12=chart12)
def charts():
    """Render the recipe statistics page: an ingredients dot chart, an
    allergen solid-gauge chart and an average-calories gauge, all passed
    to statistics.html as data URIs."""
    base_style = pygal.style.styles['default']

    # Recipe ingredients statistics by cuisine.
    dot_chart = pygal.Dot(
        x_label_rotation=30, print_values=False, show_legend=False,
        style=base_style(value_font_size=30, title_font_size=30,
                         legend_font_size=30, dots_size=3000,
                         background='transparent', tooltip_font_size=30,
                         label_font_size=22))
    dot_chart.title = 'Recipe Ingredients Statistics by Cuisine'
    dot_chart.y_title = 'Recipes by cuisine'
    dot_chart.x_labels = ['milk', 'egg', 'sugar', 'flour', 'salt', 'water',
                          'garlic', 'vanilla', 'butter']
    dot_chart.y_labels = ['French - 4', 'Mexican - 2', 'Greek - 2',
                          'English - 2', 'Asian - 4', 'Indian - 3',
                          'Irish - 2', 'Italian - 5']
    for cuisine, counts in (('French', French_val), ('Mexican', Mexican_val),
                            ('Greek', Greek_val), ('English', English_val),
                            ('Asian', Asian_val), ('Indian', Indian_val),
                            ('Irish', Irish_val), ('Italian', Italian_val)):
        dot_chart.add(cuisine, counts)
    dot_chart = dot_chart.render_data_uri()

    # Recipe allergens statistics (in %).
    solid_gauge_chart = pygal.SolidGauge(
        inner_radius=0.70,
        style=base_style(value_font_size=25, title_font_size=30,
                         legend_font_size=30, background='transparent',
                         tooltip_font_size=30))
    solid_gauge_chart.title = 'Recipe Allergens Statistics (in %)'
    solid_gauge_chart.value_formatter = lambda x: '{:.10g}%'.format(x)
    for allergen, pct in (('Egg', 37.5), ('Milk', 8.33), ('Nuts', 4.16),
                          ('Garlic', 41.66), ('No allergens', 25)):
        solid_gauge_chart.add(allergen, [{'value': pct, 'max_value': 100}])
    solid_gauge_chart = solid_gauge_chart.render_data_uri()

    # Average calories by cuisine.
    gauge_chart = pygal.Gauge(
        human_readable=True,
        style=base_style(value_font_size=30, title_font_size=30,
                         legend_font_size=30, background='transparent',
                         tooltip_font_size=30, label_font_size=25))
    gauge_chart.title = 'Average calories by cuisine'
    gauge_chart.range = [0, 1000]
    for cuisine, calories in (('French', 393.5), ('Mexican', 296),
                              ('Greek', 599), ('English', 476),
                              ('Asian', 292), ('Indian', 204.66),
                              ('Irish', 413.5), ('All', 344.91)):
        gauge_chart.add(cuisine, calories)
    gauge_chart = gauge_chart.render_data_uri()

    return render_template('statistics.html',
                           dot_chart=dot_chart,
                           solid_gauge_chart=solid_gauge_chart,
                           gauge_chart=gauge_chart)
def main():
    """Continuously poll building power meters (via interpret_csv) and
    render pygal gauges to SVG files.

    Every 15 seconds the loop:
      * reads live kW and cumulative kWh for four meters (Main, Gym/DG,
        Kitchen/DE, Collins Center/AMDP);
      * renders half-pie SolidGauges for kW, daily kWh (relative to the
        start-of-day baselines) and estimated cost at $0.12/kWh into
        static/svg/;
      * between the 06:00 and 15:00 hours, samples Main kWh once per hour
        and adds the usage-since-6am to an hourly Gauge chart;
      * at hour 23, re-baselines the daily kWh constants and clears the
        per-hour sampling flags.

    Runs until KeyboardInterrupt, then renders the charts one last time.
    """
    gauge_chart = pygal.Gauge(human_readable=True)
    # Start-of-day cumulative kWh baselines.  NOTE(review): these `while`
    # loops assign 0 and immediately exit, so they act as plain `if`
    # statements — unlike the retrying loops in the hour-23 branch below,
    # which re-read the meter until a value appears.  Confirm which
    # behaviour was intended.
    main_kwh_constant = interpret_csv("Main (kWh)")
    while main_kwh_constant == '' or main_kwh_constant is None:
        main_kwh_constant = 0
    main_kwh_constant = int(main_kwh_constant)
    gym_kwh_constant = interpret_csv("DG (kWh)")
    while gym_kwh_constant == '' or gym_kwh_constant is None:
        gym_kwh_constant = 0
    gym_kwh_constant = int(gym_kwh_constant)
    kitchen_kwh_constant = interpret_csv("DE (kWh)")
    while kitchen_kwh_constant == '' or kitchen_kwh_constant is None:
        kitchen_kwh_constant = 0
    kitchen_kwh_constant = int(kitchen_kwh_constant)
    collins_center_kwh_constant = interpret_csv("AMDP (kWh)")
    while collins_center_kwh_constant == '' or collins_center_kwh_constant is None:
        collins_center_kwh_constant = 0
    collins_center_kwh_constant = int(collins_center_kwh_constant)
    # One "already sampled this hour" flag per school-day hour (6am-3pm).
    six = False
    seven = False
    eight = False
    nine = False
    ten = False
    eleven = False
    twelve = False
    one = False
    two = False
    three = False
    gauge_chart.title = 'Electricity used hourly in kWh all of ahs'
    gauge_chart.range = [0, 7000]
    try:
        # Hourly samples are module globals so other code can read them.
        global six_am_kwh, seven_am_kwh, eight_am_kwh, nine_am_kwh, ten_am_kwh, eleven_am_kwh, twelve_pm_kwh, one_pm_kwh, two_pm_kwh, three_pm_kwh
        # Value formatters for the gauges (percent/dollar are defined for
        # parity; only kW/kWh/dollar formatters are used below).
        percent_formatter = lambda x: '{:.10g}%'.format(x)
        dollar_formatter = lambda x: '{:.10g}$'.format(x)
        kW_formatter = lambda x: '{:.10g}kW'.format(x)
        kWh_formatter = lambda x: '{:.10g}kWh'.format(x)
        while True:
            currentDT = datetime.datetime.now()
            currentDT2 = datetime.date.today()
            # --- Read the four meters; blank/None readings default to 0 ---
            main_kw = interpret_csv("Main (kW)")
            if main_kw == '' or main_kw is None:
                main_kw = 0
            main_kw = int(main_kw)
            main_kwh = interpret_csv("Main (kWh)")
            if main_kwh == '' or main_kwh is None:
                main_kwh = 0
            main_kwh = int(main_kwh)
            # Daily kWh = cumulative reading minus start-of-day baseline.
            main_kwh = main_kwh - main_kwh_constant
            gym_kw = interpret_csv("DG (kW)")
            if gym_kw == '' or gym_kw is None:
                gym_kw = 0
            gym_kw = int(gym_kw)
            gym_kwh = interpret_csv("DG (kWh)")
            if gym_kwh == '' or gym_kwh is None:
                gym_kwh = 0
            gym_kwh = int(gym_kwh)
            gym_kwh = gym_kwh - gym_kwh_constant
            kitchen_kw = interpret_csv("DE (kW)")
            if kitchen_kw == '' or kitchen_kw is None:
                kitchen_kw = 0
            kitchen_kw = int(kitchen_kw)
            kitchen_kwh = interpret_csv("DE (kWh)")
            if kitchen_kwh == '' or kitchen_kwh is None:
                kitchen_kwh = 0
            kitchen_kwh = int(kitchen_kwh)
            kitchen_kwh = kitchen_kwh - kitchen_kwh_constant
            collins_center_kw = interpret_csv("AMDP (kW)")
            if collins_center_kw == '' or collins_center_kw is None:
                collins_center_kw = 0
            collins_center_kw = int(collins_center_kw)
            collins_center_kwh = interpret_csv("AMDP (kWh)")
            if collins_center_kwh == '' or collins_center_kwh is None:
                collins_center_kwh = 0
            collins_center_kwh = int(collins_center_kwh)
            collins_center_kwh = collins_center_kwh - collins_center_kwh_constant
            # --- Live kW gauge (one segment per meter) ---
            kW = pygal.SolidGauge(
                half_pie=True, inner_radius=0.70,
                style=pygal.style.styles['default'](value_font_size=10))
            kW.add('AHS MAIN (AKA: All of AHS)',
                   [{'value': main_kw, 'max_value': 750}],
                   formatter=kW_formatter)
            kW.add('AHS GYM', [{'value': gym_kw, 'max_value': 200}],
                   formatter=kW_formatter)
            kW.add('AHS COLLINS CENTER',
                   [{'value': collins_center_kw, 'max_value': 250}],
                   formatter=kW_formatter)
            kW.add('AHS KITCHEN', [{'value': kitchen_kw, 'max_value': 150}],
                   formatter=kW_formatter)
            kW.render_to_file("static/svg/kw.svg")
            # --- Daily kWh gauge ---
            kWh = pygal.SolidGauge(half_pie=True, inner_radius=0.70,
                                   style=pygal.style.styles['default'](value_font_size=10))
            kWh.add('AHS MAIN (AKA: All of AHS)',
                    [{'value': main_kwh, 'max_value': 50}],
                    formatter=kWh_formatter)
            kWh.add('AHS GYM', [{'value': gym_kwh, 'max_value': 20}],
                    formatter=kWh_formatter)
            kWh.add('AHS COLLINS CENTER',
                    [{'value': collins_center_kwh, 'max_value': 20}],
                    formatter=kWh_formatter)
            kWh.add('AHS KITCHEN', [{'value': kitchen_kwh, 'max_value': 20}],
                    formatter=kWh_formatter)
            kWh.render_to_file("static/svg/kwh.svg")
            # --- Estimated daily cost gauge at $0.12 per kWh ---
            dollar = pygal.SolidGauge(half_pie=True, inner_radius=0.70,
                                      style=pygal.style.styles['default'](value_font_size=10))
            dollar.add('AHS MAIN (AKA: All of AHS)',
                       [{'value': int(main_kwh * 0.12), 'max_value': int(0.12 * 50)}],
                       formatter=dollar_formatter)
            dollar.add('AHS GYM',
                       [{'value': int(gym_kwh * 0.12), 'max_value': int(0.12 * 20)}],
                       formatter=dollar_formatter)
            dollar.add('AHS COLLINS CENTER',
                       [{'value': int(0.12 * collins_center_kwh), 'max_value': int(0.12 * 20)}],
                       formatter=dollar_formatter)
            dollar.add('AHS KITCHEN',
                       [{'value': int(0.12 * kitchen_kwh), 'max_value': int(0.12 * 20)}],
                       formatter=dollar_formatter)
            dollar.render_to_file("static/svg/dollars.svg")
            if int(currentDT.hour) == 23:
                # End of day: remember tomorrow's date, re-baseline every
                # meter (retrying until a non-empty reading arrives) and
                # clear the hourly sampling flags.
                current_day = currentDT2 + timedelta(days=1)
                main_kwh_constant = interpret_csv("Main (kWh)")
                while main_kwh_constant == '' or main_kwh_constant is None:
                    main_kwh_constant = interpret_csv("Main (kWh)")
                main_kwh_constant = int(main_kwh_constant)
                gym_kwh_constant = interpret_csv("DG (kWh)")
                while gym_kwh_constant == '' or gym_kwh_constant is None:
                    gym_kwh_constant = interpret_csv("DG (kWh)")
                gym_kwh_constant = int(gym_kwh_constant)
                kitchen_kwh_constant = interpret_csv("DE (kWh)")
                while kitchen_kwh_constant == '' or kitchen_kwh_constant is None:
                    kitchen_kwh_constant = interpret_csv("DE (kWh)")
                kitchen_kwh_constant = int(kitchen_kwh_constant)
                collins_center_kwh_constant = interpret_csv("AMDP (kWh)")
                while collins_center_kwh_constant == '' or collins_center_kwh_constant is None:
                    collins_center_kwh_constant = interpret_csv("AMDP (kWh)")
                collins_center_kwh_constant = int(collins_center_kwh_constant)
                six = False
                seven = False
                eight = False
                nine = False
                ten = False
                eleven = False
                twelve = False
                one = False
                two = False
                three = False
                six_am_kwh = 0
            # current_day only exists after the first 23:00 rollover, so
            # the hourly chart is skipped until then.
            try:
                current_day
            except NameError:
                print("kWh hourly not ready yet")
            else:
                # BUG(review): currentDT2.day is an int while current_day
                # is a datetime.date, so this comparison is always False
                # and the hourly sampling below likely never runs.
                # Presumably `currentDT2 == current_day` was intended —
                # confirm before changing.
                if currentDT2.day == current_day:
                    if 5 <= currentDT.hour < 16:
                        # Sample Main kWh once per hour; each flag stops
                        # re-sampling within the same hour.  All values
                        # after 6am are charted relative to six_am_kwh.
                        if (int(currentDT.hour) == 6) and not six:
                            six = True
                            gauge_chart.add('6 am', 0)
                            six_am_kwh = interpret_csv("Main (kWh)")
                            if six_am_kwh == '' or six_am_kwh is None:
                                six_am_kwh = interpret_csv("Main (kWh)")
                            six_am_kwh = int(six_am_kwh)
                        if (int(currentDT.hour) == 7) and not seven:
                            seven = True
                            seven_am_kwh = interpret_csv("Main (kWh)")
                            if seven_am_kwh == '' or seven_am_kwh is None:
                                seven_am_kwh = interpret_csv("Main (kWh)")
                            seven_am_kwh = int(seven_am_kwh)
                            gauge_chart.add('7 am', seven_am_kwh - six_am_kwh)
                        if (int(currentDT.hour) == 8) and not eight:
                            eight = True
                            eight_am_kwh = interpret_csv("Main (kWh)")
                            if eight_am_kwh == '' or eight_am_kwh is None:
                                eight_am_kwh = interpret_csv("Main (kWh)")
                            eight_am_kwh = int(eight_am_kwh)
                            gauge_chart.add('8 am', eight_am_kwh - six_am_kwh)
                        if (int(currentDT.hour) == 9) and not nine:
                            nine = True
                            nine_am_kwh = interpret_csv("Main (kWh)")
                            if nine_am_kwh == '' or nine_am_kwh is None:
                                nine_am_kwh = interpret_csv("Main (kWh)")
                            nine_am_kwh = int(nine_am_kwh)
                            gauge_chart.add('9 am', nine_am_kwh - six_am_kwh)
                        if (int(currentDT.hour) == 10) and not ten:
                            ten = True
                            ten_am_kwh = interpret_csv("Main (kWh)")
                            if ten_am_kwh == '' or ten_am_kwh is None:
                                ten_am_kwh = interpret_csv("Main (kWh)")
                            ten_am_kwh = int(ten_am_kwh)
                            gauge_chart.add('10 am', ten_am_kwh - six_am_kwh)
                        if (int(currentDT.hour) == 11) and not eleven:
                            eleven = True
                            eleven_am_kwh = interpret_csv("Main (kWh)")
                            if eleven_am_kwh == '' or eleven_am_kwh is None:
                                eleven_am_kwh = interpret_csv("Main (kWh)")
                            eleven_am_kwh = int(eleven_am_kwh)
                            gauge_chart.add('11 am', eleven_am_kwh - six_am_kwh)
                        if (int(currentDT.hour) == 12) and not twelve:
                            twelve = True
                            twelve_pm_kwh = interpret_csv("Main (kWh)")
                            if twelve_pm_kwh == '' or twelve_pm_kwh is None:
                                twelve_pm_kwh = interpret_csv("Main (kWh)")
                            twelve_pm_kwh = int(twelve_pm_kwh)
                            gauge_chart.add('12 pm', twelve_pm_kwh - six_am_kwh)
                        if (int(currentDT.hour) == 13) and not one:
                            one = True
                            one_pm_kwh = interpret_csv("Main (kWh)")
                            if one_pm_kwh == '' or one_pm_kwh is None:
                                one_pm_kwh = interpret_csv("Main (kWh)")
                            one_pm_kwh = int(one_pm_kwh)
                            gauge_chart.add('1 pm', one_pm_kwh - six_am_kwh)
                        if (int(currentDT.hour) == 14) and not two:
                            two = True
                            two_pm_kwh = interpret_csv("Main (kWh)")
                            if two_pm_kwh == '' or two_pm_kwh is None:
                                two_pm_kwh = interpret_csv("Main (kWh)")
                            two_pm_kwh = int(two_pm_kwh)
                            gauge_chart.add('2 pm', two_pm_kwh - six_am_kwh)
                        if (int(currentDT.hour) == 15) and not three:
                            three = True
                            three_pm_kwh = interpret_csv("Main (kWh)")
                            if three_pm_kwh == '' or three_pm_kwh is None:
                                three_pm_kwh = interpret_csv("Main (kWh)")
                            three_pm_kwh = int(three_pm_kwh)
                            gauge_chart.add('3 pm', three_pm_kwh - six_am_kwh)
                        gauge_chart.render_to_file("kWhHourly.svg")
            time.sleep(15)
    except KeyboardInterrupt:
        # Flush the latest charts to disk before exiting.
        kW.render_to_file("static/svg/kw.svg")
        kWh.render_to_file("static/svg/kwh.svg")
        dollar.render_to_file("static/svg/dollars.svg")
        if 5 < currentDT.hour < 16:
            gauge_chart.render_to_file("static/svg/kWhHourly.svg")
def __init__(self, **kwargs):
    """Create a half-pie SolidGauge wrapper.

    Parameters
    ----------
    **kwargs
        Must contain 'id' (stored on the instance); every remaining
        keyword is forwarded to pygal.SolidGauge.

    Raises
    ------
    KeyError
        If 'id' is missing from kwargs.
    """
    # BUG FIX: 'id' must be popped *before* kwargs is forwarded to the
    # chart constructor, otherwise pygal.SolidGauge receives a bogus
    # 'id' config option.
    self.id = kwargs.pop('id')
    self.chart = pygal.SolidGauge(half_pie=True, inner_radius=0.50, **kwargs)
# Locate the rows whose 'Estado UEM' column reads 'TRABAJO TERMINADO'
# (work finished)...
indexNames = df_total_mes[
    df_total_mes['Estado UEM'] == 'TRABAJO TERMINADO'].index
# ...and drop them, leaving only the pending work orders.
df_total_mes.drop(indexNames, inplace=True)

# Count the remaining (pending) rows.
num_pendientes = len(df_total_mes)

# Percentage of completed work orders, rounded to one decimal place.
porcentaje = round((num_trab - num_pendientes) / num_trab * 100, 1)

import pygal
b_chart = pygal.SolidGauge(inner_radius=0.75)
# BUG FIX: the title was "Destiny Kill/Death Ratio" — a leftover from a
# pygal tutorial example; label the gauge for what it actually shows.
b_chart.title = "Trabajos Completados"
b_chart.add("Trabajos Completados", porcentaje)
b_chart.render_in_browser()

# =============================================================================
# Second indicator (?)
# This part will ask for a month and a particular unit, and then show the
# work orders associated with that unit in that month.
# =============================================================================
import pandas as pd
import numpy as np
def make_graph(graph_style):
    """Build the demo chart named by *graph_style* and return it.

    Parameters
    ----------
    graph_style : str
        One of "Line", "Bar", "Treemap", "Gauge", "SolidGauge", "XY",
        "Multi-series pie".

    Returns
    -------
    tuple[str, str]
        (msg, graph_data): ``msg`` echoes the chosen style (or the error
        message on failure); ``graph_data`` is the chart rendered as a
        data URI, or "" when the style is unknown or rendering failed.
    """
    msg = ""
    graph_data = ""
    rv = ran.randint(3, 60)  # random base value so the soda demo varies
    try:
        if graph_style in ("Line", "Bar"):
            # Line and Bar share identical demo data; only the chart
            # class differs, so build them from one code path.
            chart_cls = pygal.Line if graph_style == "Line" else pygal.Bar
            graph = chart_cls(fill=True, interpolate='cubic',
                              style=DarkColorizedStyle)
            graph.title = "Timeline year soda sales in mil"
            graph.x_labels = ["2000", "2001", "2003", "2004", "2005"]
            graph.add("Pepsi", [rv, rv + 20, rv - 5, rv + 3, rv + 12])
            graph.add("Coca Cola", [rv + 5, rv + 18, rv - 5, rv + 20, rv + 25])
            graph.add("Sprite", [rv, rv + 6, rv - 9, rv + 5, rv + 12])
            graph.add("Orange", [rv + 4, rv + 30, rv - 10, rv, rv + 8])
            graph_data = graph.render_data_uri()
            msg = graph_style
        elif graph_style == "Treemap":
            treemap = pygal.Treemap(fill=True, interpolate='cubic',
                                    style=DarkColorizedStyle)
            treemap.title = 'Binary TreeMap'
            treemap.add('A', [2, 1, 12, 4, 2, 1, 1, 3, 12, 3, 4, None, 9])
            treemap.add('B', [4, 2, 5, 10, 3, 4, 2, 7, 4, -10, None, 8, 3, 1])
            treemap.add('C', [3, 8, 3, 3, 5, 3, 3, 5, 4, 12])
            treemap.add('D', [23, 18])
            treemap.add('E', [1, 2, 1, 2, 3, 3, 1, 2, 3, 4, 3, 1, 2, 1, 1, 1, 1, 1])
            treemap.add('F', [31])
            treemap.add('G', [5, 9.3, 8.1, 12, 4, 3, 2])
            treemap.add('H', [12, 3, 3])
            graph_data = treemap.render_data_uri()
            # BUG FIX: msg was " Treemap" with a stray leading space,
            # inconsistent with every other branch.
            msg = "Treemap"
        elif graph_style == "Gauge":
            gauge_chart = pygal.Gauge(human_readable=True, fill=True,
                                      interpolate='cubic',
                                      style=DarkColorizedStyle)
            gauge_chart.title = 'DeltaBlue V8 benchmark results'
            gauge_chart.range = [0, 10000]
            gauge_chart.add('Chrome', 8212)
            gauge_chart.add('Firefox', 8099)
            gauge_chart.add('Opera', 2933)
            gauge_chart.add('IE', 41)
            graph_data = gauge_chart.render_data_uri()
            msg = "Gauge"
        elif graph_style == "SolidGauge":
            gauge = pygal.SolidGauge(inner_radius=0.70, fill=True,
                                     interpolate='cubic',
                                     style=DarkColorizedStyle)
            percent_formatter = lambda x: '{:.10g}%'.format(x)
            dollar_formatter = lambda x: '{:.10g}$'.format(x)
            gauge.value_formatter = percent_formatter
            gauge.add('Series 1', [{
                'value': 225000,
                'max_value': 1275000
            }], formatter=dollar_formatter)
            gauge.add('Series 2', [{'value': 110, 'max_value': 100}])
            gauge.add('Series 3', [{'value': 50}])
            gauge.add('Series 4', [{
                'value': 51,
                'max_value': 100
            }, {
                'value': 12,
                'max_value': 100
            }])
            gauge.add('Series 5', [{'value': 79, 'max_value': 100}])
            gauge.add('Series 6', 99)
            gauge.add('Series 7', [{'value': 100, 'max_value': 100}])
            graph_data = gauge.render_data_uri()
            msg = "SolidGauge"
        elif graph_style == "XY":
            xy_chart = pygal.XY(fill=True, interpolate='cubic',
                                style=DarkColorizedStyle)
            xy_chart.title = 'XY Cosinus'
            xy_chart.add('x = cos(y)', [(cos(x / 10.), x / 10.)
                                        for x in range(-50, 50, 5)])
            xy_chart.add('y = cos(x)', [(x / 10., cos(x / 10.))
                                        for x in range(-50, 50, 5)])
            xy_chart.add('x = 1', [(1, -5), (1, 5)])
            xy_chart.add('x = -1', [(-1, -5), (-1, 5)])
            xy_chart.add('y = 1', [(-5, 1), (5, 1)])
            xy_chart.add('y = -1', [(-5, -1), (5, -1)])
            graph_data = xy_chart.render_data_uri()
            msg = "XY"
        elif graph_style == "Multi-series pie":
            pie_chart = pygal.Pie(fill=True, interpolate='cubic',
                                  style=DarkColorizedStyle)
            pie_chart.title = 'Browser usage by version in February 2012 (in %)'
            pie_chart.add('IE', [5.7, 10.2, 2.6, 1])
            pie_chart.add('Firefox', [.6, 16.8, 7.4, 2.2, 1.2, 1, 1, 1.1, 4.3, 1])
            pie_chart.add('Chrome', [.3, .9, 17.1, 15.3, .6, .5, 1.6])
            pie_chart.add('Safari', [4.4, .1])
            pie_chart.add('Opera', [.1, 1.6, .1, .5])
            graph_data = pie_chart.render_data_uri()
            msg = "Multi-series pie"
    except Exception as e:
        # BUG FIX: previously the exception *object* was returned, so the
        # tuple's first element changed type on failure; return its text.
        msg = str(e)
    return (msg, graph_data)
def home():
    """Build the screen-usage analysis charts (bar, donut and scatter
    SVGs under static/images/) from the module-level DataFrames
    data_time_spent, avg_time_spent, data_tf_idf and var_full_data, then
    return the assembled HTML page.

    NOTE(review): the DataFrame column names used here (Time, Screen_id,
    Avg_Time, GUI, Priority, ScreenName) are assumed from usage — confirm
    against the data-loading code.
    """
    # --- Bar chart: time spent per session, indexed 1..N ---
    custom_style = Style(colors=('#351508', '#404040', '#9BC850'))
    title = 'Screen ID analysis'
    bar_chart = pygal.Bar(width=400, height=400, explicit_size=True,
                          title=title, style=custom_style)
    var_x_labels = []
    var_data = []
    for idx1 in list(range(0, len(data_time_spent))):
        var_x_labels.append(idx1 + 1)  # 1-based session index as label
        var_data.append(data_time_spent.Time[idx1])
    bar_chart.x_labels = var_x_labels
    var_time_spent = var_data
    bar_chart.add('Avg active session', var_time_spent)
    bar_chart.render_to_file('static/images/bar_chart.svg')
    # --- Bar chart: average time spent on each screen ---
    title2 = 'Avg time spent on each screen'
    custom_style = Style(colors=('#059467', '#9BC850', '#E80080'))
    avg_bar_chart = pygal.Bar(width=1200, height=800, explicit_size=True,
                              title=title2, x_label_rotation=90,
                              style=custom_style)
    var_x_labels = []
    var_data = []
    for idx1 in list(range(0, len(avg_time_spent))):
        var_x_labels.append(avg_time_spent.Screen_id[idx1])
        var_data.append(avg_time_spent.Avg_Time[idx1])
    avg_bar_chart.x_labels = var_x_labels
    var_time_spent = var_data
    avg_bar_chart.add('Avg time sec', var_data)
    avg_bar_chart.render_to_file('static/images/screen_avg_chart.svg')
    # --- Bar chart: least used screens (tf-idf priority per GUI) ---
    title2 = 'Least used screens'
    custom_style = Style(colors=('#1878f7', '#404040', '#E80080'))
    avg_bar_chart = pygal.Bar(width=1200, height=600, explicit_size=True,
                              title=title2, x_label_rotation=90,
                              style=custom_style)
    var_x_labels = []
    var_data = []
    for idx1 in list(range(0, len(data_tf_idf))):
        var_x_labels.append(data_tf_idf.GUI[idx1])
        var_data.append(data_tf_idf.Priority[idx1])
    avg_bar_chart.x_labels = var_x_labels
    var_time_spent = var_data
    avg_bar_chart.add('Least used screen', var_time_spent)
    avg_bar_chart.render_to_file('static/images/tf_idf_chart.svg')
    # --- Donut chart: share of total time per screen (in %) ---
    print("===Dounut Chart==========")
    pie_chart = pygal.Pie(width=1200, height=600, explicit_size=True,
                          inner_radius=.2)
    pie_chart.title = 'Time spent on each screen (in %)'
    var_x_labels = []
    var_data = []
    total_values = 0
    # First pass: total time across all screens.
    for idx1 in list(range(0, len(avg_time_spent))):
        total_values = total_values + avg_time_spent.Avg_Time[idx1]
    # Second pass: each screen's percentage of that total.
    for idx1 in list(range(0, len(avg_time_spent))):
        prcnt = round((avg_time_spent.Avg_Time[idx1] / total_values) * 100, 2)
        pie_chart.add(avg_time_spent.Screen_id[idx1], prcnt)
    pie_chart.render_to_file('static/images/donut.svg')
    # --- Gauge chart ---
    # NOTE(review): this gauge is configured but never populated or
    # rendered — apparently dead code; confirm before removing.
    gauge = pygal.SolidGauge(inner_radius=0.70)
    percent_formatter = lambda x: '{:.10g}%'.format(x)
    gauge.value_formatter = percent_formatter
    # --- Scatter plot of screen ID vs time spent ---
    xy_chart = pygal.XY(width=1200, height=600, explicit_size=True,
                        stroke=False)
    xy_chart.title = 'Scatter plot of the screen ID and time spent'
    # Map each screen to the list of screens visited immediately after it
    # (every occurrence in var_full_data contributes its successor).
    var_screen_id_dict = defaultdict(list)
    for idx in list(range(0, len(avg_time_spent))):
        print(idx, "=>", avg_time_spent.Screen_id[idx])
        for idx2 in list(range(0, len(var_full_data))):
            if (avg_time_spent.Screen_id[idx] == var_full_data.ScreenName[idx2]
                ):
                # Skip the final row: it has no successor screen.
                if (idx2 + 1 >= len(var_full_data)):
                    continue
                else:
                    var_screen_id_dict[avg_time_spent.Screen_id[idx]].append(
                        var_full_data.ScreenName[idx2 + 1])
    print("The next screen list stored==========")
    print(var_screen_id_dict)
    # NOTE(review): the scatter series below are hard-coded sample points;
    # var_screen_id_dict is computed but not plotted — confirm intent.
    xy_chart.add('2', [(.1, .15), (.12, .23), (.4, .3), (.6, .4), (.21, .21),
                       (.5, .3), (.6, .8), (.7, .8)])
    xy_chart.add('3', [(.05, .01), (.13, .02), (1.5, 1.7), (1.52, 1.6),
                       (1.8, 1.63), (1.5, 1.82), (1.7, 1.23), (2.1, 2.23),
                       (2.3, 1.98)])
    xy_chart.render_to_file('static/images/scatter.svg')
    # Assemble and return the final HTML page.
    my_html = create_html_page()
    return my_html
def pdb_report(asn):
    """Build an HTML peering report for one ASN from PeeringDB data.

    Queries PeeringDB (via the ``PeeringDB`` client) for the AS, every IX it
    peers at and every IXLAN, caching each object in Redis for 7 days
    (``ex=604800``).  Aggregates peering counts, capacity, per-country maps
    and per-IXLAN member-type stats, renders pygal SVG charts to ``static/``,
    and returns the rendered ``report.html`` template.

    :param asn: AS number, as a string (it is concatenated into file names
        and cache keys without conversion) -- TODO confirm callers pass str.
    :returns: rendered HTML (``report.html``), or ``error.html`` when the
        ASN has no PeeringDB entry.
    """
    pdb = PeeringDB()
    rdb = redis.Redis(host=config['redis_host'],
                      port=config['redis_port'],
                      db=config['redis_db'])
    # Aggregate counters filled while walking the netixlan_set below.
    total_peering = 0
    total_peering_v4 = 0
    total_peering_v6 = 0
    total_capacity = 0
    peering_org = {}            # org_id -> IX long name (unique IXP operators)
    peering_table = {}          # IXP name -> {peering id -> details for the report table}
    peering_ixlan = {}          # ixlan_id -> member-type / capacity stats
    ixlan_table = {}            # IXP name -> ixlan_id
    peering_map = {}            # country code -> number of peerings (world map)
    peering_map_capacity = {}   # country code -> capacity in Gb (world map)
    date_format = '%Y-%m-%d'
    date_current = datetime.datetime.today()
    # Searching for ASN in the local db
    log(rdb, 'Querying Redis DB for ASN %s', (asn))
    asn_info = rdb.get('as_' + str(asn))
    if not asn_info:
        # ASN not found
        log(rdb, 'ASN %s not found in DB', (asn))
        try:
            # Querying PeeringDB for the ASN
            log(rdb, 'Querying PeeringDB for ASN %s', (asn))
            pdb_resp = pdb.all('net', asn=asn, depth=2)
            if not pdb_resp:
                return render_template(
                    'error.html',
                    error='No entry in PeeringDB for ASN ' + str(asn),
                    message='That AS operator has not published details yet :('
                )
            else:
                asn_info = pdb_resp[0]
        # NOTE(review): bare except hides the real failure cause (network,
        # JSON, API change) behind a generic "no entry" page.
        except:
            return render_template(
                'error.html',
                error='No entry in PeeringDB for ASN ' + str(asn),
                message='That AS operator has not published details yet :(')
        else:
            # Only cache on success; entry expires after 7 days.
            rdb.set('as_' + str(asn), json.dumps(asn_info), ex=604800)
    else:
        # ASN is cached
        log(rdb, 'ASN %s is cached', (asn))
        asn_info = json.loads(asn_info.decode())
    log(rdb, '%s - ASN %s', (asn_info['name'], asn_info['asn']))
    # Total number of peering points
    total_peering = len(asn_info['netixlan_set'])
    # Walking through all IXLANs the ASN is connected to
    for peering in asn_info['netixlan_set']:
        # Total aggregated capacity
        total_capacity += peering['speed']
        # Counting IPv4 and IPv6 peering's
        if peering['ipaddr4']:
            total_peering_v4 += 1
        if peering['ipaddr6']:
            total_peering_v6 += 1
        # Searching for IX in the local db
        log(rdb, 'Querying Redis DB for IX %s', (peering['ix_id']))
        ix_info = rdb.get('ix_' + str(peering['ix_id']))
        if not ix_info:
            # IX not found
            log(rdb, 'IX ID %s not found in DB', (peering['ix_id']))
            # Querying PeeringDB for IX
            log(rdb, 'Querying PeeringDB for IX ID %s', (peering['ix_id']))
            ix_info = pdb.all('ix', id=peering['ix_id'], depth=2)[0]
            rdb.set('ix_' + str(peering['ix_id']),
                    json.dumps(ix_info),
                    ex=604800)
        else:
            # IX is cached
            log(rdb, 'IX %s is cached', (peering['ix_id']))
            ix_info = json.loads(ix_info.decode())
        # Generating stats for IXLAN
        if peering['ixlan_id'] in peering_ixlan:
            # Stats already generated
            log(rdb, 'Stats ready for IXLAN %s', (peering['ixlan_id']))
        else:
            # Stats not found
            # Searching for IXLAN in the local db
            log(rdb, 'Querying Redis DB for IXLAN %s', (peering['ixlan_id']))
            ixlan_info = rdb.get('ixlan_' + str(peering['ixlan_id']))
            if not ixlan_info:
                # IXLAN not found
                log(rdb, 'IXLAN ID %s not found in DB', (peering['ixlan_id']))
                # Querying PeeringDB for IXLAN
                log(rdb, 'Querying PeeringDB for IXLAN ID %s',
                    (peering['ixlan_id']))
                ixlan_info = pdb.all('ixlan', id=peering['ixlan_id'],
                                     depth=2)[0]
                rdb.set('ixlan_' + str(peering['ixlan_id']),
                        json.dumps(ixlan_info),
                        ex=604800)
            else:
                # IXLAN is cached
                log(rdb, 'IXLAN %s is cached', (peering['ixlan_id']))
                ixlan_info = json.loads(ixlan_info.decode())
            log(rdb, 'Generating stats for IXLAN %s', (peering['ixlan_id']))
            # Counting IXLAN members' types and connected capacity
            peering_ixlan[peering['ixlan_id']] = {}
            peering_ixlan[peering['ixlan_id']]['name'] = peering['name']
            peering_ixlan[peering['ixlan_id']]['peer_transit_access'] = 0
            peering_ixlan[peering['ixlan_id']]['peer_content'] = 0
            peering_ixlan[peering['ixlan_id']]['peer_enterprise'] = 0
            peering_ixlan[peering['ixlan_id']]['peer_other'] = 0
            peering_ixlan[peering['ixlan_id']]['capacity_transit_access'] = 0
            peering_ixlan[peering['ixlan_id']]['capacity_content'] = 0
            peering_ixlan[peering['ixlan_id']]['capacity_enterprise'] = 0
            peering_ixlan[peering['ixlan_id']]['capacity_other'] = 0
            # Walking through all members connected to IXP network (we need those details to see speed of each peering)
            isp_list = {}
            for isp in ixlan_info['net_set']:
                # Counting IXLAN members only once. PeeringDB returns members ID many time depends on number of peering links.
                if isp['asn'] not in isp_list:
                    # NSP and Cable/DSL/ISP are both counted as transit/access.
                    if isp['info_type'] == 'NSP':
                        peering_ixlan[
                            peering['ixlan_id']]['peer_transit_access'] += 1
                    elif isp['info_type'] == 'Cable/DSL/ISP':
                        peering_ixlan[
                            peering['ixlan_id']]['peer_transit_access'] += 1
                    elif isp['info_type'] == 'Content':
                        peering_ixlan[peering['ixlan_id']]['peer_content'] += 1
                    elif isp['info_type'] == 'Enterprise':
                        peering_ixlan[
                            peering['ixlan_id']]['peer_enterprise'] += 1
                    else:
                        peering_ixlan[peering['ixlan_id']]['peer_other'] += 1
                    # Querying local cache for ASN
                    log(rdb, 'Querying Redis DB for ASN %s', (isp['asn']))
                    isp_info = rdb.get('as_' + str(isp['asn']))
                    if not isp_info:
                        # ASN not found
                        log(rdb, 'ASN %s not found in DB', (isp['asn']))
                        # Querying PeeringDB for ASN
                        log(rdb, 'Querying PeeringDB for ASN %s', (isp['asn']))
                        isp_info = pdb.all('net', asn=isp['asn'], depth=2)[0]
                        rdb.set('as_' + str(isp['asn']),
                                json.dumps(isp_info),
                                ex=604800)
                    else:
                        # ASN is cached
                        log(rdb, 'ASN %s is cached', (isp['asn']))
                        isp_info = json.loads(isp_info.decode())
                    # Accounting total capacity connected to IXLAN grouped by type of member
                    for isp_netixlan_info in isp_info['netixlan_set']:
                        if isp_netixlan_info['ixlan_id'] == peering[
                                'ixlan_id']:
                            if isp_info['info_type'] == 'NSP':
                                peering_ixlan[peering['ixlan_id']][
                                    'capacity_transit_access'] += isp_netixlan_info[
                                        'speed']
                            elif isp_info['info_type'] == 'Cable/DSL/ISP':
                                peering_ixlan[peering['ixlan_id']][
                                    'capacity_transit_access'] += isp_netixlan_info[
                                        'speed']
                            elif isp_info['info_type'] == 'Content':
                                peering_ixlan[peering['ixlan_id']][
                                    'capacity_content'] += isp_netixlan_info[
                                        'speed']
                            elif isp_info['info_type'] == 'Enterprise':
                                peering_ixlan[peering['ixlan_id']][
                                    'capacity_enterprise'] += isp_netixlan_info[
                                        'speed']
                            else:
                                peering_ixlan[peering['ixlan_id']][
                                    'capacity_other'] += isp_netixlan_info[
                                        'speed']
                    # Mark this member as already counted.
                    isp_list[isp['asn']] = ""
            log(rdb, 'Generating stats for IXLAN %s complete',
                (isp_netixlan_info['ixlan_id']))
        # Accounting number of peering's and total connected capacity in each country
        if ix_info['country'].lower() in peering_map:
            peering_map[ix_info['country'].lower()] += 1
            peering_map_capacity[
                ix_info['country'].lower()] += peering['speed'] / 1000
        else:
            peering_map[ix_info['country'].lower()] = 1
            peering_map_capacity[
                ix_info['country'].lower()] = peering['speed'] / 1000
        # Counting unique IXP organizations/operators
        if ix_info['org_id'] in peering_org:
            # IXP operator already counted
            log(rdb, 'IXP org ID %s already counted', (ix_info['org_id']))
        else:
            # A new IXP operator
            log(rdb, 'IXP org ID %s not yet counted', (ix_info['org_id']))
            peering_org[ix_info['org_id']] = ix_info['name_long']
        log(rdb, '%s - %s - %s - %s',
            (peering['name'].encode('utf8'), ix_info['name'].encode('utf8'),
             ix_info['org_id'], peering['speed']))
        # Creating new entry for each unique IXP name
        if peering['name'] not in peering_table:
            peering_table[peering['name']] = {}
        # Calculating difference between current date and PeeringDB timestamps in days
        date_created = datetime.datetime.strptime(peering['created'][0:10],
                                                  date_format)
        date_updated = datetime.datetime.strptime(peering['updated'][0:10],
                                                  date_format)
        date_created_diff = abs((date_current - date_created).days)
        date_updated_diff = abs((date_current - date_updated).days)
        # peering_table[peering['name']]['name'] = peering['name']
        peering_table[peering['name']][peering['id']] = {}
        peering_table[peering['name']][
            peering['id']]['status'] = peering['status']
        peering_table[peering['name']][
            peering['id']]['speed'] = peering['speed']
        if peering['is_rs_peer']:
            peering_table[peering['name']][peering['id']]['rs'] = 'Yes'
        else:
            peering_table[peering['name']][peering['id']]['rs'] = 'No'
        if peering['ipaddr4']:
            peering_table[peering['name']][
                peering['id']]['ip4'] = peering['ipaddr4']
        else:
            peering_table[peering['name']][peering['id']]['ip4'] = 'N/A'
        if peering['ipaddr6']:
            peering_table[peering['name']][
                peering['id']]['ip6'] = peering['ipaddr6']
        else:
            peering_table[peering['name']][peering['id']]['ip6'] = 'N/A'
        # Removing time from PeeringDB timestamps
        peering_table[peering['name']][
            peering['id']]['created'] = peering['created'][0:10]
        peering_table[peering['name']][
            peering['id']]['updated'] = peering['updated'][0:10]
        # Flag entries created/updated within the configured warning window.
        if date_created_diff <= config['days']:
            peering_table[peering['name']][
                peering['id']]['created_warn'] = True
        else:
            peering_table[peering['name']][
                peering['id']]['created_warn'] = False
        if date_updated_diff <= config['days']:
            peering_table[peering['name']][
                peering['id']]['updated_warn'] = True
        else:
            peering_table[peering['name']][
                peering['id']]['updated_warn'] = False
        ixlan_table[peering['name']] = peering['ixlan_id']
    # Sort peering's table based on IXP name
    log(rdb, 'Sorting peering table for ASN %s', (asn))
    # NOTE: this turns the dict into a list of (name, details) tuples.
    peering_table = sorted(peering_table.items(), key=operator.itemgetter(0))
    # Generating charts for IXLANs
    for ix in peering_ixlan:
        log(rdb, 'Generating network types chart for IXLAN %s', (ix))
        # Calculating total number of members and total connected by members capacity
        total_number_of_members = peering_ixlan[ix][
            'peer_transit_access'] + peering_ixlan[ix][
                'peer_content'] + peering_ixlan[ix][
                    'peer_enterprise'] + peering_ixlan[ix]['peer_other']
        total_capacity_of_members = peering_ixlan[ix][
            'capacity_transit_access'] + peering_ixlan[ix][
                'capacity_content'] + peering_ixlan[ix][
                    'capacity_enterprise'] + peering_ixlan[ix]['capacity_other']
        # Generating chart showing number of members grouped by network type
        # (charts are cached on disk -- only rendered if the SVG is missing).
        pie_chart_number_url = 'static/ixlan_' + str(ix) + '_number.svg'
        if not os.path.isfile(pie_chart_number_url):
            pie_chart_number = pygal.Bar(print_values=True)
            pie_chart_number.title = 'Network types at ' + peering_ixlan[ix][
                'name'] + ' [%]\n Total number of unique members: ' + str(
                    total_number_of_members)
            pie_chart_number.value_formatter = percent_formatter
            pie_chart_number.add(
                'NSP/ISP', 100 * peering_ixlan[ix]['peer_transit_access'] /
                total_number_of_members)
            pie_chart_number.add(
                'Content', 100 * peering_ixlan[ix]['peer_content'] /
                total_number_of_members)
            pie_chart_number.add(
                'Enterprise', 100 * peering_ixlan[ix]['peer_enterprise'] /
                total_number_of_members)
            pie_chart_number.add(
                'Other', 100 * peering_ixlan[ix]['peer_other'] /
                total_number_of_members)
            pie_chart_number.render_to_file(pie_chart_number_url)
        # Generating chart showing total capacity grouped by network type
        pie_chart_capacity_url = 'static/ixlan_' + str(ix) + '_capacity.svg'
        if not os.path.isfile(pie_chart_capacity_url):
            pie_chart_capacity = pygal.Bar(print_values=True)
            pie_chart_capacity.value_formatter = capacity_gb_formatter
            pie_chart_capacity.title = 'Networt types at ' + peering_ixlan[ix][
                'name'] + ' [Gb]\n Total capacity of members: ' + capacity_gb_formatter(
                    total_capacity_of_members / 1000)
            pie_chart_capacity.add(
                'NSP/ISP', peering_ixlan[ix]['capacity_transit_access'] / 1000)
            pie_chart_capacity.add(
                'Content', peering_ixlan[ix]['capacity_content'] / 1000)
            pie_chart_capacity.add(
                'Enterprise', peering_ixlan[ix]['capacity_enterprise'] / 1000)
            pie_chart_capacity.add('Other',
                                   peering_ixlan[ix]['capacity_other'] / 1000)
            pie_chart_capacity.render_to_file(pie_chart_capacity_url)
    # Calculating total number of unique peering operators and total capacity in Gb & Tb
    total_unique_org = len(peering_org)
    total_capacity_gb = total_capacity / 1000
    total_capacity_tb = round(float(total_capacity) / (1000 * 1000), 2)
    log(rdb, 'AS %s total number of peering points: %s', (asn, total_peering))
    log(rdb, 'AS %s total number of unique organization peering: %s',
        (asn, total_unique_org))
    # Generating world map with number and location of peering's
    log(rdb, 'Generating world map for AS %s with number of peering locations',
        (asn))
    map_number_url = 'static/as' + asn + '_map_number.svg'
    if not os.path.isfile(map_number_url):
        map_number = pygal.maps.world.World(print_values=True)
        map_number.title = 'World map with number of peering locations'
        map_number.add('Peerings', peering_map)
        map_number.render_to_file(map_number_url)
    # Generating world map with total capacity of peering's
    log(rdb, 'Generating world map for AS %s with total capacity', (asn))
    map_capacity_url = 'static/as' + asn + '_map_capacity.svg'
    if not os.path.isfile(map_capacity_url):
        map_capacity = pygal.maps.world.World(print_values=True)
        map_capacity.title = 'World map with total capacity'
        map_capacity.value_formatter = capacity_gb_formatter
        map_capacity.add('Capacity', peering_map_capacity)
        map_capacity.render_to_file(map_capacity_url)
    # Generating gauge graph with percentage of IPv4 and IPv6 peering's
    log(
        rdb,
        'Generating gauge graph with percentage of IPv4 and IPv6 peering for AS %s',
        (asn))
    gauge_v46_url = 'static/as' + asn + '_v46.svg'
    if not os.path.isfile(gauge_v46_url):
        gauge_v46 = pygal.SolidGauge(inner_radius=0.70,
                                     half_pie=True,
                                     print_values=True)
        gauge_v46.value_formatter = percent_formatter
        # Checking total_peering > 0: some ASNs do not publish any details
        # about public peerings, and the percentages would divide by zero.
        if total_peering > 0:
            gauge_v46.add('IPv4', [{
                'value': 100 * total_peering_v4 / total_peering,
                'max_value': 100
            }])
            gauge_v46.add('IPv6', [{
                'value': 100 * total_peering_v6 / total_peering,
                'max_value': 100
            }])
        else:
            gauge_v46.add('IPv4', [{'value': 0, 'max_value': 100}])
            gauge_v46.add('IPv6', [{'value': 0, 'max_value': 100}])
        gauge_v46.render_to_file(gauge_v46_url)
    # Rendering HTML template with the provided data
    log(rdb, 'Rendering HTML template with report for AS %s', (asn))
    pdb_report = render_template(
        'report.html',
        asn=asn,
        asn_name=asn_info['name'],
        total_peering=total_peering,
        total_unique_org=total_unique_org,
        total_capacity_gb=capacity_gb_formatter(total_capacity_gb),
        total_capacity_tb=capacity_tb_formatter(total_capacity_tb),
        peering=peering_table,
        ixlan=ixlan_table,
        days=config['days'],
        peering_v46="/" + gauge_v46_url,
        map_number="/" + map_number_url,
        map_capacity="/" + map_capacity_url)
    log(rdb, 'Report for AS %s has been generated', (asn))
    return pdb_report
def pandas_to_pygal_Gauge(data,
                          groupby1,
                          aggregate,
                          target_value=100,
                          colourstyle=colour_dict["DarkGreenStyle"],
                          decimal_places=0,
                          value_suffix="",
                          inner_rad=.5,
                          half_pie=False,
                          title="Pie Chart Title",
                          absolute_values=True,
                          fill=False,
                          print_values=False,
                          x_label_rotation=False,
                          legend_at_bottom=False,
                          legend_at_bottom_columns=3,
                          agg_type="sum"):
    """Build a pygal SolidGauge from a pandas DataFrame.

    Groups ``data`` by the ``groupby1`` column, aggregates the ``aggregate``
    column per group with ``return_aggregate_series`` (using ``agg_type``),
    and adds one gauge per group, sorted by value descending.

    :param data: pandas DataFrame holding the raw values.
    :param groupby1: column name to group by (one gauge per distinct value).
    :param aggregate: column name whose values are aggregated per group.
    :param target_value: ``max_value`` of each gauge (default 100).
    :param absolute_values: if True, plot raw aggregates; otherwise plot each
        group's share of the grand total as a percentage.
    :param agg_type: aggregation name passed to ``return_aggregate_series``
        (e.g. "sum") -- semantics defined by that helper.
    :returns: configured ``pygal.SolidGauge`` object (not yet rendered).
    """
    pyg = pygal.SolidGauge()
    # Aggregate one value per distinct group.
    # (Renamed from `dict`, which shadowed the builtin; the original also
    # pre-initialized every key to [] only to overwrite it below.)
    groupby1_distinct = sorted(data[groupby1].unique())
    group_values = {}
    total = return_aggregate_series(data[aggregate], agg_type)
    for name in groupby1_distinct:
        subset_data = data[(data[groupby1] == name)]
        aggregate_value = return_aggregate_series(subset_data[aggregate],
                                                  agg_type)
        if absolute_values:
            group_values[name] = aggregate_value
        else:
            # Relative mode: each group's share of the grand total, in %.
            group_values[name] = aggregate_value / total * 100
    # Add data to pygal object, largest value first.
    zipped = sorted(zip(group_values.values(), group_values.keys()),
                    reverse=True)
    for value_tuple in zipped:
        pyg.add(value_tuple[1], [{
            "value": float(value_tuple[0]),
            "max_value": target_value
        }])
    # Chart configuration options.
    pyg.config.legend_at_bottom = legend_at_bottom
    pyg.config.legend_at_bottom_columns = legend_at_bottom_columns
    pyg.config.half_pie = half_pie
    pyg.config.inner_radius = inner_rad
    pyg.config.style = colourstyle
    pyg.config.x_labels = groupby1_distinct
    pyg.config.fill = fill
    pyg.config.title = title
    pyg.config.x_label_rotation = x_label_rotation
    pyg.config.print_values = print_values
    # Number format template, e.g. "{:,.0f} kg" for decimal_places=0,
    # value_suffix="kg".
    formatter = "{:,." + str(decimal_places) + "f} " + str(value_suffix)
    pyg.config.formatter = lambda x: formatter.format(x)
    # Return chart object; caller is responsible for rendering.
    return pyg
def stats():
    """Render the recipe statistics page for a logged-in user.

    Counts recipes in Mongo by publication state and type, renders two pygal
    charts to ``static/img/`` and returns the ``stats.html`` template.
    Anonymous visitors are redirected to the index page.
    """
    user = '******'
    if 'username' in session:
        user = '******' + session['username']
    else:
        return redirect(url_for('index'))  # Only for registered users
    # Collect data from mongo to variables
    dbrecipes = mongo.db.recipes
    published = dbrecipes.count_documents({"$and": [{"published": "publish"}]})
    draft = dbrecipes.count_documents({"$and": [{"published": "draft"}]})
    all_recipes = dbrecipes.count_documents({})
    main_course = dbrecipes.count_documents(
        {"$and": [{
            "published": "publish",
            "recipe-type": "Main course"
        }]})
    starter = dbrecipes.count_documents(
        {"$and": [{
            "published": "publish",
            "recipe-type": "Starter"
        }]})
    dessert = dbrecipes.count_documents(
        {"$and": [{
            "published": "publish",
            "recipe-type": "Desserts"
        }]})
    juices = dbrecipes.count_documents(
        {"$and": [{
            "published": "publish",
            "recipe-type": "Juices"
        }]})
    # Draw draft vs publish chart
    line_chart = pygal.HorizontalBar()
    line_chart.title = 'Published recipes vs Draft recipes'
    line_chart.add('Published', published)
    line_chart.add('Draft', draft)
    line_chart.render_to_file('static/img/draft-publish.svg')
    # Draw recipe types % using half bars
    gauge = pygal.SolidGauge(
        half_pie=True,
        inner_radius=0.70,
        style=pygal.style.styles['default'](value_font_size=10))
    percent_formatter = lambda x: '{:.10g}%'.format(x)
    dollar_formatter = lambda x: '{:.10g}$'.format(x)  # unused
    gauge.value_formatter = percent_formatter
    gauge.title = 'Percentage of published recipes by type on site'
    # NOTE(review): divisions below raise ZeroDivisionError when the
    # collection is empty (all_recipes == 0) -- confirm that cannot happen.
    gauge.add('Published', [{
        'value': round((published / all_recipes) * 100, 2),
        'max_value': 100
    }])
    gauge.add('Draft', [{
        'value': round((draft / all_recipes) * 100, 2),
        'max_value': 100
    }])
    gauge.add('Main Course', [{
        'value': round((main_course / all_recipes) * 100, 2),
        'max_value': 100
    }])
    gauge.add('Starters', [{
        'value': round((starter / all_recipes) * 100, 2),
        'max_value': 100
    }])
    gauge.add('Desserts', [{
        'value': round((dessert / all_recipes) * 100, 2),
        'max_value': 100
    }])
    gauge.add('Juices', [{
        'value': round((juices / all_recipes) * 100, 2),
        'max_value': 100
    }])
    gauge.render_to_file('static/img/half.svg')
    return render_template("stats.html", user=user)
def hello(name=None):
    """Render the main dashboard page after regenerating all 13 charts.

    Reads a movie/olympics SQLite database sitting next to this module,
    runs one query per chart, renders each pygal chart to ``static/`` and
    finally returns the ``hello.html`` template.

    :param name: optional display name forwarded to the template.
    """
    conn = sqlite3.connect(
        os.path.join(os.path.dirname(__file__), "Project_frauenloop2017"))
    #conn = sqlite3.connect('/Users/ograndberry/Documents/FrauenLoop/SQL/databases/Project_frauenloop2017')
    c = conn.cursor()
    #first graph
    # Top 10 most profitable movies: grouped bars for revenue/budget/profit.
    movie_title = []
    revenue = []
    budget = []
    profit = []
    for row in c.execute(
            "Select movie_title, gross, budget,(gross-budget) as profit from movies where gross!='' and budget!='' order by profit desc limit 10;"
    ):
        movie_title.append(row[0])
        revenue.append(float(row[1]))
        budget.append(float(row[2]))
        profit.append(float(row[3]))
    box = pygal.Bar(x_label_rotation=30)
    box.title = ' What are the most profitable movies?'
    box.x_labels = movie_title
    box.add('Revenue', revenue)
    box.add('Budget', budget)
    box.add('Profit', profit)
    box.value_formatter = lambda y: "${:,}".format(y)
    box.render_to_file('static/profitability.svg')
    #second graph
    # IMDb score of colour vs black-and-white movies over the last 20 years.
    year, score_c, score_b = [], [], []
    for row in c.execute(
            "Select colorvsScoreBlack.year, colorvsScoreBlack.color, colorvsScoreBlack.Score_b, colorvsScoreColor.Color, colorvsScoreColor.Score_c from colorvsScoreBlack left join colorvsScoreColor on colorvsScoreBlack.Year=colorvsScoreColor.Year where colorvsScoreColor.Color is not null order by colorvsscoreblack.year desc limit 20;"
    ):
        year.append(float(row[0]))
        score_c.append(float(row[4]))
        score_b.append(float(row[2]))
    lines = pygal.Line(x_label_rotation=70)
    lines.title = "Does the color of a movie impact its Imdb score?"
    lines.x_labels = year
    lines.add('Color', score_c)
    lines.add('Black and White', score_b)
    lines.render_to_file('static/colorandscoreimpact.svg', format="svg")
    #third graph
    # Scatter of movie duration vs Facebook likes.
    x_values = []
    y_values = []
    for row in c.execute(
            'Select duration, movie_facebook_likes from movies where duration !="";'
    ):
        print(row)
        x_values.append(float(row[0]))
        y_values.append(float(row[1]))
    both = list(zip(x_values, y_values))
    xy_chart = pygal.XY(stroke=False,
                        x_title='Duration in minutes',
                        y_title='Facebook Likes')
    xy_chart.title = 'What the impact of the Imdb score on the movie facebook likes?'
    xy_chart.add('Movies', both)
    xy_chart.render_to_file('static/facebookanddur.svg', format="svg")
    #fourth graph
    # Top 10 actors by summed gross revenue.
    actor_name = []
    revenue = []
    for row in c.execute(
            "Select distinct actor_name, sum(gross) from actors left join movies on actors.movie_id=movies.id group by actor_name order by sum(gross) desc limit 10 ;"
    ):
        actor_name.append(row[0])
        revenue.append(float(row[1]))
    deux = list(zip(actor_name, revenue))
    line = pygal.HorizontalBar(x_label_rotation=30)
    line.title = 'What are the actors playing in movies with the higest revenue?'
    for r in deux:
        line.add(r[0], r[1])
    line.value_formatter = lambda y: "${:,}".format(y)
    line.render_to_file('static/actorsandrevenuefrommovies.svg')
    #5 graph
    # Genre share of total revenue, as a pie of percentages.
    ge = []
    revenue = []
    for row in c.execute(
            "Select distinct movies_2.genres, sum(movies.gross)as mm from movies_2 left join movies on movies.id = movies_2.id group by movies_2.genres order by mm desc limit 10;"
    ):
        ge.append(row[0])
        revenue.append(float(row[1]))
    both = list(zip(ge, revenue))
    total_revenue = sum(revenue)
    pie_chart = pygal.Pie()
    pie_chart.title = 'What are the genres generating the highest revenues?'
    for r in both:
        pie_chart.add(r[0], [{
            'value': round(((r[1] / total_revenue) * 100)),
            'label': str(r[1])
        }])
    pie_chart.value_formatter = lambda x: '%s%%' % x
    pie_chart.render_to_file('static/genresandrevenuefrommovie.svg')
    #6
    # Top 10 genres by average IMDb score.
    gen = []
    im = []
    for row in c.execute(
            "Select distinct movies_2.genres, avg(movies.imdb_score) from movies_2 left join movies on movies.id = movies_2.id group by movies_2.genres order by avg(movies.imdb_score) desc limit 10;"
    ):
        gen.append(row[0])
        im.append(float(row[1]))
    bol = list(zip(gen, im))
    pibb = pygal.HorizontalBar()
    pibb.title = 'What are the genres generating the highest imdb score?'
    for r in bol:
        pibb.add(r[0], [{'value': r[1], 'label': r[0]}])
    pibb.render_to_file('static/genresandscorefrommovie.svg')
    #7
    # Olympic medals per year, split by gender.
    year = []
    medals_men = []
    medals_women = []
    for row in c.execute(
            "Select t1.yearmen, t1.Medals_men, t2.Medals_female from (Select distinct games.year as yearmen, count(medal) as Medals_men from unique_medals left join categories on categories.id= unique_medals.category_id left join games on games.id = categories.games_id left join countries on countries.id = games.countries_id left join athletes on unique_medals.athlete_id= athletes.id where athletes.gender='Men' group by games.year) t1 left join (Select distinct games.year as yearwomen, count(medal) as Medals_female from unique_medals left join categories on categories.id= unique_medals.category_id left join games on games.id = categories.games_id left join countries on countries.id = games.countries_id left join athletes on unique_medals.athlete_id= athletes.id where athletes.gender='Women' group by games.year) t2 on t1.yearmen = t2.yearwomen order by yearmen;"
    ):
        year.append(row[0])
        medals_men.append(row[1])
        medals_women.append(row[2])
    line_chart = pygal.Bar(x_label_rotation=30)
    line_chart.title = "How did the victory by gender evolve with time? "
    line_chart.x_labels = year
    line_chart.add('Men', medals_men)
    line_chart.add('Women', medals_women)
    line_chart.render_to_file('static/yearandgenrechange.svg')
    #8 graph
    # Gender split per season type (summer/winter) as solid gauges.
    medals_summer_men = []
    medals_winter_men = []
    medals_summer_women = []
    medals_winter_women = []
    for row in c.execute(
            "Select distinct count(medal), unique_medals.gender, type from unique_medals left join athletes on athletes.id= unique_medals.athlete_id left join Categories on categories.ID = unique_medals.category_id left join games on games.ID = categories.games_id where country_code!= '' group by unique_medals.Gender, type order by count(medal)desc;"
    ):
        if row[1] == "Men" and row[2] == 'summer':
            medals_summer_men.append(float(row[0]))
        elif row[1] == "Men" and row[2] == 'winter':
            medals_winter_men.append(float(row[0]))
        elif row[1] == "Women" and row[2] == 'summer':
            medals_summer_women.append(float(row[0]))
        else:
            medals_winter_women.append(float(row[0]))
    total_summer = sum(medals_summer_women + medals_summer_men)
    total_winter = sum(medals_winter_women + medals_winter_men)
    gauge = pygal.SolidGauge(inner_radius=0.70)
    percent_formatter = lambda x: '{:.10g}%'.format(x)
    dollar_formatter = lambda x: '{:.10g}$'.format(x)  # unused
    gauge.value_formatter = percent_formatter
    # NOTE(review): [0] assumes each gender/season bucket got at least one
    # row -- IndexError otherwise; confirm the data guarantees this.
    men_win_summer = round(((medals_summer_men[0] / total_summer) * 100))
    women_win_summer = round(((medals_summer_women[0] / total_summer) * 100))
    men_win_winter = round(((medals_winter_men[0] / total_winter) * 100))
    women_win_winter = round(((medals_winter_women[0] / total_winter) * 100))
    gauge.add('Women summer', [{'value': women_win_summer, 'max_value': 100}])
    gauge.add('Men summer', [{'value': men_win_summer, 'max_value': 100}])
    gauge.title = "What is the Gender split by type of game?"
    gauge.add('Women winter', [{'value': women_win_winter, 'max_value': 100}])
    gauge.add('Men winter', [{'value': men_win_winter, 'max_value': 100}])
    gauge.render_to_file("static/worldglobalgendersplit.svg")
    #9
    # Best athlete per sport (top 10 by medal count).
    medals = []
    sport = []
    athlete = []
    country = []
    for row in c.execute(
            "Select athlete_id, category_id, medals,sport,cc, athlete from (Select athlete_id, medals,sport,athlete,cc, category_id, max(medals) as maximum from (Select athlete_id,count(medal) as medals, category_id, Sport, athletes.athlete,sumandwin.Country_code as cc from sumandwin left join athletes on sumandwin.athlete_id=athletes.id group by athletes.id, sport) group by sport) where medals = maximum order by medals desc limit 10 ;"
    ):
        medals.append(float(row[2]))
        sport.append(row[3])
        athlete.append(row[5])
        country.append(row[4])
    bol = list(zip(sport, medals, athlete, country))
    record = pygal.HorizontalBar()
    record.title = 'What are the best athletes by Sport?'
    for r in bol:
        record.add(r[0], [{'value': r[1], 'label': r[2]}])
    record.render_to_file('static/bestplayyersbysport.svg')
    #10
    # World map: for each country, the sport it collected most medals in.
    #Create empty lists
    sport = []
    country_name = []
    country_code = []
    medals = []
    Taekwondo = []
    Sailing = []
    Athletics = []
    Tennis = []
    Wrestling = []
    Skiing = []
    Skating = []
    Boxing = []
    Volleyball = []
    Weightlifting = []
    Aquatics = []
    Curling = []
    Football = []
    Rowing = []
    Cycling = []
    Equestrian = []
    Handball = []
    Shooting = []
    Luge = []
    Hockey = []
    Canoe = []
    Table_Tennis = []
    Biathlon = []
    Kayak = []
    Fencing = []
    #Append each sport list with the adequate countries from the database
    for row in c.execute(
            "select medals,sport,country,alpha_2 from (Select medals,sport,alpha_2,country, max(medals) as maximum from (Select count(medal) as medals,unique_medals.Sport, athletes.alpha_2, countries.country from unique_medals left join athletes on unique_medals.Athlete_id=athletes.id left join countries on countries.Code = athletes.Country_code group by alpha_2, sport) group by alpha_2) where medals = maximum and alpha_2 !='' and country!='' ;"
    ):
        pays = row[3].lower()
        medailles = float(row[0])
        listo = (pays, medailles)
        if row[1] == 'Taekwondo':
            Taekwondo.append(listo)
        elif row[1] == 'Athletics':
            Athletics.append(listo)
        elif row[1] == 'Tennis':
            Tennis.append(listo)
        elif row[1] == 'Wrestling':
            Wrestling.append(listo)
        elif row[1] == 'Skiing':
            Skiing.append(listo)
        elif row[1] == 'Skating':
            Skating.append(listo)
        elif row[1] == 'Volleyball':
            Volleyball.append(listo)
        elif row[1] == 'Weightlifting':
            Weightlifting.append(listo)
        elif row[1] == 'Aquatics':
            Aquatics.append(listo)
        elif row[1] == 'Curling':
            Curling.append(listo)
        elif row[1] == 'Football':
            Football.append(listo)
        elif row[1] == 'Rowing':
            Rowing.append(listo)
        elif row[1] == 'Cycling':
            Cycling.append(listo)
        elif row[1] == 'Equestrian':
            Equestrian.append(listo)
        elif row[1] == 'Handball':
            Handball.append(listo)
        elif row[1] == 'Shooting':
            Shooting.append(listo)
        elif row[1] == 'Luge':
            Luge.append(listo)
        elif row[1] == 'Hockey':
            Hockey.append(listo)
        elif row[1] == 'Canoe':
            Canoe.append(listo)
        elif row[1] == 'Table Tennis':
            Table_Tennis.append(listo)
        elif row[1] == 'Biathlon':
            Biathlon.append(listo)
        elif row[1] == 'Kayak':
            Kayak.append(listo)
        elif row[1] == 'Fencing':
            Fencing.append(listo)
        elif row[1] == 'Sailing':
            Sailing.append(listo)
        elif row[1] == 'Wrestling':
            # NOTE(review): duplicate of the 'Wrestling' branch above -- dead.
            Wrestling.append(listo)
        else:
            sport.append(row[1])
            medals.append(float(row[0]))
            country_code.append(row[3])
    #Create a dictionnary with the countries as values and sports as keys.
    keys = [
        'Taekwondo', 'Sailing', 'Athletics', 'Tennis', 'Wrestling', 'Skiing',
        'Skating', 'Boxing', 'Volleyball', 'Weightlifting', 'Aquatics',
        'Curling', 'Football', 'Rowing', 'Cycling', 'Equestrian', 'Handball',
        'Shooting', 'Luge', 'Hockey', 'Canoe', 'Table Tennis', 'Biathlon',
        'Kayak', 'Fencing'
    ]
    values = [
        Taekwondo, Sailing, Athletics, Tennis, Wrestling, Skiing, Skating,
        Boxing, Volleyball, Weightlifting, Aquatics, Curling, Football,
        Rowing, Cycling, Equestrian, Handball, Shooting, Luge, Hockey, Canoe,
        Table_Tennis, Biathlon, Kayak, Fencing
    ]
    dictionnary = dict(zip(keys, values))
    #create the map
    custom_style = Style(
        colors=("#001f3f", '#FFDBE5', "#7A4900", "#0000A6", "#63FFAC",
                "#B79762", "#004D43", "#8FB0FF", "#997D87", "#5A0007",
                "#809693", "#111111", "#1B4400", "#4FC601", "#3B5DFF",
                "#4A3B53", "#FF2F80", "#61615A", "#BA0900", "#6B7900",
                "#F012BE", "#FFAA92", "#FF90C9", "#B903AA", "#D16100"))
    worldmap_chart = pygal.maps.world.World(style=custom_style)
    worldmap_chart.title = 'In which sport each country is better at?'
    for key, values in dictionnary.items():
        worldmap_chart.add(key, values)
    worldmap_chart.render_to_file('static/best_country_game.svg')
    #11
    # Average medals at home games vs away games per country.
    Country = []
    avg_abroad = []
    avg_home = []
    for row in c.execute(
            "Select t1.country_code, (t2.Medals_abroad/t3.games_abroad) as avg_medals_abroad,(t1.medals_all-t2.medals_abroad)/t4.games_home as avg_medals_home from(Select distinct country_code, count(medal) as Medals_all from unique_medals left join athletes on athletes.id= unique_medals.athlete_id left join categories on categories.id= unique_medals.category_id left join games on games.ID=categories.games_id left join countries on countries.id = games.countries_id group by country_Code) t1 left join (Select distinct country_code, count(medal) as Medals_abroad from unique_medals left join athletes on athletes.id= unique_medals.athlete_id left join categories on categories.id= unique_medals.category_id left join games on games.ID=categories.games_id left join countries on countries.id = games.countries_id where athletes.Country_code != countries.Code group by country_Code) t2 on t1.country_code = t2.country_code left join(Select distinct Country_code, count(distinct games_ID )as Games_abroad from sumandwin left join countries on countries.country = sumandwin.country where code!=country_code group by country_code ) t3 on t2.country_code = t3.country_code left join(Select distinct Country_code, count(distinct games_ID)as Games_home from sumandwin left join countries on countries.country = sumandwin.country where code=country_code group by country_code) t4 on t3.country_code = t4.country_code where avg_medals_home is not null order by avg_medals_home desc limit 17;"
    ):
        Country.append(row[0])
        avg_abroad.append(row[1])
        avg_home.append(row[2])
    line_chart = pygal.StackedBar(x_title='Countries',
                                  y_title='Average Medals')
    line_chart.title = "Is the fact to host the games impacts a country general performance?"
    line_chart.x_labels = Country
    line_chart.add('Away Games', avg_abroad)
    line_chart.add('Home Games', avg_home)
    line_chart.render_to_file('static/hostingornot.svg')
    #12
    # Radar chart: medals won by men vs women per country (top 30).
    country = []
    men = []
    women = []
    for row in c.execute(
            "select t1.country_code, t1.Medals_men, t2.Medals_women from (Select distinct country_code, count(medal) as Medals_men from unique_medals left join athletes on athletes.id= unique_medals.athlete_id where athletes.gender = 'Men' group by country_Code order by medals_men desc) t1 left join (Select distinct country_code, count(medal) as Medals_women from unique_medals left join athletes on athletes.id= unique_medals.athlete_id where athletes.gender = 'Women' group by country_Code order by medals_women desc) t2 on t1.country_code = t2.country_code limit 30;"
    ):
        country.append(row[0])
        men.append(row[1])
        women.append(row[2])
    radar_chart = pygal.Radar()
    radar_chart.title = 'What is the Men and Women victory split during the games?'
    radar_chart.x_labels = country
    radar_chart.add('Medals Men', men)
    radar_chart.add('Medals Women', women)
    radar_chart.render_to_file('static/sexeandvictory.svg')
    #13
    # Box plot comparing medals and movie output in the 90s vs the 00s.
    Country = []
    Medals_90 = []
    Movies_90 = []
    Medals_00 = []
    Movies_00 = []
    for row in c.execute(
            "select t1.country_code, t1.Medals, t2.Mo, t3.country_code, t3.medals,t4.movies from (Select distinct country_Code , count(medal) as Medals from unique_medals left join categories on categories.id= unique_medals.category_id left join games on games.id = categories.games_id left join countries on countries.id = games.countries_id left join athletes on unique_medals.athlete_id= athletes.id where games.year between 1990 and 1999 group by country_code) t1 left join (Select distinct code, count(movies.id) as Mo from movies left join countries on countries.country=movies.country where title_year between 1990 and 1999 group by code) t2 on t1.country_code = t2.code left join (Select distinct country_Code , count(medal) as Medals from unique_medals left join categories on categories.id= unique_medals.category_id left join games on games.id = categories.games_id left join countries on countries.id = games.countries_id left join athletes on unique_medals.athlete_id= athletes.id where games.year between 2000 and 2009 group by athletes.country_code) t3 on t2.code = t3.country_code left join (Select distinct code, count(movies.id) as Movies from movies left join countries on countries.country=movies.country where title_year between 2000 and 2009 group by code) t4 on t3.country_code = t4.code where mo is not null and movies is not null;"
    ):
        Country.append(row[0])
        Medals_90.append(row[1])
        Movies_90.append(row[2])
        Medals_00.append(row[4])
        Movies_00.append(row[5])
    dot_chart = pygal.Box()
    dot_chart.title = 'How the 90s and 00s are different in terms of medals and movies?'
    dot_chart.add('Medals in 90s', Medals_90)
    dot_chart.add('Movies in 90s', Movies_90)
    dot_chart.add('Medals in 00s', Medals_00)
    dot_chart.add('Movies in 00s', Movies_00)
    dot_chart.render_to_file('static/00contre90.svg')
    return render_template('hello.html', name=name)
def handle_text(message):
    """Render the default diagram set for the user who sent *message*."""
    # NOTE(review): message is presumably a Telegram bot message object —
    # from_user.id is the sender's numeric identifier.
    user_id = message.from_user.id  # renamed from 'id' (shadowed the builtin)
    write_diagram(pygal.SolidGauge(), user_id, 'Your diagrams')
def main():
    """Read Google Play Store data and render four summary charts as SVG.

    Loads ``googleplaystore.csv``, normalizes the Size and Installs columns
    into plain numbers, aggregates per-category/per-app statistics through
    the module-level helpers (category_review, category_rating,
    name_install, category_size) and writes four pygal chart files.
    """
    # --- Pull the raw columns from the CSV file ---
    data = pd.read_csv("googleplaystore.csv", encoding="ISO-8859-1")
    name_data = data["App"]            # application names
    rating_data = data["Rating"]       # per-app rating
    category_data = data["Category"]   # per-app category
    review_data = data["Reviews"]      # per-app review count
    size_data = data["Size"]           # per-app size (mixed "M"/"k" strings)
    install_data = data["Installs"]    # per-app install count ("10,000+" style)

    # --- Normalize the size column to numeric megabytes ---
    # Strip the "M" suffix and separators; "Varies with device" becomes 0.
    size_data = [i.replace("M", "").replace("Varies with device", "0")
                  .replace("+", "").replace(',', '') for i in size_data]
    for i in range(len(size_data)):
        if 'k' in size_data[i]:
            # Kilobyte entries: convert kB -> MB.
            # BUG FIX: keep the value as a float instead of truncating with
            # int() before dividing, which discarded fractional kilobytes.
            size_data[i] = float(size_data[i].replace('k', "")) / 1024
    size_data = [float(i) for i in size_data]

    # --- Normalize the installs column to plain integers ---
    install_data = [int(i.replace("+", "").replace(",", "")) for i in install_data]

    # --- Aggregate with the module-level helper functions ---
    category_review_data = category_review(category_data, review_data)
    category_rating_data = category_rating(category_data, rating_data)
    name_install_data = name_install(name_data, install_data)
    category_size_data = category_size(category_data, size_data)

    # --- Average reviews per category (bar chart) ---
    category_review_chart = pg.Bar(style=CleanStyle)
    category_review_chart.title = "Average Reviews of each Categories"
    for i in category_review_data:
        category_review_chart.add(i, category_review_data[i])
    category_review_chart.render_to_file("category_review_chart.svg")

    # --- Average ratings per category (horizontal bar chart) ---
    category_rating_chart = pg.HorizontalBar(style=LightColorizedStyle)
    category_rating_chart.title = "Average Ratings of each Categories"
    for i in category_rating_data:
        category_rating_chart.add(i, category_rating_data[i])
    category_rating_chart.render_to_file("category_rating_chart.svg")

    # --- Average sizes per category in MB (bar chart) ---
    category_size_chart = pg.Bar(style=BlueStyle)
    category_size_chart.title = "Average Sizes of Application of each Categories(MBs)"
    for i in category_size_data.keys():
        category_size_chart.add(i, category_size_data[i])
    category_size_chart.render_to_file("category_size_chart.svg")

    # --- Most installed applications (solid gauge) ---
    name_install_chart = pg.SolidGauge(inner_radius=0.7, style=RedBlueStyle)
    name_install_chart.title = "The Most Installs Applications on Google Playstore"
    for i in name_install_data:
        name_install_chart.add(i, [{"value": name_install_data[i], 'max_value': 1000}])
    # BUG FIX: the original formatter was '100%'.format(x) — a format string
    # with no placeholder, so every slice was labelled "100%" regardless of
    # its value.  Format the actual value instead.
    percent_formatter = lambda x: '{:.10g}%'.format(x)
    name_install_chart.legend_at_bottom = True
    name_install_chart.value_formatter = percent_formatter
    name_install_chart.render_to_file("name_install_chart.svg")
# Aggregate accidents.csv via the project's process() helper: percentage of
# persons killed, bucketed by the SPEED_ZONE column.
result = process(data, columns_list=['SPEED_ZONE'], measure_column_name='NO_PERSONS_KILLED')

# Dump the aggregated table for inspection.
for column_name in result.keys():
    print(column_name)
    print(result[column_name])

speed = result['SPEED_ZONE']

# Full-circle solid gauge of fatalities by speed zone.
gauge = pygal.SolidGauge(half_pie=False, inner_radius=0.7,
                         style=style.LightColorizedStyle)
gauge.title = "VICROADS CRASH STATS FATALITIES BY SPEED ZONE"

# Only plot plausible speed zones (0-110 km/h); the aggregated values are
# fractions, scaled to percentages for display.
for zone in speed.keys():
    if 0 <= zone <= 110:
        gauge.add(
            str(zone) + " km/h zone.",
            [{'value': speed[zone] * 100, 'label': 'percentage of total fatalities.'}],
            formatter=lambda x: '{0:.2f}%'.format(x),
        )

gauge.render_in_browser()
gauge.render_to_file('gauge.svg')
def bar():
    """Build every dashboard chart (bar, multi-bar, line, world map, pie,
    solid gauge), write them under static/images/, and render 'app.html'.

    NOTE: the final ``render_template('app.html', **locals())`` hands every
    local variable to the template, so the local names here (img_url,
    multi_img_url, line_img_url, worldmap_img_url, piechart_img_url,
    gaugechart_img_url, ...) are part of the contract with the template and
    must not be renamed.
    """
    # --- annual-marks bar chart from pygaldatabar.json ---
    with open('pygaldatabar.json', 'r') as bar_file:
        data = json.load(bar_file)
    # custom_style = Style( # label_font_size = .75em ))
    chart = pygal.Bar(style=pygal.style.styles['default'](label_font_size=20, ))
    mark_list = [x['mark'] for x in data]
    chart.add('Annual Mark List', mark_list)
    chart.x_labels = [x['year'] for x in data]
    chart.render_to_file('static/images/bar_chart.svg')
    # Cache-busting query string so browsers reload the freshly written SVG.
    img_url = 'static/images/bar_chart.svg?cache=' + str(time.time())
    # --- two-series bar chart from pygaldatamultibar.json ---
    with open('pygaldatamultibar.json', 'r') as multibar_file:
        multi_data = json.load(multibar_file)
    chartmutli = pygal.Bar()
    multi_mark_list = [y['mark'] for y in multi_data]
    tourn_list = [y['tournament'] for y in multi_data]
    chartmutli.add('Annual Mark List', multi_mark_list)
    chartmutli.add('Tournament Score', tourn_list)
    chartmutli.render_to_file('static/images/multi_bar_chart.svg')
    multi_img_url = 'static/images/multi_bar_chart.svg?cache=' + str(
        time.time())
    # --- line chart from pygaldataline.json ---
    # (the JSON key really is the misspelled 'broswer')
    with open('pygaldataline.json', 'r') as linechart_file:
        linechart_data = json.load(linechart_file)
    linechart = pygal.Line()
    linechart_list_names = [x['broswer'] for x in linechart_data]
    linechart_list = [y['values'] for y in linechart_data]
    for i in range(0, len(linechart_list_names)):
        linechart.add(linechart_list_names[i], linechart_list[i])
    linechart.render_to_file('static/images/line_chart.svg')
    line_img_url = 'static/images/line_chart.svg?cache=' + str(time.time())
    # --- static world map highlighting four continents ---
    supra = pygal.maps.world.SupranationalWorld()
    supra.add('Asia', [('asia', 1)])
    supra.add('Europe', [('europe', 1)])
    supra.add('North america', [('north_america', 1)])
    supra.add('South america', [('south_america', 1)])
    supra.render_to_file('static/images/world_map.svg')
    worldmap_img_url = 'static/images/world_map.svg?cache=' + str(time.time())
    # --- pie chart from pygaldatapie.json ---
    with open('pygaldatapie.json', 'r') as piechart_file:
        piechart_data = json.load(piechart_file)
    pie_chart = pygal.Pie()
    piechart_list_names = [x['broswer'] for x in piechart_data]
    piechart_list_value = [y['value'] for y in piechart_data]
    pie_chart.title = 'Browser usage in February 2012 (in %)'
    for i in range(0, len(piechart_list_names)):
        pie_chart.add(piechart_list_names[i], piechart_list_value[i])
    pie_chart.render_to_file('static/images/pie_chart.svg')
    piechart_img_url = 'static/images/pie_chart.svg?cache=' + str(time.time())
    # --- solid gauge from pygaldatagauge.json, dollar-formatted values ---
    gaugechart = pygal.SolidGauge(inner_radius=0.70)
    percent_formatter = lambda x: '{:.10g}%'.format(x)  # defined but unused here
    dollar_formatter = lambda x: '{:.10g}$'.format(x)
    with open('pygaldatagauge.json', 'r') as gauagechart_file:
        gauagechart_data = json.load(gauagechart_file)
    gauagechart_series = [x['series'] for x in gauagechart_data]
    gauagechart_values = [y['value'] for y in gauagechart_data]
    gauagechart_max_values = [z['max_value'] for z in gauagechart_data]
    for i in range(0, len(gauagechart_series)):
        gaugechart.add(gauagechart_series[i], [{
            'value': gauagechart_values[i],
            'max_value': gauagechart_max_values[i]
        }], formatter=dollar_formatter)
    gaugechart.render_to_file('static/images/gauge_chart.svg')
    gaugechart_img_url = 'static/images/gauge_chart.svg?cache=' + str(
        time.time())
    return render_template('app.html', **locals())
def main():
    """Poll building power meters forever and publish pygal gauge SVGs.

    Roughly every 15 seconds this reads kW / kWh readings for four
    buildings via ``myFunction`` (defined elsewhere; presumably returns a
    numeric string, or ''/None when the meter is unavailable — TODO
    confirm), renders three SolidGauge charts (kW, kWh, dollars at a
    $0.12/kWh rate) plus an hourly kWh Gauge, resets the daily baselines
    at 23:00, and runs until interrupted with Ctrl-C.
    """
    firstTest = True  # True until the 6 am baseline reading is captured
    gauge_chart = pygal.Gauge(human_readable=True)  # hourly kWh chart
    # Daily kWh baselines: later readings are reported relative to these.
    mainkWhConstant = myFunction("Main (kWh)")
    if mainkWhConstant == '' or mainkWhConstant is None:
        mainkWhConstant = 0  # treat a missing reading as zero
    mainkWhConstant = int(mainkWhConstant)
    gymkWhConstant = myFunction("DG (kWh)")
    if gymkWhConstant == '' or gymkWhConstant is None:
        gymkWhConstant = 0
    gymkWhConstant = int(gymkWhConstant)
    kitchenkWhConstant = myFunction("DE (kWh)")
    if kitchenkWhConstant == '' or kitchenkWhConstant is None:
        kitchenkWhConstant = 0
    kitchenkWhConstant = int(kitchenkWhConstant)
    collinscenterkWhConstant = myFunction("AMDP (kWh)")
    if collinscenterkWhConstant == '' or collinscenterkWhConstant is None:
        collinscenterkWhConstant = 0
    collinscenterkWhConstant = int(collinscenterkWhConstant)
    # One "already recorded this hour today" flag per school-day hour
    # (6 am .. 3 pm).
    six = False
    seven = False
    eight = False
    nine = False
    ten = False
    eleven = False
    twelve = False
    one = False
    two = False
    three = False
    gauge_chart.title = 'Electricity used hourly in kWh all of ahs'
    gauge_chart.range = [0, 7000]
    try:
        # Formatters used when rendering gauge values.
        percent_formatter = lambda x: '{:.10g}%'.format(x)  # unused below
        dollar_formatter = lambda x: '{:.10g}$'.format(x)
        kW_formatter = lambda x: '{:.10g}kW'.format(x)
        kWh_formatter = lambda x: '{:.10g}kWh'.format(x)
        while True:
            currentDT = datetime.datetime.now()
            currentDT2 = datetime.date.today()  # unused
            # --- read the four buildings' kW and kWh meters ---
            mainkW = myFunction("Main (kW)")
            if mainkW == '' or mainkW is None:
                mainkW = 0
            mainkW = int(mainkW)
            mainkWh = myFunction("Main (kWh)")
            if mainkWh == '' or mainkWh is None:
                mainkWh = 0
            mainkWh = int(mainkWh)
            mainkWh = mainkWh - mainkWhConstant  # kWh since daily baseline
            gymkW = myFunction("DG (kW)")
            if gymkW == '' or gymkW is None:
                gymkW = 0
            gymkW = int(gymkW)
            gymkWh = myFunction("DG (kWh)")
            if gymkWh == '' or gymkWh is None:
                gymkWh = 0
            gymkWh = int(gymkWh)
            gymkWh = gymkWh - gymkWhConstant
            kitchenkW = myFunction("DE (kW)")
            if kitchenkW == '' or kitchenkW is None:
                kitchenkW = 0
            kitchenkW = int(kitchenkW)
            kitchenkWh = myFunction("DE (kWh)")
            if kitchenkWh == '' or kitchenkWh is None:
                kitchenkWh = 0
            kitchenkWh = int(kitchenkWh)
            kitchenkWh = kitchenkWh - kitchenkWhConstant
            collinscenterkW = myFunction("AMDP (kW)")
            if collinscenterkW == '' or collinscenterkW is None:
                collinscenterkW = 0
            collinscenterkW = int(collinscenterkW)
            collinscenterkWh = myFunction("AMDP (kWh)")
            if collinscenterkWh == '' or collinscenterkWh is None:
                collinscenterkWh = 0
            collinscenterkWh = int(collinscenterkWh)
            collinscenterkWh = collinscenterkWh - collinscenterkWhConstant
            # --- instantaneous power gauge (kW), rebuilt every pass ---
            kW = pygal.SolidGauge(
                half_pie=True, inner_radius=0.70,
                style=pygal.style.styles['default'](value_font_size=10))
            kW.add('AHS MAIN aka all of AHS',
                   [{'value': mainkW, 'max_value': 750}], formatter=kW_formatter)
            kW.add('AHS GYM', [{'value': gymkW, 'max_value': 200}],
                   formatter=kW_formatter)
            kW.add('AHS COLLINS CENTER',
                   [{'value': collinscenterkW, 'max_value': 250}],
                   formatter=kW_formatter)
            kW.add('AHS KITCHEN', [{'value': kitchenkW, 'max_value': 150}],
                   formatter=kW_formatter)
            kW.render_to_file("static/svg/kw.svg")
            # --- energy-since-baseline gauge (kWh) ---
            kWh = pygal.SolidGauge(
                half_pie=True, inner_radius=0.70,
                style=pygal.style.styles['default'](value_font_size=10))
            kWh.add('AHS MAIN aka all of AHS',
                    [{'value': mainkWh, 'max_value': 7500}], formatter=kWh_formatter)
            kWh.add('AHS GYM', [{'value': gymkWh, 'max_value': 2000}],
                    formatter=kWh_formatter)
            kWh.add('AHS COLLINS CENTER',
                    [{'value': collinscenterkWh, 'max_value': 2000}],
                    formatter=kWh_formatter)
            kWh.add('AHS KITCHEN', [{'value': kitchenkWh, 'max_value': 1700}],
                    formatter=kWh_formatter)
            kWh.render_to_file("static/svg/kwh.svg")
            # --- cost gauge: kWh * $0.12 flat rate ---
            dollar = pygal.SolidGauge(
                half_pie=True, inner_radius=0.70,
                style=pygal.style.styles['default'](value_font_size=10))
            dollar.add('AHS MAIN aka all of AHS',
                       [{'value': int(mainkWh * 0.12), 'max_value': int(0.12 * 7500)}],
                       formatter=dollar_formatter)
            dollar.add('AHS GYM',
                       [{'value': int(gymkWh * 0.12), 'max_value': int(0.12 * 2000)}],
                       formatter=dollar_formatter)
            dollar.add('AHS COLLINS CENTER',
                       [{'value': int(0.12 * collinscenterkWh), 'max_value': int(0.12 * 2000)}],
                       formatter=dollar_formatter)
            dollar.add('AHS KITCHEN',
                       [{'value': int(0.12 * kitchenkWh), 'max_value': int(0.12 * 1700)}],
                       formatter=dollar_formatter)
            dollar.render_to_file("static/svg/dollars.svg")
            # --- hourly kWh logging during the school day (5 am - 4 pm) ---
            if currentDT.hour >= 5 and currentDT.hour < 16:
                if (int(currentDT.hour) == 6) and (six == False):
                    # Capture the 6 am baseline used for the hourly deltas.
                    firstTest = False; six = True
                    gauge_chart.add('6 am', 0)
                    sixamkWh = myFunction("Main (kWh)")
                    if sixamkWh == '':
                        sixamkWh = myFunction("Main (kWh)")  # single retry on empty
                    sixamkWh = int(sixamkWh)
                    gauge_chart.render_to_file("static/svg/kWhHourly.svg")
                if firstTest == False:
                    # Each later hour records usage since 6 am, once per day
                    # (guarded by the per-hour flag).  Each read retries once
                    # if the meter returns an empty string.
                    if (int(currentDT.hour) == 7) and (seven == False):
                        seven = True
                        sevenamkWh = myFunction("Main (kWh)")
                        if sevenamkWh == '':
                            sevenamkWh = myFunction("Main (kWh)")
                        sevenamkWh = int(sevenamkWh)
                        gauge_chart.add('7 am', sevenamkWh - sixamkWh)
                    if (int(currentDT.hour) == 8) and (eight == False):
                        eight = True
                        eightamkWh = myFunction("Main (kWh)")
                        if eightamkWh == '':
                            eightamkWh = myFunction("Main (kWh)")
                        eightamkWh = int(eightamkWh)
                        gauge_chart.add('8 am', eightamkWh - sixamkWh)
                    if (int(currentDT.hour) == 9) and (nine == False):
                        nine = True
                        nineamkWh = myFunction("Main (kWh)")
                        if nineamkWh == '':
                            nineamkWh = myFunction("Main (kWh)")
                        nineamkWh = int(nineamkWh)
                        gauge_chart.add('9 am', nineamkWh - sixamkWh)
                    if (int(currentDT.hour) == 10) and (ten == False):
                        ten = True
                        tenamkWh = myFunction("Main (kWh)")
                        if tenamkWh == '':
                            tenamkWh = myFunction("Main (kWh)")
                        tenamkWh = int(tenamkWh)
                        gauge_chart.add('10 am', tenamkWh - sixamkWh)
                    if (int(currentDT.hour) == 11) and (eleven == False):
                        eleven = True
                        elevenamkWh = myFunction("Main (kWh)")
                        if elevenamkWh == '':
                            elevenamkWh = myFunction("Main (kWh)")
                        elevenamkWh = int(elevenamkWh)
                        gauge_chart.add('11 am', elevenamkWh - sixamkWh)
                    if (int(currentDT.hour) == 12) and (twelve == False):
                        twelve = True
                        twelvepmkWh = myFunction("Main (kWh)")
                        if twelvepmkWh == '':
                            twelvepmkWh = myFunction("Main (kWh)")
                        twelvepmkWh = int(twelvepmkWh)
                        gauge_chart.add('12 pm', twelvepmkWh - sixamkWh)
                    if (int(currentDT.hour) == 13) and (one == False):
                        one = True
                        onepmkWh = myFunction("Main (kWh)")
                        if onepmkWh == '':
                            onepmkWh = myFunction("Main (kWh)")
                        onepmkWh = int(onepmkWh)
                        gauge_chart.add('1 pm', onepmkWh - sixamkWh)
                    if (int(currentDT.hour) == 14) and (two == False):
                        two = True
                        twopmkWh = myFunction("Main (kWh)")
                        if twopmkWh == '':
                            twopmkWh = myFunction("Main (kWh)")
                        twopmkWh = int(twopmkWh)
                        gauge_chart.add('2 pm', twopmkWh - sixamkWh)
                    if (int(currentDT.hour) == 15) and (three == False):
                        three = True
                        threepmkWh = myFunction("Main (kWh)")
                        if threepmkWh == '':
                            threepmkWh = myFunction("Main (kWh)")
                        threepmkWh = int(threepmkWh)
                        gauge_chart.add('3 pm', threepmkWh - sixamkWh)
                        # End of the logging day: re-arm for tomorrow and
                        # publish the finished hourly chart.
                        firstTest = True; gauge_chart.render_to_file("static/svg/kWhHourly.svg")
            time.sleep(15)  # poll interval
            if (int(currentDT.hour) == 23):
                # Nightly reset: fresh hourly chart, truncated SVG, new
                # daily baselines, cleared per-hour flags.
                gauge_chart = pygal.Gauge(human_readable=True)
                try:
                    open('static/svg/kWhHourly.svg', 'w').close()  # truncate file
                except:
                    print("Exception")
                mainkWhConstant = myFunction("Main (kWh)")
                if mainkWhConstant == '':
                    mainkWhConstant = 0
                mainkWhConstant = int(mainkWhConstant)
                gymkWhConstant = myFunction("DG (kWh)")
                if gymkWhConstant == '':
                    gymkWhConstant = 0
                gymkWhConstant = int(gymkWhConstant)
                kitchenkWhConstant = myFunction("DE (kWh)")
                if kitchenkWhConstant == '':
                    kitchenkWhConstant = 0
                kitchenkWhConstant = int(kitchenkWhConstant)
                collinscenterkWhConstant = myFunction("AMDP (kWh)")
                if collinscenterkWhConstant == '':
                    collinscenterkWhConstant = 0
                collinscenterkWhConstant = int(collinscenterkWhConstant)
                six = False
                seven = False
                eight = False
                nine = False
                ten = False
                eleven = False
                twelve = False
                one = False
                two = False
                three = False
                sixamkWh = 0
    except KeyboardInterrupt:
        # Final flush of whatever charts exist when the loop is stopped.
        kW.render_to_file("static/svg/kw.svg")
        kWh.render_to_file("static/svg/kwh.svg")
        dollar.render_to_file("static/svg/dollars.svg")
        if currentDT.hour > 5 and currentDT.hour < 16:
            gauge_chart.render_to_file("static/svg/kWhHourly.svg")
def __init__(self, **kwargs):
    """Create the wrapper around a pygal solid-gauge chart.

    All keyword arguments are forwarded unchanged to ``pygal.SolidGauge``.
    """
    self.gauge = pygal.SolidGauge(**kwargs)
def first_round_history(self):
    """Analyze first-round win odds per seed match-up (1v16 ... 8v9).

    Tallies how often each of seeds 1-8 won its first-round game across
    all tournaments in the slot/result data, then renders the win
    percentages to ``chart.svg`` as a pygal SolidGauge.
    """
    self.ts_dict = self.get_tourney_slots()
    self.tsr_dict = self.match_seeds()
    # NOTE(review): assumes exactly 128 first-round games in the data set —
    # confirm against the source files.
    total_games = 128
    # Per-seed win counters keyed by the two-digit seed string; replaces the
    # original eight separate counters and elif chain.
    wins = {'01': 0, '02': 0, '03': 0, '04': 0,
            '05': 0, '06': 0, '07': 0, '08': 0}
    for year1 in self.ts_dict:
        for slot, match_up in self.ts_dict[year1].items():
            if slot[:2] != "R1":
                continue  # only first-round slots
            for year2 in self.tsr_dict:
                if year1 != year2:
                    continue  # only results from the same tournament year
                for winning, losing in self.tsr_dict[year2].items():
                    # winning[5:] is the seed token of the winner; compare it
                    # against the first team of the match-up string.
                    if winning[5:] == match_up[:3]:
                        seed = winning[6:]  # two-digit seed number
                        if seed in wins:
                            wins[seed] += 1
    gauge = pygal.SolidGauge(inner_radius=0.70,
                             title="NCAA First Round Results")
    percent_formatter = lambda x: '{:.10g}%'.format(x)
    gauge.value_formatter = percent_formatter
    # Labels in the same order the original added them (seed 1 through 8).
    labels = ['1 vs. 16', '2 vs. 15', '3 vs. 14', '4 vs. 13',
              '5 vs. 12', '6 vs. 11', '7 vs. 10', '8 vs. 9']
    for label, seed in zip(labels, sorted(wins)):
        ratio = int(wins[seed] / total_games * 100)
        gauge.add(label, [{'value': ratio, 'max_value': 100}])
    gauge.render_to_file('chart.svg')
def home():
    """Render the STB-usage dashboard: build all charts as SVGs under
    static/images/ and return the HTML page from create_html_page().

    Relies on module-level data defined elsewhere: data_time_spent,
    avg_time_spent, data_tf_idf, var_full_data and the var_screen_id_dict
    accumulator (presumably a dict of lists keyed by screen id — TODO
    confirm against its definition).
    """
    # print ("__xlabels__",var_x_labels)
    ##################################### Active session chart #####
    custom_style = Style(colors=('#351508', '#404040', '#9BC850'))
    title = 'STB Uptime'
    bar_chart = pygal.Bar(width=400, height=400, explicit_size=True,
                          title=title, x_label_rotation=90, style=custom_style)
    var_x_labels = []
    var_data = []
    # One bar per session: x label is the session end timestamp (UTC),
    # value is the time spent.
    for idx1 in list(range(0, len(data_time_spent))):
        var_end_time = datetime.utcfromtimestamp(
            int(data_time_spent.Date_Time[idx1])).strftime('%Y-%m-%d %H:%M:%S')
        var_x_labels.append(var_end_time)
        var_data.append(data_time_spent.Time_spent[idx1])
        # print(data_time_spent.Time[idx1])
    bar_chart.x_labels = var_x_labels
    var_time_spent = var_data
    bar_chart.add('Active session time', var_time_spent)
    bar_chart.render_to_file('static/images/bar_chart.svg')
    ##################################### Active session line chart #####
    line_chart = pygal.Line(width=400, height=400, explicit_size=True,
                            title=title, x_label_rotation=90)
    line_chart.title = 'Time series'
    # line_chart.x_labels = map(
    #     date.isoformat,
    #     rrule(DAILY, dtstart=date(2010, 8, 1), until=date.today())
    # )
    # line_chart.y_labels = map(str,range(1,25))
    var_date_series = []
    var_start_time_series = []
    var_end_time_series = []
    var_time_series_plot = []  # unused
    # Session start = end timestamp minus time spent; both ends are plotted
    # as fractional hours (HH.MM read as a float) on a 0-24 axis.
    for idx1 in list(range(0, len(data_time_spent))):
        tmp_start_time = data_time_spent.Date_Time[
            idx1] - data_time_spent.Time_spent[idx1]
        var_date_series.append(
            datetime.utcfromtimestamp(
                int(tmp_start_time)).strftime('%Y-%m-%d'))
        var_start_time_series.append(
            float(
                datetime.utcfromtimestamp(
                    int(tmp_start_time)).strftime('%H.%M')))
        var_end_time_series.append(
            float(
                datetime.utcfromtimestamp(int(
                    data_time_spent.Date_Time[idx1])).strftime('%H.%M')))
    line_chart.x_labels = map(str, var_date_series)
    line_chart.add("Start Time ", var_start_time_series)
    line_chart.add("End Time", var_end_time_series)
    line_chart.range = [0, 24]
    print("##########var date series####", var_date_series)
    print("##########var start time series####", var_start_time_series)
    print("##########var end time series####", var_end_time_series)
    line_chart.render_to_file('static/images/time_series_chart.svg')
    ################################### Avg time spent chart #######
    title2 = 'Avg time spent on each screen'
    custom_style = Style(colors=('#059467', '#9BC850', '#E80080'))
    avg_bar_chart = pygal.Bar(width=1200, height=800, explicit_size=True,
                              title=title2, x_label_rotation=70,
                              style=custom_style)
    var_x_labels = []
    var_data = []
    # print ("__xlabels__",var_x_labels)
    # print ("avg time spent===",avg_time_spent)
    for idx1 in list(range(0, len(avg_time_spent))):
        var_x_labels.append(avg_time_spent.Screen_id[idx1])
        var_data.append(avg_time_spent.Avg_Time[idx1])
        # print(avg_time_spent.Avg_Time[idx1])
    avg_bar_chart.x_labels = var_x_labels
    # bar_chart.y_labels = var_y_labels
    # var_time_spent = [data_time_spent[x] for x in data_time_spent]
    # print ("_Time spent_",var_time_spent)
    # print ("_Time spent_0___",var_time_spent[0])
    # print ("_Time spent_1___",var_time_spent[1])
    var_time_spent = var_data
    avg_bar_chart.add('Avg time sec', var_data)
    avg_bar_chart.render_to_file('static/images/screen_avg_chart.svg')
    ################################### Least used screens #####
    title2 = 'Least used screens'
    custom_style = Style(colors=('#1878f7', '#404040', '#E80080'))
    avg_bar_chart = pygal.Bar(width=1200, height=600, explicit_size=True,
                              title=title2, x_label_rotation=90,
                              style=custom_style)
    var_x_labels = []
    var_data = []
    # print ("__xlabels__",var_x_labels)
    for idx1 in list(range(0, len(data_tf_idf))):
        var_x_labels.append(data_tf_idf.GUI[idx1])
        var_data.append(data_tf_idf.Priority[idx1])
        # print(avg_time_spent.Avg_Time[idx1])
    avg_bar_chart.x_labels = var_x_labels
    # bar_chart.y_labels = var_y_labels
    # var_time_spent = [data_tf_idf[x] for x in data_tf_idf]
    # print ("_Time spent_",var_time_spent)
    # print ("_Time spent_0___",var_time_spent[0])
    # print ("_Time spent_1___",var_time_spent[1])
    var_time_spent = var_data
    avg_bar_chart.add('Least used screen', var_time_spent)
    avg_bar_chart.render_to_file('static/images/tf_idf_chart.svg')
    #==================== Donut chart of the screens used ==================
    pie_chart = pygal.Pie(width=1200, height=600, explicit_size=True,
                          inner_radius=.2)
    pie_chart.title = 'Time spent on each screen (in %)'
    var_x_labels = []
    var_data = []
    total_values = 0
    # First pass: total time across all screens, for percentage shares.
    for idx1 in list(range(0, len(avg_time_spent))):
        total_values = total_values + avg_time_spent.Avg_Time[idx1]
    # print ("Total value===",total_values)
    for idx1 in list(range(0, len(avg_time_spent))):
        prcnt = round((avg_time_spent.Avg_Time[idx1] / total_values) * 100, 2)
        # print ("% = ",prcnt)
        pie_chart.add(avg_time_spent.Screen_id[idx1], prcnt)
    pie_chart.render_to_file('static/images/donut.svg')
    #================================== Gauge chart ==========
    # Created and configured but never populated or rendered in this view.
    gauge = pygal.SolidGauge(inner_radius=0.70)
    percent_formatter = lambda x: '{:.10g}%'.format(x)
    gauge.value_formatter = percent_formatter
    #==== Screen Pattern Table Info =====================================
    # For every known screen, append the screen that follows each of its
    # occurrences in the full navigation log to var_screen_id_dict.
    for idx in list(range(0, len(avg_time_spent))):
        # print (idx,"=>",avg_time_spent.Screen_id[idx])
        for idx2 in list(range(0, len(var_full_data))):
            if (avg_time_spent.Screen_id[idx] == var_full_data.ScreenName[idx2]
                ):
                # print (idx2,"count=",var_full_data.ScreenName[idx2])
                if (idx2 + 1 >= len(var_full_data)):
                    continue  # last row has no successor
                else:
                    var_screen_id_dict[avg_time_spent.Screen_id[idx]].append(
                        var_full_data.ScreenName[idx2 + 1])
                    # print("screneid =",avg_time_spent.Screen_id[idx],"next id=",var_full_data.ScreenName[idx2+1])
    # print("The next screen list stored==========")
    # print (var_screen_id_dict)
    #==== scatter plot =====================================
    # xy_chart = pygal.XY(width=1200,height=600,explicit_size=True,stroke=False)
    # xy_chart.title = 'Scatter plot of the screen ID and time spent'
    # xy_chart.add('2', [(.1, .15), (.12, .23), (.4, .3), (.6, .4), (.21, .21), (.5, .3), (.6, .8), (.7, .8)])
    # xy_chart.add('3', [(.05, .01), (.13, .02), (1.5, 1.7), (1.52, 1.6), (1.8, 1.63), (1.5, 1.82), (1.7, 1.23), (2.1, 2.23), (2.3, 1.98)])
    # xy_chart.render_to_file('static/images/scatter.svg')
    #======================================
    my_html = create_html_page()
    return my_html
def main():
    """Load the animal-bite data set, print summary tables, and render
    pygal charts (saved as SVG and opened in the browser)."""
    # Load Health_AnimalBites.csv into a DataFrame.
    data = pandas.read_csv('Health_AnimalBites.csv')
    # Bite counts per species.
    data_group = data['SpeciesIDDesc'].value_counts()
    print(data_group)
    # Bite counts per dog breed.
    dog_specie = data[data['SpeciesIDDesc'] == 'Dog'].groupby(['BreedIDDesc']).size()
    print(dog_specie)
    # Bite counts per dog gender.
    dog_gen = data[data['SpeciesIDDesc'] == 'Dog'].groupby(['GenderIDDesc']).size()
    print(dog_gen)
    # Bite counts per dog color.
    dog_color = data[data['SpeciesIDDesc'] == 'Dog'].groupby(['color']).size()
    # BUG FIX: sep was the literal two-character string "/n"; a newline
    # separator ("\n") was clearly intended.
    print("---Color of Dog---", dog_color, sep="\n")
    # Bite counts per pit-bull color.
    pit_color = data[data['BreedIDDesc'] == 'Pit Bull'].groupby(['color']).size()
    print("---Color of Pit bull---", pit_color, sep="\n")

    # --- Charts ---
    # Species that bite people most often (half solid gauge).
    animal = data_group.sort_values(ascending=False).to_dict()
    sol = pygal.SolidGauge(half_pie=True, inner_radius=0.7)
    for i in animal:
        sol.add(i, animal[i])
    sol.title = 'Most Animal that Bite People'
    sol.render_to_file('animals.svg')
    sol.render_in_browser()
    # Dog breeds that bite people (treemap).
    animalz = dog_specie.sort_values(ascending=False).to_dict()
    tree = pygal.Treemap()
    for i in animalz:
        tree.add(i, animalz[i])
    tree.title = 'Species of Dogs that Bite People'
    tree.render_to_file('speciesdog.svg')
    tree.render_in_browser()
    # Top 10 dog colors (horizontal bar chart).
    color_dog = dog_color.sort_values(ascending=False)[:10].to_dict()
    bar = pygal.HorizontalBar()
    bar.title = 'Top 10 colors of Dog'
    for i in color_dog:
        bar.add(i, color_dog[i])
    bar.render_to_file('top10colorsdog.svg')
    bar.render_in_browser()
    # Top 10 pit-bull colors (horizontal bar chart).
    color_pit = pit_color.sort_values(ascending=False)[:10].to_dict()
    bar_ = pygal.HorizontalBar()
    bar_.title = 'Top 10 colors of Pitbull'
    for i in color_pit:
        bar_.add(i, color_pit[i])
    bar_.render_to_file('top10colorspitbull.svg')
    bar_.render_in_browser()
    # Dog bites per victim ZIP code, most affected areas first.
    # NOTE(review): currently computed but never printed or charted.
    zip_code = data[data['SpeciesIDDesc'] == 'Dog'].groupby(
        ['victim_zip']).size().sort_values(ascending=False)
# Monthly participant totals for 2018, one value per month Jan..Dec, pulled
# from the ds2018 data source (line_chart and ds2018 are defined elsewhere).
line_chart.add('2018', [ds2018.totalParticipantesJan(), ds2018.totalParticipantesFev(),
                        ds2018.totalParticipantesMar(), ds2018.totalParticipantesAbr(),
                        ds2018.totalParticipantesMai(), ds2018.totalParticipantesJun(),
                        ds2018.totalParticipantesJul(), ds2018.totalParticipantesAgo(),
                        ds2018.totalParticipantesSet(), ds2018.totalParticipantesOut(),
                        ds2018.totalParticipantesNov(), ds2018.totalParticipantesDez()])
line_chart.render_to_png(filename="Bar", dpi=2000)

# First import pygal
import pygal
# Then create a bar graph object
bar_chart = pygal.Bar()
# Add some values
bar_chart.add('Fibonacci', [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55])
bar_chart.render_to_png(filename="BarraTeste2", dpi=1000)

# Solid-gauge demo: the percent formatter is set as the chart default;
# a series can override it per-add (Series 1 uses the dollar formatter).
gauge = pygal.SolidGauge(inner_radius=0.70)
percent_formatter = lambda x: '{:.10g}%'.format(x)
dollar_formatter = lambda x: '{:.10g}$'.format(x)
gauge.value_formatter = percent_formatter
gauge.add('Series 1', [{'value': 225000, 'max_value': 1275000}],
          formatter=dollar_formatter)
# Value above its max_value.
gauge.add('Series 2', [{'value': 110, 'max_value': 100}])
# No explicit max_value for this series.
gauge.add('Series 3', [{'value': 3}])
# Two gauge values in a single series.
gauge.add(
    'Series 4', [
        {'value': 51, 'max_value': 100},
        {'value': 12, 'max_value': 100}])
gauge.add('Series 5', [{'value': 79, 'max_value': 100}])
# A bare number is also accepted as a series value.
gauge.add('Series 6', 99)
gauge.add('Series 7', [{'value': 100, 'max_value': 100}])
# NOTE(review): the gauge is never rendered within this snippet — a render
# call presumably follows elsewhere.
# Earlier prototype using a plain pygal.Gauge, kept for reference:
# import pygal
# gauge = pygal.Gauge()
# gauge.title = 'Gauge Chart'
# gauge.range = [0, 5000]
# # Random data
# gauge.add('A', 3000)
# gauge.add('B', 2000)
# gauge.add('C', 3500)
# gauge.render_to_file('gauge.svg')

import pygal

# Half-pie solid gauge showing one pressure reading per sensor.
Solid_Gauge = pygal.SolidGauge(inner_radius=0.75, half_pie=True)
Solid_Gauge.title = 'SCP(Psi) Pressure Monitoring'

# (sensor name, current reading, full-scale value); add order is fixed so
# the rendered chart matches the original.
_readings = [
    ('KINDP-A101', 300, 2000),
    ('KINDP-A104', 520, 700),
    ('KINDP-A105', 75, 2000),
]
for _name, _value, _max in _readings:
    Solid_Gauge.add(_name, [{'value': _value, 'max_value': _max}])

Solid_Gauge.render_to_file('gauge.svg')