def __init__(self, **kwargs):
    self.chart = pygal.Pie(**kwargs)
    self.chart.title = 'Registered Users'
import pygal

fruits = pygal.Pie()
fruits.title = 'Favourite Foods'
file = open('Fruits.txt', 'r')
for line in file.read().splitlines():
    if line:
        label, value = line.split(': ')
        fruits.add(label, int(value))
file.close()
fruits.render_to_file('fruits_chart1.svg')
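# A minimal sketch of the 'Fruits.txt' input the snippet above assumes: one
# "label: value" pair per line, split on ': ' (the labels and numbers here are
# illustrative, not taken from the original project).
#
#   Apples: 12
#   Bananas: 8
#   Cherries: 5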
import pygal

data = {
    'java': 0.16881,
    'c': 0.14996,
    'C++': 0.07471,
    'python': 0.06992,
    'VB.net': 0.04762,
    'C#': 0.03541,
    'PHP': 0.02925,
    'JavaScript': 0.02411,
    'SQL': 0.02316,
    'Assembly language': 0.01409,
    'other': 0.36326,
}

# pygal.Pie represents a pie chart; create one
graph = pygal.Pie()
graph.title = '2018年8月编程语言市场份额'  # programming language market share, August 2018
for k in data.keys():
    graph.add(k, data[k])
graph.legend_at_bottom = True
# Hollow (donut) pie chart
graph.inner_radius = 0.2
# Half (semicircle) pie chart
graph.half_pie = True
graph.render_to_file('pie.svg')
import pygal

piechart = pygal.Pie()
piechart.title = 'TB IN STATES OF INDIA'
piechart.add('Delhi', 36)
piechart.add('Gujarat', 64)
piechart.add('Haryana', 51)
piechart.add('Madhya Pradesh', 49)
piechart.add('Uttar Pradesh', 56)
piechart.add('West Bengal', 60)
piechart.add('Orissa', 54)
piechart.add('kerala', 55)
piechart.render()
# mant_cer_T4 = mant_T4[mant_T4['Fecha Termino6'].str.contains(fecha)]
T = ["T1", "T2"]
aux = {}
# Type T1 work orders closed, per technician
for s, t in enumerate(nom_tec):
    df_s = mant_cer_mes.loc[mant_cer_mes['Nombre Técnico'] == t]
    mant_T = [{t: np.shape(df_s[df_s['Tipo de mantención'] == t])[0]} for t in T]
    aux[s] = {t: mant_T}
resultados = [aux[i] for i, j in enumerate(aux.items())]

percent_formatter = lambda x: '{:.2g}%'.format(x)
pie_chart = pygal.Pie(print_values=True)
pie_chart.title = "% de OT T1 cerradas v/s total mes\n{}".format(fecha)
if np.shape(mant_T1)[0] != 0:
    for x, y in enumerate(nom_tec):
        print(x, y, resultados[x][y][0]['T1'])
        # All generated minus the open ones (generated - closed)
        pie_chart.add(y, (resultados[x][y][0]['T1']) / np.shape(mant_T1)[0] * 100,
                      formatter=percent_formatter)
    pie_chart.render_in_browser()
else:
    print("\nEn esta fecha no existen OT de tipo T1")

pie_chart = pygal.Pie(print_values=True)
pie_chart.title = "% de OT T2 cerradas v/s total mes\n{}".format(fecha)
if np.shape(mant_T2)[0] != 0:
def admin():
    # conn = sqlite3.connect(db_location)
    conn = mysql.connector.connect(user=db_user, password=db_password,
                                   host=db_host, database=db_database)
    cursor = conn.cursor()

    # Get all the patients, split their sensor lists at ',' and count the sensors.
    cursor.execute("""SELECT COUNT(*) FROM ban.Patients""")
    num_patients = cursor.fetchone()[0]

    # fetchone() returns a tuple; take the scalar count
    cursor.execute("""SELECT COUNT(*) FROM ban.Patients WHERE Gender = 'Male'""")
    num_males = cursor.fetchone()[0]

    cursor.execute("""SELECT COUNT(*) FROM ban.Patients WHERE Gender = 'Female'""")
    num_females = cursor.fetchone()[0]

    # Counted in Python because the SQL aggregation approach didn't work.
    cursor.execute("""SELECT Sensors FROM ban.Patients""")
    sensors = []
    for c in cursor:
        sensors += c[0].split(",")
    bp = sensors.count("Blood Pressure")
    hr = sensors.count("Heart Rate")
    gl = sensors.count("Glucose Levels")
    bo = sensors.count("Blood Oxygen Saturation")

    cursor.execute("""SELECT COUNT(*) FROM ban.Readings""")
    n_records = cursor.fetchone()[0]

    cursor.execute("""SELECT AVG(Age) FROM ban.Patients""")
    x = cursor.fetchone()[0]
    if x:
        avg_age = round(x, 2)
    else:
        avg_age = "Na"
    conn.close()

    pie_chart = pygal.Pie(inner_radius=0.4, legend_at_bottom=True, half_pie=True)
    pie_chart.title = "Gender Distribution"
    pie_chart.add("Male", num_males)
    pie_chart.add("Female", num_females)
    pie_chart_data = pie_chart.render_data_uri()

    bar = pygal.Bar(legend_at_bottom=True, show_x_guides=True, show_y_guides=True)
    bar.title = "Sensor Distribution"
    bar.add("Blood Pressure", [bp])
    bar.add("Heart Rate", [hr])
    bar.add("Glucose Levels", [gl])
    bar.add("Blood Oxygen Saturation", [bo])
    bar_data = bar.render_data_uri()

    return render_template("admin.html", num_patients=num_patients,
                           gender_chart=pie_chart_data, sensors_chart=bar_data,
                           avg_age=avg_age, n_sensors=len(sensors),
                           n_records=n_records, server_name=sys.argv[1])
import pygal

pie_chart = pygal.Pie()
pie_chart.title = 'Browser usage in February 2012 (in %)'
pie_chart.add('IE', 19.5)
pie_chart.add('Firefox', 36.6)
pie_chart.add('Chrome', 36.3)
pie_chart.add('Safari', 4.5)
pie_chart.add('Opera', 2.3)
pie_chart.render()
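# render() above returns the SVG markup itself; two other real pygal calls get
# the same chart onto disk or into a web page. The filename is illustrative.
pie_chart.render_to_file('browser_usage.svg')   # write an .svg file
chart_uri = pie_chart.render_data_uri()         # data URI for embedding in HTML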
def gender():
    import csv
    import pygal as pg

    # Open the CSV file
    file = open('AccidentDataset.csv')
    data = csv.reader(file)
    table = [row for row in data]

    # data_list rows: [MONTH, YEAR, ADMIT, DEAD, GENDER, AGE]
    num = 0
    data_list = []
    for i in range(0, len(table)):
        # Keep only rows from year 2015
        if table[i][4] == "15":
            num += 1
            data_list.append([table[i][3], table[i][4], table[i][6],
                              table[i][7], table[i][17], table[i][18]])

    # Accident counts per month, index 0 = January ... index 11 = December
    male_list = [0] * 12
    female_list = [0] * 12
    valid_months = {str(m) for m in range(1, 13)}
    for row in data_list:
        if row[0] not in valid_months:
            continue
        month = int(row[0]) - 1
        if row[4] == "1":      # gender code "1" = male
            male_list[month] += 1
        else:
            female_list[month] += 1

    # Monthly bar chart split by gender
    graph = pg.Bar(x_labels_major_count=12, show_minor_x_labels=True,
                   truncate_legend=40, legend_at_bottom=False, truncate_label=100)
    graph.title = 'อัตราผู้เกิดอุบัติเหตุแยกตามเพศในปี 2015'  # accident rate by gender, 2015
    graph.x_labels = ['January', 'February', 'March', 'April', 'May', 'June',
                      'July', 'August', 'September', 'October', 'November', 'December']
    graph.add('Male', male_list)
    graph.add('Female', female_list)
    # Range of the Y axis
    graph.range = [1, 4000]
    graph.render_to_file('graph_gender01.svg')

    # Totals for the whole year, shown as a pie chart
    male_all = sum(male_list)
    female_all = sum(female_list)
    graph = pg.Pie()
    graph.title = 'อัตราผู้เกิดอุบัติเหตุแยกตามเพศในปี 2015'
    graph.add('Male', male_all)
    graph.add('Female', female_all)
    graph.render_to_file('graph_gender02.svg')

    # Show information
    print("Male\t:", male_list)
    print("Female\t:", female_list)
    print("Data\t: %d" % num)
    print("Year\t: 20%s" % data_list[0][1])
]
for i in range(41360):
    a = str(df_initial['neighbourhood_cleansed'][i])
    a = a[0:3]
    df_initial['neighbourhood_cleansed'][i] = a

df_raw = df_initial[columns_to_keep].set_index('id')
print("数据还有 {} 行和 {} 列(删去无关列)".format(*df_raw.shape))  # rows/columns left after dropping unrelated columns
print("各类房间类型:")  # room types
print(df_raw.room_type.value_counts(normalize=True))

# Pie chart of the room-type shares
pie_chart1 = pygal.Pie()
pie_chart1.title = '各类房间类型:'
pie_chart1.add('Entire home/apt', df_raw.room_type.value_counts(normalize=True)[0])
pie_chart1.add('Private room', df_raw.room_type.value_counts(normalize=True)[1])
pie_chart1.add('Shared room', df_raw.room_type.value_counts(normalize=True)[2])
pie_chart1.render_to_file('各类房间类型bar_chart.svg')

print("各类房屋类型")  # property types
print(df_raw.property_type.value_counts(normalize=True))
print(len(df_raw.property_type.value_counts(normalize=True)))

# Pie chart of the property-type shares
pie_chart2 = pygal.Pie()
pie_chart2.title = '各类房屋类型:'
for i in range(len(df_raw.property_type.value_counts(normalize=True))):
    pie_chart2.add(
def bar():
    with open('pygaldatabar.json', 'r') as bar_file:
        data = json.load(bar_file)
    # custom_style = Style(label_font_size=.75em)
    chart = pygal.Bar(style=pygal.style.styles['default'](label_font_size=20))
    mark_list = [x['mark'] for x in data]
    chart.add('Annual Mark List', mark_list)
    chart.x_labels = [x['year'] for x in data]
    chart.render_to_file('static/images/bar_chart.svg')
    img_url = 'static/images/bar_chart.svg?cache=' + str(time.time())

    with open('pygaldatamultibar.json', 'r') as multibar_file:
        multi_data = json.load(multibar_file)
    chartmutli = pygal.Bar()
    multi_mark_list = [y['mark'] for y in multi_data]
    tourn_list = [y['tournament'] for y in multi_data]
    chartmutli.add('Annual Mark List', multi_mark_list)
    chartmutli.add('Tournament Score', tourn_list)
    chartmutli.render_to_file('static/images/multi_bar_chart.svg')
    multi_img_url = 'static/images/multi_bar_chart.svg?cache=' + str(time.time())

    with open('pygaldataline.json', 'r') as linechart_file:
        linechart_data = json.load(linechart_file)
    linechart = pygal.Line()
    linechart_list_names = [x['broswer'] for x in linechart_data]
    linechart_list = [y['values'] for y in linechart_data]
    for i in range(0, len(linechart_list_names)):
        linechart.add(linechart_list_names[i], linechart_list[i])
    linechart.render_to_file('static/images/line_chart.svg')
    line_img_url = 'static/images/line_chart.svg?cache=' + str(time.time())

    supra = pygal.maps.world.SupranationalWorld()
    supra.add('Asia', [('asia', 1)])
    supra.add('Europe', [('europe', 1)])
    supra.add('North america', [('north_america', 1)])
    supra.add('South america', [('south_america', 1)])
    supra.render_to_file('static/images/world_map.svg')
    worldmap_img_url = 'static/images/world_map.svg?cache=' + str(time.time())

    with open('pygaldatapie.json', 'r') as piechart_file:
        piechart_data = json.load(piechart_file)
    pie_chart = pygal.Pie()
    piechart_list_names = [x['broswer'] for x in piechart_data]
    piechart_list_value = [y['value'] for y in piechart_data]
    pie_chart.title = 'Browser usage in February 2012 (in %)'
    for i in range(0, len(piechart_list_names)):
        pie_chart.add(piechart_list_names[i], piechart_list_value[i])
    pie_chart.render_to_file('static/images/pie_chart.svg')
    piechart_img_url = 'static/images/pie_chart.svg?cache=' + str(time.time())

    gaugechart = pygal.SolidGauge(inner_radius=0.70)
    percent_formatter = lambda x: '{:.10g}%'.format(x)
    dollar_formatter = lambda x: '{:.10g}$'.format(x)
    with open('pygaldatagauge.json', 'r') as gauagechart_file:
        gauagechart_data = json.load(gauagechart_file)
    gauagechart_series = [x['series'] for x in gauagechart_data]
    gauagechart_values = [y['value'] for y in gauagechart_data]
    gauagechart_max_values = [z['max_value'] for z in gauagechart_data]
    for i in range(0, len(gauagechart_series)):
        gaugechart.add(gauagechart_series[i],
                       [{'value': gauagechart_values[i],
                         'max_value': gauagechart_max_values[i]}],
                       formatter=dollar_formatter)
    gaugechart.render_to_file('static/images/gauge_chart.svg')
    gaugechart_img_url = 'static/images/gauge_chart.svg?cache=' + str(time.time())

    return render_template('app.html', **locals())
def __init__(self, **kwargs):
    self.chart = pygal.Pie(**kwargs)
    self.chart.title = 'Amount of Fruits'
print("Query 5 finished successfully") ### 6 sql_query6 = '''select SUM(amount) as totalAmount, YEAR(paymentDate) from payments GROUP BY YEAR(paymentDate) ORDER BY YEAR(paymentDate) DESC;''' mycursor.execute(sql_query6) query_result = mycursor.fetchall() total_payments = [] years= [] for record in query_result: total_payments.append(record[0]) years.append(record[1]) pie_chart6 = pygal.Pie() pie_chart6.title = "6" pie_chart6.add("2005", total_payments[0]) pie_chart6.add("2004", total_payments[1]) pie_chart6.add("2003", total_payments[2]) pie_chart6.render_to_file("images/q6.svg") print("Query 6 finished successfully") ### 7 sql_query7 = '''select SUM(amount) as totalAmount, MONTH(paymentDate) from payments WHERE YEAR(paymentDate) = 2004 GROUP BY MONTH(paymentDate) ORDER BY MONTH(paymentDate) ASC;''' mycursor.execute(sql_query7) query_result = mycursor.fetchall() total_payments = [] months = []
def graph(dic_2, lis, lis_1):
    """Plot a pie chart of overall popularity shares."""
    line_chart = pygal.Pie()
    for i in lis_1:
        # Percentage of i[1] out of len(lis), truncated to two decimal places
        line_chart.add(i[0], (int(((i[1] * 100) / (len(lis)) * 100))) / 100)
    line_chart.render_to_file('overallpopgame.svg')
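# Hypothetical usage of graph() above; the data below is illustrative only and
# not from the original project. dic_2 is unused by the function, lis is the
# full record list and lis_1 holds (label, count) pairs whose shares get plotted.
games = ['dota', 'dota', 'csgo', 'lol']
counts = [('dota', 2), ('csgo', 1), ('lol', 1)]
graph({}, games, counts)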
def build_image_temp(coin, start, end, email):
    # Recent price history
    if coin == 'BTC':
        hq = BitcoinChartsDatas.objects.using('btc').order_by('hisdate')
    else:
        hq = LitecoinChartsDatas.objects.using('ltc').order_by('hisdate')
    hq = hq.filter(hisdate__gte=start, hisdate__lte=end)
    hq_date = map(lambda x: x.hisdate, hq)
    if coin == 'BTC':
        hq_data = map(lambda x: x.price, hq)
    else:
        hq_data = map(lambda x: x.price_usd, hq)

    custom_css = '''
      {{ id }}.graph > .background { fill: rgba(255,255,255,0); }
      {{ id }}.plot > .background { fill: rgba(255,255,255,0); }
      {{ id }}.title { font-family: Songti; }
      {{ id }}text { font-family: Songti; }
      {{ id }}.legends .legend text { font-family: Songti; }
      {{ id }}.axis text { font-family: Songti; }
      {{ id }}.axis.y text { font-family: Songti; }
      {{ id }}#tooltip text { font-family: Songti; }
    '''
    custom_css_file = '/tmp/pygal_temp_custom_style.css'
    with open(custom_css_file, 'w') as f:
        f.write(custom_css)

    config = pygal.Config()
    config.css.append('file://' + custom_css_file)
    config.x_value_formatter = lambda x: x.strftime('%-m/%-d')
    config.max_scale = 7
    config.min_scale = 7
    config.legend_at_bottom = True

    hq_chart = pygal.Line(config, title=u'{}行情'.format(coin_name(coin)), show_legend=False)
    hq_chart.x_labels = hq_date
    hq_chart.add('', hq_data)
    hq_path = u'{}chart/{}_hq_{}_{}.png'.format(settings.MEDIA_ROOT, coin, start, end)
    hq_chart.render_to_png(hq_path)

    # Buy/sell ratio per account type
    if coin == 'BTC':
        qs1 = TBitSpecialAddress.objects.using('btc').filter(address_type=1).only('address')
        qs2 = TBitSpecialAddress.objects.using('btc').filter(address_type=2).only('address')
        table = 'blockchain_db.bitcoin_cashflow_output'
        cursor = connections['btc'].cursor()
    else:
        qs1 = TLiteSpecialAddress.objects.using('ltc').filter(address_type=1).only('address')
        qs2 = TLiteSpecialAddress.objects.using('ltc').filter(address_type=2).only('address')
        table = 'ltc_db.litecoin_cashflow_output'
        cursor = connections['ltc'].cursor()
    addresses1 = map(lambda x: x.address, qs1)
    addresses2 = map(lambda x: x.address, qs2)
    sell_sql = '''
        SELECT count(*) FROM {0}
        WHERE (output_value - input_value) < 0 AND address IN %s
          AND block_time > %s AND block_time < %s
        ORDER BY block_time DESC
    '''.format(table)
    buy_sql = '''
        SELECT count(*) FROM {0}
        WHERE (output_value - input_value) > 0 AND address IN %s
          AND block_time > %s AND block_time < %s
        ORDER BY block_time DESC
    '''.format(table)
    cursor.execute(sell_sql, [tuple(addresses1), start, end])
    sell1 = cursor.fetchone()[0]
    cursor.execute(sell_sql, [tuple(addresses2), start, end])
    sell2 = cursor.fetchone()[0]
    cursor.execute(buy_sql, [tuple(addresses1), start, end])
    buy1 = cursor.fetchone()[0]
    cursor.execute(buy_sql, [tuple(addresses2), start, end])
    buy2 = cursor.fetchone()[0]

    bar_chart = pygal.HorizontalStackedBar(config, print_values=True,
                                           title=u'聪明账户与韭菜账户买卖比例')
    bar_chart.x_labels = ('', u'聪明账户', u'韭菜账户', '')
    if buy1 and buy2:
        bar_chart.add(u'买', (buy1 / (sell1 + buy1), buy2 / (sell2 + buy2)))
        bar_chart.add(u'卖', (sell1 / (sell1 + buy1), sell2 / (sell2 + buy2)))
    bar_chart_path = u'{}chart/{}_bar_{}_{}.png'.format(settings.MEDIA_ROOT, coin, start, end)
    bar_chart.render_to_png(bar_chart_path)

    # Deposits and withdrawals at the top 5 exchanges
    if coin == 'BTC':
        top5 = [19, 124, 84, 107, 602]
        recharge = BitExchangeRecharge.objects.using('btc')\
            .filter(groupind__in=top5, trans_date__gte=start, trans_date__lte=end)\
            .values('trans_date').annotate(sum_recharge=Sum('tot_recharge')).order_by('trans_date')
        withdraw = BitExchangeWithdraw.objects.using('btc')\
            .filter(groupind__in=top5, trans_date__gte=start, trans_date__lte=end)\
            .values('trans_date').annotate(sum_withdraw=Sum('tot_withdraw')).order_by('trans_date')
    else:
        top5 = [21, 1, 110, 101, 28]
        recharge = LiteExchangeRecharge.objects.using('ltc')\
            .filter(groupind__in=top5, trans_date__gte=start, trans_date__lte=end)\
            .values('trans_date').annotate(sum_recharge=Sum('tot_recharge')).order_by('trans_date')
        withdraw = LiteExchangeWithdraw.objects.using('ltc')\
            .filter(groupind__in=top5, trans_date__gte=start, trans_date__lte=end)\
            .values('trans_date').annotate(sum_withdraw=Sum('tot_withdraw')).order_by('trans_date')
    recharge_data = map(lambda x: x.get('sum_recharge'), recharge)
    x_labels = map(lambda x: x.get('trans_date'), recharge)
    withdraw_data = map(lambda x: x.get('sum_withdraw'), withdraw)
    tran_chart = pygal.Line(config, title=u'前5大交易所充值提现')
    tran_chart.x_labels = x_labels
    tran_chart.add(u'充值', recharge_data)
    tran_chart.add(u'提现', withdraw_data)
    tran_chart_path = u'{}chart/{}_tran_{}_{}.png'.format(settings.MEDIA_ROOT, coin, start, end)
    tran_chart.render_to_png(tran_chart_path)

    # Change in holdings of the top 1000 addresses
    if coin == 'BTC':
        qs = TBitBalanceRank1000His.objects.using('btc')\
            .filter(his_date__gte=start, his_date__lte=end)\
            .values('his_date').annotate(sum_balance=Sum('balance')).order_by('his_date')
        address_qs = TBitBalanceRank1000His.objects.using('btc')\
            .filter(his_date__gte=start, his_date__lte=end)\
            .values('address').annotate(count=Count('address')).order_by('address')
        addresses = set(map(lambda x: x.get('address'), address_qs))
        org_qs = BitKnewAddress2.objects.using('btc').filter(onoroff=1, address__in=addresses)
        org_address = map(lambda x: x.address, org_qs)
        org_exclude_qs = TBitBalanceRank1000His.objects.using('btc')\
            .filter(his_date__gte=start, his_date__lte=end).exclude(address__in=org_address)\
            .values('his_date').annotate(sum_balance=Sum('balance')).order_by('his_date')
    else:
        qs = TLiteBalanceRank1000His.objects.using('ltc')\
            .filter(his_date__gte=start, his_date__lte=end)\
            .values('his_date').annotate(sum_balance=Sum('balance')).order_by('his_date')
        address_qs = TLiteBalanceRank1000His.objects.using('ltc')\
            .filter(his_date__gte=start, his_date__lte=end)\
            .values('address').annotate(count=Count('address')).order_by('address')
        addresses = set(map(lambda x: x.get('address'), address_qs))
        org_qs = LiteKnewAddress2.objects.using('ltc').filter(onoroff=1, address__in=addresses)
        org_address = map(lambda x: x.address, org_qs)
        org_exclude_qs = TLiteBalanceRank1000His.objects.using('ltc')\
            .filter(his_date__gte=start, his_date__lte=end).exclude(address__in=org_address)\
            .values('his_date').annotate(sum_balance=Sum('balance')).order_by('his_date')
    hold_data = map(lambda x: x.get('sum_balance'), qs)
    org_exclude_hold_data = map(lambda x: x.get('sum_balance'), org_exclude_qs)
    x_labels = map(lambda x: x.get('his_date'), qs)
    hold_chart = pygal.Line(config, fill=True, title=u'前1000地址累计持仓')
    hold_chart.x_labels = x_labels
    hold_chart.add(u'前1000持仓', hold_data)
    hold_chart.add(u'前1000(排除交易所)', org_exclude_hold_data)
    hold_chart_path = u'{}chart/{}_hold_{}_{}.png'.format(settings.MEDIA_ROOT, coin, start, end)
    hold_chart.render_to_png(hold_chart_path)

    # Holdings by user type
    if coin == 'BTC':
        all_balance = TBitBalanceRank1000His.objects.using('btc')\
            .filter(his_date__gte=start, his_date__lte=end)\
            .aggregate(sum_balance=Sum('balance')).get('sum_balance')
        address_qs = TBitBalanceRank1000His.objects.using('btc')\
            .filter(his_date__gte=start, his_date__lte=end)\
            .values('address').annotate(count=Count('address')).order_by('address')
        addresses = set(map(lambda x: x.get('address'), address_qs))
        miner_qs = BitMinerlist.objects.using('btc').filter(address__in=addresses)
        miner_address = map(lambda x: x.address, miner_qs)
        org_qs = BitKnewAddress2.objects.using('btc').filter(onoroff=1, address__in=addresses)
        org_address = map(lambda x: x.address, org_qs)
        org_balance = TBitBalanceRank1000His.objects.using('btc')\
            .filter(his_date__gte=start, his_date__lte=end).filter(address__in=org_address)\
            .aggregate(sum_balance=Sum('balance')).get('sum_balance', 0)
        miner_balance = TBitBalanceRank1000His.objects.using('btc')\
            .filter(his_date__gte=start, his_date__lte=end).filter(address__in=miner_address)\
            .aggregate(sum_balance=Sum('balance')).get('sum_balance', 0)
    else:
        all_balance = TLiteBalanceRank1000His.objects.using('ltc')\
            .filter(his_date__gte=start, his_date__lte=end)\
            .aggregate(sum_balance=Sum('balance')).get('sum_balance')
        address_qs = TLiteBalanceRank1000His.objects.using('ltc')\
            .filter(his_date__gte=start, his_date__lte=end)\
            .values('address').annotate(count=Count('address')).order_by('address')
        addresses = set(map(lambda x: x.get('address'), address_qs))
        miner_qs = LiteMinerlist.objects.using('ltc').filter(address__in=addresses)
        miner_address = map(lambda x: x.address, miner_qs)
        org_qs = LiteKnewAddress2.objects.using('ltc').filter(onoroff=1, address__in=addresses)
        org_address = map(lambda x: x.address, org_qs)
        org_balance = TLiteBalanceRank1000His.objects.using('ltc')\
            .filter(his_date__gte=start, his_date__lte=end).filter(address__in=org_address)\
            .aggregate(sum_balance=Sum('balance')).get('sum_balance', 0)
        miner_balance = TLiteBalanceRank1000His.objects.using('ltc')\
            .filter(his_date__gte=start, his_date__lte=end).filter(address__in=miner_address)\
            .aggregate(sum_balance=Sum('balance')).get('sum_balance', 0)

    pie_chart = pygal.Pie(config, print_values=True, title=u'用户类型持仓占比')
    pie_chart.value_formatter = lambda x: '{:.2%}'.format(x)
    pie_chart.legend_at_bottom_columns = 3
    if all_balance:
        org_data = org_balance / all_balance
        miner_data = miner_balance / all_balance
        pie_chart.add(u'交易所', org_data)
        pie_chart.add(u'矿机', miner_data)
        pie_chart.add(u'其他', 1 - org_data - miner_data)
    pie_chart_path = u'{}chart/{}_pie_{}_{}.png'.format(settings.MEDIA_ROOT, coin, start, end)
    pie_chart.render_to_png(pie_chart_path)

    send_mail_files(u'{}至{}{}模版'.format(start, end, coin_name(coin)),
                    [email],
                    [hq_path, bar_chart_path, tran_chart_path,
                     hold_chart_path, pie_chart_path])
def home():
    # Bar chart: average active session per screen index
    custom_style = Style(colors=('#351508', '#404040', '#9BC850'))
    title = 'Screen ID analysis'
    bar_chart = pygal.Bar(width=400, height=400, explicit_size=True,
                          title=title, style=custom_style)
    var_x_labels = []
    var_data = []
    for idx1 in list(range(0, len(data_time_spent))):
        var_x_labels.append(idx1 + 1)
        var_data.append(data_time_spent.Time[idx1])
    bar_chart.x_labels = var_x_labels
    var_time_spent = var_data
    bar_chart.add('Avg active session', var_time_spent)
    bar_chart.render_to_file('static/images/bar_chart.svg')

    # Bar chart: average time spent on each screen
    title2 = 'Avg time spent on each screen'
    custom_style = Style(colors=('#059467', '#9BC850', '#E80080'))
    avg_bar_chart = pygal.Bar(width=1200, height=800, explicit_size=True,
                              title=title2, x_label_rotation=90, style=custom_style)
    var_x_labels = []
    var_data = []
    for idx1 in list(range(0, len(avg_time_spent))):
        var_x_labels.append(avg_time_spent.Screen_id[idx1])
        var_data.append(avg_time_spent.Avg_Time[idx1])
    avg_bar_chart.x_labels = var_x_labels
    var_time_spent = var_data
    avg_bar_chart.add('Avg time sec', var_data)
    avg_bar_chart.render_to_file('static/images/screen_avg_chart.svg')

    # Bar chart: least used screens
    title2 = 'Least used screens'
    custom_style = Style(colors=('#1878f7', '#404040', '#E80080'))
    avg_bar_chart = pygal.Bar(width=1200, height=600, explicit_size=True,
                              title=title2, x_label_rotation=90, style=custom_style)
    var_x_labels = []
    var_data = []
    for idx1 in list(range(0, len(data_tf_idf))):
        var_x_labels.append(data_tf_idf.GUI[idx1])
        var_data.append(data_tf_idf.Priority[idx1])
    avg_bar_chart.x_labels = var_x_labels
    var_time_spent = var_data
    avg_bar_chart.add('Least used screen', var_time_spent)
    avg_bar_chart.render_to_file('static/images/tf_idf_chart.svg')

    # ==================== Donut chart of the screens used ====================
    print("===Donut Chart==========")
    pie_chart = pygal.Pie(width=1200, height=600, explicit_size=True, inner_radius=.2)
    pie_chart.title = 'Time spent on each screen (in %)'
    var_x_labels = []
    var_data = []
    total_values = 0
    for idx1 in list(range(0, len(avg_time_spent))):
        total_values = total_values + avg_time_spent.Avg_Time[idx1]
    for idx1 in list(range(0, len(avg_time_spent))):
        prcnt = round((avg_time_spent.Avg_Time[idx1] / total_values) * 100, 2)
        pie_chart.add(avg_time_spent.Screen_id[idx1], prcnt)
    pie_chart.render_to_file('static/images/donut.svg')

    # ================================== Gauge chart ==========
    gauge = pygal.SolidGauge(inner_radius=0.70)
    percent_formatter = lambda x: '{:.10g}%'.format(x)
    gauge.value_formatter = percent_formatter

    # ==== Scatter plot =====================================
    xy_chart = pygal.XY(width=1200, height=600, explicit_size=True, stroke=False)
    xy_chart.title = 'Scatter plot of the screen ID and time spent'
    # For each screen, collect the screen that follows it in the full data
    var_screen_id_dict = defaultdict(list)
    for idx in list(range(0, len(avg_time_spent))):
        print(idx, "=>", avg_time_spent.Screen_id[idx])
        for idx2 in list(range(0, len(var_full_data))):
            if avg_time_spent.Screen_id[idx] == var_full_data.ScreenName[idx2]:
                if idx2 + 1 >= len(var_full_data):
                    continue
                else:
                    var_screen_id_dict[avg_time_spent.Screen_id[idx]].append(
                        var_full_data.ScreenName[idx2 + 1])
    print("The next screen list stored==========")
    print(var_screen_id_dict)
    xy_chart.add('2', [(.1, .15), (.12, .23), (.4, .3), (.6, .4), (.21, .21),
                       (.5, .3), (.6, .8), (.7, .8)])
    xy_chart.add('3', [(.05, .01), (.13, .02), (1.5, 1.7), (1.52, 1.6), (1.8, 1.63),
                       (1.5, 1.82), (1.7, 1.23), (2.1, 2.23), (2.3, 1.98)])
    xy_chart.render_to_file('static/images/scatter.svg')

    my_html = create_html_page()
    return my_html
fitxerOut.write("2. Numero de paraules del document: " + str(numeroParaules) + '\n\n') fitxerOut.write("3. Mitjana de paraules per linia: " + str(mitpar) + '\n\n') histogram = pygal.Bar(title='Paraules per linia') for x in paraulesXlinia: histogram.add("Linia " + str(x[0]), x[1]) histogram.render_in_browser() fitxerOut.write("4. Promig de lletres en el document: \n") for key, value in MapDeLletres.iteritems(): fitxerOut.write("Caracter: " + str(key) + " Valor: " + str(value) + "\n") histLletres = pygal.Pie(title='Caracters utilitzats en el text') for val in llet: k = val[0] v = val[1] histLletres.add(str(k), v) histLletres.render_in_browser() fitxerOut.write("\n5. Promig de longitud de paraules en el document: " + str(mitjanaCaracXPar) + "\n") histLong = pygal.HorizontalBar(title='Longitud de les paraules') for val in vecLongPar: k = val[0] v = val[1] histLong.add(str(k), v)
def draw(file_index_dict, q_id):  # q_id is an int
    pic_dir = '../pics/'
    if not os.path.exists(pic_dir):
        os.makedirs(pic_dir)
    q_dir = pic_dir + str(q_id) + "/"
    if not os.path.exists(q_dir):
        os.makedirs(q_dir)

    # 1. Use pygal to draw a bar chart of suspect_index (decreasing)
    sorted_items = sorted(file_index_dict.items(), key=lambda x: x[1], reverse=True)
    x_ax, y_ax = [], []
    for item in sorted_items:
        x_ax.append(item[0])
        y_ax.append(item[1])

    # Visualize
    my_style = LS("#006670", base_style=LCS)
    my_config = pygal.Config()
    my_config.x_label_rotation = 45
    my_config.show_legend = False
    my_config.title_font_size = 24
    my_config.major_label_font_size = 18  # of no use?
    my_config.label_font_size = 14
    my_config.truncate_label = 15
    my_config.show_y_guides = True
    my_config.width = 1000

    chart = pygal.Bar(my_config, style=my_style)
    chart.title = "Most-suspected Files in Question NO." + str(q_id)
    chart.x_labels = x_ax
    chart.add("Suspect Index:", y_ax)
    chart.render_to_file(q_dir + "dataBar.svg")

    # 2. Draw a pie chart of the proportion of suspect-index at all levels
    pie = pygal.Pie(inner_radius=.4)
    pie.title = "Proportion of Suspect-Index at all levels (in %)"
    n_00_01 = count_range_num(y_ax, 0, 0.1)
    n_01_03 = count_range_num(y_ax, 0.1, 0.3)
    n_03_05 = count_range_num(y_ax, 0.3, 0.5)
    n_05_07 = count_range_num(y_ax, 0.5, 0.7)
    n_07_08 = count_range_num(y_ax, 0.7, 0.8)
    n_08_09 = count_range_num(y_ax, 0.8, 0.9)
    n_09_10 = count_range_num(y_ax, 0.9, 1)
    n_total = len(y_ax)
    pie.add("0.0 ~ 0.1", round(n_00_01 / n_total * 100, 2))
    pie.add("0.1 ~ 0.3", round(n_01_03 / n_total * 100, 2))
    pie.add("0.3 ~ 0.5", round(n_03_05 / n_total * 100, 2))
    pie.add("0.5 ~ 0.7", round(n_05_07 / n_total * 100, 2))
    pie.add("0.7 ~ 0.8", round(n_07_08 / n_total * 100, 2))
    pie.add("0.8 ~ 0.9", round(n_08_09 / n_total * 100, 2))
    pie.add("0.9 ~ 1.0", round(n_09_10 / n_total * 100, 2))
    pie.render_to_file(q_dir + "dataPie.svg")
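# count_range_num() is called in draw() above but not defined in this snippet;
# a minimal sketch under the assumption that it counts how many values fall in
# the half-open interval [low, high).
def count_range_num(values, low, high):
    return sum(1 for v in values if low <= v < high)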
def pie_graph(data):
    graph = pygal.Pie()
    for item in data:
        graph.add(str(item[0]), item[1])
    return graph
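# Hypothetical usage of pie_graph() above; the (label, value) pairs are
# illustrative. render_data_uri() is the real pygal call for embedding the
# resulting SVG in an HTML template.
chart = pie_graph([('product', 12), ('service', 30)])
chart_uri = chart.render_data_uri()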
def data_visualization():
    '''
    Build the dashboard charts:
    1. a pie chart of products vs. services in the inventory
    2. a line graph of total sales per month
    '''
    cur.execute("""
        SELECT type, count(type)
        FROM public.inventories
        GROUP BY type;
    """)
    product_service = cur.fetchall()

    # Pie chart: ratio of products to services
    pie_chart = pygal.Pie()
    pie_chart.title = 'Ratio of product and service'
    for each in product_service:
        pie_chart.add(each[0], each[1])
    pie_data = pie_chart.render_data_uri()
    # end of pie chart

    # Line graph: sales made in every month
    cur.execute("""
        SELECT EXTRACT(MONTH FROM s.created_at) as sales_month,
               sum(quantity * selling_price) as total_sales
        from sales as s
        join inventories as i on s.inv_id = i.id
        GROUP BY sales_month
        ORDER BY sales_month
    """)
    monthly_sales = cur.fetchall()

    # Sample monthly totals (not used below)
    data = [
        {'month': 'January', 'total': 22}, {'month': 'February', 'total': 27},
        {'month': 'March', 'total': 23}, {'month': 'April', 'total': 20},
        {'month': 'May', 'total': 12}, {'month': 'June', 'total': 32},
        {'month': 'July', 'total': 42}, {'month': 'August', 'total': 72},
        {'month': 'September', 'total': 52}, {'month': 'October', 'total': 42},
        {'month': 'November', 'total': 92}, {'month': 'December', 'total': 102},
    ]

    x = []
    sales = []
    for each in monthly_sales:
        x.append(each[0])
        sales.append(each[1])

    line_graph = pygal.Line()
    line_graph.title = 'Total sales made in every month in the year 2019'
    line_graph.x_labels = map(str, x)
    line_graph.add('Total Sales', sales)
    line_data = line_graph.render_data_uri()

    return render_template('charts.html', pie=pie_data, line=line_data)
def predict():
    # Extract the requested applicant id from the form
    id = [int(x) for x in request.form.values()]
    id_current = id[0]

    # Load the training and test sets and pick out the requested applicant
    features = pd.read_csv('../application_train.csv')
    test_features2 = pd.read_csv('../application_test.csv')
    test_features = test_features2.loc[test_features2['SK_ID_CURR'] == id_current]
    missing_values = (test_features.loc[test_features['SK_ID_CURR'] == id_current]
                      ).iloc[0].isnull().sum() / len(test_features.columns)

    encoding = 'ohe'
    n_folds = 5
    train_ids = features['SK_ID_CURR']
    test_ids = test_features['SK_ID_CURR']

    # Extract the labels for training
    labels = features['TARGET']

    # Remove the ids and target
    features = features.drop(columns=['SK_ID_CURR', 'TARGET'])
    test_features = test_features.drop(columns=['SK_ID_CURR'])

    # One-hot encoding
    if encoding == 'ohe':
        features = pd.get_dummies(features)
        test_features = pd.get_dummies(test_features)
        # Align the dataframes by the columns
        features, test_features = features.align(test_features, join='inner', axis=1)
        # No categorical indices to record
        cat_indices = 'auto'
    # Integer label encoding
    elif encoding == 'le':
        label_encoder = LabelEncoder()
        cat_indices = []
        # Map each categorical column to integers and record its index
        for i, col in enumerate(features):
            if features[col].dtype == 'object':
                features[col] = label_encoder.fit_transform(
                    np.array(features[col].astype(str)).reshape((-1,)))
                test_features[col] = label_encoder.transform(
                    np.array(test_features[col].astype(str)).reshape((-1,)))
                cat_indices.append(i)
    # Catch error if the label encoding scheme is not valid
    else:
        raise ValueError("Encoding must be either 'ohe' or 'le'")

    print('Training Data Shape: ', features.shape)
    print('Testing Data Shape: ', test_features.shape)

    # Extract feature names and convert to numpy arrays
    feature_names = list(features.columns)
    features = np.array(features)
    test_features = np.array(test_features)

    # Cross-validation folds and accumulators
    k_fold = KFold(n_splits=n_folds, shuffle=True, random_state=50)
    feature_importance_values = np.zeros(len(feature_names))
    test_predictions = np.zeros(test_features.shape[0])
    out_of_fold = np.zeros(features.shape[0])
    valid_scores = []
    train_scores = []

    for train_indices, valid_indices in k_fold.split(features):
        # Training and validation data for the fold
        train_features, train_labels = features[train_indices], labels[train_indices]
        valid_features, valid_labels = features[valid_indices], labels[valid_indices]

        model = lgb.LGBMClassifier(n_estimators=100, objective='binary',
                                   class_weight='balanced', learning_rate=0.05,
                                   reg_alpha=0.1, reg_lambda=0.1, subsample=0.8,
                                   n_jobs=-1, random_state=50)
        model.fit(train_features, train_labels, eval_metric='auc',
                  eval_set=[(valid_features, valid_labels),
                            (train_features, train_labels)],
                  eval_names=['valid', 'train'], categorical_feature=cat_indices,
                  early_stopping_rounds=100, verbose=200)
        # pickle.dump(model, open('model.pkl', 'wb'))

        # Record the best iteration, feature importances and predictions
        best_iteration = model.best_iteration_
        feature_importance_values += model.feature_importances_ / k_fold.n_splits
        test_predictions += model.predict_proba(
            test_features, num_iteration=best_iteration)[:, 1] / k_fold.n_splits
        out_of_fold[valid_indices] = model.predict_proba(
            valid_features, num_iteration=best_iteration)[:, 1]

        # Record the best scores
        valid_scores.append(model.best_score_['valid']['auc'])
        train_scores.append(model.best_score_['train']['auc'])

        # Clean up memory
        gc.enable()
        del model, train_features, valid_features
        gc.collect()

    # Submission and feature-importance dataframes
    submission = pd.DataFrame({'SK_ID_CURR': test_ids, 'TARGET': test_predictions})
    feature_importances = pd.DataFrame({'feature': feature_names,
                                        'importance': feature_importance_values})

    # Overall validation score and per-fold metrics
    valid_auc = roc_auc_score(labels, out_of_fold)
    valid_scores.append(valid_auc)
    train_scores.append(np.mean(train_scores))
    fold_names = list(range(n_folds))
    fold_names.append('Moyenne')
    metrics = pd.DataFrame({'fold': fold_names, 'train': train_scores,
                            'valid': valid_scores})

    Row = submission
    print(submission.iloc[0]['TARGET'])

    # Leftovers from an earlier matplotlib-based rendering approach
    pngImage = io.BytesIO()
    labels = ['Difficulté de paiement', 'Pas de difficulté']
    values = [submission.iloc[0]['TARGET'], (1 - submission.iloc[0]['TARGET'])]

    # Half-pie of the predicted probability of payment difficulty
    pie_chart = pygal.Pie(half_pie=True, legend_at_bottom=True)
    pie_chart.title = 'Prévisions sur les facilités de paiement (in %)'
    pie_chart.add('Pas Difficulté', (1 - submission.iloc[0]['TARGET']))
    pie_chart.add('Difficulté', submission.iloc[0]['TARGET'])
    pie_chart_data = pie_chart.render_data_uri()

    # Half-pie of the share of missing values for this applicant
    pie_chart_missing = pygal.Pie(half_pie=True, legend_at_bottom=True)
    pie_chart_missing.title = '% de données manquantes'
    pie_chart_missing.add('% Value', (1 - missing_values))
    pie_chart_missing.add('% Missing Value', missing_values)
    pie_chart_data_missing = pie_chart_missing.render_data_uri()

    return render_template('blank.html', pie_chart_data=pie_chart_data,
                           pie_chart_data_missing=pie_chart_data_missing,
                           prediction_text='Sales should be $ {}'.format(
                               1 - submission.iloc[0]['TARGET']))
def index():
    tasks = []
    api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)
    top_10 = twitter_trend(api, 10)
    if request.method == 'POST':
        tweet_query = request.form['content']
        tweet_number = int(request.form['number'])
        texts = []
        search_words = tweet_query
        new_search = search_words + " -filter:retweets"
        search = tweepy.Cursor(api.search, q=new_search, lang="th",
                               tweet_mode="extended").items(tweet_number)
        texts = [item.full_text for item in search]
        clean_list = text_cleaning(texts)

        # TF-IDF term weighting followed by the pre-trained classifier
        term_weighting = tfidf_model.transform(clean_list)
        new = term_weighting.toarray()
        new = [n[support] for n in new]
        pred = clf_model.predict(new)

        # Count positive/negative predictions and write them to a CSV
        neg_count = 0
        pos_count = 0
        my_list = []
        with open("dataset.csv", mode='w', newline='', encoding='utf-8') as f:
            thewriter = csv.writer(f)
            thewriter.writerow(["text", "sentiment"])
            for i in range(len(texts)):
                my_list.append([texts[i], pred[i]])
                if pred[i] == 1.0:
                    neg_count = neg_count + 1
                    thewriter.writerow([texts[i].replace('\n', ''), 'Negative'])
                else:
                    pos_count = pos_count + 1
                    thewriter.writerow([texts[i].replace('\n', ''), 'Positive'])

        # Pie chart of the negative/positive split
        custom_style = Style(colors=('#800000', '#3CB371'))
        pie_chart = pygal.Pie(style=custom_style)
        pie_chart.title = 'Sentiment ratio'
        pie_chart.add('Negative', neg_count)
        pie_chart.add('Positive', pos_count)
        chart_data = pie_chart.render_data_uri()

        return render_template('index.html', tasks=my_list, trends=top_10,
                               keyword=tweet_query, chart=chart_data)
    else:
        top_10 = twitter_trend(api, 10)
        return render_template('index.html', tasks=[], trends=top_10)
def main(): """test""" file = open('D:/project/1.csv') data = (csv.reader(file)) listsum_reading = [] listmen_reading_bkk = [] listwomen_reading_bkk = [] listmen_unreading_bkk = [] listwomen_unreading_bkk = [] for i in data: listsum_reading.append(i[5]) listmen_reading_bkk.append(i[6]) listwomen_reading_bkk.append(i[7]) listmen_unreading_bkk.append(i[10]) listwomen_unreading_bkk.append(i[11]) line_chart = pygal.StackedLine(fill=True, interpolate='cubic', style=DarkStyle) line_chart = pygal.Bar() line_chart.title = ' Number of population, aged 6 years and over by reading' line_chart.x_labels = [ 'กรุงเทพมหานคร', 'ภาคกลาง', 'ภาคเหนือ', 'ภาคตะวันออกเฉียงเหนือ', 'ภาคใต้' ] line_chart.add('6-14 ปี', [ int(listsum_reading[9]), int(listsum_reading[26]), int(listsum_reading[42]), int(listsum_reading[57]), int(listsum_reading[71]) ]) line_chart.add('15-24 ปี', [ int(listsum_reading[10]), int(listsum_reading[27]), int(listsum_reading[43]), int(listsum_reading[58]), int(listsum_reading[72]) ]) line_chart.add('25-39 ปี', [ int(listsum_reading[11]), int(listsum_reading[28]), int(listsum_reading[44]), int(listsum_reading[59]), int(listsum_reading[73]) ]) line_chart.add('40-59 ปี', [ int(listsum_reading[12]), int(listsum_reading[29]), int(listsum_reading[45]), int(listsum_reading[60]), int(listsum_reading[74]) ]) line_chart.add('60andover', [ int(listsum_reading[13]), int(listsum_reading[30]), int(listsum_reading[46]), int(listsum_reading[61]), int(listsum_reading[75]) ]) line_chart.render_to_file('bar_area_1.svg') bkk_pie = pygal.Pie(inner_radius=.4) bkk_pie.title = 'Number of population, reading and unreading , age 6-14 yearsold' bkk_pie.add('ชาย อ่าน', int(listmen_reading_bkk[9])) bkk_pie.add('หญิง อ่าน', int(listwomen_reading_bkk[9])) bkk_pie.add('ชาย ไม่อ่าน', int(listmen_unreading_bkk[9])) bkk_pie.add('หญิง ไม่อ่าน', int(listwomen_unreading_bkk[9])) bkk_pie.render_to_file('bkk.svg')
def data_visualisation():
    conn = psycopg2.connect(
        " dbname='inventory_management_system' user='******' host='localhost'"
        " port='5432' password='******' ")
    cur = conn.cursor()

    # Pie chart: ratio of products to services
    cur.execute("""
        SELECT type, count(type)
        FROM public.inventories
        group by type;
    """)
    product_service = cur.fetchall()
    print(product_service)

    pie_chart = pygal.Pie()
    pie_chart.title = 'Ratio of product and service'
    for each in product_service:
        pie_chart.add(each[0], each[1])
    pie_data = pie_chart.render_data_uri()
    # end of pie chart

    # Line graph: total sales per month
    cur.execute("""
        SELECT EXTRACT(MONTH FROM s.created_at) as sales_month,
               sum(quantity * selling_price) as total_sales
        from sales as s
        join inventories as i on s.invid = i.id
        group by sales_month
        order by sales_month asc;
    """)
    monthly_sales = cur.fetchall()
    print(monthly_sales)

    # Sample monthly totals (not used below)
    data = [{'month': 'January', 'total': 22}, {'month': 'February', 'total': 27},
            {'month': 'March', 'total': 23}, {'month': 'April', 'total': 20},
            {'month': 'May', 'total': 12}, {'month': 'June', 'total': 32},
            {'month': 'July', 'total': 42}, {'month': 'August', 'total': 72},
            {'month': 'September', 'total': 52}, {'month': 'October', 'total': 42},
            {'month': 'November', 'total': 92}, {'month': 'December', 'total': 102}]

    a = []
    b = []
    for each in monthly_sales:
        a.append(each[0])
        b.append(each[1])

    line_chart = pygal.Line()
    line_chart.title = 'Total Sales in the year 2019'
    line_chart.x_labels = a
    line_chart.add('Total Sales', b)
    line_data = line_chart.render_data_uri()

    return render_template('chart.html', pie=pie_data, line=line_data)
import pandas as pd
import pygal as pg
import numpy as np
from pygal.style import DarkStyle

reading = pd.read_csv('../DATA/css.csv')
pie_chart = pg.Pie(style=DarkStyle)
pie_chart.title = 'Domestic Crime'
dct = {}
# Count records per value of the 'Domestic' flag
data = np.array(
    reading.groupby('Domestic', as_index=False).count()[['Domestic', 'Date']]
).tolist()
for i in data:
    pie_chart.add(str(i[0]), i[1])
pie_chart.render_to_file('../svg_grap/Domestic.svg')
def generate(self, from_date=None, to_date="Today", from_time=None, to_time="Now"):
    if from_time:
        self.from_time = abs(from_time)
    if from_date:
        self.from_date = abs(from_date)

    # Build the list of dates (or hours) the report should cover
    if from_date and to_date:
        if from_date == to_date and from_date == "Today":
            self.to_date = datetime.date.today()
            self.date_list = [(self.to_date - datetime.timedelta(x)) for x in range(-24, 1)]
        elif isinstance(from_date, int) and to_date == "Today":
            self.to_date = datetime.date.today()
            self.date_list = [(self.to_date + datetime.timedelta(x)) for x in range(from_date, 1)]
    if from_time and to_time:
        if isinstance(from_time, int) and to_time == "Now":
            self.to_date = datetime.datetime.now().replace(tzinfo=tz.tzlocal())
            self.date_list = [
                (self.to_date + datetime.timedelta(hours=x)).replace(tzinfo=tz.tzlocal())
                for x in range(from_time, 1)
            ]

    tw_count = []
    quotes_count = []
    retweet_count = []
    all_tweet_count = []
    all_mention_count = []
    all_media_count = []
    for index, item in enumerate(self.date_list):
        if index == len(self.date_list) - 1:
            break
        # Plain tweets (no retweets)
        tweets = Analysis.objects(
            Q(create_date__gte=self.date_list[index]) &
            Q(create_date__lt=self.date_list[index + 1]) &
            Q(retweet_count=0)).all()
        tw_count.append(tweets.count())
        # Count quotes
        quotes = Analysis.objects(
            Q(create_date__gte=self.date_list[index]) &
            Q(create_date__lt=self.date_list[index + 1]) &
            Q(is_quote_status=True)).all()
        quotes_count.append(quotes.count())
        # Count retweets
        retweet = Analysis.objects(
            Q(create_date__gte=self.date_list[index]) &
            Q(create_date__lt=self.date_list[index + 1]) &
            Q(retweet_count__gt=0)).all()
        retweet_count.append(retweet.count())
        # User mentions
        all_mention = Analysis.objects(
            Q(create_date__gte=self.date_list[index]) &
            Q(create_date__lt=self.date_list[index + 1]) &
            Q(user_mentions__ne=[])).all()
        all_mention_count.append(all_mention.count())
        # Media
        all_media = Analysis.objects(
            Q(create_date__gte=self.date_list[index]) &
            Q(create_date__lt=self.date_list[index + 1]) &
            Q(media_type__ne='')).all()
        all_media_count.append(all_media.count())
        # All tweets
        all_tweet = Analysis.objects(
            Q(create_date__gte=self.date_list[index]) &
            Q(create_date__lt=self.date_list[index + 1])).all()
        all_tweet_count.append(all_tweet.count())

    date_chart = pygal.Bar(margin=100, height=1000, width=1000, x_label_rotation=90)
    date_chart.x_labels = map(
        lambda d: jdatetime.datetime.fromgregorian(datetime=d).strftime('%m/%d - %H:%m ')
        if isinstance(d, datetime.datetime)
        else jdatetime.date.fromgregorian(date=d).strftime('%a %m/%d'),
        self.date_list[:-1])
    date_chart.title = 'Count of ALL'
    date_chart.add("all_tweet_count", all_tweet_count)
    date_chart.add("tw", tw_count)
    date_chart.add("retweet", retweet_count)
    date_chart.add("quotes", quotes_count)
    date_chart.add("mention", all_mention_count)
    date_chart.add("all_media", all_media_count)

    # Create the pie chart
    self.all_tweets_count = sum(all_tweet_count)
    pie_chart = pygal.Pie(inner_radius=.4)
    pie_chart.title = 'From All - More than 100% - {} tweet'.format(self.all_tweets_count)
    pie_chart.add('tw {0:.2f} %'.format(100 * sum(tw_count) / self.all_tweets_count),
                  100 * float(sum(tw_count)) / float(self.all_tweets_count))
    pie_chart.add('quotes {0:.2f} %'.format(100 * sum(quotes_count) / self.all_tweets_count),
                  100 * float(sum(quotes_count)) / float(self.all_tweets_count))
    pie_chart.add('retweet {0:.2f} %'.format(100 * sum(retweet_count) / self.all_tweets_count),
                  100 * float(sum(retweet_count)) / float(self.all_tweets_count))
    pie_chart.add('mention {0:.2f} %'.format(100 * sum(all_mention_count) / self.all_tweets_count),
                  100 * float(sum(all_mention_count)) / float(self.all_tweets_count))
    pie_chart.add('media {0:.2f} %'.format(100 * sum(all_media_count) / self.all_tweets_count),
                  100 * float(sum(all_media_count)) / float(self.all_tweets_count))

    # Render both charts to PNG files
    filename = datetime.datetime.today().strftime('%Y-%m-%d-%H:%m')
    date_chart.render_to_png(path.join(self.d, 'tmp/' + filename + '-chart.png'), dpi=600)
    self.file_names.append(path.join(self.d, 'tmp/' + filename + '-chart.png'))
    pie_chart.render_to_png(path.join(self.d, 'tmp/' + filename + '-pie-chart.png'), dpi=300)
    self.file_names.append(path.join(self.d, 'tmp/' + filename + '-pie-chart.png'))
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Donut chart
import pygal

# inner_radius sets the inner radius: the larger it is, the more the pie looks
# like a ring (.4 -> .75; at .75 it is clearly a donut).
donut_chart = pygal.Pie(inner_radius=.75)
donut_chart.title = 'Browser usage in February 2012 (in %)'
donut_chart.add('IE', 19.5)
donut_chart.add('Firefox', 36.6)
donut_chart.add('Chrome', 36.3)
donut_chart.add('Safari', 4.5)
donut_chart.add('Opera', 2.3)
donut_chart.render_to_file('./image/pie_donut.svg')
bar_chart.render_to_file("images/q9.svg") print("question nine executed successfully") query = [{"$group":{"_id":"$state", "count":{"$sum":1}}}, {"$sort":{"count":-1}}] cursor = customersCollection.aggregate(query) customers2 = [] customers = [] for query in cursor: customers2.append(query["_id"]) customers.append(query["count"]) bar_chart = pygal.Pie() bar_chart.title = "customers by state" bar_chart.x_labels = map(str,customers) bar_chart.add("payments collected",customers) bar_chart.render_to_file("images/q10.svg") print("question ten executed successfully") query = [{"$group":{"_id":"$reportsTo","count":{"$sum":1}}},{"$sort":{"count":1}}] cursor = employeesCollection.aggregate(query) employee2 = [] employee = [] for query in cursor:
def index():
    user_id = session.get('user_id')
    form = login_form.LoginForm()
    db = GraficoModel()
    now = datetime.now()
    ano = now.strftime('%Y')
    mes = now.strftime('%m')

    # Revenue collected this month
    este_mes = db.get_levyings_sum(mes)
    if este_mes[0]:
        este_mes2 = este_mes[0]
        este_mes = real_br_money_mask(este_mes[0])
    else:
        este_mes = '0'
        este_mes2 = 0.0

    # Revenue collected last month
    if mes != 1:
        mes = str(int(mes) - 1)
        ultimo_mes = db.get_levyings_sum(mes)
        if ultimo_mes[0]:
            ultimo_mes2 = ultimo_mes[0]
            ultimo_mes = real_br_money_mask(ultimo_mes[0])
        else:
            ultimo_mes = '0'
            ultimo_mes2 = 0.0
    else:
        ultimo_mes = db.get_levyings_sum(mes)
        ultimo_mes2 = ultimo_mes[0]  # keep the numeric value before formatting
        ultimo_mes = real_br_money_mask(ultimo_mes[0])

    if este_mes2 > ultimo_mes2:
        text_info = "text-info"
        text_primary = "text-primary"
    else:
        text_info = "text-primary"
        text_primary = "text-info"

    result = db.get_numero_empresas(user_id)
    numero_clientes = result[0]
    if not numero_clientes:
        numero_clientes = "0"
    total_clientes = result[1]
    porcentagem = int(ceil((numero_clientes * 100) / total_clientes))
    porcentagem = porcentagem - (porcentagem % 10)

    # Pie chart: revenue per tax regime
    result = db.get_tributacao('SIMPLES NACIONAL', 'PRESUMIDO', 'REAL', user_id)
    chart = pygal.Pie(style=CleanStyle)
    chart.force_uri_protocol = 'http'
    chart.title = 'Faturamento por Cliente'
    chart.add('Simples Nacional', result[0])
    chart.add('Lucro Presumido', result[1])
    chart.add('Lucro Real', result[2])
    graph_data = chart.render_data_uri()

    # Bar chart: open / in-progress / closed occurrences
    result = db.get_ocorrencias(user_id)
    custom_style = Style(colors=('#9e0808', '#041e70', '#04753c'))
    chart = pygal.Bar(style=custom_style)
    chart.force_uri_protocol = 'http'
    chart.title = 'Ocorrências em Aberto / Fechado / Andamento'
    chart.add('Aberto', result[0])
    chart.add('Andamento', result[2])
    chart.add('Fechado', result[1])
    graph_data2 = chart.render_data_uri()

    # Bar chart: billings per month for the current year
    result1 = db.get_cobrancas('Continuo')
    result2 = db.get_cobrancas('Nao_Continuo')
    chart = pygal.Bar(style=CleanStyle)
    chart.force_uri_protocol = 'http'
    chart.title = 'Cobranças, ano ' + str(ano) + ':'
    chart.x_labels = ('Jan', 'Fev', 'Mar', 'Abr', 'Mai', 'Jun',
                      'Jul', 'Ago', 'Set', 'Out', 'Nov', 'Dez')
    chart.add('Contínuo', result1)
    chart.add('Não Contínuo', result2)
    graph_data3 = chart.render_data_uri()

    return render_template('index/index.html', form=form, graph_data=graph_data,
                           graph_data2=graph_data2, graph_data3=graph_data3,
                           total_clientes=total_clientes, numero_clientes=numero_clientes,
                           porcentagem=porcentagem, este_mes=este_mes,
                           ultimo_mes=ultimo_mes, flag_index=1,
                           text_info=text_info, text_primary=text_primary)
import pygal

'''Half-pie (semicircle) chart'''
# Data values
data = [2365, 5414]
# Labels matching the data; to show the numbers on the chart, pass the value
# with each label
labels = ['男生', '女生']  # boys, girls

# Create the pygal.Pie object
pie = pygal.Pie()
# Add each value with its label
for i, per in enumerate(data):
    pie.add(labels[i], per)
# Title
pie.title = '男女比例'  # male/female ratio
# Put the legend at the bottom
pie.legend_at_bottom = True
# Set the inner radius (donut hole)
pie.inner_radius = 0.4
# Render as a half pie
pie.half_pie = True
# Write the chart to a local SVG file
pie.render_to_file('language_percent1.svg')
def __init__(self, **kwargs):
    self.chart = pygal.Pie(**kwargs)
    self.chart.title = 'Listed properties'
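# The three __init__ snippets above ('Registered Users', 'Amount of Fruits',
# 'Listed properties') all wrap pygal.Pie in a small class. A minimal
# self-contained sketch of that pattern; the class name, methods and data below
# are illustrative, not taken from the original projects.
import pygal

class PieReport:
    def __init__(self, **kwargs):
        self.chart = pygal.Pie(**kwargs)
        self.chart.title = 'Registered Users'

    def add(self, label, value):
        self.chart.add(label, value)

    def render_to_file(self, path):
        self.chart.render_to_file(path)

report = PieReport(inner_radius=0.4)
report.add('Active', 120)
report.add('Inactive', 30)
report.render_to_file('registered_users.svg')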