def charts(request):
    """Render the per-metric history chart page for the logged-in user.

    Admins see the latest 21 readings across all devices; regular users see
    only their own device's readings.  Eight line charts (voltage, current,
    switch status, active power, total active power, power factor, CO2
    emission, frequency) are embedded into the template.
    """
    phone_number = request.session.get('phone_number')
    user = UserModel.objects.get(phone_number=phone_number)

    if user.is_admin:
        data = DataModel.objects.order_by('data_time')[0:21]
        if user.device_id:
            device = DeviceModel.objects.get(device_id=user.device_id)
            status = device.status
        else:
            # Admin without a bound device: placeholder label instead of a model.
            device = '管理员设备'
            status = True
    else:
        device = DeviceModel.objects.get(device_id=user.device_id)
        data = DataModel.objects.filter(device_id=device.device_id).order_by('data_time')[0:21]
        status = device.status
    status = 'offline' if status is False else 'online'

    # Drop the sub-second fraction from each timestamp for the x-axis labels.
    attr = [str(dat.data_time).split('.')[0] for dat in data]

    def _chart(title, values):
        # Build one small embedded line chart for a single metric series.
        chart = Line(height=200, width=400)
        chart.add(title, attr, values)
        return chart.render_embed()

    context_data = {
        'name': user.name,
        # BUGFIX: when `device` is the placeholder string (admin without a
        # device), reading `.device_id` raised AttributeError.
        'device_id': getattr(device, 'device_id', device),
        'device_status': status,
        'voltage': _chart("电压值", [float(dat.voltage_value) for dat in data]),
        'current': _chart("电流值", [float(dat.current_value) for dat in data]),
        'switch_status': _chart("开关状态", [int(dat.switch_status) for dat in data]),
        'active_power': _chart("有功功率", [float(dat.active_power_value) for dat in data]),
        'total_active_power': _chart("有功总电量", [float(dat.total_active_power_value) for dat in data]),
        'power_factor': _chart("功率因数", [float(dat.power_factor_value) for dat in data]),
        'co2_emission': _chart("二氧化碳排放量", [float(dat.co2_emission_value) for dat in data]),
        'frequency': _chart("频率", [float(dat.frequency_value) for dat in data]),
    }
    return render(request, 'figures_base.html', context=context_data)
def build_count_perday(df, logs):
    """Aggregate log messages per day over the trailing 7 days and attach
    the rendered "威胁检测" line chart to *logs*.

    :param df: DataFrame with at least 'timestamp_day' and 'msg' columns;
               may be empty.
    :param logs: dict-like object; updated with the embedded chart HTML and
                 the echarts JS dependency list.
    """
    df_empty = None
    df_count_perday = None
    if not df.empty:
        # Top-10 busiest days by message count.
        df_count_perday = df.groupby('timestamp_day').agg({
            'msg': 'count'
        }).sort_values(by='msg').tail(10)
        # Zero-filled frame for the trailing 7 days so days without
        # messages still appear on the x-axis.
        start_day = datetime.now() + timedelta(days=-7)
        dates = [(start_day + timedelta(days=i)).strftime('%Y-%m-%d')
                 for i in range(0, 7)]
        df_empty = pandas.DataFrame({'timestamp_day': dates, 'msg': 0})
        df_count_perday = pandas.merge(df_empty, df_count_perday, how='left',
                                       on='timestamp_day')
        df_count_perday = df_count_perday.fillna(value=0)
    # BUGFIX: fall back to empty *lists* (the original used `{}`, a dict,
    # which is not a valid axis/series value for Line.add).
    attr = list(df_empty['timestamp_day']) if df_empty is not None else []
    v = list(df_count_perday['msg_y']) if df_count_perday is not None else []
    line = Line()
    line.add("威胁检测", attr, v)
    logs.update(count_perday=line.render_embed())
    logs.update(echart_line_script_list=line.get_js_dependencies())
def line(title, data):
    """Render an embedded, filled multi-series line chart.

    :param title: chart title
    :param data: iterable of dicts with keys 'title', 'attr', 'data', 'color'
    :return: embeddable chart HTML
    """
    chart = Line(title, title_pos="center", )
    for series in data:
        chart.add(series['title'], series['attr'], series['data'],
                  is_fill=True,
                  line_opacity=0.8,
                  area_color=series['color'],
                  area_opacity=0.4,
                  is_smooth=True)
    chart.options['toolbox']['show'] = False
    return chart.render_embed()
def create_charts(data):
    """Assemble the word-cloud / bar / line charts for the Tang-poetry page.

    *data* maps a chart key to [html_before, html_after, xs, ys]; each
    rendered chart is spliced between its two description strings.

    :return: (html, script) where *script* is the page's JS dependency list.
    """
    page = Page()
    style = Style(width=900, height=600)
    fragments = []

    def _append(key, chart):
        # Splice the rendered chart between its before/after descriptions.
        fragments.append(data[key][0] + chart.render_embed() + data[key][1])
        page.add(chart)

    # Chart 1: word cloud of every character.
    chars, values = data['charcloud'][2], data['charcloud'][3]
    wordcloud = WordCloud("唐诗用字云图", **style.init_style)
    wordcloud.add("字云", chars, values, word_size_range=[10, 100], shape='pentagon')
    _append('charcloud', wordcloud)

    # Chart 2: ten most frequent characters.
    chars, values = data['chartop10'][2], data['chartop10'][3]
    bar = Bar("唐诗高频十字", **style.init_style)
    bar.add("柱状图", chars, values)
    _append('chartop10', bar)

    # Chart 3: frequency vs. character count.
    keys, values = data['frequency×'][2], data['frequency×'][3]
    line = Line("唐诗字频-字数", **style.init_style)
    line.add("字频--字数", keys, values,
             is_smooth=True, is_fill=True, area_opacity=0.2,
             is_datazoom_show=True, datazoom_type="both",
             datazoom_range=[0, 60], xaxis_interval=1,
             yaxis_formatter="字", xaxis_name="频次", yaxis_name="字数",
             xaxis_name_pos="end", yaxis_name_pos="end",
             is_more_utils=True)
    _append('frequency×', line)

    html = ''.join(fragments)
    script = page.get_js_dependencies()
    return html, script
def line_three_html(self, title, key, val_min, val_max, val_avg):
    """Render min/max/avg series as one zoomable line chart (embed HTML)."""
    chart = Line(title, title_pos="left", width="100%", height=300)
    for name, series in (("最小值", val_min),
                         ("最大值", val_max),
                         ("平均值", val_avg)):
        chart.add(
            name, key, series,
            mark_point=["average", "max", "min"],
            is_datazoom_show=True,    # allow zooming
            datazoom_range=[0, 100],  # zoom span
            is_smooth=True)
    return chart.render_embed()
def show(request, id):
    """Render the agent-score line chart for the record with primary key *id*.

    The *_score columns store Python-literal lists; they are parsed with
    ast.literal_eval instead of eval so stored content cannot execute
    arbitrary code (assumes the columns hold plain literals — confirm).
    """
    import ast
    score_list = agent_score.objects.get(pk=id)
    data = {
        'observe': ast.literal_eval(score_list.observe_score),
        'evaluate': ast.literal_eval(score_list.evaluate_score),
        'predict': ast.literal_eval(score_list.predict_score)
    }
    columns = {
        'observe': [str(i) for i in range(len(data['observe']))],
        'evaluate': [str(i) for i in range(len(data['evaluate']))],
        # 'predict': [str(i) for i in range(len(data['observe']), (len(data['observe']) + len(data['predict'])))]
    }
    line = Line("Line Chart", "Agent_Score", width=2000, height=800)
    line.add('Observe', columns['observe'], data['observe'], mark_line=['average'])
    line.add('Evaluate', columns['evaluate'], data['evaluate'], mark_line=['average'])
    # line.add('Predict', columns['predict'], data['predict'])
    context = dict(myechart=line.render_embed(),
                   host=Remote_host,
                   script_list=line.get_js_dependencies())
    return render(request, 'agent_score/show.html', context)
class PlotLine:
    """Build a smoothed multi-series line chart from a DataFrame.

    The DataFrame is expected to contain an 'index' column used as the
    x-axis; every other column becomes one chart series.
    """

    def __init__(self):
        self.data = ''
        self.sort = ''
        self.line = Line()
        # JS dependencies the template must load for the chart.
        self.script_list = self.line.get_js_dependencies()

    def plot_line(self, dat, sort):
        """Plot every non-'index' column of *dat*; return embed HTML.

        Returns None on failure (e.g. missing 'index' column); the error is
        printed rather than raised so the page can still render.
        """
        self.data = dat
        self.sort = sort
        self.line = Line()
        try:
            for label in list(self.data.columns):
                if label != 'index':
                    # NOTE: debug prints of whole columns removed — they ran
                    # on every series and flooded stdout.
                    self.line.add(label,
                                  list(self.data['index']),
                                  list(self.data[label]),
                                  is_smooth=True,
                                  mark_line=['average'])
            return self.line.render_embed()
        except Exception as e:
            print('{} in plot line'.format(e))
            return None
def rms(request):
    """Render the RMS chart page for one labelled file found in MongoDB."""
    template = loader.get_template('rms.html')

    db = mongodb_op.MongodbOp()
    db.query_dict = {'label': {'$eq': 1}}
    records = db.query()

    # Collect absolute file paths for every matching record.
    file_list = set()
    for record in records:
        file_list.add(os.path.join(record['a_uri'], record['filename']))

    # Pop one file, preprocess it and compute the time-domain RMS of 'z'.
    df_lst = preprocess.DataPreprocess(
        filename=file_list.pop()).origin_data().data_split()
    signal = signalProcess.SignalProcess(df=df_lst[1]['z']).t_v_rms()

    chart = Line('RMS')
    chart.add('z', signal.index.values, signal.values,
              xaxis_type='value',
              is_datazoom_show=True,
              is_datazoom_extra_show=True,
              datazoom_range=[0, 100],
              datazoom_extra_range=[0, 100])

    context = dict(
        script_list=chart.get_js_dependencies(),
        filelist=file_list,
        myechart=chart.render_embed(),
    )
    return HttpResponse(template.render(context, request))
def _get_weibull_dist(self, qty, mean=None, std=None, scale=1.0, shape=5.0):
    """Render an embedded line chart of a scaled Weibull-family PDF.

    The x-range spans mean - 4*std .. mean + 4*std in steps of std; the PDF
    is scaled by *qty* so the curve approximates expected counts.

    :raises ValueError: if self.weibull_dist_method is not one of
        'double', 'inverted', 'exponential', 'min', 'max'.  (The original
        fell through and hit a NameError on the undefined y_line.)
    """
    x_line = np.arange(mean - std * 4.0, mean + std * 5.0, 1 * std)
    method = self.weibull_dist_method
    if method == 'double':
        _data = dweibull(shape, loc=mean, scale=std)
    elif method == 'inverted':
        _data = invweibull(shape, loc=mean, scale=std)
    elif method == 'exponential':
        # exponweib takes two shape parameters (a, c) before loc/scale.
        _data = exponweib(scale, shape, loc=mean, scale=std)
    elif method == 'min':
        _data = weibull_min(shape, loc=mean, scale=std)
    elif method == 'max':
        _data = weibull_max(shape, loc=mean, scale=std)
    else:
        raise ValueError('unknown weibull_dist_method: {!r}'.format(method))
    y_line = _data.pdf(x_line) * qty

    line = Line(width=1280, height=600)
    line.add(u'{0}'.format(self.spc_target), x_line, y_line,
             xaxis_name=u'{0}'.format(self.spc_target),
             yaxis_name=u'数量(Quantity)',
             line_color='rgba(0 ,255 ,127,0.5)',
             legend_pos='center',
             is_smooth=True,
             line_width=2,
             tooltip_tragger='axis',
             is_fill=True,
             area_color='#20B2AA',
             area_opacity=0.4)
    pyecharts.configure(force_js_embed=True)
    return line.render_embed()
def store_interest():
    """Chart total repaid interest per day (2017-07-01 .. 2018-04-09).

    :return: embeddable line chart HTML
    """
    sql = "SELECT actual_repayment_time,sum(back_Interest) from busy_admin_add_transaction where actual_repayment_time BETWEEN '2017-07-01' AND '2018-04-09' GROUP BY actual_repayment_time"
    cursor = conn_db().cursor()
    try:
        cursor.execute(sql)
        sqlResults = cursor.fetchall()
    finally:
        # BUGFIX: the cursor was never closed, leaking a DB handle per call.
        cursor.close()
    date = []
    interest = []
    for sqlResult in sqlResults:
        date.append(sqlResult[0])
        interest.append(sqlResult[1])
    line = Line(width='100%', height=400)
    line.add("所有用户", x_axis=date, y_axis=interest,
             yaxis_name='金额', xaxis_name='日期',
             yaxis_name_pos='end', xaxis_name_pos='end',
             xaxis_name_gap=35, is_smooth=True,
             is_datazoom_show=True, datazoom_range=[0, 100],
             mark_line=["max", "average"], is_label_emphasis=True)
    return line.render_embed()
def img10():
    """Embedded demo chart: pH and DO readings at five monitoring points."""
    points = ["A", "O1", "O2", "O3", "O4"]
    ph_values = [8.5, 8.3, 7.9, 7.8, 7.5]
    # 'nan' marks a missing DO reading at point A.
    do_values = ['nan', 0.15, 0.65, 0.2, 0.35]
    chart = Line("On-line Monitoring")
    chart.add("pH", points, ph_values, is_smooth=True, is_label_show=True)
    chart.add("DO", points, do_values, is_smooth=True, is_label_show=True)
    return chart.render_embed()
def show_data(request):
    """Render membership/attendance statistics (pie, word cloud, bar, line)."""
    template = loader.get_template('meetings/show_data.html')

    # Active vs. inactive member counts.
    member_labels = [u"members", "non-members"]
    member_counts = [
        Members.objects.filter(on_activate=True).count(),
        Members.objects.filter(on_activate=False).count()
    ]
    pie = Pie(u"Members and non-members")
    pie.add("Count", member_labels, member_counts)

    # Attendance frequency per member name.
    meeting_attendace = Counter(MeetingInfo.objects.values_list('attendance'))
    attr_attendace = OrderedDict()
    attr_attendace.update({
        Members.objects.get(id=key[0]).name: value
        for key, value in meeting_attendace.items()
    })

    wordcloud = WordCloud()
    wordcloud.add("attendace",
                  list(attr_attendace.keys()),
                  list(attr_attendace.values()))

    bar = Bar("attendaces")
    bar.add("attendaces",
            list(attr_attendace.keys()),
            list(attr_attendace.values()),
            xaxis_interval=0, xaxis_rotate=-90)

    # Attendance count per meeting, keyed by "date#count theme".
    meeting_info = MeetingInfo.objects.values_list(
        'date', 'count', 'theme').annotate(Count('attendance'))
    meeting_info_dict = OrderedDict()
    for m in meeting_info:
        meeting_info_dict[str(m[0]) + '#' + str(m[1]) + str(m[2])] = int(m[3])
        print(str(m[0]) + '_' + str(m[1]) + str(m[2]), m[3])

    line = Line("Meeting attendance number")
    line.add("ESHTMC",
             list(meeting_info_dict.keys()),
             list(meeting_info_dict.values()),
             mark_point=["average"], xaxis_interval=0, xaxis_rotate=-45)

    context = dict(
        host=REMOTE_HOST,
        pie=pie.render_embed(),
        pie_script_list=pie.get_js_dependencies(),
        wordcloud=wordcloud.render_embed(),
        wordcloud_script_list=wordcloud.get_js_dependencies(),
        bar=bar.render_embed(),
        bar_script_list=bar.get_js_dependencies(),
        line=line.render_embed(),
        line_script_list=line.get_js_dependencies(),
    )
    return HttpResponse(template.render(context, request))
def scalar(title, x, y, mark_point=None):
    """Render a single-series line chart.

    :param title: chart title (str)
    :param x: list of axis labels (str)
    :param y: list of numbers
    :param mark_point: marker kinds to highlight; defaults to ['min', 'max']
    :return: (embed_html, js_dependency_list)
    """
    # Avoid a mutable default argument; None means "use the default markers".
    if mark_point is None:
        mark_point = ['min', 'max']
    line = Line(title)
    # BUGFIX: mark_point must be passed by keyword; passed positionally it
    # was swallowed by Line.add's *args and silently ignored.
    line.add('line', x, y, mark_point=mark_point)
    return line.render_embed(), line.get_js_dependencies()
def lines(data, width=None, height=None):
    """Render a stacked, filled multi-series line chart, with every value
    normalised by the global maximum.

    :param data: mapping of series name -> {x_label: value}
    :param width: chart width in px; both dimensions default when either is None
    :param height: chart height in px
    :return: embeddable chart HTML
    """
    _max = 0
    _t = []
    # Collect every x-label and the global maximum value.
    for _d in data:
        for _dd in data[_d]:
            if _dd not in _t:
                _t.append(_dd)
            if data[_d][_dd] > _max:
                _max = data[_d][_dd]

    if (width is None) or (height is None):
        _width = 1200
        _height = 600
    else:
        _width = width
        _height = height

    _line = Line(u"", width=_width, height=_height, title_pos="center")

    # Keep only the last 12 labels in sorted order.
    _title = sorted(_t)[-12:]

    # BUGFIX: Python-2-only `print` statements (debug leftovers, a syntax
    # error on Python 3) removed from this function.
    for _d in data:
        _v = [0] * len(_title)
        for _dd in sorted(data[_d]):
            if _dd not in _title:
                continue
            _v[_title.index(_dd)] = float(data[_d][_dd]) / float(_max)
        _line.add(_d, _title, _v,
                  is_fill=True,
                  is_stack=True,
                  line_opacity=0.2,
                  area_opacity=0.4,
                  legend_top='top',
                  is_label_show=True,
                  is_focusnode=True,
                  symbol=None)
    _line.options['toolbox']['show'] = False
    return _line.render_embed()
def _get_scatter(self, data):
    """Render *data* as a sequence-indexed smoothed line chart (embed HTML)."""
    sample_count = len(data)
    x_values = np.arange(1, sample_count + 1, 1)
    chart = Line(width=1280, height=800)
    chart.add(u'{0}'.format(self.spc_target), x_values, data,
              xaxis_name=u'Sequence',
              yaxis_name=u'{0}'.format(self.spc_target),
              mark_line=["min", "average", "max"],
              line_color='rgba(0 ,255 ,127,0.5)',
              legend_pos='center',
              is_smooth=True,
              line_width=2,
              is_more_utils=True,
              tooltip_tragger='axis')
    pyecharts.configure(force_js_embed=True)
    return chart.render_embed()
def data_statistics():
    """数据统计 — chart counts of medicines, agencies and clients."""
    agency_count = len(Agency.query.all())
    client_count = len(Client.query.all())
    medicine_count = len(Medicine.query.all())
    chart = Line("数据统计折线图", width=1100, height=550)
    chart.add("", ["药品数", "经办人数", "顾客数"],
              [medicine_count, agency_count, client_count],
              is_more_utils=True, is_label_show=True)
    return render_template('pyecharts.html',
                           myechart=chart.render_embed(),
                           script_list=chart.get_js_dependencies())
def appl_avarge_store():
    """Chart column 6 of each get_appl_date() row against its date.

    NOTE(review): column 6 is presumably a per-date average — confirm
    against get_appl_date's query.
    """
    rows = get_appl_date()
    dates = []
    values = []
    for row in rows:
        dates.append(row[0])
        values.append(row[6])
    chart = Line(width='100%', height=400)
    chart.add("所有用户", dates, values,
              is_smooth=True,
              mark_line=["max", "average"],
              is_label_emphasis=True)
    return chart.render_embed()
def line_html(self, title, key, value, color=None):
    """Render a single-series area line chart with zoom (embed HTML)."""
    chart = Line(title, title_pos="center", width="100%", height=300)
    chart.add(
        "", key, value,
        mark_point=["average", "max", "min"],
        mark_line=["average", "max", "min"],
        area_color=color,
        line_opacity=0.2,         # line transparency
        area_opacity=0.4,
        is_datazoom_show=True,    # enable zooming
        datazoom_range=[0, 100],  # zoom span
        symbol=None)
    return chart.render_embed()
def lines_member_by_pj(pj, data):
    """Render the distribution chart for one project.

    :param pj: project key into *data*
    :param data: mapping of project -> {x_label: value}
    :return: embeddable chart HTML
    """
    # Collect every x-label across all projects.
    _t = []
    for _d in data:
        for _dd in data[_d]:
            if _dd not in _t:
                _t.append(_dd)

    _line = Line(u"", width=100, height=60,
                 background_color='#b0bab9',
                 title_pos="center",
                 )

    # Keep only the last 12 labels in sorted order.
    _title = sorted(_t)[-12:]

    # BUGFIX: Python-2-only `print` statements (debug leftovers, a syntax
    # error on Python 3) removed from this function.
    _v = [0] * len(_title)
    for _dd in sorted(data[pj]):
        if _dd not in _title:
            continue
        _v[_title.index(_dd)] = data[pj][_dd] * 10

    _line.add(pj, _title, _v,
              is_fill=True,
              line_opacity=0.2,
              area_opacity=0.4,
              is_legend_show=False,
              is_focusnode=True,
              symbol=None)
    _line.options['toolbox']['show'] = False
    return _line.render_embed()
def bar_line(request):
    """Demo view: render an axis-swapped line chart into echarts/line.html."""
    try:
        template = loader.get_template('echarts/line.html')
        chart = Line('第一张图标', '副标题')
        chart.add("服装",
                  ["衣服", "鞋子", "袜子", "帽子", "眼镜"],
                  [2, 4, 15, 6, 23],
                  is_convert=True)
        context = dict(
            myechart=chart.render_embed(),
            host=REMOTE_HOST,
            script_list=chart.get_js_dependencies()
        )
    except Exception:
        # Any failure while building the chart surfaces as a 404.
        raise Http404
    return render(request, 'echarts/line.html', context)
def build_top10_risk_diagram(df, logs):
    """Build the 24-hour high/low risk trend chart and attach it to *logs*.

    High risk = priority 0 or 1; low/medium = priority 2.  Hour buckets
    without messages are zero-filled so the x-axis always spans 24 hours.

    :param df: DataFrame with 'priority', 'timestamp_hour' and 'msg'
               columns; may be empty.
    :param logs: dict-like object; updated with the embedded chart HTML and
                 the echarts JS dependency list.
    """
    df_empty = None
    df_high = None
    df_low = None
    if not df.empty:
        df_high = df[df.priority.isin([0, 1])].groupby('timestamp_hour').agg({
            'msg': 'count'
        }).sort_values(by='msg').tail(10)
        df_low = df[df.priority == 2].groupby('timestamp_hour').agg({
            'msg': 'count'
        }).sort_values(by='msg').tail(10)
        # Zero-filled frame covering the trailing 24 hours.
        start_time = datetime.now() + timedelta(hours=-24)
        dates = [(start_time + timedelta(hours=i)).strftime('%H:00')
                 for i in range(0, 24)]
        df_empty = pandas.DataFrame({'timestamp_hour': dates, 'msg': 0})
        df_high = pandas.merge(df_empty, df_high, how='left',
                               on='timestamp_hour').fillna(value=0)
        df_low = pandas.merge(df_empty, df_low, how='left',
                              on='timestamp_hour').fillna(value=0)
    # BUGFIX: fall back to empty *lists* (the original used `{}`, a dict,
    # which is not a valid axis/series value for Line.add).
    attr = list(df_empty['timestamp_hour']) if df_empty is not None else []
    v1 = list(df_high['msg_y']) if df_high is not None else []
    v2 = list(df_low['msg_y']) if df_low is not None else []
    line = Line()
    line.add("高威胁", attr, v1)
    line.add("中低威胁", attr, v2)
    logs.update(top10_risk_diagram=line.render_embed())
    logs.update(echart_line_script_list=line.get_js_dependencies())
def profitstat_chart(user_id):
    """Render the user's cumulative-profit history as an embedded line chart.

    :param user_id: id of the user whose 'stat' history is charted
    :raises IndexError: if no profit-stat document exists for *user_id*
        (same exception type the original list indexing raised).
    """
    # Filter in the database instead of loading the whole collection into
    # memory and filtering in Python.
    doc = collect_profitstat.find_one({'user_id': user_id})
    if doc is None:
        raise IndexError('no profit stats for user_id {!r}'.format(user_id))
    user_profithist = doc['stat']

    # 创建图表
    line = Line("您的收益变化")
    attrs = [i['date'] for i in user_profithist]
    x_dates = [i.split(' ')[0] for i in attrs]  # keep only the date part
    y_profit = [i['AllrateR'] for i in user_profithist]
    line.add("总收益", x_dates, y_profit,
             is_label_show=True,
             is_datazoom_show=True,
             mark_point=['average'],
             is_more_utils=True)
    return line.render_embed()
def line(title, data):
    """Render an embedded, filled multi-series line chart.

    :param title: chart title
    :param data: iterable of dicts with keys 'title', 'attr', 'data', 'color'
    :return: embeddable chart HTML
    """
    from pyecharts import Line
    chart = Line(title)
    for series in data:
        chart.add(series['title'], series['attr'], series['data'],
                  is_fill=True,
                  line_opacity=0.8,
                  area_color=series['color'],
                  area_opacity=0.4,
                  is_smooth=True)
    return chart.render_embed()
def appl_avarge():
    """Chart column 4 of each get_appl_date() row against its date.

    NOTE(review): column 4 is presumably a per-date average — confirm
    against get_appl_date's query.
    """
    rows = get_appl_date()
    dates = []
    values = []
    for row in rows:
        dates.append(row[0])
        values.append(row[4])
    chart = Line(width='100%', height=500)
    chart.add("所有用户", x_axis=dates, y_axis=values,
              yaxis_name='金额', xaxis_name='日期',
              yaxis_name_pos='end', xaxis_name_pos='end',
              xaxis_name_gap=35,
              is_smooth=True,
              is_datazoom_show=True, datazoom_range=[0, 100],
              mark_line=["max", "average"],
              is_label_emphasis=True)
    return chart.render_embed()
def index_line():
    # Render the 2009-2018 child-mortality line chart for the index page,
    # one series per CSV row, plus two long analysis paragraphs.
    data = pd.read_csv(r"./static/data/1Child mortality in China.csv")
    columns = data.columns.values
    data = data.values.tolist()
    # NOTE(review): the variable is named `bar` but holds a Line chart.
    bar = Line("2009-2018儿童死亡率", width="50%")
    # First row seeds the chart; remaining rows each add a series
    # (column 0 is the series name, the rest are y-values).
    bar.add(data[0][0], columns[1:], data[0][1:])
    for i in data[1:]:
        bar.add(i[0], columns[1:], i[1:])
    return render_template(
        'index.html',
        myechart=bar.render_embed(),
        script_list=bar.get_js_dependencies(),
        text=''' 从图可以看出,中国5岁以下儿童死亡率在过去10年间总体趋于下降,农村5岁以下儿童的死亡率一直以较大幅度高于城市同类儿童死亡率。 联系图表,可以发现2011年是一个节点年份,11年以后我国5岁以下儿童死亡率降幅明显。将此节点信息联系国务院在2011年根据我国儿童发展过程中面临的突出问题,针对性发布的《中国儿童发展纲要》可知,在2011年以来,我国在儿童相关的健康、福利、社会环境等领域有了长足发展。 该纲要首次将降低儿童死亡率作为儿童健康领域的主要目标之一,此外还在其他领域增补了大量与儿童安全相关的内容,包括强化国家和政府在不同类别弱势儿童保护方面的责任,建立和完善国家、省(自治区、直辖市)、市(区、县)三级儿童发展监测数据库。 从数据来看,这项举措的收效甚佳,相比于2011年,2018中国5岁以下儿童的死亡率下降了一半之多,虽然其中肯定包括了医疗水平不断提高等原因,但即使在城市地区,也成果喜人。 ''',
        text1= '''这次项目的数据采集主要围绕中国儿童死亡情况展开,搜集了近10年来中国5岁以下儿童(包含全体、城市、农村)死亡率数据、其主要死因的分析数据及可能存在的预防控制和专科救助情况数据。 总体来说我国5岁以下儿童死亡率在医疗水平提高以及国家专项纲要推动等因素下已经实现较大幅度的降低,但是农村儿童较高的死亡率仍然可以作为儿童生命安全健康任务的核心突破点。而对于儿童生存环境中存在的各种危险因素,人们的重视程度还是不够,要切实解决这些危险问题,首要做到的就是具体情况具体分析,切不可以以偏概全,对于家庭因素、地区因素、环境因素都要点对点提出宣传及解决建议。而针对疾病预防控制中心及专科疾病防治院的发展,最大问题是发展遇到瓶颈,被暂时性的饱和假象拖慢了发展脚步。儿童的疾病防治与专科诊疗问题从来不是“医疗机构基本覆盖”、“救援需求基本满足”可以解决,更充分的发展,更尖端技术的推广仍然任重道远。'''
    )
def line_three_html(self, title, key, val_min, val_max, val_avg):
    """折线面积图 — render min/max/avg series as one zoomable line chart.

    :param title: chart title
    :param key: x-axis labels
    :param val_min: minimum-value series (same length as *key*)
    :param val_max: maximum-value series
    :param val_avg: average-value series
    :return: embeddable chart HTML
    """
    line = Line(
        title,
        title_pos='left',
        width='100%',
        height=300,
    )
    # The three series share identical options; add them in one loop
    # instead of three copy-pasted calls.
    for name, values in (("最小值", val_min),
                         ("最大值", val_max),
                         ("平均值", val_avg)):
        line.add(
            name, key, values,
            mark_point=['average', 'max', 'min'],  # 标记点
            is_datazoom_show=True,  # 缩放
            datazoom_range=[0, 100],
            is_smooth=True,
        )
    return line.render_embed()
def line_html(self, title, key, val, color=None):
    """折线面积图 — single-series filled line chart with zoom (embed HTML)."""
    chart = Line(title, title_pos='center', width='100%', height=300)
    chart.add(
        "", key, val,
        is_fill=True,                          # fill the area under the curve
        mark_point=['average', 'max', 'min'],  # 标记点
        mark_line=['average'],                 # 标记线
        line_opacity=0.2,
        area_opacity=0.4,
        is_datazoom_show=True,                 # 缩放
        datazoom_range=[0, 100],
        symbol=None,                           # 标记图形
        area_color=color,
    )
    return chart.render_embed()
def dashboard(request):
    """Render the dashboard homepage with summary stats and a 50-point chart.

    Redirects to '/' when no phone number is in the session.  Admins see
    global counts and the latest 51 readings across all devices; regular
    users see only their own device's data.
    """
    phone_number = request.session.get('phone_number', '')
    if phone_number == '':
        return redirect('/')
    user = UserModel.objects.get(phone_number=phone_number)
    # "Running days" are counted from the 2019-01-01 launch date.
    time_delta = datetime.datetime.now() - datetime.datetime(2019, 1, 1)

    if user.is_admin:
        data = DataModel.objects.order_by('data_time')[0:51]
        user_number = UserModel.objects.count()
        device_number = DeviceModel.objects.count()
        data_number = DataModel.objects.count()
        if user.device_id:
            device = DeviceModel.objects.get(device_id=user.device_id)
            status = device.status
        else:
            # Admin without a bound device: placeholder label.
            device = '管理员设备'
            status = True
    else:
        user_number = UserModel.objects.filter(
            phone_number=phone_number).count()
        device_number = DeviceModel.objects.filter(
            device_id=user.device_id).count()
        data_number = DataModel.objects.filter(
            device_id=user.device_id).count()
        device = DeviceModel.objects.get(device_id=user.device_id)
        data = DataModel.objects.filter(
            device_id=device.device_id).order_by('data_time')[0:51]
        status = device.status
    status = 'offline' if status is False else 'online'

    # Drop the sub-second fraction from each timestamp for the x-axis.
    attr = [str(dat.data_time).split('.')[0] for dat in data]
    current_values = [float(dat.current_value) for dat in data]
    active_power_values = [float(dat.active_power_value) for dat in data]
    total_active_power_values = [
        float(dat.total_active_power_value) for dat in data
    ]

    line = Line("近50条数据", "电流、有功功率和有功总电量")
    line.add("电流", attr, current_values, is_fill=True, mark_point=["max", "min"])
    line.add("有功功率", attr, active_power_values, mark_point=["max", "min"])
    line.add("有功总电量", attr, total_active_power_values, mark_point=["max", "min"])

    context_data = {
        'name': user.name,
        'device_id': user.device_id,
        'device_status': status,
        'instruction':
        '若系统使用时遇到问题,请及时向系统管理员进行反馈。谢谢大家的配合~',
        'user_number': user_number,
        'device_number': device_number,
        'running_days': time_delta.days,
        'data_number': data_number,
        # NOTE(review): these max() calls compare the raw model fields — if
        # they are stored as strings the comparison is lexicographic;
        # confirm the field types.
        'current_peak_value':
        max([dat.current_value for dat in data]) if len(data) > 0 else 0,
        'active_power_peak_value':
        max([dat.active_power_value for dat in data]) if len(data) > 0 else 0,
        'total_active_power_peak_value':
        max([dat.total_active_power_value for dat in data]) if len(data) > 0 else 0,
        'dashboard_chart': line.render_embed(),
    }
    # (A large block of commented-out duplicate logic was removed here.)
    return render(request, 'dashboard_homepage.html', context=context_data)
def home(request):
    """Anomaly-detection home view.

    Runs the CPU outlier model in the database, collects the top-SQL
    category breakdown and the top-3 "reason" stats per anomalous snapshot,
    then renders the DB_CPU trend chart into the dashboard template.
    """
    conn = database_method.initial_connect('dmuser', 'dmuser', 'dmtest')
    conn = conn.create_conn()
    cursor = conn.cursor()

    # Snapshot(s) flagged as CPU anomalies by the SVM model (first row only).
    cpu_outlier_result = """ select "TIME", "SNAP_ID", "DB_CPU", db_id, '/ora_dual/load_profile_trend/?snapid='||SNAP_ID||'&dbid='||db_id from ( SELECT "TIME", "SNAP_ID", "DB_CPU", db_id, PREDICTION_PROBABILITY(dmuser.ANOM_SVM_1_6 USING *) ANOM_SVM_1_6_PROB, PREDICTION(dmuser.ANOM_SVM_1_6 USING *) ANOM_SVM_1_6_PRED FROM dmuser.stat_all_pivot_data) where ANOM_SVM_1_6_PRED=1 and rownum=1 """
    # Count of top-SQL records per category (pivot).
    sql_outlier_result = """ select * from ( select type from topsql_all_data_his_view ) pivot ( count(*) for type in ('CPU Time' as "CPU_TIME",'Elapse Time' as "ELAPSE_TIME",'Buffer Reads' as "BUFFER_READS",'Physical Reads' as "PHYSICAL_READS",'Executions' as "EXECUTIONS") ) """
    # Full DB_CPU time series.
    cpu_result = """ select db_cpu,time from dmuser.stat_all_pivot_data """

    try:
        # Run the anomaly-detection model, then read its results.
        cursor.callproc('dmuser.cpu_outlier_apply_model')
        cursor.execute(cpu_outlier_result)
        data_result = list(cursor.fetchall())
        cursor.execute(sql_outlier_result)
        sql_result = list(cursor.fetchall())

        outlier_sql = []
        for idx in range(len(sql_result)):
            outlier_sql.append({
                'CPU': sql_result[idx][0],
                'ELA': sql_result[idx][1],
                'BUFFER': sql_result[idx][2],
                'READ': sql_result[idx][3],
                'EXE': sql_result[idx][4]
            })

        # For each anomalous snapshot, fetch the top-3 time-model stats
        # (excluding 'DB time'/'DB CPU') as the anomaly "reason".
        reasons = []
        for idx in range(len(data_result)):
            url = []
            reason_sql = """ select stat_name from ( select * from DBA_HIST_SYS_TIME_MODEL where snap_id=""" + str(
                data_result[idx][1]
            ) + """ and stat_name not in ('DB time','DB CPU') order by value desc) where rownum < 4 """
            cursor.execute(reason_sql)
            reason_result_ = cursor.fetchall()
            for reaon_idx in range(len(reason_result_)):
                # Normalise the stat-name tuple repr into a URL-safe token.
                url.append(
                    data_result[idx][4] + "&reason=" +
                    str(reason_result_[reaon_idx]).upper().replace(' ', '_')
                    .replace('(', '').replace(')', '').replace(',', '')
                    .replace('[', '').replace(']', ''))
            reasons.append({
                "TIME": data_result[idx][0],
                "snap_id": data_result[idx][1],
                "DB_CPU": data_result[idx][2],
                "URL": url,
                "reason": reason_result_
            })

        cursor.execute(cpu_result)
        cpu_all_result = list(cursor.fetchall())
    finally:
        # BUGFIX: cursor.close() used to sit after the return statement and
        # was never executed; close it deterministically instead.
        cursor.close()

    # Split the time series into y (DB_CPU) and x (time) lists.
    db_cpu_values = []
    db_cpu_times = []
    for row in cpu_all_result:
        db_cpu_values.append(row[0])
        db_cpu_times.append(row[1])

    template = loader.get_template(
        './node_modules/gentelella/production/anom_total.html')
    timeline = Timeline(is_auto_play=True, timeline_bottom=0)
    cpu_line = Line(title_pos='center')
    cpu_line.add("DB_CPU", db_cpu_times, db_cpu_values,
                 is_smooth=True,
                 mark_point=["max", "min"],
                 mark_line=["average"])
    context = dict(
        cpu_line=cpu_line.render_embed(),
        data_result=reasons,
        sql_result=outlier_sql,
        myechart=timeline.render_embed(),
        host=REMOTE_HOST,
        script_list=timeline.get_js_dependencies())
    return HttpResponse(template.render(context, request))
def translate_char(requests):
    """AJAX endpoint: return either a query-history line chart or a word
    cloud of translated words, depending on POST 'type'.

    Response is JSON: {"context": <embedded chart HTML or "">}.
    """
    context = ""
    conn = sqlite3.connect("db.sqlite3")
    c = conn.cursor()
    if requests.POST["type"] == "Line":
        # Daily query counts.
        str_sql = "select date(updated) as d ,count(*) as c from Mytest_translate group by d"
        cursor = c.execute(str_sql)
        data_arr = []
        data_num_arr = []
        for row in cursor:
            data_arr.append(row[0])
            data_num_arr.append(row[1])
        line = Line("歷史查詢統計", "單位: 次")
        line.add("times", data_arr, data_num_arr,
                 mark_point=["max", "min"], is_label_show=True,
                 is_datazoom_show=True, xaxis_rotate=30, xaxis_interval=2,
                 datazoom_extra_type="both", xaxis_margin=20,
                 is_more_utils=True)
        context = line.render_embed()
    elif requests.POST["type"] == "Cloud":
        str_sql = "select word,translate from Mytest_translate"
        cursor = c.execute(str_sql)
        en = ""
        zh_CN = ""
        en_result = {}
        for row in cursor:
            word = row[0].encode("utf-8")
            word_tr = row[1].encode("utf-8")
            # Strip punctuation from both sides of the word pair.
            for ch in r'!"@#$%^&\*()_\?:;,<>\\/\|{}':
                # BUGFIX: str.replace returns a new string; the original
                # discarded the result, so nothing was ever stripped.
                word = word.replace(ch, "")
                word_tr = word_tr.replace(ch, "")
            # CJK first character => row[0] is the Chinese side.
            if u'\u4e00' <= row[0][0] <= u'\u9fff':
                zh_CN += word
                en += word_tr + " "
            else:
                zh_CN += word_tr
                en += word + " "
        # English word frequencies.
        word_arr = en.split()
        for e in word_arr:
            en_result[e] = en_result.get(e, 0) + 1
        en_res = list(en_result.items())
        en_res.sort(key=lambda x: x[1], reverse=True)
        # Chinese word frequencies (jieba segmentation).
        zh_CN_arr = jieba.cut(zh_CN, cut_all=False)
        zh_CN_res = {}
        for d in zh_CN_arr:
            zh_CN_res[d] = zh_CN_res.get(d, 0) + 1
        zh_CN_res = list(zh_CN_res.items())
        zh_CN_res.sort(key=lambda x: x[1], reverse=True)
        en_name_arr = []
        en_num_arr = []
        zh_CN_name_arr = []
        zh_CN_num_arr = []
        # Top 100 English words; Chinese tokens skip the first 10
        # (presumably stop words — confirm).
        for i in en_res[:100]:
            en_name_arr.append(i[0])
            en_num_arr.append(i[1])
        for d in zh_CN_res[10:110]:
            zh_CN_name_arr.append(d[0])
            zh_CN_num_arr.append(d[1])
        en_char = WordCloud()
        try:
            if requests.POST["k"] == u"漢字":
                en_char.add("zh_CN", zh_CN_name_arr, zh_CN_num_arr,
                            shape=requests.POST["t"])
            else:
                en_char.add("En", en_name_arr, en_num_arr,
                            shape=requests.POST["t"])
        except:
            # Fall back to a default shape when 'k'/'t' are missing/invalid.
            en_char.add("En", en_name_arr, en_num_arr, shape="diamond")
        context = en_char.render_embed()
    else:
        pass
    return HttpResponse(json.dumps({"context": context}))