def test_line_type_fil():
    """An area-fill line chart must not emit step-mode series options."""
    chart = Line("折线图-面积图示例")
    chart.add(
        "商家A", CLOTHES, clothes_v1,
        is_fill=True, line_opacity=0.2, area_opacity=0.4, symbol=None,
    )
    chart.add(
        "商家B", CLOTHES, clothes_v2,
        is_fill=True, area_color='#000', area_opacity=0.3, is_smooth=True,
    )
    assert '"step": true' not in chart._repr_html_()
def test_grid_line_pie():
    """Render a Grid with a temperature line chart (left) and a pie (right)."""
    temps = Line("折线图示例", width=1200)
    temps.add("最高气温", WEEK, [11, 11, 15, 13, 12, 13, 10],
              mark_point=["max", "min"], mark_line=["average"])
    temps.add("最低气温", WEEK, [1, -2, 2, 5, 3, 2, 0],
              mark_point=["max", "min"], mark_line=["average"],
              legend_pos="20%")

    pie_values = [11, 12, 13, 10, 10, 10]
    ring = Pie("饼图示例", title_pos="55%")
    ring.add("", CLOTHES, pie_values, radius=[45, 65], center=[65, 50],
             legend_pos="80%", legend_orient="vertical")

    layout = Grid()
    layout.add(temps, grid_right="55%")
    layout.add(ring, grid_left="60%")
    layout.render()
def test_line_negative_value():
    """A line chart with negative y values still renders without error."""
    chart = Line("折线图示例")
    chart.add("最高气温", WEEK, [11, 11, 15, 13, 12, 13, 10],
              mark_point=["max", "min"], mark_line=["average"])
    chart.add("最低气温", WEEK, [1, -2, 2, 5, 3, 2, 0],
              mark_point=["max", "min"], mark_line=["average"])
    chart.render()
def test_grid_properties():
    """A Grid keeps its explicit width/height; the bar pulls in echarts js."""
    stack_a = [5, 20, 36, 10, 75, 90]
    stack_b = [10, 25, 8, 60, 20, 80]
    bar = Bar("柱状图示例", height=720)
    bar.add("商家A", CLOTHES, stack_a, is_stack=True)
    bar.add("商家B", CLOTHES, stack_b, is_stack=True)

    temps = Line("折线图示例", title_top="50%")
    temps.add("最高气温", WEEK, [11, 11, 15, 13, 12, 13, 10],
              mark_point=["max", "min"], mark_line=["average"])
    temps.add("最低气温", WEEK, [1, -2, 2, 5, 3, 2, 0],
              mark_point=["max", "min"], mark_line=["average"],
              legend_top="50%")

    grid = Grid(width=1024, height=768)
    grid.add(bar, grid_bottom="60%")
    grid.add(temps, grid_top="60%")

    eq_(grid.width, 1024)
    eq_(grid.height, 768)
    assert ("echarts" in bar.js_dependencies) or ("echarts.min" in bar.js_dependencies)
def test_grid_inverse_yaxis():
    """Two stacked line charts; the lower one inverts its y axis."""
    days = ["{}天".format(i) for i in range(1, 31)]

    upper = Line("折线图示例", width=1200, height=700)
    upper.add("最高气温", days, [random.randint(20, 100) for _ in range(30)],
              mark_point=["max", "min"], mark_line=["average"],
              legend_pos="38%")

    lower = Line()
    lower.add("最低气温", days, [random.randint(20, 100) for _ in range(30)],
              mark_point=["max", "min"], mark_line=["average"],
              is_yaxis_inverse=True, xaxis_pos="top")

    grid = Grid()
    grid.add(upper, grid_bottom="60%")
    grid.add(lower, grid_top="50%")
    grid.render()
def test_grid_top_bottom():
    """Stack a bar chart above a line chart inside one Grid and render it."""
    stack_a = [5, 20, 36, 10, 75, 90]
    stack_b = [10, 25, 8, 60, 20, 80]
    bar = Bar("柱状图示例", height=720)
    bar.add("商家A", CLOTHES, stack_a, is_stack=True)
    bar.add("商家B", CLOTHES, stack_b, is_stack=True)

    temps = Line("折线图示例", title_top="50%")
    temps.add("最高气温", WEEK, [11, 11, 15, 13, 12, 13, 10],
              mark_point=["max", "min"], mark_line=["average"])
    temps.add("最低气温", WEEK, [1, -2, 2, 5, 3, 2, 0],
              mark_point=["max", "min"], mark_line=["average"],
              legend_top="50%")

    grid = Grid()
    grid.add(bar, grid_bottom="60%")
    grid.add(temps, grid_top="60%")
    grid.render()
def test_timeline_label_color():
    """A custom label_color list must survive into the rendered Timeline."""
    months = ["{}月".format(i) for i in range(1, 7)]

    jan_bar = Bar("1 月份数据", "数据纯属虚构")
    jan_bar.add("bar", months, [randint(10, 50) for _ in range(6)],
                label_color=["red", "#213", "black"])
    jan_line = Line()
    jan_line.add("line", months, [randint(50, 80) for _ in range(6)])
    jan = Overlap()
    jan.add(jan_bar)
    jan.add(jan_line)

    feb_bar = Bar("2 月份数据", "数据纯属虚构")
    feb_bar.add("bar", months, [randint(10, 50) for _ in range(6)])
    feb_line = Line()
    feb_line.add("line", months, [randint(50, 80) for _ in range(6)])
    feb = Overlap()
    feb.add(feb_bar)
    feb.add(feb_line)

    timeline = Timeline(timeline_bottom=0)
    timeline.add(jan, "1 月")
    timeline.add(feb, "2 月")

    content = timeline._repr_html_()
    assert '"color": [' in content
    for color in ("red", "#213", "black"):
        assert color in content
def test_grid_add_overlap():
    """A bar+line Overlap (with a secondary y axis) can live inside a Grid."""
    from pyecharts import Overlap

    months = ["{}月".format(i) for i in range(1, 13)]
    evaporation = [2.0, 4.9, 7.0, 23.2, 25.6, 76.7,
                   135.6, 162.2, 32.6, 20.0, 6.4, 3.3]
    precipitation = [2.6, 5.9, 9.0, 26.4, 28.7, 70.7,
                     175.6, 182.2, 48.7, 18.8, 6.0, 2.3]
    temperature = [2.0, 2.2, 3.3, 4.5, 6.3, 10.2,
                   20.3, 23.4, 23.0, 16.5, 12.0, 6.2]

    bar = Bar("Overlap+Grid 示例", width=1200, height=600, title_pos="40%")
    bar.add("蒸发量", months, evaporation)
    bar.add("降水量", months, precipitation, yaxis_formatter=" ml",
            yaxis_max=250, legend_pos="85%", legend_orient="vertical",
            legend_top="45%")

    temp_line = Line()
    temp_line.add("平均温度", months, temperature, yaxis_formatter=" °C")

    overlap = Overlap()
    overlap.add(bar)
    # Temperature rides on its own (second) y axis.
    overlap.add(temp_line, is_add_yaxis=True, yaxis_index=1)

    grid = Grid()
    grid.add(overlap, grid_right="20%")
    grid.render()
def test_grid_four_direction():
    """Place four different chart types in the four quadrants of one Grid."""
    v1 = [5, 20, 36, 10, 75, 90]
    v2 = [10, 25, 8, 60, 20, 80]

    bar = Bar("柱状图示例", height=720, width=1200, title_pos="65%")
    bar.add("商家A", CLOTHES, v1, is_stack=True)
    bar.add("商家B", CLOTHES, v2, is_stack=True, legend_pos="80%")

    temps = Line("折线图示例")
    temps.add("最高气温", WEEK, [11, 11, 15, 13, 12, 13, 10],
              mark_point=["max", "min"], mark_line=["average"])
    temps.add("最低气温", WEEK, [1, -2, 2, 5, 3, 2, 0],
              mark_point=["max", "min"], mark_line=["average"],
              legend_pos="20%")

    v1 = [5, 20, 36, 10, 75, 90]
    v2 = [10, 25, 8, 60, 20, 80]
    scatter = Scatter("散点图示例", title_top="50%", title_pos="65%")
    scatter.add("scatter", v1, v2, legend_top="50%", legend_pos="80%")

    es = EffectScatter("动态散点图示例", title_top="50%")
    es.add("es", [11, 11, 15, 13, 12, 13, 10], [1, -2, 2, 5, 3, 2, 0],
           effect_scale=6, legend_top="50%", legend_pos="20%")

    grid = Grid()
    grid.add(bar, grid_bottom="60%", grid_left="60%")
    grid.add(temps, grid_bottom="60%", grid_right="60%")
    grid.add(scatter, grid_top="60%", grid_left="60%")
    grid.add(es, grid_top="60%", grid_right="60%")
    grid.render()
def test_line_user_define_markpoint():
    """Mix built-in mark points with user-defined coordinate marks."""
    chart = Line("折线图示例")
    first_mark = {"coord": ["裤子", 10], "name": "这是我想要的第一个标记点"}
    chart.add("商家A", CLOTHES, clothes_v1,
              mark_point=["average", first_mark])
    second_mark = {"coord": ["袜子", 80], "name": "这是我想要的第二个标记点"}
    chart.add("商家B", CLOTHES, clothes_v2, is_smooth=True,
              mark_point=[second_mark])
    chart.render()
def draw_sentiment_pic(csv_file):
    """Plot a smoothed sentiment-count curve for *csv_file* as an HTML line chart."""
    counts = count_sentiment(csv_file)
    # Sort the sentiment buckets by key (ascending).
    ordered = sorted(counts.items(), key=lambda kv: kv[0])
    # NOTE(review): the last (largest-key) bucket is deliberately skipped
    # here, as in the original — confirm that this is intended.
    attr = [key for key, _ in ordered[:-1]]
    val = [count for _, count in ordered[:-1]]
    chart = Line(csv_file + ":影评情感分析")
    chart.add("", attr, val, is_smooth=True, is_more_utils=True)
    chart.render(csv_file + "_情感分析曲线图.html")
def test_line_user_define_marks():
    """Customise mark-point symbol, color and size, then render."""
    chart = Line("折线图示例")
    chart.add("商家A", CLOTHES, clothes_v1,
              mark_point=["average", "max", "min"],
              mark_point_symbol='diamond',
              mark_point_textcolor='#40ff27')
    chart.add("商家B", CLOTHES, clothes_v2,
              mark_point=["average", "max", "min"],
              mark_point_symbol='arrow',
              mark_point_symbolsize=40)
    chart.show_config()
    chart.render()
def test_line_log_yaxis():
    """Render log10-scaled random data; the second series uses a log y axis."""
    import math
    import random

    chart = Line("折线图示例")
    chart.add("商家A", CLOTHES,
              [math.log10(random.randint(1, 99999)) for _ in range(6)])
    chart.add("商家B", CLOTHES,
              [math.log10(random.randint(1, 99999999)) for _ in range(6)],
              yaxis_type="log")
    chart.render()
def test_line_es():
    """Overlay an EffectScatter on a line chart sharing the same data."""
    values = [5, 20, 36, 10, 10, 100]
    base_line = Line("line-EffectScatter 示例")
    base_line.add("", CLOTHES, values, is_random=True)
    effect = EffectScatter()
    effect.add("", CLOTHES, values, effect_scale=8)
    combo = Overlap()
    combo.add(base_line)
    combo.add(effect)
    combo.render()
def test_line_marks():
    """Render with a mark point on one series and mark lines on the other."""
    chart = Line("折线图示例")
    chart.add("商家A", CLOTHES, clothes_v1, mark_point=["average"])
    chart.add("商家B", CLOTHES, clothes_v2, is_smooth=True,
              mark_line=["max", "average"])
    chart.render()
def test_overlap_bar_line():
    """Overlay a line series on top of a bar series and render the result."""
    categories = ['A', 'B', 'C', 'D', 'E', 'F']
    bar_values = [10, 20, 30, 40, 50, 60]
    line_values = [38, 28, 58, 48, 78, 68]
    bar = Bar("Line-Bar 示例")
    bar.add("bar", categories, bar_values)
    line = Line()
    line.add("line", categories, line_values)
    combo = Overlap()
    combo.add(bar)
    combo.add(line)
    combo.render()
def test_overlap_bar_line():
    """Overlay a line series on a bar series and render the result.

    NOTE(review): this redefines ``test_overlap_bar_line`` from earlier in
    the file; under pytest collection only this later definition runs.
    """
    categories = ["A", "B", "C", "D", "E", "F"]
    bar_values = [10, 20, 30, 40, 50, 60]
    line_values = [38, 28, 58, 48, 78, 68]
    bar = Bar("Line-Bar 示例")
    bar.add("bar", categories, bar_values)
    line = Line()
    line.add("line", categories, line_values)
    combo = Overlap()
    combo.add(bar)
    combo.add(line)
    combo.render()
def test_overlap_kline_line():
    """Overlay two random line series on a 31-day candlestick chart and render."""
    import random
    # One row per trading day; presumably [open, close, low, high] — confirm
    # against the pyecharts Kline documentation.
    v1 = [
        [2320.26, 2320.26, 2287.3, 2362.94],
        [2300, 2291.3, 2288.26, 2308.38],
        [2295.35, 2346.5, 2295.35, 2345.92],
        [2347.22, 2358.98, 2337.35, 2363.8],
        [2360.75, 2382.48, 2347.89, 2383.76],
        [2383.43, 2385.42, 2371.23, 2391.82],
        [2377.41, 2419.02, 2369.57, 2421.15],
        [2425.92, 2428.15, 2417.58, 2440.38],
        [2411, 2433.13, 2403.3, 2437.42],
        [2432.68, 2334.48, 2427.7, 2441.73],
        [2430.69, 2418.53, 2394.22, 2433.89],
        [2416.62, 2432.4, 2414.4, 2443.03],
        [2441.91, 2421.56, 2418.43, 2444.8],
        [2420.26, 2382.91, 2373.53, 2427.07],
        [2383.49, 2397.18, 2370.61, 2397.94],
        [2378.82, 2325.95, 2309.17, 2378.82],
        [2322.94, 2314.16, 2308.76, 2330.88],
        [2320.62, 2325.82, 2315.01, 2338.78],
        [2313.74, 2293.34, 2289.89, 2340.71],
        [2297.77, 2313.22, 2292.03, 2324.63],
        [2322.32, 2365.59, 2308.92, 2366.16],
        [2364.54, 2359.51, 2330.86, 2369.65],
        [2332.08, 2273.4, 2259.25, 2333.54],
        [2274.81, 2326.31, 2270.1, 2328.14],
        [2333.61, 2347.18, 2321.6, 2351.44],
        [2340.44, 2324.29, 2304.27, 2352.02],
        [2326.42, 2318.61, 2314.59, 2333.67],
        [2314.68, 2310.59, 2296.58, 2320.96],
        [2309.16, 2286.6, 2264.83, 2333.29],
        [2282.17, 2263.97, 2253.25, 2286.33],
        [2255.77, 2270.28, 2253.31, 2276.22],
    ]
    # X axis: one label per day of July 2017.
    attr = ["2017/7/{}".format(i + 1) for i in range(31)]
    kline = Kline("Kline-Line 示例")
    kline.add("日K", attr, v1)
    # Two random companion lines in the same price neighbourhood.
    line_1 = Line()
    line_1.add("line-1", attr, [random.randint(2400, 2500) for _ in range(31)])
    line_2 = Line()
    line_2.add("line-2", attr, [random.randint(2400, 2500) for _ in range(31)])
    overlap = Overlap()
    overlap.add(kline)
    overlap.add(line_1)
    overlap.add(line_2)
    overlap.render()
def test_timeline_bar_line():
    """Build a 5-step Timeline of bar+line Overlap charts and render it.

    The original spelled out five near-identical month blocks by hand;
    they are generated in a loop here. All data is random, so only the
    chart structure (titles, series names, timeline labels) matters.
    """
    months = ["{}月".format(i) for i in range(1, 7)]
    timeline = Timeline(timeline_bottom=0)
    for month in range(1, 6):
        bar = Bar("{} 月份数据".format(month), "数据纯属虚构")
        bar.add("bar", months, [randint(10, 50) for _ in range(6)])
        line = Line()
        line.add("line", months, [randint(50, 80) for _ in range(6)])
        overlap = Overlap()
        overlap.add(bar)
        overlap.add(line)
        timeline.add(overlap, "{} 月".format(month))
    timeline.render()
def test_page():
    """A Page with a line, a pie and a bar renders the expected HTML skeleton."""
    page = Page()

    temps = Line("折线图示例")
    temps.chart_id = "id_my_cell_line"
    temps.add("最高气温", WEEK, [11, 11, 15, 13, 12, 13, 10],
              mark_point=["max", "min"], mark_line=["average"])
    temps.add("最低气温", WEEK, [1, -2, 2, 5, 3, 2, 0],
              mark_point=["max", "min"], mark_line=["average"])

    # pie
    pie_values = [11, 12, 13, 10, 10, 10]
    ring = Pie("饼图-圆环图示例", title_pos="center", width="600px")
    ring.add("", CLOTHES, pie_values, radius=[40, 75], label_text_color=None,
             is_label_show=True, legend_orient="vertical", legend_pos="left")

    page.add([temps, ring, create_a_bar(TITLE)])

    # Start render and test
    html = page._repr_html_()

    # Base html structure.
    assert html.count("<script>") == html.count("</script>") == 2
    assert html.count("<div") == html.count("</div>") == 3
    assert html.count("require.config") == html.count("function(echarts)") == 1

    # Chart attributes.
    json_encoded_title = json.dumps(TITLE)
    assert json_encoded_title in html
    assert "nbextensions/echarts" in html  # default jshost
    assert html.count("height:400px") == 3
    assert html.count("width:600px") == 1
    assert html.count("width:800px") == 2
    assert html.count("id_my_cell_line") == 6
def test_grid_multiple_datazoom_index():
    """One dataZoom slider drives both the line chart's and the kline's x axis."""
    line = Line("折线图示例", width=1200, height=700)
    line.add("最高气温", WEEK, [11, 11, 15, 13, 12, 13, 10],
             mark_point=["max", "min"], mark_line=["average"])
    # datazoom_xaxis_index=[0, 1] makes this chart's zoom control
    # the x axes of both grid cells.
    line.add("最低气温", WEEK, [1, -2, 2, 5, 3, 2, 0],
             mark_point=["max", "min"], legend_top="50%",
             mark_line=["average"], is_datazoom_show=True,
             datazoom_xaxis_index=[0, 1])
    # One row per trading day; presumably [open, close, low, high] — confirm
    # against the pyecharts Kline documentation.
    v1 = [[2320.26, 2320.26, 2287.3, 2362.94],
          [2300, 2291.3, 2288.26, 2308.38],
          [2295.35, 2346.5, 2295.35, 2345.92],
          [2347.22, 2358.98, 2337.35, 2363.8],
          [2360.75, 2382.48, 2347.89, 2383.76],
          [2383.43, 2385.42, 2371.23, 2391.82],
          [2377.41, 2419.02, 2369.57, 2421.15],
          [2425.92, 2428.15, 2417.58, 2440.38],
          [2411, 2433.13, 2403.3, 2437.42],
          [2432.68, 2334.48, 2427.7, 2441.73],
          [2430.69, 2418.53, 2394.22, 2433.89],
          [2416.62, 2432.4, 2414.4, 2443.03],
          [2441.91, 2421.56, 2418.43, 2444.8],
          [2420.26, 2382.91, 2373.53, 2427.07],
          [2383.49, 2397.18, 2370.61, 2397.94],
          [2378.82, 2325.95, 2309.17, 2378.82],
          [2322.94, 2314.16, 2308.76, 2330.88],
          [2320.62, 2325.82, 2315.01, 2338.78],
          [2313.74, 2293.34, 2289.89, 2340.71],
          [2297.77, 2313.22, 2292.03, 2324.63],
          [2322.32, 2365.59, 2308.92, 2366.16],
          [2364.54, 2359.51, 2330.86, 2369.65],
          [2332.08, 2273.4, 2259.25, 2333.54],
          [2274.81, 2326.31, 2270.1, 2328.14],
          [2333.61, 2347.18, 2321.6, 2351.44],
          [2340.44, 2324.29, 2304.27, 2352.02],
          [2326.42, 2318.61, 2314.59, 2333.67],
          [2314.68, 2310.59, 2296.58, 2320.96],
          [2309.16, 2286.6, 2264.83, 2333.29],
          [2282.17, 2263.97, 2253.25, 2286.33],
          [2255.77, 2270.28, 2253.31, 2276.22]]
    kline = Kline("K 线图示例", title_top="50%")
    kline.add("日K", ["2017/7/{}".format(i + 1) for i in range(31)], v1,
              is_datazoom_show=True)
    grid = Grid()
    grid.add(line, grid_top="60%")
    grid.add(kline, grid_bottom="60%")
    grid.render()
def test_overlap_two_yaxis():
    """Overlay a temperature line on a two-series bar, on a second y axis."""
    months = ["{}月".format(i) for i in range(1, 13)]
    evaporation = [2.0, 4.9, 7.0, 23.2, 25.6, 76.7,
                   135.6, 162.2, 32.6, 20.0, 6.4, 3.3]
    precipitation = [2.6, 5.9, 9.0, 26.4, 28.7, 70.7,
                     175.6, 182.2, 48.7, 18.8, 6.0, 2.3]
    temperature = [2.0, 2.2, 3.3, 4.5, 6.3, 10.2,
                   20.3, 23.4, 23.0, 16.5, 12.0, 6.2]

    bar = Bar(width=1200, height=600)
    bar.add("蒸发量", months, evaporation)
    bar.add("降水量", months, precipitation, yaxis_formatter=" ml", yaxis_max=250)

    temp_line = Line()
    temp_line.add("平均温度", months, temperature, yaxis_formatter=" °C")

    combo = Overlap()
    combo.add(bar)
    combo.add(temp_line, yaxis_index=1, is_add_yaxis=True)
    combo.render()
def iplot(indicator, new=True, axes=None, legend_on=False, text_on=False, text_color='k', zero_on=False, label=None, *args, **kwargs):
    """Draw an indicator curve with the pyecharts backend.

    :param Indicator indicator: indicator instance to plot
    :param axes: target axes; a new/current figure's axes when None
    :param new: create a fresh figure (no effect in pyecharts otherwise)
    :param legend_on: whether to show the legend
    :param text_on: whether to show the indicator name and params top-left
    :param text_color: color of the caption text, black by default
    :param zero_on: whether to draw a horizontal line at y=0
    :param str label: caption text; used when text_on or legend_on is True
    :param args: pylab plot positional args
    :param kwargs: pylab plot kwargs, e.g. marker, markerfacecolor,
                   markeredgecolor

    NOTE(review): text_on, text_color and zero_on are accepted but not
    used anywhere in this backend — confirm whether that is intentional.
    """
    if not indicator:
        print("indicator is None")
        return
    if axes is None:
        # Reuse the current axes unless a brand-new figure was requested.
        axes = create_figure() if new else gca()
    if not label:
        # Default caption: "<name> <latest value>".
        label = "%s %.2f" % (indicator.long_name, indicator[-1])
    x_list = gcf().get_xaxis()
    if x_list is None:
        x_list = [i for i in range(len(indicator))]
    # Null prices become '-' so pyecharts leaves gaps instead of plotting them.
    y_list = [
        '-' if x == constant.null_price else round(x,3) for x in indicator]
    line = Line()
    style = gcf().get_style(axes, **kwargs)
    line.add(label, x_list, y_list,
             yaxis_min=min(indicator),
             is_legend_show=legend_on, *args, **style)
    axes.add(line)
    gcf().add_axis(axes)
    return gcf()
def test_line_user_define_markpoint():
    """User-defined mark-point coordinates must appear in the rendered HTML."""
    chart = Line("折线图示例")
    chart.add(
        "商家A",
        CLOTHES,
        clothes_v1,
        mark_point=["average",
                    {"coord": ["裤子", 10], "name": "这是我想要的第一个标记点"}],
    )
    chart.add(
        "商家B",
        CLOTHES,
        clothes_v2,
        is_smooth=True,
        mark_point=[{"coord": ["袜子", 80], "name": "这是我想要的第二个标记点"}],
    )
    rendered = chart._repr_html_()
    for needle in ('"value": 80', '"value": 10'):
        assert needle in rendered
def test_line_user_define_marks():
    """symbol_size must not leak into the serialized mark-point options."""
    chart = Line("折线图示例")
    chart.add(
        "商家A",
        CLOTHES,
        clothes_v1,
        mark_point=["average", "max", "min"],
        symbol_size=50,
        mark_point_symbol="diamond",
        mark_point_textcolor="#40ff27",
    )
    chart.add(
        "商家B",
        CLOTHES,
        clothes_v2,
        mark_point=["average", "max", "min"],
        mark_point_symbol="arrow",
        mark_point_symbolsize=40,
    )
    assert '"symbolSize":50' not in chart._repr_html_()
def TimeFilter():
    """Plot net profit / max drawdown / Sharpe ratio per trading-window candidate.

    Tuned result: 13:45–09:45 is chosen as the trading window; the first
    trade fires at 14:00 and the last at 09:45 (a 15-minute window waits
    for new data).
    """
    from pyecharts import Line
    # NOTE(review): hard-coded absolute Windows path — consider parameterizing.
    json_path = "E:/Program Files/other/TimeFilter/"
    netprofit_data, maxdd_data, sharpratio_data = read_indicator_TimeFilter(json_path)
    line = Line("TimeFilter")
    line.add("netprofit",
             [item[0] for item in netprofit_data],
             [item[1] for item in netprofit_data])
    line.add("maxdd",
             [item[0] for item in maxdd_data],
             [item[1] for item in maxdd_data])
    line.add("sharpratio",
             [item[0] for item in sharpratio_data],
             [item[1] for item in sharpratio_data],
             xaxis_interval=0, xaxis_name="Time",
             xaxis_rotate=30, xaxis_name_pos="end")
    line.render("TimeFilter.html")
    # Export the backtest table for the chosen 13:45–09:45 window to Excel.
    table_result = read_json.read_backtest_json("E:/Program Files/other/交易系统作业/代码/作业二/回测-TimeFilter.json")
    table_result.to_excel("project2.3_table3.1.xlsx")
def CPUintoChart(cpufile, name):
    """Parse a CPU statistics dump and render package/user/kernel series
    as a themed line chart HTML file.

    :param cpufile: iterable of raw text lines; every odd row carries data
    :param name: title prefix for the generated report

    Improvements over the original: the builtin ``id`` is no longer
    shadowed, the ``is False``/``pass`` filter is a comprehension, and the
    three identical ``add`` calls are driven by one loop.
    """
    # Keep columns 2/4/7 (package, user, kernel) from every odd-numbered row.
    rows = []
    for row_no, raw in enumerate(cpufile, start=1):
        if row_no % 2 == 1:
            fields = raw.split(' ')
            rows.append([fields[i] for i in (2, 4, 7)])

    # Drop rows whose 'user' column does not start with a digit (headers etc.).
    data = [r for r in rows if r[1][0].isdigit()]

    # Each series starts with its label, followed by the values minus their
    # trailing unit character (original sliced with [0:-1]).
    package = ['package'] + [p[0:-1] for p, _, _ in data]
    user = ['user'] + [u[0:-1] for _, u, _ in data]
    kernel = ['kernel'] + [k[0:-1] for _, _, k in data]
    print(package)
    print(user)
    print(kernel)

    # X axis: simple sample indices (one per data point).
    ids = list(range(len(package) - 1))

    chart = Line(f"{name}__CPU数据图形报表", "图表纵轴为数据大小,横轴为时间节点,直线为平均值")
    for label, series, color in (
        ("package", package[1:], '#800080'),
        ("user", user[1:], '#0000FF'),
        ("kernel", kernel[1:], '#2E8B57'),
    ):
        chart.add(label, ids, series, label_color=[color],
                  mark_line=["average"], mark_point=["max", "min"],
                  xaxis_interval=0, xaxis_rotate=90)
    chart.use_theme("vintage")
    # Timestamp the output file so successive runs do not overwrite each other.
    t = time.time()
    chart.render('%s\\cpu%s.html' % (setting.data, int(t)))
def nomoral_line(stock_num):
    """Build a Line chart comparing three close-price prediction models.

    :param stock_num: stock identifier forwarded to predictation()
    :return: configured pyecharts Line instance (not yet rendered)
    """
    v2, v3, v4 = predictation(stock_num)
    # X axis: prediction steps 1..10.
    attr = list(range(1, 11))
    predict_line = Line("Prediction")
    predict_line.add("Close price(linear regression)", attr, v2,
                     mark_point=["max", "min"])
    predict_line.add("Close price(Neural Network)", attr, v4,
                     mark_point=["max", "min"])
    # Y-axis floor: 1% below the rounded global minimum of all three series
    # (the original recomputed this min four times; hoist it once).
    floor = round(min(min(v2), min(v3), min(v4)))
    predict_line.add("Close price(SVM-poly_kernal)", attr, v3,
                     yaxis_min=floor - 0.01 * floor,
                     mark_point=["max", "min"])
    return predict_line
def sameUserDataframe(df, numlist):
    """Build a Page holding DOU/MOU/APRU curves for the user id *numlist*."""
    user_rows = df[df['用户标识'] == numlist]
    periods = coltype_filter(df, '账期')
    periods.sort()
    n = len(periods)
    # Yearly charge for this user; monthly average would be yearIncome / n.
    yearIncome = month_Chuzhang(user_rows)
    dou = All_DOU(df)
    mou = all_MOU(df)
    apru = all_APRU(df)

    page = Page()
    # All three series share one chart (separate charts were abandoned).
    chart = Line("DOU")
    chart.add(str(numlist) + "的DOU", periods, dou)
    chart.add(str(numlist) + "的MOU", periods, mou)
    chart.add(str(numlist) + "的APRU", periods, apru)
    page.add(chart)
    return page
def line_three_html(self, title, key, val_min, val_max, val_avg):
    """Render min/max/avg series as an embeddable smoothed, zoomable line chart.

    :param title: chart title
    :param key: x-axis categories
    :param val_min: per-category minimum values
    :param val_max: per-category maximum values
    :param val_avg: per-category average values
    :return: embeddable HTML snippet (``render_embed``)

    The original repeated the same ``add`` call three times with
    identical options; the series are now added in one loop.
    """
    line = Line(
        title,
        title_pos='left',
        width='100%',
        height=300,
    )
    for series_name, values in (
        ("最小值", val_min),
        ("最大值", val_max),
        ("平均值", val_avg),
    ):
        line.add(
            series_name,
            key,
            values,
            mark_point=['average', 'max', 'min'],  # marker points
            is_datazoom_show=True,  # zoom slider
            datazoom_range=[0, 100],
            is_smooth=True,
        )
    return line.render_embed()
from pyecharts import Bar, Line, Overlap import pandas as pd sheet0 = pd.read_excel("./data_resources/4.1各月出差天数.xls") month0 = sheet0["2018年"].values.tolist() days0 = sheet0["出差天数"].values.tolist() line0 = Line('', title_pos="10%") line0.add('', month0, days0, yaxis_name='出差数(天)', yaxis_name_pos='middle', yaxis_name_gap=40, legend_pos="35%", is_label_show=True, label_text_size=10) group = [] sheet1 = pd.read_excel("./data_resources/4.2各月每个组出差天数统计表格.xls") dates1 = sheet1["2018年月份"].values.tolist() for i in ["综合管理", "领导组", "硬件组", "销售组", "售前组", "市场商务", "软件组", "大数据云计算组"]: group.append((i, sheet1[i].values.tolist())) bar1 = Bar('各月各组出差人天数情况', title_pos="0%") for i in group: bar1.add(i[0], dates1, i[1], label_text_size=6, yaxis_name='出差数(天)', yaxis_name_pos='middle',
def test_line_type_stack():
    """Render two stacked line series with their labels shown."""
    chart = Line("折线图-数据堆叠示例")
    for series_name, values in (("商家A", clothes_v1), ("商家B", clothes_v2)):
        chart.add(series_name, CLOTHES, values,
                  is_stack=True, is_label_show=True)
    chart.render()
# Per-site traffic series: for every known site, collect (date, count) pairs
# from data_sort and add them as one series to the shared `line` chart.
# NOTE(review): nesting reconstructed from a flattened source line — verify.
for item in site_dist.keys():
    site_name = str(item)
    attr = []
    v = []
    for a in data_sort.keys():
        # Flatten "key,[v1, v2]" into "key,v1,v2" by stripping quotes,
        # brackets and spaces, then split on commas.
        key_value = str(a) + "," + str(data_sort[a])
        string = key_value.replace('\'', '').replace('[', '').replace(
            ']', '').replace(' ', '')
        last_data = string.split(',')
        site = last_data[1]
        # Map raw site identifiers to display names where known.
        for value in Webmaster_value.keys():
            if str(site) == str(value):
                site = Webmaster_value[value]
        if site == site_name:
            attr.append(last_data[0])
            v.append(last_data[2])
    try:
        line.add(
            site_name,
            attr,
            v,
            is_label_show=True,
            yaxis_formatter=" IP",
            is_datazoom_show=True,
        )
    except BaseException:
        # NOTE(review): swallows every error (incl. KeyboardInterrupt) and
        # binds a throwaway value — consider narrowing and logging.
        a = 1
line.render("/opt/jumpserver/apps/templates/daystat.html")
def echart2(request):
    """Django view: pressure line chart stacked over a K-line chart in a Grid.

    Renders the grid to an embeddable snippet and passes it to the
    'ewpyecharts.html' template along with the JS host and dependencies.
    """
    template = loader.get_template('ewpyecharts.html')
    line = Line("压力分析", width=1200, height=700)
    attr = ['周一', '周二', '周三', '周四', '周五', '周六', '周日']
    line.add("最高压力", attr, [11, 11, 15, 13, 12, 13, 10],
             mark_point=["max", "min"], mark_line=["average"])
    line.add(
        "最低压力", attr, [1, -2, 2, 5, 3, 2, 0],
        mark_point=["max", "min"],
        legend_top="50%",
        mark_line=["average"],
        # Let the dataZoom slider control the x axes with index 0 and 1,
        # i.e. both the line chart's and the kline's axis.
        is_datazoom_show=True,
        datazoom_xaxis_index=[0, 1])
    # One row per trading day; presumably [open, close, low, high] — confirm
    # against the pyecharts Kline documentation.
    v1 = [[2320.26, 2320.26, 2287.3, 2362.94],
          [2300, 2291.3, 2288.26, 2308.38],
          [2295.35, 2346.5, 2295.35, 2345.92],
          [2347.22, 2358.98, 2337.35, 2363.8],
          [2360.75, 2382.48, 2347.89, 2383.76],
          [2383.43, 2385.42, 2371.23, 2391.82],
          [2377.41, 2419.02, 2369.57, 2421.15],
          [2425.92, 2428.15, 2417.58, 2440.38],
          [2411, 2433.13, 2403.3, 2437.42],
          [2432.68, 2334.48, 2427.7, 2441.73],
          [2430.69, 2418.53, 2394.22, 2433.89],
          [2416.62, 2432.4, 2414.4, 2443.03],
          [2441.91, 2421.56, 2418.43, 2444.8],
          [2420.26, 2382.91, 2373.53, 2427.07],
          [2383.49, 2397.18, 2370.61, 2397.94],
          [2378.82, 2325.95, 2309.17, 2378.82],
          [2322.94, 2314.16, 2308.76, 2330.88],
          [2320.62, 2325.82, 2315.01, 2338.78],
          [2313.74, 2293.34, 2289.89, 2340.71],
          [2297.77, 2313.22, 2292.03, 2324.63],
          [2322.32, 2365.59, 2308.92, 2366.16],
          [2364.54, 2359.51, 2330.86, 2369.65],
          [2332.08, 2273.4, 2259.25, 2333.54],
          [2274.81, 2326.31, 2270.1, 2328.14],
          [2333.61, 2347.18, 2321.6, 2351.44],
          [2340.44, 2324.29, 2304.27, 2352.02],
          [2326.42, 2318.61, 2314.59, 2333.67],
          [2314.68, 2310.59, 2296.58, 2320.96],
          [2309.16, 2286.6, 2264.83, 2333.29],
          [2282.17, 2263.97, 2253.25, 2286.33],
          [2255.77, 2270.28, 2253.31, 2276.22]]
    kline = Kline("考核表分析", title_top="50%")
    kline.add("日K", ["2017/7/{}".format(i + 1) for i in range(31)], v1,
              is_datazoom_show=True)
    grid = Grid()
    grid.add(line, grid_top="60%")
    grid.add(kline, grid_bottom="60%")
    # grid.render()
    context = dict(myechart=grid.render_embed(),
                   host=REMOTE_HOST,
                   script_list=grid.get_js_dependencies())
    return HttpResponse(template.render(context, request))
#提取中国从疫情开始每一疫情具体数据 tempBaseData = data["data"]["chinaDayList"] #遍历每天具体数据编程pandas for item in tempBaseData: line = [ item["date"], item["confirm"], item["suspect"], item["dead"], item["heal"], item["nowConfirm"], item["nowSevere"] ] #创建pandas中的行 series = pd.Series(line, chinaDayColumnList) chinaDaySeriesList.append(series) #创建表格 chainDayDateFrame = pd.DataFrame(chinaDaySeriesList) print(chainDayDateFrame) #生成全国实时疫情趋势图 #创建折线图 line = Line("全国实时疫情趋势图", "人数", width=1200, height=600) #从表格中拿到所以日期 dateAttr = list(chainDayDateFrame.loc[chainDayDateFrame.index, "date"]) confirm = list(chainDayDateFrame.loc[chainDayDateFrame.index, "confirm"]) suspect = list(chainDayDateFrame.loc[chainDayDateFrame.index, "suspect"]) nowConfirm = list(chainDayDateFrame.loc[chainDayDateFrame.index, "nowConfirm"]) nowSevere = list(chainDayDateFrame.loc[chainDayDateFrame.index, "nowSevere"]) line.add("累计确诊", dateAttr, confirm, mark_point=['max']) line.add("现有疑似", dateAttr, suspect, mark_point=['max']) line.add("现有确诊", dateAttr, nowConfirm, mark_point=['max']) line.add("现有重症", dateAttr, nowSevere, mark_point=['max']) line.show_config() line.render(path="./output/全国实时疫情趋势图.html")
# Demo script: render a pie chart, a gauge, and a bar+line overlap.
from pyecharts import Pie

attr = ["衬衫", "羊毛衫", "雪纺衫", "裤子", "高跟鞋", "袜子"]
v1 = [11, 12, 13, 10, 10, 10]
pie_chart = Pie("饼图示例")
pie_chart.add("", attr, v1, is_label_show=True)
pie_chart.render("pie.html")

from pyecharts import Gauge

gauge_chart = Gauge("仪表盘示例")
gauge_chart.add("业务指标", "完成率", 66.66)
gauge_chart.show_config()
gauge_chart.render("gauge.html")

from pyecharts import Line, Overlap, Bar

attr = ['A', 'B', 'C', 'D', 'E', 'F']
v1 = [10, 20, 30, 40, 50, 60]
v2 = [38, 28, 58, 48, 78, 68]
bar_chart = Bar('Line-Bar')
bar_chart.add('bar', attr, v1)
line_chart = Line()
line_chart.add('line', attr, v2)
combo = Overlap()
combo.add(bar_chart)
combo.add(line_chart)
combo.render('bar-line.html')
def test_line_type_step():
    """A step-mode line chart must emit '"step": true' in its options."""
    chart = Line("折线图-阶梯图示例")
    chart.add("商家A", CLOTHES, clothes_v1, is_step=True, is_label_show=True)
    assert '"step": true' in chart._repr_html_()
def graphing(self): df = self.df df2 = self.df2 # df3 = self.df3 frequency = self.freqency if frequency == 'daily': dateSearch = pd.to_datetime(df_raw.FailureTaskGeneratedTime.max()) #datetime.now() elif frequency == 'weekly': dateSearch = pd.to_datetime(df_raw.FailureTaskGeneratedTime.max()) - timedelta(weeks=1) #datetime.now() - timedelta(weeks=1) elif frequency == 'monthly': dateSearch = pd.to_datetime(df_raw.FailureTaskGeneratedTime.max()) - relativedelta(months=1) #datetime.now() - relativedelta(months=1) elif frequency == 'quarterly': dateSearch = pd.to_datetime(df_raw.FailureTaskGeneratedTime.max()) - relativedelta(months=3) #datetime.now() - relativedelta(months=3) df.loc[:, 'FailureTaskGeneratedTime'] = pd.to_datetime(df.loc[:, 'FailureTaskGeneratedTime']).copy() df2.loc[:, 'FailureTaskGeneratedTime'] = pd.to_datetime(df2.loc[:, 'FailureTaskGeneratedTime']).copy() df.loc[:, 'FailureTaskGeneratedDate'] = df.loc[:, 'FailureTaskGeneratedTime'].map(lambda x: x.date()).copy() df2.loc[:, 'FailureTaskGeneratedDate'] = df2.loc[:, 'FailureTaskGeneratedTime'].map(lambda x: x.date()).copy() if frequency == 'daily': df = df[df.loc[:, 'FailureTaskGeneratedDate'] == dateSearch.date()].copy() df2 = df2[df2.loc[:, 'FailureTaskGeneratedDate'] == dateSearch.date()].copy() else: df = df[(dateSearch.date() <= df.loc[:, 'FailureTaskGeneratedDate']) & (df.loc[:, 'FailureTaskGeneratedDate'] <= datetime.now().date())].copy() df2 = df2[(dateSearch.date() <= df2.loc[:, 'FailureTaskGeneratedDate']) & (df2.loc[:, 'FailureTaskGeneratedDate'] <= datetime.now().date())].copy() # -----------------------------count weekend to SZ df_copy = df.copy() df2_copy = df2.copy() weekend_inscope = df_copy['InvestmentId'][df_copy.loc[:, 'workday'] == 'weekend'].count() weekend_outofscope = df2_copy['InvestmentId'][df2_copy.loc[:, 'workday'] == 'weekend'].count() df = df[df.loc[:, 'workday'] == 'weekday'] df2 = df2[df2.loc[:, 'workday'] == 'weekday'] # --------------------------------- df_time_list = 
[] df_time_list2 = [] time_list_name = [] # You can set your time here: if tm.localtime().tm_isdst == 1: time_set = datetime.now().strftime("%Y/%m/%d 19/30/00") print("Now is Summer time") else: time_set = datetime.now().strftime("%Y/%m/%d 18/30/00") print("Now is Winter time") # -------------------------- time_set = dt.datetime.strptime(time_set, "%Y/%m/%d %H/%M/%S") for i in range(24): df_time_list.append(time_set.strftime("%I:%M%p")) df_time_list2.append(time_set.strftime("df2_%I_%M%p")) time_list_name.append(time_set.strftime("%H:%M")) time_set = time_set + timedelta(hours=1) df.loc[:,'FailureTaskGeneratedTime'] = pd.to_datetime(df.loc[:,'FailureTaskGeneratedTime']).copy() df.loc[:,'FailureTaskGeneratedTime'].dropna(inplace=True) df.loc[:,'FailureTaskGeneratedTime_time'] = df.loc[:,'FailureTaskGeneratedTime'].apply(lambda x: x.strftime("%d:%H:%M:%S")).copy() date_series = df.groupby('FailureTaskGeneratedTime_time')['InvestmentId'].count() df2.loc[:,'FailureTaskGeneratedTime'] = pd.to_datetime(df2.loc[:,'FailureTaskGeneratedTime']).copy() df2.loc[:,'FailureTaskGeneratedTime'].dropna(inplace=True) df2.loc[:,'FailureTaskGeneratedTime_time'] = df2.loc[:,'FailureTaskGeneratedTime'].apply(lambda x: x.strftime("%d:%H:%M:%S")).copy() date_series2 = df2.groupby('FailureTaskGeneratedTime_time')['InvestmentId'].count() timezone_list = list(date_series.index) timezone_list = [dt.datetime.strptime(x, "%d:%H:%M:%S") for x in timezone_list] timez_list = [x.strftime("%H:%M") for x in timezone_list] day_list = [x.strftime("%d") for x in timezone_list] day_num = len(set(day_list)) timezone_list2 = list(date_series2.index) timezone_list2 = [dt.datetime.strptime(x, "%d:%H:%M:%S") for x in timezone_list2] timez_list2 = [x.strftime("%H:%M") for x in timezone_list2] day_list2 = [x.strftime("%d") for x in timezone_list2] day_num2 = len(set(day_list2)) time_list = [] for i in range(len(timez_list)): time_list.append(dt.time(int(timez_list[i].split(':')[0]), 
int(timez_list[i].split(':')[1]))) time_list2 = [] for i in range(len(timez_list2)): time_list2.append(dt.time(int(timez_list2[i].split(':')[0]), int(timez_list2[i].split(':')[1]))) time_list = np.array(time_list) date_series.index = time_list date_frame = date_series.to_frame() date_frame = date_frame.reset_index() time_list2 = np.array(time_list2) date_series2.index = time_list2 date_frame2 = date_series2.to_frame() date_frame2 = date_frame2.reset_index() average_list = [] average_list2 = [] # ---------------------------------------------making & count dataframe by date if len(df_time_list) == len(time_list_name): for i in range(0, len(df_time_list)): if i + 1 > len(df_time_list) - 1: globals()[df_time_list[i]] = date_frame[ (dt.time(int(time_list_name[i].split(':')[0]), 30) <= date_frame['index']) & ( date_frame['index'] < dt.time(int(time_list_name[0].split(':')[0]), 30))] globals()[df_time_list2[i]] = date_frame2[ (dt.time(int(time_list_name[i].split(':')[0]), 30) <= date_frame2['index']) & ( date_frame2['index'] < dt.time(int(time_list_name[0].split(':')[0]), 30))] elif i == 5: globals()[df_time_list[i]] = date_frame[ (dt.time(int(time_list_name[i].split(':')[0]), 30) <= date_frame['index']) | ( date_frame['index'] < dt.time(int(time_list_name[i + 1].split(':')[0]), 30))] globals()[df_time_list2[i]] = date_frame2[ (dt.time(int(time_list_name[i].split(':')[0]), 30) <= date_frame2['index']) | ( date_frame2['index'] < dt.time(int(time_list_name[i + 1].split(':')[0]), 30))] else: globals()[df_time_list[i]] = date_frame[ (dt.time(int(time_list_name[i].split(':')[0]), 30) <= date_frame['index']) & ( date_frame['index'] < dt.time(int(time_list_name[i + 1].split(':')[0]), 30))] globals()[df_time_list2[i]] = date_frame2[ (dt.time(int(time_list_name[i].split(':')[0]), 30) <= date_frame2['index']) & ( date_frame2['index'] < dt.time(int(time_list_name[i + 1].split(':')[0]), 30))] # ---------------------------------------------make the cake to "average_list" 
globals()[df_time_list[i]] = globals()[df_time_list[i]]['InvestmentId'].sum() globals()[df_time_list2[i]] = globals()[df_time_list2[i]]['InvestmentId'].sum() average_list.append(globals()[df_time_list[i]]) average_list2.append(globals()[df_time_list2[i]]) average_array = np.array(average_list) average_array[np.isnan(average_array)] = 0 average_list_inscope = [int(x) for x in list(average_array)] average_array2 = np.array(average_list2) average_array2[np.isnan(average_array2)] = 0 average_list_outofscope = [int(x) for x in list(average_array2)] average_list_inscope[0] = average_list_inscope[0] + weekend_inscope average_list_outofscope[0] = average_list_outofscope[0] + weekend_outofscope try: average_inscope = list(np.array(average_list_inscope) // day_num) average_outofscope = list(np.array(average_list_outofscope) // day_num) except: pass #------------------------------------------cut the cake for sz, mardrid and mumbai sz_average_list_inscope = average_list_inscope.copy() sz_average_list_inscope[int(len(df_time_list) / 3):] = [0] * (len(df_time_list) - int(len(df_time_list) / 3)) mardrid_average_list_inscope = average_list_inscope.copy() mardrid_average_list_inscope[:int(len(df_time_list) / 3)] = [0] * (int(len(df_time_list) / 3)) mardrid_average_list_inscope[int(len(df_time_list) * 2 / 3):] = [0] * (int(len(df_time_list) / 3)) mumbai_average_list_inscope = average_list_inscope.copy() mumbai_average_list_inscope[:int(len(df_time_list) * 2 / 3)] = [0] * (len(df_time_list) - int(len(df_time_list) / 3)) sz_average_list_outofscope = average_list_outofscope.copy() sz_average_list_outofscope[int(len(df_time_list) / 3):] = [0] * (len(df_time_list) - int(len(df_time_list) / 3)) mardrid_average_list_outofscope = average_list_outofscope.copy() mardrid_average_list_outofscope[:int(len(df_time_list) / 3)] = [0] * (int(len(df_time_list) / 3)) mardrid_average_list_outofscope[int(len(df_time_list) * 2 / 3):] = [0] * (int(len(df_time_list) / 3)) mumbai_average_list_outofscope = 
average_list_outofscope.copy() mumbai_average_list_outofscope[:int(len(df_time_list) * 2 / 3)] = [0] * (len(df_time_list) - int(len(df_time_list) / 3)) if len(np.array(average_list_inscope)) == len(np.array(average_list_outofscope)): total_list = list(np.array(average_list_inscope) + np.array(average_list_outofscope)) if len(Pre_value)>len(np.array(average_list_inscope)): pre_total_list = Pre_value[len(Pre_value)-len(np.array(average_list_inscope)):].tolist() if len(Pre_value)==len(np.array(average_list_inscope)): pre_total_list = Pre_value.tolist() # ------------------------------------------cut the cake for sz, mardrid and mumbai second graph bar1 = Bar(title = 'Failure Total (%s) from %s to %s'%(frequency,dateSearch.strftime('%Y-%m-%d'),datetime.now().strftime('%Y-%m-%d'))) bar1.add("%s"%'SZ', df_time_list,sz_average_list_inscope, is_stack=True,is_label_show=False,is_label_emphasis = True,legend_pos = "88%",legend_orient = 'vertical') bar1.add("%s"%'outofscope', df_time_list,sz_average_list_outofscope, is_stack=True,is_label_show=False,is_label_emphasis = True,legend_pos = "88%",legend_orient = 'vertical') bar2 = Bar() bar2.add("%s"%'Madrid', df_time_list,mardrid_average_list_inscope, is_stack=True,is_label_show=False,is_label_emphasis = True) bar2.add("%s"%'outofscope', df_time_list,mardrid_average_list_outofscope, is_stack=True,is_label_show=False,is_label_emphasis = True) bar3 = Bar() bar3.add("%s"%'Mumbai', df_time_list,mumbai_average_list_inscope, is_stack=True,is_label_show=False,is_label_emphasis = True) bar3.add("%s"%'outofscope', df_time_list,mumbai_average_list_outofscope, is_stack=True,is_label_show=False,is_label_emphasis = True) line =Line('failure_line',background_color = 'white',title_text_size = 20,width = '100%') line.add("Actual",df_time_list,total_list) line.add("Prediction",df_time_list,pre_total_list,is_more_utils = True,is_splitline_show = False, is_label_show = False,is_smooth=False,line_color = [' red'], legend_pos = 'left',legend_orient 
= 'vertical',line_type = 'dotted',line_width = '2') overlap = Overlap(width='100%',height=360) overlap.add(bar1) overlap.add(bar2) overlap.add(bar3) overlap.add(line) # ------------------------------------------cut the cake for sz, mardrid and mumbai second graph sz_avg_list_inscope = average_inscope.copy() sz_avg_list_inscope[int(len(df_time_list) / 3):] = [0] * (len(df_time_list) - int(len(df_time_list) / 3)) mardrid_avg_list_inscope = average_inscope.copy() mardrid_avg_list_inscope[:int(len(df_time_list) / 3)] = [0] * (int(len(df_time_list) / 3)) mardrid_avg_list_inscope[int(len(df_time_list) * 2 / 3):] = [0] * (int(len(df_time_list) / 3)) mumbai_avg_list_inscope = average_inscope.copy() mumbai_avg_list_inscope[:int(len(df_time_list) * 2 / 3)] = [0] * (len(df_time_list) - int(len(df_time_list) / 3)) sz_avg_list_outofscope = average_outofscope.copy() sz_avg_list_outofscope[int(len(df_time_list) / 3):] = [0] * (len(df_time_list) - int(len(df_time_list) / 3)) mardrid_avg_list_outofscope = average_outofscope.copy() mardrid_avg_list_outofscope[:int(len(df_time_list) / 3)] = [0] * (int(len(df_time_list) / 3)) mardrid_avg_list_outofscope[int(len(df_time_list) * 2 / 3):] = [0] * (int(len(df_time_list) / 3)) mumbai_avg_list_outofscope = average_outofscope.copy() mumbai_avg_list_outofscope[:int(len(df_time_list) * 2 / 3)] = [0] * (len(df_time_list) - int(len(df_time_list) / 3)) if len(np.array(average_inscope)) == len(np.array(average_outofscope)): avg_num = list(np.array(average_inscope) + np.array(average_outofscope)) # ------------------------------------------cut the cake for sz, mardrid and mumbai second graph bar1 = Bar(title = 'Failure Average (%s) from %s to %s'%(frequency,dateSearch.strftime('%Y-%m-%d'),datetime.now().strftime('%Y-%m-%d'))) bar1.add("%s"%'SZ', df_time_list,sz_avg_list_inscope, is_stack=True,is_label_show=False,is_label_emphasis = True,legend_pos = "88%",legend_orient = 'vertical') bar1.add("%s"%'outofscope', df_time_list,sz_avg_list_outofscope, 
is_stack=True,is_label_show=False,is_label_emphasis = True,legend_pos = "88%",legend_orient = 'vertical') bar2 = Bar() bar2.add("%s"%'Madrid', df_time_list,mardrid_avg_list_inscope, is_stack=True,is_label_show=False,is_label_emphasis = True) bar2.add("%s"%'outofscope', df_time_list,mardrid_avg_list_outofscope, is_stack=True,is_label_show=False,is_label_emphasis = True) bar3 = Bar() bar3.add("%s"%'Mumbai', df_time_list,mumbai_avg_list_inscope, is_stack=True,is_label_show=False,is_label_emphasis = True) bar3.add("%s"%'outofscope', df_time_list,mumbai_avg_list_outofscope, is_stack=True,is_label_show=False,is_label_emphasis = True) line =Line('failure_line',background_color = 'white',title_text_size = 20,width = '100%') # total line.add("Actual",df_time_list,avg_num) overlap2 = Overlap(width='100%',height=360) overlap2.add(bar1) overlap2.add(bar2) overlap2.add(bar3) overlap2.add(line) return overlap,overlap2,frequency
def draw_new_weeks(x, y):
    """Render a smoothed line chart of new users over the last seven days.

    :param x: x-axis labels (dates of the last seven days)
    :param y: new-user counts, one per label in ``x``
    """
    chart = Line("最近七日访问量")
    chart.add(
        "新增用户数",
        x,
        y,
        is_smooth=True,
        mark_line=["max", "average"],
    )
    # Output path is relative to the script's working directory.
    chart.render('../my_result/trend_weeks.html')
def index(request):
    # Django view: look up a photo batch, its styles and shops, and build one
    # embedded pyecharts chart (UV/sales + conversion rates) per spuid.
    # NOTE(review): depends on BatchComparison / SpuidComparison /
    # StoreDailyData models and Overlap/Line imported at module level.
    # The original source was whitespace-mangled; indentation of the
    # statements marked NOTE(review) below is reconstructed — confirm
    # against version history.
    over_list = []
    style_coding_list = []
    err = ''
    batchs = ''
    style = ''
    shops_list = ''
    shops = ''
    batchs_msg_dic = ''
    if request.method == 'POST':
        batchs = request.POST.get('batch')
        batchs_msg = BatchComparison.objects.filter(batch=batchs).all()
        batchs_msg_dic = {}
        # Only the last matching row survives: the dict keys are overwritten
        # on every iteration — presumably `batch` is unique in practice.
        for batch in batchs_msg:
            batchs_msg_dic['model'] = batch.model
            batchs_msg_dic['batch'] = batch.batch
            batchs_msg_dic['remark'] = batch.remark
            batchs_msg_dic['type'] = batch.type
            batchs_msg_dic['stylist'] = batch.stylist
            batchs_msg_dic['shooting_date'] = str(batch.shooting_date)
            batchs_msg_dic['location'] = batch.location
    # Check whether a batch number was supplied
    if len(batchs) > 0:
        # Look up the batch's styles
        batch_data = BatchComparison.objects.filter(batch=batchs).all()
        style_coding_data = SpuidComparison.objects.filter(
            batch=batchs).values('style_coding').all()
        # Does the batch have any style at all?
        if len(style_coding_data) > 0:
            # All spuids of the requested style (or the first style found)
            if request.method == 'POST':
                style = request.POST.get('style_coding')
                spuid_data = SpuidComparison.objects.filter(
                    style_coding=style).all()
            else:
                spuid_data = SpuidComparison.objects.filter(
                    style_coding=style_coding_data.first()).all()
            spuid_list = []
            for spuids in spuid_data:
                spuid_dic = {}
                spuid_dic['spuid'] = spuids.spuid
                spuid_dic['brand'] = spuids.brand
                spuid_list.append(spuid_dic)
            # Style dropdown: distinct style codes of the batch
            style_coding_data_2 = SpuidComparison.objects.filter(
                batch=batchs).values('style_coding').distinct().all()
            style_coding_list = [
                i['style_coding'] for i in style_coding_data_2
            ]
            # Shop dropdown: brand/spuid pairs of the selected style
            shops_data = SpuidComparison.objects.filter(
                batch=batchs, style_coding=style).all()
            shops_list = []
            for i in shops_data:
                tmp = {}
                tmp['brand'] = i.brand
                tmp['spuid'] = i.spuid
                shops_list.append(tmp)
            shops = request.POST.get('shopname')
            # Does the style have any spuid?
            if len(spuid_list) > 0:
                # Query daily data and generate the charts
                over_list = []
                if shops == '全部':
                    for shop in shops_list:
                        data = StoreDailyData.objects.filter(
                            spuid=shop['spuid'], brand=shop['brand']).values(
                                'uv', 'title', 'status', 'spuid',
                                'conversion_rate_of_order_payment',
                                'conversion_rate_of_payment',
                                'number_of_additional_purchases',
                                'collection_number',
                                'number_of_order_items', 'date').all()
                        x_list = []
                        y_list_1 = []
                        y_list_1_2 = []
                        y_list_1_3 = []
                        for i in data:
                            x_list.append(i['date'])
                            y_list_1.append(
                                '%.2f' %
                                (i['conversion_rate_of_payment'] * 100))
                            if i['uv'] == 0:
                                tmp = '0.00'
                            else:
                                tmp = '%.2f' % (i['collection_number'] /
                                                i['uv'] * 100)
                                # NOTE(review): placement reconstructed —
                                # when uv == 0, tmp_2 is stale (or unbound
                                # on the first row). Confirm intent.
                                tmp_2 = '%.2f' % (
                                    i['number_of_additional_purchases'] /
                                    i['uv'] * 100)
                            y_list_1_2.append(tmp)
                            y_list_1_3.append(tmp_2)
                        overlap = Overlap()
                        line = Line()
                        line.add('转化率', x_list, y_list_1,
                                 yaxis_formatter='%', is_smooth=True)
                        line.add('收藏率', x_list, y_list_1_2,
                                 yaxis_formatter='%', is_smooth=True)
                        line.add('加购率', x_list, y_list_1_3,
                                 yaxis_formatter='%', is_smooth=True)
                        line_2 = Line(shop['brand'] + '-' + shop['spuid'])
                        y_list_2 = [i['uv'] for i in data]
                        y_list_2_2 = [
                            i['number_of_order_items'] for i in data
                        ]
                        line_2.add('UV', x_list, y_list_2, is_smooth=True)
                        line_2.add('成交量', x_list, y_list_2_2,
                                   is_smooth=True)
                        overlap.add(line_2)
                        # Percentages go on a secondary y-axis.
                        overlap.add(line, is_add_yaxis=True, yaxis_index=1)
                        ds = overlap.render_embed()
                        over_list.append(ds)
                else:
                    spuid_all = SpuidComparison.objects.filter(
                        batch=batchs, style_coding=style, brand=shops).all()
                    spuid_list = [i.spuid for i in spuid_all]
                    for spuid_ in spuid_list:
                        data = StoreDailyData.objects.filter(
                            spuid=spuid_, ).values(
                                'uv', 'conversion_rate_of_order_payment',
                                'conversion_rate_of_payment',
                                'number_of_additional_purchases',
                                'collection_number',
                                'number_of_order_items', 'date').all()
                        x_list = []
                        y_list_1 = []
                        y_list_1_2 = []
                        y_list_1_3 = []
                        for i in data:
                            x_list.append(i['date'])
                            y_list_1.append(
                                '%.2f' %
                                (i['conversion_rate_of_payment'] * 100))
                            if i['uv'] == 0:
                                tmp = '0.00'
                            else:
                                tmp = '%.2f' % (i['collection_number'] /
                                                i['uv'] * 100)
                                # NOTE(review): same uv == 0 caveat as above.
                                tmp_2 = '%.2f' % (
                                    i['number_of_additional_purchases'] /
                                    i['uv'] * 100)
                            y_list_1_2.append(tmp)
                            y_list_1_3.append(tmp_2)
                        overlap = Overlap()
                        line = Line()
                        line.add('转化率', x_list, y_list_1,
                                 yaxis_formatter='%', is_smooth=True)
                        line.add('收藏率', x_list, y_list_1_2,
                                 yaxis_formatter='%', is_smooth=True)
                        line.add('加购率', x_list, y_list_1_3,
                                 yaxis_formatter='%', is_smooth=True)
                        line_2 = Line(shops + '-' + spuid_)
                        y_list_2 = [i['uv'] for i in data]
                        y_list_2_2 = [
                            i['number_of_order_items'] for i in data
                        ]
                        line_2.add('UV', x_list, y_list_2, is_smooth=True)
                        line_2.add('成交量', x_list, y_list_2_2,
                                   is_smooth=True)
                        overlap.add(line_2)
                        overlap.add(line, is_add_yaxis=True, yaxis_index=1)
                        ds = overlap.render_embed()
                        over_list.append(ds)
                    # NOTE(review): placement reconstructed — adds the
                    # "all shops" entry to the shop dropdown.
                    shops_list.append({'brand': '全部'})
            else:
                err = '该款式没有对应的spuid'
        else:
            err = '该批次没有对应的款式'
    # else:
    #     err = '批次号有误'
    batch_form = Batch()
    return render(
        request, 'index.html', {
            'style_coding': style_coding_list,
            "line_list": over_list,
            'err': err,
            'batch_form': batch_form,
            'batch': batchs,
            'style': style,
            'style_coding_list': style_coding_list,
            'shops_list': shops_list,
            'shops': shops,
            'batchs_msg_dic': batchs_msg_dic
        })
# ) # pie1.render('scores.html') ########################################################################### # 评论时间分布 data['dates'] = data.date.apply(lambda x: pd.Timestamp(x).date()) data['time'] = data.date.apply(lambda x: pd.Timestamp(x).time().hour) # print(data.author) num_date = data.author.groupby(data['dates']).count() chart = Line("评论数时间分布") chart.use_theme('dark') chart.add('评论时间', num_date.index, num_date.values, is_fill=True, line_opacity=0.2, area_opacity=0.4, symbol=None) chart.render('comment_time_stamp.html') # 好评字数分析 datalikes = ['num', 'likes'] datalikes = data.loc[data.likes > 5] datalikes['num'] = datalikes.content.apply(lambda x: len(x)) chart = Scatter("likes") chart.use_theme('dark') chart.add('likes', np.log(datalikes.likes), datalikes.num,
import json
import sys
import glob
import os


def load_js(jsfile):
    """Parse a JSON-lines deal file into parallel axis/point lists.

    Each non-blank line must be a JSON object whose ``dealDate`` and
    ``unitPrice`` values are single-element lists.

    :param jsfile: path to the JSON-lines file
    :return: tuple ``(x_axis, points)`` — deal dates and unit prices,
        in file order
    :raises KeyError: if a line lacks ``dealDate`` or ``unitPrice``
    """
    x_axis = []
    points = []
    with open(jsfile, 'r') as fp:
        # Iterate the file directly instead of a manual readline() loop.
        for raw in fp:
            raw = raw.strip()
            if not raw:
                # Tolerate blank/trailing lines instead of crashing
                # in json.loads.
                continue
            deal_dict = json.loads(raw)
            x_axis.append(deal_dict['dealDate'][0])
            points.append(deal_dict['unitPrice'][0])
    return x_axis, points


if __name__ == '__main__':
    # Imported lazily so load_js() can be reused/tested without
    # pyecharts installed.
    from pyecharts import Line

    dir_path = sys.argv[1]
    line = Line("Deal Curve")
    cwd = os.getcwd()
    os.chdir(dir_path)
    try:
        # One stacked, labelled curve per *.json file in the directory.
        for js in glob.glob("*.json"):
            x_axis, points = load_js(js)
            curve_name = js[:js.find(".json")]
            line.add(curve_name, x_axis, points,
                     is_stack=True, is_label_show=True)
    finally:
        # Restore the working directory even if a file fails to parse.
        os.chdir(cwd)
    line.render()
def test_page_grid_timeline_overlap():
    """Render a Page combining a Grid, a Timeline and an Overlap chart."""
    # Grid: four charts laid out in the four quadrants of one canvas.
    v1 = [5, 20, 36, 10, 75, 90]
    v2 = [10, 25, 8, 60, 20, 80]
    bar = Bar("柱状图示例", height=720, width=1200, title_pos="65%")
    bar.add("商家A", CLOTHES, v1, is_stack=True)
    bar.add("商家B", CLOTHES, v2, is_stack=True, legend_pos="80%")
    line = Line("折线图示例")
    line.add(
        "最高气温",
        WEEK,
        [11, 11, 15, 13, 12, 13, 10],
        mark_point=["max", "min"],
        mark_line=["average"],
    )
    line.add(
        "最低气温",
        WEEK,
        [1, -2, 2, 5, 3, 2, 0],
        mark_point=["max", "min"],
        mark_line=["average"],
        legend_pos="20%",
    )
    v1 = [5, 20, 36, 10, 75, 90]
    v2 = [10, 25, 8, 60, 20, 80]
    scatter = Scatter("散点图示例", title_top="50%", title_pos="65%")
    scatter.add("scatter", v1, v2, legend_top="50%", legend_pos="80%")
    es = EffectScatter("动态散点图示例", title_top="50%")
    es.add(
        "es",
        [11, 11, 15, 13, 12, 13, 10],
        [1, -2, 2, 5, 3, 2, 0],
        effect_scale=6,
        legend_top="50%",
        legend_pos="20%",
    )
    grid = Grid()
    grid.add(bar, grid_bottom="60%", grid_left="60%")
    grid.add(line, grid_bottom="60%", grid_right="60%")
    grid.add(scatter, grid_top="60%", grid_left="60%")
    grid.add(es, grid_top="60%", grid_right="60%")
    # Timeline: auto-playing yearly sales bars (2012-2016), random data.
    bar_1 = Bar("2012 年销量", "数据纯属虚构")
    bar_1.add("春季", CLOTHES, [randint(10, 100) for _ in range(6)])
    bar_1.add("夏季", CLOTHES, [randint(10, 100) for _ in range(6)])
    bar_1.add("秋季", CLOTHES, [randint(10, 100) for _ in range(6)])
    bar_1.add("冬季", CLOTHES, [randint(10, 100) for _ in range(6)])
    bar_2 = Bar("2013 年销量", "数据纯属虚构")
    bar_2.add("春季", CLOTHES, [randint(10, 100) for _ in range(6)])
    bar_2.add("夏季", CLOTHES, [randint(10, 100) for _ in range(6)])
    bar_2.add("秋季", CLOTHES, [randint(10, 100) for _ in range(6)])
    bar_2.add("冬季", CLOTHES, [randint(10, 100) for _ in range(6)])
    bar_3 = Bar("2014 年销量", "数据纯属虚构")
    bar_3.add("春季", CLOTHES, [randint(10, 100) for _ in range(6)])
    bar_3.add("夏季", CLOTHES, [randint(10, 100) for _ in range(6)])
    bar_3.add("秋季", CLOTHES, [randint(10, 100) for _ in range(6)])
    bar_3.add("冬季", CLOTHES, [randint(10, 100) for _ in range(6)])
    bar_4 = Bar("2015 年销量", "数据纯属虚构")
    bar_4.add("春季", CLOTHES, [randint(10, 100) for _ in range(6)])
    bar_4.add("夏季", CLOTHES, [randint(10, 100) for _ in range(6)])
    bar_4.add("秋季", CLOTHES, [randint(10, 100) for _ in range(6)])
    bar_4.add("冬季", CLOTHES, [randint(10, 100) for _ in range(6)])
    # Only the last frame carries explicit size and a visible legend.
    bar_5 = Bar("2016 年销量", "数据纯属虚构", height=720, width=1200)
    bar_5.add("春季", CLOTHES, [randint(10, 100) for _ in range(6)])
    bar_5.add("夏季", CLOTHES, [randint(10, 100) for _ in range(6)])
    bar_5.add("秋季", CLOTHES, [randint(10, 100) for _ in range(6)])
    bar_5.add(
        "冬季",
        CLOTHES,
        [randint(10, 100) for _ in range(6)],
        is_legend_show=True,
    )
    timeline = Timeline(is_auto_play=True, timeline_bottom=0)
    timeline.add(bar_1, "2012 年")
    timeline.add(bar_2, "2013 年")
    timeline.add(bar_3, "2014 年")
    timeline.add(bar_4, "2015 年")
    timeline.add(bar_5, "2016 年")
    # Overlap: precipitation bars plus a temperature line on a second y-axis.
    attr = ["{}月".format(i) for i in range(1, 13)]
    v1 = [2.0, 4.9, 7.0, 23.2, 25.6, 76.7, 135.6, 162.2, 32.6, 20.0, 6.4, 3.3]
    v2 = [2.6, 5.9, 9.0, 26.4, 28.7, 70.7, 175.6, 182.2, 48.7, 18.8, 6.0, 2.3]
    v3 = [2.0, 2.2, 3.3, 4.5, 6.3, 10.2, 20.3, 23.4, 23.0, 16.5, 12.0, 6.2]
    bar = Bar(height=720, width=1200)
    bar.add("蒸发量", attr, v1)
    bar.add("降水量", attr, v2, yaxis_formatter=" ml", yaxis_max=250)
    line = Line()
    line.add("平均温度", attr, v3, yaxis_formatter=" °C")
    overlap = Overlap()
    overlap.add(bar)
    overlap.add(line, yaxis_index=1, is_add_yaxis=True)
    # Stack all three composites vertically on one Page and render it.
    page = Page()
    page.add(grid)
    page.add(timeline)
    page.add(overlap)
    page.render()
def test_more():
    """Render a Page stacking seven chart types and assert its contents."""
    page = Page()
    # line
    line = Line("折线图示例")
    line.add(
        "最高气温",
        WEEK,
        [11, 11, 15, 13, 12, 13, 10],
        mark_point=["max", "min"],
        mark_line=["average"],
    )
    line.add(
        "最低气温",
        WEEK,
        [1, -2, 2, 5, 3, 2, 0],
        mark_point=["max", "min"],
        mark_line=["average"],
    )
    # pie
    v1 = [11, 12, 13, 10, 10, 10]
    pie = Pie("饼图-圆环图示例", title_pos="center")
    pie.add(
        "",
        CLOTHES,
        v1,
        radius=[40, 75],
        label_text_color=None,
        is_label_show=True,
        legend_orient="vertical",
        legend_pos="left",
    )
    # Page.add accepts a list — adds both charts at once.
    page.add([line, pie])
    # kline: one [open, close, low, high] row per July 2017 trading day.
    v1 = [
        [2320.26, 2320.26, 2287.3, 2362.94],
        [2300, 2291.3, 2288.26, 2308.38],
        [2295.35, 2346.5, 2295.35, 2345.92],
        [2347.22, 2358.98, 2337.35, 2363.8],
        [2360.75, 2382.48, 2347.89, 2383.76],
        [2383.43, 2385.42, 2371.23, 2391.82],
        [2377.41, 2419.02, 2369.57, 2421.15],
        [2425.92, 2428.15, 2417.58, 2440.38],
        [2411, 2433.13, 2403.3, 2437.42],
        [2432.68, 2334.48, 2427.7, 2441.73],
        [2430.69, 2418.53, 2394.22, 2433.89],
        [2416.62, 2432.4, 2414.4, 2443.03],
        [2441.91, 2421.56, 2418.43, 2444.8],
        [2420.26, 2382.91, 2373.53, 2427.07],
        [2383.49, 2397.18, 2370.61, 2397.94],
        [2378.82, 2325.95, 2309.17, 2378.82],
        [2322.94, 2314.16, 2308.76, 2330.88],
        [2320.62, 2325.82, 2315.01, 2338.78],
        [2313.74, 2293.34, 2289.89, 2340.71],
        [2297.77, 2313.22, 2292.03, 2324.63],
        [2322.32, 2365.59, 2308.92, 2366.16],
        [2364.54, 2359.51, 2330.86, 2369.65],
        [2332.08, 2273.4, 2259.25, 2333.54],
        [2274.81, 2326.31, 2270.1, 2328.14],
        [2333.61, 2347.18, 2321.6, 2351.44],
        [2340.44, 2324.29, 2304.27, 2352.02],
        [2326.42, 2318.61, 2314.59, 2333.67],
        [2314.68, 2310.59, 2296.58, 2320.96],
        [2309.16, 2286.6, 2264.83, 2333.29],
        [2282.17, 2263.97, 2253.25, 2286.33],
        [2255.77, 2270.28, 2253.31, 2276.22],
    ]
    kline = Kline("K 线图示例")
    kline.add(
        "日K",
        ["2017/7/{}".format(i + 1) for i in range(31)],
        v1,
        is_datazoom_show=True,
    )
    page.add(kline)
    # radar: (indicator name, max value) schema plus two series.
    schema = [
        ("销售", 6500),
        ("管理", 16000),
        ("信息技术", 30000),
        ("客服", 38000),
        ("研发", 52000),
        ("市场", 25000),
    ]
    v1 = [[4300, 10000, 28000, 35000, 50000, 19000]]
    v2 = [[5000, 14000, 28000, 31000, 42000, 21000]]
    radar = Radar("雷达图示例")
    radar.config(schema)
    radar.add("预算分配", v1, is_splitline=True, is_axisline_show=True)
    radar.add(
        "实际开销",
        v2,
        label_color=["#4e79a7"],
        is_area_show=False,
        legend_selectedmode="single",
    )
    page.add(radar)
    # scatter3d: 80 random (x, y, z) points colored by RANGE_COLOR.
    import random

    data = [
        [
            random.randint(0, 100),
            random.randint(0, 100),
            random.randint(0, 100),
        ] for _ in range(80)
    ]
    scatter3D = Scatter3D("3D 散点图示例", width=1200, height=600)
    scatter3D.add("", data, is_visualmap=True, visual_range_color=RANGE_COLOR)
    page.add(scatter3D)
    # wordcloud
    name = [
        "Sam S Club",
        "Macys",
        "Amy Schumer",
        "Jurassic World",
        "Charter Communications",
        "Chick Fil A",
        "Planet Fitness",
        "Pitch Perfect",
        "Express",
        "Home",
        "Johnny Depp",
        "Lena Dunham",
        "Lewis Hamilton",
        "KXAN",
        "Mary Ellen Mark",
        "Farrah Abraham",
        "Rita Ora",
        "Serena Williams",
        "NCAA baseball tournament",
        "Point Break",
    ]
    value = [
        10000,
        6181,
        4386,
        4055,
        2467,
        2244,
        1898,
        1484,
        1112,
        965,
        847,
        582,
        555,
        550,
        462,
        366,
        360,
        282,
        273,
        265,
    ]
    wordcloud = WordCloud(width=1300, height=620)
    wordcloud.add("", name, value, word_size_range=[30, 100], rotate_step=66)
    page.add(wordcloud)
    # liquid
    liquid = Liquid("水球图示例")
    liquid.add("Liquid", [0.6])
    page.add(liquid)
    # Seven charts total; [line, pie] counted as two.
    assert len(page) == 7
    assert isinstance(page[0], Line)
    assert (
        ("echarts" in page.js_dependencies)
        or ("echarts.min" in page.js_dependencies)
    )
    page.render()
# Plot the 2018 monthly-average AQI trend for four cities.
# NOTE(review): `citys` and `cityes_AQI` are defined earlier in the script
# (not visible in this chunk); `citys` presumably lists Beijing, Shanghai,
# Guangzhou and Shenzhen in the legend order used below — confirm.
for i in range(4):
    filename = 'D:/data/' + citys[i] + '_AQI' + '_2018.csv'
    aqi_data = pd.read_csv(filename)
    get_data = aqi_data[['Date', 'AQI']]
    month_for_data = []
    # The middle component of the slash-separated date is the month.
    for j in get_data['Date']:
        time = j.split('/')[1]
        month_for_data.append(time)
    aqi_data['Month'] = month_for_data
    # Monthly AQI mean
    month_data = aqi_data.groupby(['Month'])
    month_AQI = month_data['AQI'].agg(['mean'])
    month_AQI.reset_index(inplace=True)
    month_AQI_average = month_AQI.sort_index()
    month_AQI_data = np.array(month_AQI_average['mean'])
    # Truncate each mean to an int and stringify for the chart.
    month_AQI_data_int = ["{}".format(int(i)) for i in month_AQI_data]
    cityes_AQI.append(month_AQI_data_int)
months = ["{}".format(str(i) + '月') for i in range(1, 13)]
line = Line("2018年北上广深AQI全年走势图", title_pos='center',
            title_top='0', width=800, height=400)
line.add("北京", months, cityes_AQI[0], line_color='red', legend_top='8%')
line.add("上海", months, cityes_AQI[1], line_color='purple', legend_top='8%')
line.add("广州", months, cityes_AQI[2], line_color='blue', legend_top='8%')
line.add("深圳", months, cityes_AQI[3], line_color='orange', legend_top='8%')
line.render("2018年北上广深AQI全年走势图.html")
def test_grid_multiple_datazoom_index():
    """A Grid where one datazoom control drives both x-axes (index 0 and 1)."""
    line = Line("折线图示例", width=1200, height=700)
    line.add(
        "最高气温",
        WEEK,
        [11, 11, 15, 13, 12, 13, 10],
        mark_point=["max", "min"],
        mark_line=["average"],
    )
    line.add(
        "最低气温",
        WEEK,
        [1, -2, 2, 5, 3, 2, 0],
        mark_point=["max", "min"],
        legend_top="50%",
        mark_line=["average"],
        is_datazoom_show=True,
        # One slider zooms both this chart's x-axis and the kline's below.
        datazoom_xaxis_index=[0, 1],
    )
    # [open, close, low, high] per July 2017 trading day.
    v1 = [
        [2320.26, 2320.26, 2287.3, 2362.94],
        [2300, 2291.3, 2288.26, 2308.38],
        [2295.35, 2346.5, 2295.35, 2345.92],
        [2347.22, 2358.98, 2337.35, 2363.8],
        [2360.75, 2382.48, 2347.89, 2383.76],
        [2383.43, 2385.42, 2371.23, 2391.82],
        [2377.41, 2419.02, 2369.57, 2421.15],
        [2425.92, 2428.15, 2417.58, 2440.38],
        [2411, 2433.13, 2403.3, 2437.42],
        [2432.68, 2334.48, 2427.7, 2441.73],
        [2430.69, 2418.53, 2394.22, 2433.89],
        [2416.62, 2432.4, 2414.4, 2443.03],
        [2441.91, 2421.56, 2418.43, 2444.8],
        [2420.26, 2382.91, 2373.53, 2427.07],
        [2383.49, 2397.18, 2370.61, 2397.94],
        [2378.82, 2325.95, 2309.17, 2378.82],
        [2322.94, 2314.16, 2308.76, 2330.88],
        [2320.62, 2325.82, 2315.01, 2338.78],
        [2313.74, 2293.34, 2289.89, 2340.71],
        [2297.77, 2313.22, 2292.03, 2324.63],
        [2322.32, 2365.59, 2308.92, 2366.16],
        [2364.54, 2359.51, 2330.86, 2369.65],
        [2332.08, 2273.4, 2259.25, 2333.54],
        [2274.81, 2326.31, 2270.1, 2328.14],
        [2333.61, 2347.18, 2321.6, 2351.44],
        [2340.44, 2324.29, 2304.27, 2352.02],
        [2326.42, 2318.61, 2314.59, 2333.67],
        [2314.68, 2310.59, 2296.58, 2320.96],
        [2309.16, 2286.6, 2264.83, 2333.29],
        [2282.17, 2263.97, 2253.25, 2286.33],
        [2255.77, 2270.28, 2253.31, 2276.22],
    ]
    kline = Kline("K 线图示例", title_top="50%")
    kline.add(
        "日K",
        ["2017/7/{}".format(i + 1) for i in range(31)],
        v1,
        is_datazoom_show=True,
    )
    # Line on top, kline on the bottom half of the canvas.
    grid = Grid()
    grid.add(line, grid_top="60%")
    grid.add(kline, grid_bottom="60%")
    grid.render()
) pie1.render('评分.html') #评论数 datas['dates'] = datas['date'].apply( lambda x: pd.Timestamp(x).date()) #注意,此处为dates,区别原date datas['hour'] = datas['date'].apply(lambda x: pd.Timestamp(x).time().hour) num_of_date = datas.author.groupby(datas.dates).count() #每天有多少作者评论 #评论数时间分布 chart = Line("评论数时间分布") chart.use_theme('dark') chart.add( '评论时间分布', num_of_date.index, num_of_date.values, is_fill=True, #填充曲线所绘制面积 line_opacity=0.2, #折线透明度 area_opacity=0.4, #区域透明度 symbol=None, ) chart.render('评论时间分布.html') #时间分布 num_of_hour = datas.author.groupby(datas.hour).count() #各个小时段有多少作者评论 chart = Line("评论日内时间分布") chart.use_theme('dark') chart.add('评论日内时间分布', num_of_hour.index, num_of_hour.values, mark_point_symbol='diamond', mark_point_textcolor='#40ff27', line_width=2)
ungoods_center_area_sum = row[13] ungoods_center_house_num = row[14] ungoods_center_house_area = row[15] ungoods_center_unhouse_area = row[16] ungoods_suburb_area_sum = row[17] ungoods_suburb_house_num = row[18] ungoods_suburb_house_area = row[19] ungoods_suburb_unhouse_area = row[20] ungoods_city_area_sum = row[21] ungoods_city_house_num = row[22] ungoods_city_house_area = row[23] ungoods_city_unhouse_area = row[24] list_date.append(date) list_goods_center_house_num.append(goods_center_house_num) list_goods_suburb_house_num.append(goods_suburb_house_num) list_goods_city_house_num.append(goods_city_house_num) list_ungoods_center_house_num.append(ungoods_center_house_num) list_ungoods_suburb_house_num.append(ungoods_suburb_house_num) list_ungoods_city_house_num.append(ungoods_city_house_num) line = Line("房产走势图") line.add("城区商品房", list_date, list_goods_center_house_num, mark_point=["max", "min"]) line.add("郊区商品房", list_date, list_goods_suburb_house_num) line.add("全市商品房", list_date, list_goods_city_house_num) line.add("城区二手房", list_date, list_ungoods_center_house_num) line.add("郊区二手房", list_date, list_ungoods_suburb_house_num) line.add("全市二手房", list_date, list_ungoods_city_house_num) line.show_config() line.render()
# Plot actual vs. forecast air-conditioner sales.
# NOTE(review): chunk of a larger forecasting script — list_pre1, df_plot,
# df and months are defined earlier (not visible here). df_plot appears to
# hold one forecast column per month; each diagonal offset is the
# 1st..4th-step-ahead prediction — confirm against the producing code.
list_pre2 = []
list_pre3 = []
list_pre4 = []
for j in range(0, df_plot.shape[1]):
    list_pre1.append(df_plot.iloc[j, j])
    list_pre2.append(df_plot.iloc[j + 1, j])
    list_pre3.append(df_plot.iloc[j + 2, j])
    list_pre4.append(df_plot.iloc[j + 3, j])
# Tail of the actuals aligned with the forecast window.
date_all = df.timestamp[df.shape[0] - months + 3:]
sales_real = df.sales[df.shape[0] - months + 3:]
date_pre = df_plot.columns
sales1 = list_pre1
sales2 = list_pre2
sales3 = list_pre3
sales4 = list_pre4
line = Line("空调销售预测")
line.add("实际销量结果", date_all, sales_real, line_color='black', line_width=2)
#line.add("第一次预测结果", date_pre, sales1,line_type='dashed',line_color='green')
#line.add("第二次预测结果", date_pre, sales2,line_type='dashed',line_color='green')
#line.add("第三次预测结果", date_pre, sales3,line_type='dashed',line_color='green')
line.add("第四次预测结果", date_pre, sales4, line_type='dashed', line_color='green')
#line.add("python版arima预测结果",date_pre,sales_arima_python,line_type='dashed',line_color='red',line_width=2)
#line.add("商家B", attr, v2, is_smooth=True,mark_line=["max", "average"])
line.render('../data/python_ets2.html')
from pyecharts import Bar, Line, Overlap

# Shared x-axis categories and the two data series.
categories = ['A', 'B', 'C', 'D', 'E', 'F']
bar_values = [10, 20, 30, 40, 50, 60]
line_values = [38, 28, 35, 58, 65, 70]

# Bar layer.
bar_chart = Bar('Line - Bar示例')
bar_chart.add('bar', categories, bar_values)

# Line layer drawn over the bars.
line_chart = Line()
line_chart.add('line', categories, line_values)

# Merge both layers onto a single canvas and write the HTML page.
combined = Overlap()
combined.add(bar_chart)
combined.add(line_chart)
combined.render('./picture11.html')
def timeline_charts():
    """Assemble a Page with three Timeline widgets: yearly sales bars,
    yearly rose-pie shares, and monthly bar+line Overlap frames.

    All data is random. WIDTH and HEIGHT are module-level constants
    defined outside this chunk.
    """
    page = Page()
    attr = ["衬衫", "羊毛衫", "雪纺衫", "裤子", "高跟鞋", "袜子"]
    bar_1 = Bar("2012 年销量", "数据纯属虚构")
    bar_1.add("春季", attr, [randint(10, 100) for _ in range(6)])
    bar_1.add("夏季", attr, [randint(10, 100) for _ in range(6)])
    bar_1.add("秋季", attr, [randint(10, 100) for _ in range(6)])
    bar_1.add("冬季", attr, [randint(10, 100) for _ in range(6)])
    bar_2 = Bar("2013 年销量", "数据纯属虚构")
    bar_2.add("春季", attr, [randint(10, 100) for _ in range(6)])
    bar_2.add("夏季", attr, [randint(10, 100) for _ in range(6)])
    bar_2.add("秋季", attr, [randint(10, 100) for _ in range(6)])
    bar_2.add("冬季", attr, [randint(10, 100) for _ in range(6)])
    bar_3 = Bar("2014 年销量", "数据纯属虚构")
    bar_3.add("春季", attr, [randint(10, 100) for _ in range(6)])
    bar_3.add("夏季", attr, [randint(10, 100) for _ in range(6)])
    bar_3.add("秋季", attr, [randint(10, 100) for _ in range(6)])
    bar_3.add("冬季", attr, [randint(10, 100) for _ in range(6)])
    bar_4 = Bar("2015 年销量", "数据纯属虚构")
    bar_4.add("春季", attr, [randint(10, 100) for _ in range(6)])
    bar_4.add("夏季", attr, [randint(10, 100) for _ in range(6)])
    bar_4.add("秋季", attr, [randint(10, 100) for _ in range(6)])
    bar_4.add("冬季", attr, [randint(10, 100) for _ in range(6)])
    # Only the final frame sets explicit size and shows the legend.
    bar_5 = Bar("2016 年销量", "数据纯属虚构", width=WIDTH, height=HEIGHT)
    bar_5.add("春季", attr, [randint(10, 100) for _ in range(6)])
    bar_5.add("夏季", attr, [randint(10, 100) for _ in range(6)])
    bar_5.add("秋季", attr, [randint(10, 100) for _ in range(6)])
    bar_5.add("冬季", attr, [randint(10, 100) for _ in range(6)],
              is_legend_show=True)
    chart = Timeline(is_auto_play=True, timeline_bottom=0)
    chart.add(bar_1, '2012 年')
    chart.add(bar_2, '2013 年')
    chart.add(bar_3, '2014 年')
    chart.add(bar_4, '2015 年')
    chart.add(bar_5, '2016 年')
    page.add(chart)
    attr = ["衬衫", "羊毛衫", "雪纺衫", "裤子", "高跟鞋", "袜子"]
    # Shared keyword arguments for every rose-pie frame.
    pie_style = {
        "is_label_show": True,
        "radius": [30, 55],
        "rosetype": 'radius'
    }
    pie_1 = Pie("2012 年销量比例", "数据纯属虚构")
    pie_1.add("秋季", attr, [randint(10, 100) for _ in range(6)], **pie_style)
    pie_2 = Pie("2013 年销量比例", "数据纯属虚构")
    pie_2.add("秋季", attr, [randint(10, 100) for _ in range(6)], **pie_style)
    pie_3 = Pie("2014 年销量比例", "数据纯属虚构")
    pie_3.add("秋季", attr, [randint(10, 100) for _ in range(6)], **pie_style)
    pie_4 = Pie("2015 年销量比例", "数据纯属虚构")
    pie_4.add("秋季", attr, [randint(10, 100) for _ in range(6)], **pie_style)
    pie_5 = Pie("2016 年销量比例", "数据纯属虚构", width=WIDTH, height=HEIGHT)
    pie_5.add("秋季", attr, [randint(10, 100) for _ in range(6)], **pie_style)
    chart = Timeline(is_auto_play=True, timeline_bottom=0)
    chart.add(pie_1, '2012 年')
    chart.add(pie_2, '2013 年')
    chart.add(pie_3, '2014 年')
    chart.add(pie_4, '2015 年')
    chart.add(pie_5, '2016 年')
    page.add(chart)
    attr = ["{}月".format(i) for i in range(1, 7)]
    bar = Bar("1 月份数据", "数据纯属虚构")
    bar.add("bar", attr, [randint(10, 50) for _ in range(6)])
    line = Line()
    line.add("line", attr, [randint(50, 80) for _ in range(6)])
    overlap_0 = Overlap()
    overlap_0.add(bar)
    overlap_0.add(line)
    bar_1 = Bar("2 月份数据", "数据纯属虚构")
    bar_1.add("bar", attr, [randint(10, 50) for _ in range(6)])
    line_1 = Line()
    line_1.add("line", attr, [randint(50, 80) for _ in range(6)])
    overlap_1 = Overlap()
    overlap_1.add(bar_1)
    overlap_1.add(line_1)
    bar_2 = Bar("3 月份数据", "数据纯属虚构")
    bar_2.add("bar", attr, [randint(10, 50) for _ in range(6)])
    line_2 = Line()
    line_2.add("line", attr, [randint(50, 80) for _ in range(6)])
    overlap_2 = Overlap()
    overlap_2.add(bar_2)
    overlap_2.add(line_2)
    bar_3 = Bar("4 月份数据", "数据纯属虚构")
    bar_3.add("bar", attr, [randint(10, 50) for _ in range(6)])
    line_3 = Line()
    line_3.add("line", attr, [randint(50, 80) for _ in range(6)])
    overlap_3 = Overlap()
    overlap_3.add(bar_3)
    overlap_3.add(line_3)
    bar_4 = Bar("5 月份数据", "数据纯属虚构", width=WIDTH, height=HEIGHT)
    bar_4.add("bar", attr, [randint(10, 50) for _ in range(6)])
    line_4 = Line()
    line_4.add("line", attr, [randint(50, 80) for _ in range(6)])
    overlap_4 = Overlap()
    overlap_4.add(bar_4)
    overlap_4.add(line_4)
    chart = Timeline(timeline_bottom=0)
    # Timeline.add takes the underlying chart object, hence overlap_X.chart.
    chart.add(overlap_0.chart, '1 月')
    chart.add(overlap_1.chart, '2 月')
    chart.add(overlap_2.chart, '3 月')
    chart.add(overlap_3.chart, '4 月')
    chart.add(overlap_4.chart, '5 月')
    page.add(chart)
    return page
# Area-style line chart of two random temperature series.
# NOTE(review): x_times and y_temp_3 are defined earlier in the script
# (not visible in this chunk).
y_temp_10 = [random.randint(20, 35) for j in range(30)]
line = Line("折线图")
# # line.add("", x_times, y_temp_3, mark_line=['max'], mark_point=['min'])
# line.add("", x_times, y_temp_10, mark_line=['max'], mark_point=['min'])
# # Step-chart variant:
# line.add("", x_times, y_temp_3, mark_line=['max'], mark_point=['min'], is_step=True)
# line.add("", x_times, y_temp_10, mark_line=['max'], mark_point=['min'], is_step=True)
# Optional map packages for pyecharts:
# pip install echarts-countries-pypkg
# pip install echarts-china-provinces-pypkg
# pip install echarts-china-cities-pypkg
# pip install echarts-china-counties-pypkg
#
# # Area chart: fill under each line with a translucent colour.
line.add("", x_times, y_temp_3, is_fill=True, area_color='red',
         area_opacity=0.3)
line.add("", x_times, y_temp_10, is_fill=True, area_color='green',
         area_opacity=0.2)
line.render()
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Step-line chart demo: merchant A's sales rendered as a staircase."""
from pyecharts import Line

categories = ["衬衫", "羊毛衫", "雪纺衫", "裤子", "高跟鞋", "袜子"]
sales = [5, 20, 36, 10, 10, 100]

chart = Line("折线图-阶梯图示例")
# is_step draws horizontal/vertical segments; labels show each value.
chart.add("商家A", categories, sales, is_step=True, is_label_show=True)
chart.show_config()
chart.render()
def test_more():
    """Render a Page stacking line, pie, kline, radar, scatter3D,
    wordcloud and liquid charts."""
    page = Page()
    # line
    attr = ['周一', '周二', '周三', '周四', '周五', '周六', '周日']
    line = Line("折线图示例")
    line.add("最高气温", attr, [11, 11, 15, 13, 12, 13, 10],
             mark_point=["max", "min"], mark_line=["average"])
    line.add("最低气温", attr, [1, -2, 2, 5, 3, 2, 0],
             mark_point=["max", "min"], mark_line=["average"])
    page.add(line)
    # pie
    attr = ["衬衫", "羊毛衫", "雪纺衫", "裤子", "高跟鞋", "袜子"]
    v1 = [11, 12, 13, 10, 10, 10]
    pie = Pie("饼图-圆环图示例", title_pos='center')
    pie.add("", attr, v1, radius=[40, 75], label_text_color=None,
            is_label_show=True, legend_orient='vertical', legend_pos='left')
    page.add(pie)
    # kline: one [open, close, low, high] row per July 2017 trading day.
    v1 = [[2320.26, 2320.26, 2287.3, 2362.94],
          [2300, 2291.3, 2288.26, 2308.38],
          [2295.35, 2346.5, 2295.35, 2345.92],
          [2347.22, 2358.98, 2337.35, 2363.8],
          [2360.75, 2382.48, 2347.89, 2383.76],
          [2383.43, 2385.42, 2371.23, 2391.82],
          [2377.41, 2419.02, 2369.57, 2421.15],
          [2425.92, 2428.15, 2417.58, 2440.38],
          [2411, 2433.13, 2403.3, 2437.42],
          [2432.68, 2334.48, 2427.7, 2441.73],
          [2430.69, 2418.53, 2394.22, 2433.89],
          [2416.62, 2432.4, 2414.4, 2443.03],
          [2441.91, 2421.56, 2418.43, 2444.8],
          [2420.26, 2382.91, 2373.53, 2427.07],
          [2383.49, 2397.18, 2370.61, 2397.94],
          [2378.82, 2325.95, 2309.17, 2378.82],
          [2322.94, 2314.16, 2308.76, 2330.88],
          [2320.62, 2325.82, 2315.01, 2338.78],
          [2313.74, 2293.34, 2289.89, 2340.71],
          [2297.77, 2313.22, 2292.03, 2324.63],
          [2322.32, 2365.59, 2308.92, 2366.16],
          [2364.54, 2359.51, 2330.86, 2369.65],
          [2332.08, 2273.4, 2259.25, 2333.54],
          [2274.81, 2326.31, 2270.1, 2328.14],
          [2333.61, 2347.18, 2321.6, 2351.44],
          [2340.44, 2324.29, 2304.27, 2352.02],
          [2326.42, 2318.61, 2314.59, 2333.67],
          [2314.68, 2310.59, 2296.58, 2320.96],
          [2309.16, 2286.6, 2264.83, 2333.29],
          [2282.17, 2263.97, 2253.25, 2286.33],
          [2255.77, 2270.28, 2253.31, 2276.22]]
    kline = Kline("K 线图示例")
    kline.add("日K", ["2017/7/{}".format(i + 1) for i in range(31)], v1)
    page.add(kline)
    # radar: (indicator name, max value) schema plus two series.
    schema = [("销售", 6500), ("管理", 16000), ("信息技术", 30000),
              ("客服", 38000), ("研发", 52000), ("市场", 25000)]
    v1 = [[4300, 10000, 28000, 35000, 50000, 19000]]
    v2 = [[5000, 14000, 28000, 31000, 42000, 21000]]
    radar = Radar("雷达图示例")
    radar.config(schema)
    radar.add("预算分配", v1, is_splitline=True, is_axisline_show=True)
    radar.add("实际开销", v2, label_color=["#4e79a7"], is_area_show=False,
              legend_selectedmode='single')
    page.add(radar)
    # scatter3d: 80 random (x, y, z) points on a colour ramp.
    import random
    data = [[
        random.randint(0, 100),
        random.randint(0, 100),
        random.randint(0, 100)
    ] for _ in range(80)]
    range_color = [
        '#313695', '#4575b4', '#74add1', '#abd9e9', '#e0f3f8', '#ffffbf',
        '#fee090', '#fdae61', '#f46d43', '#d73027', '#a50026'
    ]
    scatter3D = Scatter3D("3D 散点图示例", width=1200, height=600)
    scatter3D.add("", data, is_visualmap=True, visual_range_color=range_color)
    page.add(scatter3D)
    # wordcloud
    name = [
        'Sam S Club', 'Macys', 'Amy Schumer', 'Jurassic World',
        'Charter Communications', 'Chick Fil A', 'Planet Fitness',
        'Pitch Perfect', 'Express', 'Home', 'Johnny Depp', 'Lena Dunham',
        'Lewis Hamilton', 'KXAN', 'Mary Ellen Mark', 'Farrah Abraham',
        'Rita Ora', 'Serena Williams', 'NCAA baseball tournament',
        'Point Break'
    ]
    value = [
        10000, 6181, 4386, 4055, 2467, 2244, 1898, 1484, 1112, 965, 847,
        582, 555, 550, 462, 366, 360, 282, 273, 265
    ]
    wordcloud = WordCloud(width=1300, height=620)
    wordcloud.add("", name, value, word_size_range=[30, 100], rotate_step=66)
    page.add(wordcloud)
    # liquid
    liquid = Liquid("水球图示例")
    liquid.add("Liquid", [0.6])
    page.add(liquid)
    page.render()
from pyecharts import Line, Bar, Pie, EffectScatter # 数据 attr = ["衬衫", "羊毛衫", "雪纺衫", "裤子", "高跟鞋", "袜子"] v1 = [5, 20, 36, 10, 10, 100] v2 = [55, 60, 16, 20, 15, 80] # 普通折线图 line = Line('折线图') line.add('商家A', attr, v1, mark_point=['max']) line.add('商家B', attr, v2, mark_point=['min'], is_smooth=True) line.show_config() line.render(path='./data/01-04折线图.html') # 阶梯折线图 line2 = Line('阶梯折线图') line2.add('商家A', attr, v1, is_step=True, is_label_show=True) line2.show_config() line2.render(path='./data/01-05阶梯折线图.html') # 面积折线图 line3 = Line("面积折线图") line3.add("商家A", attr, v1, is_fill=True, line_opacity=0.2, area_opacity=0.4, symbol=None, mark_point=['max']) line3.add("商家B", attr,
def Line_network(d, title, title1, date, network_in, network_put):
    """Build a wide two-series line chart of network traffic.

    :param d: chart title passed to the Line constructor
    :param title: legend name for the inbound series
    :param title1: legend name for the outbound series
    :param date: x-axis values shared by both series
    :param network_in: inbound traffic values
    :param network_put: outbound traffic values
    :return: the configured Line chart
    """
    chart = Line(d, width=1600, height=500)
    chart.add(title, date, network_in)
    chart.add(title1, date, network_put)
    return chart
# Plot the 2017 Tianjin PM2.5 yearly trend from a local CSV file and
# render it to an HTML page.
import pandas as pd
from pyecharts import Line

# NOTE(review): header=None treats the first CSV row as data while
# column names are supplied via `names` — confirm the file really has
# no header row, otherwise it is ingested as a bogus record.
df = pd.read_csv('air_tianjin_2017.csv', header=None,
                 names=["Date", "Quality_grade", "AQI", "AQI_rank", "PM"])

attr = df['Date']  # x-axis: one entry per day
v1 = df['PM']      # y-axis: PM2.5 readings

line = Line("2017年天津PM2.5全年走势图", title_pos='center', title_top='18',
            width=800, height=400)
# Filled area plus average line; circle markers on the max/min points.
line.add("", attr, v1, mark_line=['average'], is_fill=True,
         area_color="#000", area_opacity=0.3, mark_point=["max", "min"],
         mark_point_symbol="circle", mark_point_symbolsize=25)
line.render("2017年天津PM2.5全年走势图.html")
def test_grid():
    """Exercise the legacy ``is_grid=True`` / ``chart.grid(...)`` API
    with six multi-chart grid layouts, rendering each combination.

    Smoke test: passes as long as every layout configures and renders
    without raising.
    """
    # grid_0: stacked bars on top, temperature lines below
    attr = ["衬衫", "羊毛衫", "雪纺衫", "裤子", "高跟鞋", "袜子"]
    v1 = [5, 20, 36, 10, 75, 90]
    v2 = [10, 25, 8, 60, 20, 80]
    bar = Bar("柱状图示例", height=720, is_grid=True)
    bar.add("商家A", attr, v1, is_stack=True, grid_bottom="60%")
    bar.add("商家B", attr, v2, is_stack=True, grid_bottom="60%")
    line = Line("折线图示例", title_top="50%")
    attr = ['周一', '周二', '周三', '周四', '周五', '周六', '周日']
    line.add("最高气温", attr, [11, 11, 15, 13, 12, 13, 10],
             mark_point=["max", "min"], mark_line=["average"])
    line.add("最低气温", attr, [1, -2, 2, 5, 3, 2, 0],
             mark_point=["max", "min"], mark_line=["average"],
             legend_top="50%")
    bar.grid(line.get_series(), grid_top="60%")
    bar.show_config()
    bar.render()

    # grid_1: plain scatter on the right, effect scatter on the left
    v1 = [5, 20, 36, 10, 75, 90]
    v2 = [10, 25, 8, 60, 20, 80]
    scatter = Scatter(width=1200, is_grid=True)
    scatter.add("散点图示例", v1, v2, grid_left="60%", legend_pos="70%")
    es = EffectScatter()
    es.add("动态散点图示例", [11, 11, 15, 13, 12, 13, 10],
           [1, -2, 2, 5, 3, 2, 0], effect_scale=6, legend_pos="20%")
    scatter.grid(es.get_series(), grid_right="60%")
    scatter.show_config()
    scatter.render()

    # grid_2: four quadrants — bar, line, scatter, effect scatter
    attr = ["衬衫", "羊毛衫", "雪纺衫", "裤子", "高跟鞋", "袜子"]
    v1 = [5, 20, 36, 10, 75, 90]
    v2 = [10, 25, 8, 60, 20, 80]
    bar = Bar("柱状图示例", height=720, width=1200, title_pos="65%",
              is_grid=True)
    bar.add("商家A", attr, v1, is_stack=True, grid_bottom="60%",
            grid_left="60%")
    bar.add("商家B", attr, v2, is_stack=True, grid_bottom="60%",
            grid_left="60%", legend_pos="80%")
    line = Line("折线图示例")
    attr = ['周一', '周二', '周三', '周四', '周五', '周六', '周日']
    line.add("最高气温", attr, [11, 11, 15, 13, 12, 13, 10],
             mark_point=["max", "min"], mark_line=["average"])
    line.add("最低气温", attr, [1, -2, 2, 5, 3, 2, 0],
             mark_point=["max", "min"], mark_line=["average"],
             legend_pos="20%")
    v1 = [5, 20, 36, 10, 75, 90]
    v2 = [10, 25, 8, 60, 20, 80]
    scatter = Scatter("散点图示例", title_top="50%", title_pos="65%")
    scatter.add("scatter", v1, v2, legend_top="50%", legend_pos="80%")
    es = EffectScatter("动态散点图示例", title_top="50%")
    es.add("es", [11, 11, 15, 13, 12, 13, 10], [1, -2, 2, 5, 3, 2, 0],
           effect_scale=6, legend_top="50%", legend_pos="20%")
    bar.grid(line.get_series(), grid_bottom="60%", grid_right="60%")
    bar.grid(scatter.get_series(), grid_top="60%", grid_left="60%")
    bar.grid(es.get_series(), grid_top="60%", grid_right="60%")
    bar.show_config()
    bar.render()

    # grid_3: line chart on the left, pie on the right
    line = Line("折线图示例", width=1200, is_grid=True)
    attr = ['周一', '周二', '周三', '周四', '周五', '周六', '周日']
    line.add("最高气温", attr, [11, 11, 15, 13, 12, 13, 10],
             mark_point=["max", "min"], mark_line=["average"],
             grid_right="65%")
    line.add("最低气温", attr, [1, -2, 2, 5, 3, 2, 0],
             mark_point=["max", "min"], mark_line=["average"],
             legend_pos="20%")
    attr = ["衬衫", "羊毛衫", "雪纺衫", "裤子", "高跟鞋", "袜子"]
    v1 = [11, 12, 13, 10, 10, 10]
    pie = Pie("饼图示例", title_pos="45%")
    pie.add("", attr, v1, radius=[30, 55], legend_pos="65%",
            legend_orient='vertical')
    line.grid(pie.get_series(), grid_left="60%")
    line.show_config()
    line.render()

    # grid_4: line chart on the left, kline on the right
    line = Line("折线图示例", width=1200, is_grid=True)
    attr = ['周一', '周二', '周三', '周四', '周五', '周六', '周日']
    line.add("最高气温", attr, [11, 11, 15, 13, 12, 13, 10],
             mark_point=["max", "min"], mark_line=["average"],
             grid_right="60%")
    line.add("最低气温", attr, [1, -2, 2, 5, 3, 2, 0],
             mark_point=["max", "min"], mark_line=["average"],
             legend_pos="20%", grid_right="60%")
    # NOTE(review): attr/value below are assigned but never used by the
    # kline that follows.
    attr = ["衬衫", "羊毛衫", "雪纺衫", "裤子", "高跟鞋", "袜子"]
    value = [20, 40, 60, 80, 100, 120]
    v1 = [[2320.26, 2320.26, 2287.3, 2362.94],
          [2300, 2291.3, 2288.26, 2308.38],
          [2295.35, 2346.5, 2295.35, 2345.92],
          [2347.22, 2358.98, 2337.35, 2363.8],
          [2360.75, 2382.48, 2347.89, 2383.76],
          [2383.43, 2385.42, 2371.23, 2391.82],
          [2377.41, 2419.02, 2369.57, 2421.15],
          [2425.92, 2428.15, 2417.58, 2440.38],
          [2411, 2433.13, 2403.3, 2437.42],
          [2432.68, 2334.48, 2427.7, 2441.73],
          [2430.69, 2418.53, 2394.22, 2433.89],
          [2416.62, 2432.4, 2414.4, 2443.03],
          [2441.91, 2421.56, 2418.43, 2444.8],
          [2420.26, 2382.91, 2373.53, 2427.07],
          [2383.49, 2397.18, 2370.61, 2397.94],
          [2378.82, 2325.95, 2309.17, 2378.82],
          [2322.94, 2314.16, 2308.76, 2330.88],
          [2320.62, 2325.82, 2315.01, 2338.78],
          [2313.74, 2293.34, 2289.89, 2340.71],
          [2297.77, 2313.22, 2292.03, 2324.63],
          [2322.32, 2365.59, 2308.92, 2366.16],
          [2364.54, 2359.51, 2330.86, 2369.65],
          [2332.08, 2273.4, 2259.25, 2333.54],
          [2274.81, 2326.31, 2270.1, 2328.14],
          [2333.61, 2347.18, 2321.6, 2351.44],
          [2340.44, 2324.29, 2304.27, 2352.02],
          [2326.42, 2318.61, 2314.59, 2333.67],
          [2314.68, 2310.59, 2296.58, 2320.96],
          [2309.16, 2286.6, 2264.83, 2333.29],
          [2282.17, 2263.97, 2253.25, 2286.33],
          [2255.77, 2270.28, 2253.31, 2276.22]]
    kline = Kline("K 线图示例", title_pos="60%")
    kline.add("日K", ["2017/7/{}".format(i + 1) for i in range(31)], v1,
              legend_pos="80%")
    line.grid(kline.get_series(), grid_left="55%")
    line.show_config()
    line.render()

    # grid_5: heatmap on top, stacked bars below
    import random
    x_axis = [
        "12a", "1a", "2a", "3a", "4a", "5a", "6a", "7a", "8a", "9a",
        "10a", "11a", "12p", "1p", "2p", "3p", "4p", "5p", "6p", "7p",
        "8p", "9p", "10p", "11p"
    ]
    y_aixs = [
        "Saturday", "Friday", "Thursday", "Wednesday", "Tuesday",
        "Monday", "Sunday"
    ]
    # [hour, weekday, value] triples over the full 24x7 grid
    data = [[i, j, random.randint(0, 50)]
            for i in range(24) for j in range(7)]
    heatmap = HeatMap("热力图示例", height=700, is_grid=True)
    heatmap.add("热力图直角坐标系", x_axis, y_aixs, data, is_visualmap=True,
                visual_top="45%", visual_text_color="#000",
                visual_orient='horizontal', grid_bottom="60%")
    attr = ["衬衫", "羊毛衫", "雪纺衫", "裤子", "高跟鞋", "袜子"]
    v1 = [5, 20, 36, 10, 75, 90]
    v2 = [10, 25, 8, 60, 20, 80]
    bar = Bar("柱状图示例", title_top="52%")
    bar.add("商家A", attr, v1, is_stack=True)
    bar.add("商家B", attr, v2, is_stack=True, legend_top="50%")
    heatmap.grid(bar.get_series(), grid_top="60%")
    heatmap.show_config()
    heatmap.render()