Example #1
def cnplot(cn, new=True, axes=None, kdata=None):
    """绘制系统有效条件

    :param ConditionBase cn: 系统有效条件
    :param new: 仅在未指定axes的情况下生效,当为True时,创建新的窗口对象并在其中进行绘制
    :param axes: 指定在那个轴对象中进行绘制
    :param KData kdata: 指定的KData,如该值为None,则认为该系统有效条件已经
                        指定了交易对象,否则,使用该参数作为交易对象
    """
    if kdata is None:
        kdata = cn.getTO()
    else:
        cn.setTO(kdata)

    refdates = kdata.getDatetimeList()
    if kdata.getQuery().kType == KQuery.DAY:
        x_list = [d.date() for d in refdates]
    else:
        x_list = [d.datetime() for d in refdates]

    axes2 = None
    if axes is None:
        if new:
            axes, axes2 = create_figure(2)
            kplot(kdata, axes=axes)
        else:
            axes = gca()

    max_value = max(HIGH(kdata))
    line = Line()
    y1 = [max_value if cn.isValid(d) else 0 for d in refdates]
    y2 = [0 if cn.isValid(d) else max_value for d in refdates]
    line.add("",
             x_list,
             y1,
             is_step=True,
             is_fill=True,
             yaxis_max=max_value,
             is_symbol_show=False,
             line_opacity=0,
             label_color='#CD0000',
             area_color='#CD0000',
             area_opacity=0.2)
    line.add("",
             x_list,
             y2,
             is_step=True,
             is_fill=True,
             yaxis_max=max_value,
             is_symbol_show=False,
             line_opacity=0,
             label_color='#0000FF',
             area_color='#0000FF',
             area_opacity=0.2)

    gcf().set_xaxis(x_list)

    if axes2 is not None:
        axes2.add(line)
        gcf().add_axis(axes2)
    else:
        axes.add(line)
        gcf().add_axis(axes)

    return gcf()
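A minimal standalone sketch of the shading trick used above: two complementary step series are filled up to the y-axis maximum with invisible lines, so one band marks the bars where the condition holds and the other the rest. It assumes only the pyecharts 0.5-style Line API shown throughout these examples; the data and validity flags are made up.

# Sketch only: same is_step/is_fill masking as cnplot, without the hikyuu objects.
from pyecharts import Line

x_list = ['d1', 'd2', 'd3', 'd4', 'd5', 'd6']
valid = [True, True, False, False, True, True]  # hypothetical cn.isValid() results
max_value = 100                                 # stands in for max(HIGH(kdata))

y1 = [max_value if v else 0 for v in valid]     # red band: condition holds
y2 = [0 if v else max_value for v in valid]     # blue band: condition does not hold

line = Line()
line.add("", x_list, y1, is_step=True, is_fill=True, yaxis_max=max_value,
         is_symbol_show=False, line_opacity=0, area_color='#CD0000', area_opacity=0.2)
line.add("", x_list, y2, is_step=True, is_fill=True, yaxis_max=max_value,
         is_symbol_show=False, line_opacity=0, area_color='#0000FF', area_opacity=0.2)
line.render('cn_mask_sketch.html')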
Example #2
    list_pre2.append(df_plot.iloc[j + 1, j])
    list_pre3.append(df_plot.iloc[j + 2, j])
    list_pre4.append(df_plot.iloc[j + 3, j])

date_all = df.timestamp[df.shape[0] - months + 3:]
sales_real = df.sales[df.shape[0] - months + 3:]

date_pre = df_plot.columns
sales1 = list_pre1
sales2 = list_pre2
sales3 = list_pre3
sales4 = list_pre4

line = Line("空调销售预测",
            title_pos='right',
            width=1400,
            height=700,
            title_color='red',
            title_text_size=10)
line.add("实际销量结果", date_all, sales_real, line_color='black', line_width=2)

line.add("第一次预测结果", date_pre, sales1, line_type='dashed', line_color='green')
line.add("第二次预测结果", date_pre, sales2, line_type='dashed', line_color='green')
line.add("第三次预测结果", date_pre, sales3, line_type='dashed', line_color='green')
line.add("第四次预测结果", date_pre, sales4, line_type='dashed', line_color='green')
#line.add("python版arima预测结果",date_pre,sales_arima_python,line_type='dashed',line_color='red',line_width=2)
#line.add("商家B", attr, v2, is_smooth=True,mark_line=["max", "average"])
line.render('../data/python_ets.html')
'''
results1 = holtWinters(tsA, 12, 4, 4, mtype = 'additive')
results2 = holtWinters(tsA, 12, 4, 4, mtype = 'multiplicative')
Example #3
File: k8s.py  Project: smallc2009/sparrow
def hpa():
    try:
        td = time.strftime("%Y-%m-%d", time.localtime())
        valus = []
        db_k8s_deploy = db_op.k8s_deploy
        db_project = db_op.project_list
        Key = 'op_k8s_ingress_log'
        keys = tables = ('name', 'deployment', '最大副本', '最小副本', '当前副本', 'CPU阀值',
                         'CPU当前值', 'QPS当前值', '管理')
        v1 = client.AutoscalingV1Api()
        ret = v1.list_horizontal_pod_autoscaler_for_all_namespaces()
        for i in ret.items:
            try:
                rps = 0
                RPS = []
                project = db_k8s_deploy.query.with_entities(
                    db_k8s_deploy.project).filter(
                        db_k8s_deploy.deployment ==
                        i.spec.scale_target_ref.name).limit(1).all()
                if project:
                    domains = db_project.query.with_entities(
                        db_project.domain).filter(
                            db_project.project == project[0][0]).limit(
                                1).all()
                    if domains[0][0]:
                        for domain in domains[0][0].split(','):
                            vals = RC.hgetall('%s_%s_%s' % (Key, domain, td))
                            vals = sorted(vals.items(),
                                          key=lambda item: item[0])
                            if vals:
                                RPS.append(int(int(vals[-1][-1]) / 60))
                        if RPS:
                            rps = RPS[0]
                            if len(RPS) > 1:
                                rps = reduce(lambda x, y: x + y, RPS)
                    valus.append([
                        i.metadata.name, i.spec.scale_target_ref.name,
                        i.spec.max_replicas, i.spec.min_replicas,
                        i.status.current_replicas, '{0}%'.format(
                            i.spec.target_cpu_utilization_percentage),
                        '{0}%'.format(
                            i.status.current_cpu_utilization_percentage), rps
                    ])
            except Exception as e:
                logging.error(e)
        td = time.strftime('%Y-%m-%d', time.localtime())
        Key = 'op_hpa_chart_%s_%s' % (g.context, td)
        infos = RC.hgetall(Key)
        infos = sorted(infos.items(), key=lambda item: item[0].split('_')[-1])
        line = Line('HPA动态伸缩实时状态',
                    width='110%',
                    height='250px',
                    title_pos='8%',
                    title_text_size=14)
        for project in valus:
            attr = []
            vals = []
            for info in infos:
                if project[0] in info[0]:
                    attr.append(str(info[0].split('_')[-1]))
                    vals.append(int(info[1]))
            line.add(
                project[0],
                attr,
                vals,
                is_toolbox_show=False,
                is_smooth=True,
                mark_point=["max", "min"],
                mark_point_symbolsize=60,
                legend_pos='40%',
                is_datazoom_show=True,
                datazoom_range=[v for v in range(100, 10)],
                datazoom_type='both',
            )
    except Exception as e:
        logging.error(e)
    return render_template('k8s-resource.html',
                           valus=valus,
                           tables=tables,
                           keys=keys,
                           line=line,
                           resource='HPA')
Example #4
 def __init__(self):
     self.kline = Kline()
     self.line = Line()
     self.empty_line = Line()
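Example #4 only shows the chart holders being constructed; presumably the Kline and Line are later combined, following the Kline + Line + Overlap pattern used in the other examples here. A hedged sketch, not taken from the original class, with made-up data:

# Hypothetical use of the charts built in __init__: overlay candlesticks with a line.
from pyecharts import Kline, Line, Overlap

dates = ["2017/7/{}".format(i + 1) for i in range(3)]
ohlc = [[2320.26, 2320.26, 2287.3, 2362.94],
        [2300, 2291.3, 2288.26, 2308.38],
        [2295.35, 2346.5, 2295.35, 2345.92]]

kline = Kline()
kline.add("日K", dates, ohlc)
line = Line()
line.add("MA", dates, [2310.0, 2305.0, 2320.0])

overlap = Overlap()
overlap.add(kline)
overlap.add(line)
overlap.render('kline_line_sketch.html')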
Example #5
from pyecharts import Line

attr = ['衬衫', '羊毛衫', '雪纺衫', '裤子', '高跟鞋', '袜子']
v1 = [5, 20, 36, 10, 75, 90]
v2 = [10, 25, 8, 60, 20, 80]
line = Line('折线面积示例图')
line.add(
    '商家A',
    attr,
    v1,
    is_fill=True,
    line_opacity=0.2,  # line opacity
    area_opacity=0.4,
    symbol=None)
line.add(
    '商家B',
    attr,
    v2,
    is_fill=True,
    line_color='#000',  # black
    area_opacity=0.3,  # area fill opacity
    is_smooth=True)
line.render('./picture6.html')
Example #6
from pyecharts import Line, configure

configure(output_image=True)
line = Line("折线图", background_color="white", title_text_size=25)
attr = ['惠州', '东莞', '广州', '深圳', '佛山', '江门', '珠海']
v1 = [23, 45, 68, 58, 32, 28, 36]
v2 = [12, 22, 34, 29, 16, 14, 18]

line.add('举例数字1', attr, v1, mark_line=['average'], is_label_show=True)
# is_smooth: whether to smooth the line; with is_smooth=True the '举例数字2' series is drawn as a smooth curve.
line.add('举例数字2', attr, v2, is_label_show=True, is_smooth=True)

# is_fill draws an area chart:
# area_opacity: opacity of the filled area; area_color: fill color; symbol=None removes the point markers.
line.add('举例数字1', attr, v1, is_fill=True, area_opacity=0.4)
line.add('举例数字2', attr, v2, is_fill=True, is_smooth=True, area_opacity=0.4)

line.render()
Example #7
# encoding: utf-8
# Author: 孙亚楠
# Date: 2020/3/21 18:41
# Tool: PyCharm
# Python version: 3.7.3
# Purpose of this file:
from pyecharts import Grid,Line,Bar
# set the x-axis categories (months)
columns = ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
# set the data
data1 = [2.0, 4.9, 7.0, 23.2, 25.6, 76.7, 135.6, 162.2, 32.6, 20.0, 6.4, 3.3]
data2 = [2.6, 5.9, 9.0, 26.4, 28.7, 70.7, 175.6, 182.2, 48.7, 18.8, 6.0, 2.3]
# set the line chart title position
line = Line("折线图","一年的降水量与蒸发量",title_top="45%")
line.add("降水量", columns, data1, is_label_show=True)
line.add("蒸发量", columns, data2, is_label_show=True)
grid = Grid()
# set the bar chart's main title and subtitle
bar = Bar("柱状图", "一年的降水量与蒸发量")
# add the bar chart data and options
bar.add("降水量", columns, data1, mark_line=["average"], mark_point=["max", "min"])
bar.add("蒸发量", columns, data2, mark_line=["average"], mark_point=["max", "min"])
# set the relative positions of the two charts
grid.add(bar, grid_bottom="60%")
grid.add(line, grid_top="60%")
grid.render()
Example #8
from pyecharts import Line
attr = ['衬衫', '羊毛衫', '雪纺衫', '裤子', '高跟鞋', '袜子']
v1 = [5, 20, 36, 10, 75, 90]
v2 = [10, 25, 8, 60, 20, 80]
line = Line('折线示例图')
line.add('商家A',
         attr,
         v1,
         mark_point=['average', 'max', 'min'],
         is_fill=True,
         mark_point_textcolor='#40ff27',
         line_opacity=0.2)  # smaller values give a lighter line
line.add('商家B',
         attr,
         v2,
         mark_point=['average', 'max', 'min'],
         is_smooth=True,
         is_fill=True,
         mark_point_symbol='arrow',
         mark_point_symbolsize=40)
line.render('./Line-graph.html')
Example #9
        data1,
        mark_line=["average"],
        mark_point=["max", "min"])
bar.add("蒸发量",
        columns,
        data2,
        mark_line=["average"],
        mark_point=["max", "min"])
# generate a local file (defaults to .html)
bar.render()

# line chart
cities = ["合肥", "芜湖", "南京", "北京", "天津", "马鞍山", "杭州", "扬州", "苏州", "亳州"]
data3 = [2.0, 4.9, 7.0, 23.2, 25.6, 76.7, 135.6, 162.2, 32.6, 20.0]
# data2 = [2.6, 5.9, 9.0, 26.4, 28.7, 70.7, 175.6, 182.2, 48.7, 18.8]
line = Line("气温变化折线图", '2018-4-16', width=1200, height=600)
line.add("最高气温",
         cities,
         data3,
         mark_point=['average'],
         is_datazoom_show=False,
         is_label_show=True)
# line.add("最低气温", cities, data2, mark_line=['average'], is_smooth=True)
# line.render('Line-High-Low.html')
line.render(path='折线图.gif')

# gauge chart
gu = Gauge("仪表盘图")
gu.add("指标", "达标", 85)
gu.render("Guage-eg.html")
Example #10
def plot(df):
    kl = df.copy()
    kl['MID'] = (kl['HIGH'] + kl['LOW']) / 2
    kl['AG13'] = kl['MID'].rolling(window=13).mean()
    kl['AG8'] = kl['MID'].rolling(window=8).mean()
    kl['AG5'] = kl['MID'].rolling(window=5).mean()
    kl['SMA5'] = kl['MID'].rolling(window=5).mean()
    kl['SMA34'] = kl['MID'].rolling(window=34).mean()
    kl['AO'] = kl['SMA5'] - kl['SMA34']
    kl['AC'] = (kl['AO'] -
                kl['AO'].rolling(window=5).mean()).rolling(window=5).mean()
    kl = kl[20:]
    for i in range(2, len(kl['MID'])):
        kl.ix[i, 'AG13'] = (kl.ix[i - 1, 'AG13'] * 12 +
                            (kl.ix[i, 'HIGH'] + kl.ix[i, 'LOW']) / 2) / 13
        kl.ix[i, 'AG8'] = (kl.ix[i - 1, 'AG8'] * 7 +
                           (kl.ix[i, 'HIGH'] + kl.ix[i, 'LOW']) / 2) / 8
        kl.ix[i, 'AG5'] = (kl.ix[i - 1, 'AG5'] * 4 +
                           (kl.ix[i, 'HIGH'] + kl.ix[i, 'LOW']) / 2) / 5
    kl['AG13'] = kl['AG13'].shift(8)
    kl['AG8'] = kl['AG8'].shift(5)
    kl['AG5'] = kl['AG5'].shift(3)
    kl = kl.where(kl.notnull(), 0)
    kl['GTUP'] = abs(kl['AG13'] - kl['AG8'])
    kl['GTDOWN'] = abs(kl['AG8'] - kl['AG5'])
    kl['MUP'] = 0
    kl['MDOWN'] = 0
    markd = []
    for i in range(2, len(kl['MID']) - 2):
        if kl.ix[i, 'HIGH'] == max(kl.ix[i - 2, 'HIGH'], kl.ix[i - 1, 'HIGH'],
                                   kl.ix[i, 'HIGH'], kl.ix[i + 1, 'HIGH'],
                                   kl.ix[i + 2, 'HIGH']):
            #kl.ix[i,'MUP']=1
            markd.append({
                "coord": [kl.index[i], kl.ix[i, 'HIGH']],
                "name": "1"
            })
        if kl.ix[i, 'LOW'] == min(kl.ix[i - 2, 'LOW'], kl.ix[i - 1, 'LOW'],
                                  kl.ix[i, 'LOW'], kl.ix[i + 1, 'LOW'],
                                  kl.ix[i + 2, 'LOW']):
            markd.append({
                "coord": [kl.index[i], kl.ix[i, 'LOW']],
                "name": "2"
            })

    page = Page(page_title='AC')
    kline = Kline()
    kline.add('',
              list(kl.index), [
                  list(kl[['OPEN', 'CLOSE', 'LOW', 'HIGH']].iloc[i, ].values)
                  for i in range(len(kl))
              ],
              is_datazoom_show=True,
              datazoom_xaxis_index=[0, 1],
              datazoom_type="both",
              is_xaxislabel_align=True,
              tooltip_axispointer_type="cross",
              mark_point=markd,
              mark_point_symbol='circle',
              mark_point_symbolsize=10)
    line = Line(' ')
    line.add('JAW',
             list(kl.index),
             list(kl['AG13']),
             line_color=['#0000ff'],
             label_color=['#0000ff'])
    line.add('TEETH',
             list(kl.index),
             list(kl['AG8']),
             line_color=['#ff0000'],
             label_color=['#ff0000'])
    line.add('LIPS',
             list(kl.index),
             list(kl['AG5']),
             line_color=['#00ff00'],
             label_color=['#00ff00'])
    overlap1 = Overlap()
    overlap1.add(kline)
    overlap1.add(line)
    #gator
    # bar1 = Bar(' ')
    # bar2 = Bar(' ')
    # up = list(kl['GTUP'])
    # down = list(kl['GTDOWN'])
    # redup = []
    # greenup = []
    # reddown = []
    # greendown = []
    # for i in range(len(up)):
    # 	if (i == 0):
    # 		greenup.append(up[i])
    # 		redup.append(0)
    # 		greendown.append(-down[i])
    # 		reddown.append(0)
    # 		continue
    # 	if (up[i] > up[i - 1]):
    # 		greenup.append(up[i])
    # 		redup.append(0)
    # 	else:
    # 		greenup.append(0)
    # 		redup.append(up[i])
    # 	if (down[i] > down[i - 1]):
    # 		greendown.append(-down[i])
    # 		reddown.append(0)
    # 	else:
    # 		greendown.append(0)
    # 		reddown.append(-down[i])
    #
    # 	# bar.add('MACDhist',klt,macd[2].tolist(),is_datazoom_show=True,legend_top='65%')
    # bar1.add('GTREDUP', list(kl.index), redup, legend_top='65%', label_color=['#ff0000'])
    # bar2.add('GTREDDOWN', list(kl.index), reddown, legend_top='65%', label_color=['#00ff00'])
    # bar1.add('GTGREENUP', list(kl.index), greenup, legend_top='65%', label_color=['#ff0000'])
    # bar2.add('GTGREENDOWN', list(kl.index), greendown, legend_top='65%', label_color=['#00ff00'])
    bar1 = Bar(' ')
    bar2 = Bar(' ')
    ac = list(kl['AC'])
    acr = []
    acg = []
    for i in range(len(ac)):
        if (i == 0):
            acr.append(ac[i])
            acg.append(0)
            continue
        if ac[i] > ac[i - 1]:
            acr.append(0)
            acg.append(ac[i])
        else:
            acr.append(ac[i])
            acg.append(0)
    bar1.add('ACR',
             list(kl.index),
             acr,
             legend_top='65%',
             label_color=['#ff0000'])
    bar2.add('ACG',
             list(kl.index),
             acg,
             legend_top='65%',
             label_color=['#00ff00'])
    overlap2 = Overlap()
    overlap2.add(bar1)
    overlap2.add(bar2)
    grid = Grid(width=1920, height=950)
    grid.add(overlap1, grid_bottom='40%')
    grid.add(overlap2, grid_top='70%')
    # page.add(line)
    # page.add(overlap)
    page.add(grid)
    path = os.path.abspath('.')
    page.render(path + '\\plot\\AC.html')
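plot() reads OPEN/CLOSE/HIGH/LOW columns and uses the DataFrame index as the x-axis; it also relies on the deprecated DataFrame.ix accessor, so it needs an older pandas. A hypothetical driver with synthetic data (the plot\ output folder is assumed to exist, since the function renders to plot\AC.html):

# Sketch only: build a synthetic OHLC frame shaped the way plot() expects.
import numpy as np
import pandas as pd

n = 60
idx = pd.date_range('2019-01-01', periods=n).strftime('%Y-%m-%d')
close = 100 + np.cumsum(np.random.randn(n))
df = pd.DataFrame({
    'OPEN': close + np.random.rand(n),
    'CLOSE': close,
    'HIGH': close + 2 * np.random.rand(n),
    'LOW': close - 2 * np.random.rand(n),
}, index=idx)

plot(df)  # writes plot\AC.html next to the script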
Example #11
async def process(resample='1w', window_size=7*52,  min_timetomarket=None, test=False, where='ALL'):
    rsts = await util.fetch(URL,  resample=resample, window_size=window_size,
                            min_timetomarket=min_timetomarket, test=test, where=where)
    c = defaultdict(Counter)
    for rst in rsts:
        for key, value in rst['result'].items():
            c[key].update(**value)
    df = None
    for key, value in c.items():
        tmp = pd.DataFrame.from_dict(value, 'index', columns=[key])
        if df is None:
            df = tmp
        else:
            df = pd.concat([df, tmp], axis=1, sort=True)

    df.index = pd.DatetimeIndex(df.index)

    df = df.sort_index()
    ds = pd.date_range(min(df.index), max(df.index), freq=resample)

    df = df.reindex(ds,
                    copy=False, fill_value=0)
    # print(df)
    # x = df.plot()
    # plt.show()

    df = df.fillna(value=0)

    line1 = Line()

    line1.add('is_rolling_max', df.index, df['is_rolling_max'])

    line2 = Line()
    line2.add('is_rolling_min', df.index, df['is_rolling_min'])

    overlap = Overlap(
    )
    overlap.add(line1)
    overlap.add(line2)  # , yaxis_index=1, is_add_yaxis=True
    util.render(overlap, path="render.html",)

    line1 = Line()

    line1.add('ismax', df.index, df['ismax'])

    line2 = Line()
    line2.add('ismin', df.index, df['ismin'])

    overlap = Overlap(
    )
    overlap.add(line1)
    overlap.add(line2)
    util.render(overlap, path="render2.html",)
    # overlap.render(path="render2.html",)

    for c in df.columns:
        df[c] = pd.to_numeric(df[c])
    df = df.resample('1m').sum()

    market_size = await util.get_marketsize(where=where)
    market_size = pd.DataFrame.from_dict(market_size)
    market_size.index = pd.DatetimeIndex(market_size.index)
    df['marketsize'] = market_size
    df['ismin'] = df['ismin'] / df['marketsize']
    df['ismax'] = df['ismax'] / df['marketsize']

    line1 = Line()

    line1.add('ismax', df.index, df['ismax'])

    line2 = Line()
    line2.add('ismin', df.index, df['ismin'])

    overlap = Overlap(
    )
    overlap.add(line1)
    overlap.add(line2)
    util.render(overlap, path="render3.html",)
    return df
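process() is a coroutine, so it has to be driven by an event loop; util and URL come from the surrounding module and are assumed to be importable. A minimal sketch (asyncio.run needs Python 3.7+):

# Hypothetical entry point for the coroutine above.
import asyncio

if __name__ == '__main__':
    df = asyncio.run(process(resample='1w', window_size=7 * 52, where='ALL'))
    print(df.tail())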
Example #12
def test_page_grid_timeline_overlap():
    # Grid
    v1 = [5, 20, 36, 10, 75, 90]
    v2 = [10, 25, 8, 60, 20, 80]
    bar = Bar("柱状图示例", height=720, width=1200, title_pos="65%")
    bar.add("商家A", CLOTHES, v1, is_stack=True)
    bar.add("商家B", CLOTHES, v2, is_stack=True, legend_pos="80%")
    line = Line("折线图示例")
    line.add(
        "最高气温",
        WEEK,
        [11, 11, 15, 13, 12, 13, 10],
        mark_point=["max", "min"],
        mark_line=["average"],
    )
    line.add(
        "最低气温",
        WEEK,
        [1, -2, 2, 5, 3, 2, 0],
        mark_point=["max", "min"],
        mark_line=["average"],
        legend_pos="20%",
    )
    v1 = [5, 20, 36, 10, 75, 90]
    v2 = [10, 25, 8, 60, 20, 80]
    scatter = Scatter("散点图示例", title_top="50%", title_pos="65%")
    scatter.add("scatter", v1, v2, legend_top="50%", legend_pos="80%")
    es = EffectScatter("动态散点图示例", title_top="50%")
    es.add(
        "es",
        [11, 11, 15, 13, 12, 13, 10],
        [1, -2, 2, 5, 3, 2, 0],
        effect_scale=6,
        legend_top="50%",
        legend_pos="20%",
    )

    grid = Grid()
    grid.add(bar, grid_bottom="60%", grid_left="60%")
    grid.add(line, grid_bottom="60%", grid_right="60%")
    grid.add(scatter, grid_top="60%", grid_left="60%")
    grid.add(es, grid_top="60%", grid_right="60%")

    # Timeline
    bar_1 = Bar("2012 年销量", "数据纯属虚构")
    bar_1.add("春季", CLOTHES, [randint(10, 100) for _ in range(6)])
    bar_1.add("夏季", CLOTHES, [randint(10, 100) for _ in range(6)])
    bar_1.add("秋季", CLOTHES, [randint(10, 100) for _ in range(6)])
    bar_1.add("冬季", CLOTHES, [randint(10, 100) for _ in range(6)])

    bar_2 = Bar("2013 年销量", "数据纯属虚构")
    bar_2.add("春季", CLOTHES, [randint(10, 100) for _ in range(6)])
    bar_2.add("夏季", CLOTHES, [randint(10, 100) for _ in range(6)])
    bar_2.add("秋季", CLOTHES, [randint(10, 100) for _ in range(6)])
    bar_2.add("冬季", CLOTHES, [randint(10, 100) for _ in range(6)])

    bar_3 = Bar("2014 年销量", "数据纯属虚构")
    bar_3.add("春季", CLOTHES, [randint(10, 100) for _ in range(6)])
    bar_3.add("夏季", CLOTHES, [randint(10, 100) for _ in range(6)])
    bar_3.add("秋季", CLOTHES, [randint(10, 100) for _ in range(6)])
    bar_3.add("冬季", CLOTHES, [randint(10, 100) for _ in range(6)])

    bar_4 = Bar("2015 年销量", "数据纯属虚构")
    bar_4.add("春季", CLOTHES, [randint(10, 100) for _ in range(6)])
    bar_4.add("夏季", CLOTHES, [randint(10, 100) for _ in range(6)])
    bar_4.add("秋季", CLOTHES, [randint(10, 100) for _ in range(6)])
    bar_4.add("冬季", CLOTHES, [randint(10, 100) for _ in range(6)])

    bar_5 = Bar("2016 年销量", "数据纯属虚构", height=720, width=1200)
    bar_5.add("春季", CLOTHES, [randint(10, 100) for _ in range(6)])
    bar_5.add("夏季", CLOTHES, [randint(10, 100) for _ in range(6)])
    bar_5.add("秋季", CLOTHES, [randint(10, 100) for _ in range(6)])
    bar_5.add(
        "冬季",
        CLOTHES,
        [randint(10, 100) for _ in range(6)],
        is_legend_show=True,
    )

    timeline = Timeline(is_auto_play=True, timeline_bottom=0)
    timeline.add(bar_1, "2012 年")
    timeline.add(bar_2, "2013 年")
    timeline.add(bar_3, "2014 年")
    timeline.add(bar_4, "2015 年")
    timeline.add(bar_5, "2016 年")

    # Overlap
    attr = ["{}月".format(i) for i in range(1, 13)]
    v1 = [2.0, 4.9, 7.0, 23.2, 25.6, 76.7, 135.6, 162.2, 32.6, 20.0, 6.4, 3.3]
    v2 = [2.6, 5.9, 9.0, 26.4, 28.7, 70.7, 175.6, 182.2, 48.7, 18.8, 6.0, 2.3]
    v3 = [2.0, 2.2, 3.3, 4.5, 6.3, 10.2, 20.3, 23.4, 23.0, 16.5, 12.0, 6.2]

    bar = Bar(height=720, width=1200)
    bar.add("蒸发量", attr, v1)
    bar.add("降水量", attr, v2, yaxis_formatter=" ml", yaxis_max=250)
    line = Line()
    line.add("平均温度", attr, v3, yaxis_formatter=" °C")

    overlap = Overlap()
    overlap.add(bar)
    overlap.add(line, yaxis_index=1, is_add_yaxis=True)

    page = Page()
    page.add(grid)
    page.add(timeline)
    page.add(overlap)
    page.render()
Example #13
citys = ['beijing', 'shanghai', 'guangzhou', 'shenzhen']
v = []
for i in range(4):
    filename = 'air_' + citys[i] + '_2018.csv'
    df = pd.read_csv(filename, header=None, names=["Date", "Quality_grade", "AQI", "AQI_rank", "PM"])

    dom = df[['Date', 'PM']]
    list1 = []
    for j in dom['Date']:
        time = j.split('-')[1]
        list1.append(time)
    df['month'] = list1

    month_message = df.groupby(['month'])
    month_com = month_message['PM'].agg(['mean'])
    month_com.reset_index(inplace=True)
    month_com_last = month_com.sort_index()

    v1 = np.array(month_com_last['mean'])
    v1 = ["{}".format(int(i)) for i in v1]
    v.append(v1)

attr = ["{}".format(str(i) + '月') for i in range(1, 12)]

line = Line("2018年北上广深PM2.5全年走势图", title_pos='center', title_top='0', width=800, height=400)
line.add("北京", attr, v[0], line_color='red', legend_top='8%')
line.add("上海", attr, v[1], line_color='purple', legend_top='8%')
line.add("广州", attr, v[2], line_color='blue', legend_top='8%')
line.add("深圳", attr, v[3], line_color='orange', legend_top='8%')
line.render("2018年北上广深PM2.5全年走势图.html")
Example #14
# tooltip display function for the K-line chart
def show_kline_data(params, pos):
    param = params[0]

    if param.data[4]:
        return "date = " + param.name + "<br/>" + "open = " + param.data[
            1] + "<br/>" + "close = " + param.data[
                2] + "<br/>" + "high = " + param.data[
                    3] + "<br/>" + "low = " + param.data[4] + "<br/> "
    else:
        return "date = " + param.name + "<br/>" + "cci = " + param.value + "<br/>"


# plot the CCI
cci_line = Line()
cci_line.add(
    "cci",
    x_axis=data['date'],
    y_axis=cci,
    is_datazoom_show=True,
    datazoom_xaxis_index=[0, 1],
    tooltip_trigger='axis',
    is_toolbox_show=True,
    yaxis_force_interval=100,
    legend_top="70%",
    legend_orient='vertical',
    legend_pos='right',
    yaxis_pos='left',
    is_xaxislabel_align=True,
    tooltip_formatter=show_kline_data,
Example #15
def score_draw(csv_file):
    page = Page(csv_file+":评论等级分析")
    score, date, val, score_list = [], [], [], []
    result = {}
    path = os.path.abspath(os.curdir)
    csv_file = path + "\\" + csv_file + ".csv"
    csv_file = csv_file.replace('\\', '\\\\')
    d = pd.read_csv(csv_file, engine='python', encoding='utf-8')[['score', 'date']].dropna()  # read the CSV into a DataFrame and drop records with empty reviews
    for indexs in d.index:  # one way to iterate over df rows (a second way, iterrows, appears below)
        score_list.append(tuple(d.loc[indexs].values[:])) # convert rows to tuples so identical ones can be counted
    print("有效评分总数量为:",len(score_list), " 条")
    for i in set(list(score_list)):
        result[i] = score_list.count(i)  # result is a dict
    info = []
    for key in result:
        score= key[0]
        date = key[1]
        val = result[key]
        info.append([score, date, val])
    info_new = DataFrame(info)  # convert to a DataFrame
    info_new.columns = ['score', 'date', 'votes']
    info_new.sort_values('date', inplace=True)    # sort by date ascending so the earliest and latest dates are easy to find for the fill-in step below
    print("first df", info_new)
    # the code below fills in missing data: each date should have all 5 score levels; any missing combination gets a zero row inserted into a new df
    mark = 0
    creat_df = pd.DataFrame(columns = ['score', 'date', 'votes']) # create an empty DataFrame
    for i in list(info_new['date']):
        location = info_new[(info_new.date==i)&(info_new.score=="力荐")].index.tolist()
        if location == []:
            creat_df.loc[mark] = ["力荐", i, 0]
            mark += 1
        location = info_new[(info_new.date==i)&(info_new.score=="推荐")].index.tolist()
        if location == []:
            creat_df.loc[mark] = ["推荐", i, 0]
            mark += 1
        location = info_new[(info_new.date==i)&(info_new.score=="还行")].index.tolist()
        if location == []:
            creat_df.loc[mark] = ["还行", i, 0]
            mark += 1
        location = info_new[(info_new.date==i)&(info_new.score=="较差")].index.tolist()
        if location == []:
            creat_df.loc[mark] = ["较差", i, 0]
            mark += 1
        location = info_new[(info_new.date==i)&(info_new.score=="很差")].index.tolist()
        if location == []:
            creat_df.loc[mark] = ["很差", i, 0]
            mark += 1
    info_new = info_new.append(creat_df.drop_duplicates(), ignore_index=True)
    score_list = []
    info_new.sort_values('date', inplace=True)    # sort by date ascending again after the fill-in rows are appended
    print(info_new)
    for index, row in info_new.iterrows():   # the second way to iterate over the df
        score_list.append([row['date'], row['votes'], row['score']])
    tr = ThemeRiver()
    tr.add(['力荐', '推荐', '还行', '较差', '很差'], score_list, is_label_show=True, is_more_utils=True)
    page.add_chart(tr)

    attr, v1, v2, v3, v4, v5 = [], [], [], [], [], []
    attr = list(sorted(set(info_new['date'])))
    bar = Bar()
    for i in attr:
        v1.append(int(info_new[(info_new['date']==i)&(info_new['score']=="力荐")]['votes']))
        v2.append(int(info_new[(info_new['date']==i)&(info_new['score']=="推荐")]['votes']))
        v3.append(int(info_new[(info_new['date']==i)&(info_new['score']=="还行")]['votes']))
        v4.append(int(info_new[(info_new['date']==i)&(info_new['score']=="较差")]['votes']))
        v5.append(int(info_new[(info_new['date']==i)&(info_new['score']=="很差")]['votes']))
    bar.add("力荐", attr, v1, is_stack=True)
    bar.add("推荐", attr, v2, is_stack=True)
    bar.add("还行", attr, v3, is_stack=True)
    bar.add("较差", attr, v4, is_stack=True)
    bar.add("很差", attr, v5, is_stack=True, is_convert=True, mark_line=["average"], is_more_utils=True)
    page.add_chart(bar)

    line = Line()
    line.add("力荐", attr, v1, is_stack=True)
    line.add("推荐", attr, v2, is_stack=True)
    line.add("还行", attr, v3, is_stack=True)
    line.add("较差", attr, v4, is_stack=True)
    line.add("很差", attr, v5, is_stack=True, is_convert=False, mark_line=["average"], is_more_utils=True)
    page.add_chart(line)

    page.render(csv_file[:-4] + "_日投票量分析汇总.html")
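score_draw() builds the CSV path from the current working directory, so it expects <name>.csv (with at least score and date columns, where score is one of 力荐/推荐/还行/较差/很差) next to the script and writes the summary HTML beside it. A hypothetical call, with a made-up file name:

score_draw('some_movie')  # reads .\some_movie.csv, writes ..._日投票量分析汇总.html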
Example #16
from pyecharts import Line, Page,Gauge
line = Line("本周气温")
attr = ["周一", "周二", "周三", "周四", "周五", "周六", "周日"]
line.add(
    "最高气温",
    attr,
    [11, 11, 15, 13, 12, 13, 10],
    mark_point=["max", "min"],
    mark_line=["average"],
)
line.add(
    "最低气温",
    attr,
    [1, -2, 2, 5, 3, 2, 0],
    mark_point=["max", "min"],
    mark_line=["average"],
    legend_pos="20%",
)
gauge = Gauge("湿度计量表")
gauge.add(
    "最佳湿度",
    "实际湿度",
    66.66,
    angle_range=[180, 0],
    scale_range=[0, 100],
    is_legend_show=True,
)
page = Page()
page.add(line)
page.add(gauge)
page.render()
Example #17
def interface_report():
    try:
        Bars = []
        searchs = []
        db_project = db_op.project_list
        db_idcs = db_idc.idc_id
        db_servers = db_idc.idc_servers
        db_project_third = db_op.project_third
        db_third = db_idc.third_resource
        Infos = defaultdict()
        values = OrderedDict()
        dt = datetime.datetime.now()
        # get the list of domains
        hosts = RC_CLUSTER.smembers(
            'api_domain_lists_%s' %
            time.strftime('%Y-%m-%d', time.localtime()))
        hosts = [host for host in hosts]
        if hosts:
            searchs = json.dumps([{
                "id": str(i),
                "text": str(host)
            } for i, host in enumerate(hosts)])
        # get the domain and endpoint (URI) info
        if 'business_bigdata_select_host' in request.cookies and 'business_bigdata_select_uri' in request.cookies:
            host = request.cookies['business_bigdata_select_host']
            uri = urllib.parse.unquote(
                request.cookies['business_bigdata_select_uri'])
            # get the asset info related to this endpoint
            projects = db_project.query.with_entities(
                db_project.ip, db_project.ssh_port,
                db_project.resource).filter(
                    db_project.domain.like('%{0},%'.format(host))).all()
            if projects:
                idc_lists = db_idcs.query.with_entities(
                    db_idcs.id, db_idcs.aid).all()
                idc_lists = {idcs[0]: idcs[-1] for idcs in idc_lists}
                third_lists = db_third.query.with_entities(
                    db_third.id, db_third.resource_type).all()
                third_lists = {third[0]: third[-1] for third in third_lists}
                ips = [val[0] for val in projects]
                resource = projects[0][-1]
                idc_ids = db_servers.query.with_entities(
                    db_servers.idc_id, func.count(db_servers.idc_id)).filter(
                        db_servers.ip.in_(tuple(ips))).group_by(
                            db_servers.idc_id).all()
                if idc_ids:
                    idc_ids = [list(ids) for ids in idc_ids]
                    for ids in idc_ids:
                        ids[0] = idc_lists[ids[0]]
                    RC.delete('idc_incrs')
                    for ids in idc_ids:
                        RC.hincrby('idc_incrs', ids[0], ids[1])
                    idc_ids = RC.hgetall('idc_incrs')
                    idc_attr = [ids for ids in idc_ids]
                    idc_vals = [int(idc_ids[ids]) for ids in idc_ids]
                    pie = Pie("%s台%s服务器机房分布" % (len(set(projects)), resource),
                              width='100%',
                              height='270px',
                              title_pos='center',
                              title_text_size=14)
                    pie.add("",
                            idc_attr,
                            idc_vals,
                            is_label_show=True,
                            is_toolbox_show=False,
                            is_legend_show=False,
                            legend_orient='vertical',
                            legend_pos='left',
                            radius=[0, 65],
                            is_random=True)
                    Infos['pie'] = pie
            project = db_project.query.with_entities(
                db_project.project).filter(
                    db_project.domain.like('%{0},%'.format(host))).all()
            # get the application service resources related to this endpoint
            if project:
                project = project[0][0]
                third_vals = db_project_third.query.with_entities(
                    db_project_third.third_id,
                    func.count(db_project_third.third_id)).filter(
                        db_project_third.project == project).group_by(
                            db_project_third.third_id).all()
                if third_vals:
                    third_vals = [list(val) for val in third_vals]
                    for val in third_vals:
                        val[0] = third_lists[val[0]]
                    RC.delete('third_incrs')
                    for val in third_vals:
                        RC.hincrby('third_incrs', val[0], val[1])
                    third_vals = RC.hgetall('third_incrs')
                    third_vals[resource] = len(projects)
                    third_attr = [val for val in third_vals]
                    third_counts = [int(third_vals[val]) for val in third_vals]
                    pie_third = Pie("应用服务资源分布",
                                    width='100%',
                                    height='270px',
                                    title_pos='center',
                                    title_text_size=14)
                    pie_third.add(
                        "",
                        third_attr,
                        third_counts,
                        is_label_show=True,
                        is_toolbox_show=False,
                        legend_orient='vertical',
                        legend_pos='right',
                        radius=[55, 65],
                        is_random=True,
                        is_legend_show=False,
                    )
                    Infos['pie_third'] = pie_third
            # fetch endpoint performance data
            year = time.strftime('%Y', time.localtime())
            for i in range(2, 9):
                ot = dt - datetime.timedelta(days=i - 1)
                nt = dt - datetime.timedelta(days=i - 2)
                ot = ot.strftime('%Y-%m-%dT00:00:00Z')
                nt = nt.strftime('%Y-%m-%dT00:00:00Z')
                try:
                    pv_sum = 0.0
                    cmd = 'select sum(mean_pv) from ' + 'analysis%s' % year + " where time >='%s' and time < '%s' and host='%s' and uri = '%s';" % (
                        ot, nt, host, uri)
                    result = Influx_cli.query(cmd)
                    if result:
                        for infos in result.get_points():
                            infos = infos
                        if infos:
                            pv_sum = infos['sum']
                except Exception as e:
                    logging.error(e)
                try:
                    values['avg_resp'] = 0.0
                    cmd = 'select mean(mean_avg_resp) from ' + 'analysis%s' % year + " where time >='%s' and time < '%s'and host='%s' and uri = '%s';" % (
                        ot, nt, host, uri)
                    result = Influx_cli.query(cmd)
                    if result:
                        for infos in result.get_points():
                            infos = infos
                        if infos:
                            values['avg_resp:%s' %
                                   ot.split('T')[0]] = '%.2f' % infos['mean']
                except Exception as e:
                    logging.error(e)
                try:
                    s4 = 0.0
                    cmd = 'select sum(mean_status_4xx) from ' + 'analysis%s' % year + " where time >='%s' and time < '%s' and host='%s' and uri = '%s';" % (
                        ot, nt, host, uri)
                    result = Influx_cli.query(cmd)
                    if result:
                        for infos in result.get_points():
                            infos = infos
                        if infos:
                            s4 = infos['sum']
                    s5 = 0.0
                    cmd = 'select sum(mean_status_5xx) from ' + 'analysis%s' % year + " where time >='%s' and time < '%s' and host='%s' and uri = '%s';" % (
                        ot, nt, host, uri)
                    result = Influx_cli.query(cmd)
                    if result:
                        for infos in result.get_points():
                            infos = infos
                        if infos:
                            s5 = infos['sum']
                    if pv_sum > 0:
                        values['status_sum:%s' % ot.split('T')[0]] = float(
                            '%.2f' % (float(s4 + s5) / pv_sum * 100))
                except Exception as e:
                    logging.error(e)
                values['pv_sum:%s' % ot.split('T')[0]] = int(pv_sum) * 60
            # render the endpoint performance charts
            if values:
                # total PV
                bar_pv = Bar("PV访问量",
                             width='105%',
                             height='270px',
                             title_pos='center',
                             title_text_size=14)
                vals = [(val.split(':')[-1], values[val]) for val in values
                        if 'pv_sum:' in val]
                if vals:
                    bar_vals = [val[0] for val in vals]
                    bar_counts = [val[-1] for val in vals]
                    bar_pv.add('',
                               bar_vals,
                               bar_counts,
                               is_label_show=True,
                               is_toolbox_show=False,
                               legend_orient='vertical',
                               legend_pos='right',
                               xaxis_interval=0,
                               is_random=True)
                    Bars.append(bar_pv)
                # average response time
                line_resp = Line("平均响应时间",
                                 width='105%',
                                 height='270px',
                                 title_pos='center',
                                 title_text_size=14)
                vals = [(val.split(':')[-1], values[val]) for val in values
                        if 'avg_resp:' in val]
                if vals:
                    line_vals = [val[0] for val in vals]
                    line_counts = [val[-1] for val in vals]
                    line_resp.add('',
                                  line_vals,
                                  line_counts,
                                  is_label_show=True,
                                  is_toolbox_show=False,
                                  legend_orient='vertical',
                                  legend_pos='right',
                                  xaxis_interval=0,
                                  yaxis_formatter='s',
                                  is_random=True,
                                  is_smooth=False,
                                  mark_point=["min", "max"])
                    Bars.append(line_resp)
                # error status code ratio
                line_status = Line("错误码占比",
                                   width='105%',
                                   height='270px',
                                   title_pos='center',
                                   title_text_size=14)
                vals = [(val.split(':')[-1], values[val]) for val in values
                        if 'status_sum:' in val]
                if vals:
                    line_vals = [val[0] for val in vals]
                    line_counts = [val[-1] for val in vals]
                    line_status.add('',
                                    line_vals,
                                    line_counts,
                                    is_fill=True,
                                    is_label_show=True,
                                    is_smooth=False,
                                    is_toolbox_show=False,
                                    legend_orient='vertical',
                                    legend_pos='right',
                                    xaxis_interval=0,
                                    yaxis_formatter='%',
                                    is_random=True,
                                    mark_line=["max", "min"])
                    Bars.append(line_status)
    except Exception as e:
        logging.error(e)
    return render_template('interface_report.html',
                           Bars=Bars,
                           Infos=Infos,
                           searchs=searchs)
Example #18
def draw():
    # heat map
    tomato_com = pd.read_csv('西虹市首富.csv')
    grouped = tomato_com.groupby(['city'])
    grouped_pct = grouped['score']
    # tip_pct column
    city_com = grouped_pct.agg(['mean', 'count'])
    city_com.reset_index(inplace=True)
    city_com['mean'] = round(city_com['mean'], 2)
    # data = [(city_com['city'][i], city_com['count'][i])
    #         for i in range(0, city_com.shape[0])]
    # geo = Geo(
    #     '《西虹市首富》全国热力图',
    #     title_color="#fff",
    #     title_pos="center",
    #     width=1200,
    #     height=600,
    #     background_color='#404a59')
    # attr, value = geo.cast(data)
    # geo.add(
    #     "",
    #     attr,
    #     value,
    #     type="heatmap",
    #     visual_range=[0, 200],
    #     visual_text_color="#fff",
    #     symbol_size=10,
    #     is_visualmap=True,
    #     is_roam=False)
    # geo.render('西虹市首富全国热力图.html')

    # line and bar charts
    city_main = city_com.sort_values('count', ascending=False)[0:20]
    attr = city_main['city']
    v1 = city_main['count']
    v2 = city_main['mean']
    line = Line("主要城市评分")
    line.add(
        "城市",
        attr,
        v2,
        is_stack=True,
        xaxis_rotate=30,
        yaxis_min=4.2,
        mark_point=['min', 'max'],
        xaxis_interval=0,
        line_color='lightblue',
        line_width=4,
        mark_point_textcolor='black',
        mark_point_color='lightblue',
        is_splitline_show=False)

    bar = Bar("主要城市评论数")
    bar.add(
        "城市",
        attr,
        v1,
        is_stack=True,
        xaxis_rotate=30,
        yaxis_min=4.2,
        xaxis_interval=0,
        is_splitline_show=False)

    overlap = Overlap()
    # by default no extra x/y axis is added, and both axis indexes are 0
    overlap.add(bar)
    overlap.add(line, yaxis_index=1, is_add_yaxis=True)
    overlap.render('主要城市评论数_平均分.html')
Example #19
def test_more_charts():
    page = Page()

    # line
    line = Line("折线图示例")
    line.add(
        "最高气温",
        WEEK,
        [11, 11, 15, 13, 12, 13, 10],
        mark_point=["max", "min"],
        mark_line=["average"],
    )
    line.add(
        "最低气温",
        WEEK,
        [1, -2, 2, 5, 3, 2, 0],
        mark_point=["max", "min"],
        mark_line=["average"],
    )

    # pie
    v1 = [11, 12, 13, 10, 10, 10]
    pie = Pie("饼图-圆环图示例", title_pos="center")
    pie.add(
        "",
        CLOTHES,
        v1,
        radius=[40, 75],
        label_text_color=None,
        is_label_show=True,
        legend_orient="vertical",
        legend_pos="left",
    )

    page.add([line, pie])

    # kline
    v1 = [
        [2320.26, 2320.26, 2287.3, 2362.94],
        [2300, 2291.3, 2288.26, 2308.38],
        [2295.35, 2346.5, 2295.35, 2345.92],
        [2347.22, 2358.98, 2337.35, 2363.8],
        [2360.75, 2382.48, 2347.89, 2383.76],
        [2383.43, 2385.42, 2371.23, 2391.82],
        [2377.41, 2419.02, 2369.57, 2421.15],
        [2425.92, 2428.15, 2417.58, 2440.38],
        [2411, 2433.13, 2403.3, 2437.42],
        [2432.68, 2334.48, 2427.7, 2441.73],
        [2430.69, 2418.53, 2394.22, 2433.89],
        [2416.62, 2432.4, 2414.4, 2443.03],
        [2441.91, 2421.56, 2418.43, 2444.8],
        [2420.26, 2382.91, 2373.53, 2427.07],
        [2383.49, 2397.18, 2370.61, 2397.94],
        [2378.82, 2325.95, 2309.17, 2378.82],
        [2322.94, 2314.16, 2308.76, 2330.88],
        [2320.62, 2325.82, 2315.01, 2338.78],
        [2313.74, 2293.34, 2289.89, 2340.71],
        [2297.77, 2313.22, 2292.03, 2324.63],
        [2322.32, 2365.59, 2308.92, 2366.16],
        [2364.54, 2359.51, 2330.86, 2369.65],
        [2332.08, 2273.4, 2259.25, 2333.54],
        [2274.81, 2326.31, 2270.1, 2328.14],
        [2333.61, 2347.18, 2321.6, 2351.44],
        [2340.44, 2324.29, 2304.27, 2352.02],
        [2326.42, 2318.61, 2314.59, 2333.67],
        [2314.68, 2310.59, 2296.58, 2320.96],
        [2309.16, 2286.6, 2264.83, 2333.29],
        [2282.17, 2263.97, 2253.25, 2286.33],
        [2255.77, 2270.28, 2253.31, 2276.22],
    ]
    kline = Kline("K 线图示例")
    kline.add(
        "日K",
        ["2017/7/{}".format(i + 1) for i in range(31)],
        v1,
        is_datazoom_show=True,
    )
    page.add(kline)

    # radar
    schema = [
        ("销售", 6500),
        ("管理", 16000),
        ("信息技术", 30000),
        ("客服", 38000),
        ("研发", 52000),
        ("市场", 25000),
    ]
    v1 = [[4300, 10000, 28000, 35000, 50000, 19000]]
    v2 = [[5000, 14000, 28000, 31000, 42000, 21000]]
    radar = Radar("雷达图示例")
    radar.config(schema)
    radar.add("预算分配", v1, is_splitline=True, is_axisline_show=True)
    radar.add(
        "实际开销",
        v2,
        label_color=["#4e79a7"],
        is_area_show=False,
        legend_selectedmode="single",
    )
    page.add(radar)

    # scatter3d
    data = [[
        random.randint(0, 100),
        random.randint(0, 100),
        random.randint(0, 100),
    ] for _ in range(80)]
    scatter3D = Scatter3D("3D 散点图示例", width=1200, height=600)
    scatter3D.add("", data, is_visualmap=True, visual_range_color=RANGE_COLOR)
    page.add(scatter3D)

    # wordcloud
    name = [
        "Sam S Club",
        "Macys",
        "Amy Schumer",
        "Jurassic World",
        "Charter Communications",
        "Chick Fil A",
        "Planet Fitness",
        "Pitch Perfect",
        "Express",
        "Home",
        "Johnny Depp",
        "Lena Dunham",
        "Lewis Hamilton",
        "KXAN",
        "Mary Ellen Mark",
        "Farrah Abraham",
        "Rita Ora",
        "Serena Williams",
        "NCAA baseball tournament",
        "Point Break",
    ]
    value = [
        10000,
        6181,
        4386,
        4055,
        2467,
        2244,
        1898,
        1484,
        1112,
        965,
        847,
        582,
        555,
        550,
        462,
        366,
        360,
        282,
        273,
        265,
    ]
    wordcloud = WordCloud(width=1300, height=620)
    wordcloud.add("", name, value, word_size_range=[30, 100], rotate_step=66)
    page.add(wordcloud)

    # liquid
    liquid = Liquid("水球图示例")
    liquid.add("Liquid", [0.6])
    page.add(liquid)
    assert len(page) == 7
    assert isinstance(page[0], Line)
    assert ("echarts" in page.js_dependencies) or ("echarts.min"
                                                   in page.js_dependencies)
    page.render()
Example #20
def create_charts():
    page = Page()

    style = Style(width=WIDTH, height=HEIGHT)
    df = pd.read_csv('./data_cleaned.csv')
    df['CREATE_TIME'] = pd.to_datetime(df['CREATE_TIME'])
    df['MONTH'] = 0
    months = []
    for i in df.CREATE_TIME:
        month = i.strftime("%Y-%m")
        months.append(month)
    df.MONTH = months

    EVENT_SRC_NAME = df.EVENT_SRC_NAME.value_counts()
    chart = Bar("投诉渠道种类", **style.init_style)
    chart.add("",
              EVENT_SRC_NAME.index,
              EVENT_SRC_NAME.values,
              mark_point=["max", "min"],
              mark_line=["average"],
              is_stack=True)
    page.add(chart)

    chart = Timeline(is_auto_play=True,
                     timeline_bottom=0,
                     width=WIDTH,
                     height=HEIGHT)
    for month, group in df.groupby('MONTH'):
        EVENT_SRC_NAME = group.EVENT_SRC_NAME.value_counts()
        chart_1 = Bar("投诉渠道事件数", **style.init_style)
        chart_1.add("",
                    EVENT_SRC_NAME.index,
                    EVENT_SRC_NAME.values,
                    mark_point=["max", "min"],
                    mark_line=["average"],
                    is_stack=True)
        chart.add(chart_1, month)
    page.add(chart)

    chart = Timeline(is_auto_play=True,
                     timeline_bottom=0,
                     width=WIDTH,
                     height=HEIGHT)
    for name, c in df.groupby('EVENT_SRC_NAME'):
        month_count = defaultdict(int)
        for month, group in c.groupby('MONTH'):
            month_count[month] = len(group)
        m_s = sorted(list(month_count.keys()))
        m_l = [month_count[i] for i in m_s]
        chart_1 = Line("各月份投诉渠道数", **style.init_style)
        chart_1.add(
            "事件数",
            m_s,
            m_l,
            mark_point=["max", "min"],
            is_more_utils=True,
            mark_line=["average"],
            is_smooth=True,
        )
        chart.add(chart_1, name)
    page.add(chart)

    return page
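create_charts() depends on module-level WIDTH and HEIGHT constants, on ./data_cleaned.csv (with CREATE_TIME and EVENT_SRC_NAME columns), and on the Style/Bar/Line/Timeline/Page imports from pyecharts; it returns a Page. A hedged sketch of a driver, with assumed constant values:

# Hypothetical driver for create_charts(); the WIDTH/HEIGHT values are made up.
WIDTH, HEIGHT = 1200, 600

if __name__ == '__main__':
    page = create_charts()
    page.render('complaints.html')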
Example #21
import numpy as np
import pandas as pd
from pyecharts import Line

df = pd.read_csv('air_tianjin_2017.csv',
                 header=None,
                 names=["Date", "Quality_grade", "AQI", "AQI_rank", "PM"])

dom = df[['Date', 'PM']]
list1 = []
for j in dom['Date']:
    time = j.split('-')[1]
    list1.append(time)
df['month'] = list1

month_message = df.groupby(['month'])
month_com = month_message['PM'].agg(['mean'])
month_com.reset_index(inplace=True)
month_com_last = month_com.sort_index()

attr = ["{}".format(str(i) + '月') for i in range(1, 13)]
v1 = np.array(month_com_last['mean'])
v1 = ["{}".format(int(i)) for i in v1]

line = Line("2017年天津月均PM2.5走势图",
            title_pos='center',
            title_top='18',
            width=800,
            height=400)
line.add("", attr, v1, mark_point=["max", "min"])
line.render("2017年天津月均PM2.5走势图.html")
Example #22
from pyecharts import Line

line = Line("折线图--面积实例", 'hello')

#data
attr = ["{}".format(n) for n in range(1, 7)]
v1 = [5, 20, 36, 10, 75, 90]
v2 = [10, 25, 8, 60, 20, 80]

# is_fill: fill the area; line_opacity: line opacity; area_opacity: fill opacity; area_color: fill color; is_label_show: point labels
line.add("one",
         attr,
         v1,
         is_fill=True,
         line_opacity=0.2,
         area_opacity=0.4,
         area_color='yellow',
         is_label_show=True)
# smooth curve
line.add("two",
         attr,
         v2,
         is_fill=True,
         line_opacity=0.8,
         area_opacity=0.2,
         area_color='green',
         is_smooth=True)

line.render(r'C:\Users\Administrator\Desktop\数据可视化\pyecharts\html\12.html')
Example #23
import numpy as np

data = pd.read_csv(r'E:\vscode_code\GitHub项目\爬虫+数据分析\猫眼\maoyan.csv',
                   encoding='utf-8')
#print(data['star'].mean())
data['year'] = data['pub_time'].str.split('-').str[0]
data['month'] = data['pub_time'].str.split('-').str[1]
#print(data.head())
year = data.groupby('year')['year'].count()
month = data.groupby('month')['month'].count()

from pyecharts import Line

attr = list(year.index)
v = list(year)
line = Line("电影年份分布情况")
line.add("", attr, v, mark_point=["average"])
line.render(r'E:\vscode_code\GitHub项目\爬虫+数据分析\猫眼\电影年份分布情况.html')

from pyecharts import Bar

attr = list(month.index)
v = list(month)
bar = Bar("")
bar.add("", attr, v)
bar.render(r'E:\vscode_code\GitHub项目\爬虫+数据分析\猫眼\电影月份分布情况.html')


def get_country(i):
    country = i.split('(')
    if len(country) == 1:
Example #24
    def createReport(self, dev):
        lisMem = pick.readInfo(AppPerCon.info_path + self.dev + '_' +
                               self.pack + '_' + self.flag + "_mem.pickle")
        lisCpu = pick.readInfo(AppPerCon.info_path + self.dev + '_' +
                               self.pack + '_' + self.flag + "_cpu.pickle")
        lisFps = pick.readInfo(AppPerCon.info_path + self.dev + '_' +
                               self.pack + '_' + self.flag + "_fps.pickle")
        lisDevinfo = pick.readInfo(AppPerCon.info_path + "info.pickle")

        print("lisDevinfo: {}, dev: {}".format(lisDevinfo, dev))
        # pix = lisDevinfo[0][dev]['header']['pix']
        # net = lisDevinfo[0][dev]['header']['net']
        # name = lisDevinfo[0][dev]['header']['phone_name']
        # rom = lisDevinfo[0][dev]['header']['rom']

        # devinfo = "设备信息-分辨率:" + pix + "\\"\
        #                               +"网络:" + net + "\\"\
        #                               +"设备名:"+ name + "\\"\
        #                               +"内存容量:"+ str(rom)+"MB"

        v1 = [i for i in lisCpu if type(i) == str]
        v2 = [i for i in lisCpu if type(i) != str]
        v3 = [i for i in lisMem if type(i) == str]
        v4 = [i for i in lisMem if type(i) != str]
        v5 = [i for i in lisFps if type(i) == str]
        v6 = [i for i in lisFps if type(i) != str]

        page = Page(self.reportName)

        attr = v1
        bar = Bar()
        bar.add("ROKI_bar", attr, v2)
        line = Line(self.reportName + "-" + "CPU占用", [],
                    width=1200,
                    height=400)
        # line.add("ROKI_line", attr, v2, is_stack=True, is_label_show=True,
        #          is_smooth=False ,is_more_utils =True,is_datazoom_show=False, yaxis_formatter="%",
        #          mark_point=["max", "min"], mark_line=["average"])

        overlap = Overlap(self.reportName + "-" + "CPU占用",
                          width=1200,
                          height=400)
        # overlap.add(line)
        overlap.add(bar)
        page.add(overlap)

        attr1 = v3
        line1 = Line(self.reportName + "-" + "MEM消耗", width=1200, height=400)
        line1.add("ROKI_line",
                  attr1,
                  v4,
                  is_stack=True,
                  is_label_show=True,
                  is_smooth=False,
                  is_more_utils=True,
                  is_datazoom_show=False,
                  yaxis_formatter="MB",
                  mark_point=["max", "min"],
                  mark_line=["average"])
        bar1 = Bar()
        bar1.add("ROKI_bar", attr1, v4)
        overlap1 = Overlap(width=1200, height=400)
        overlap1.add(line1)
        overlap1.add(bar1)
        page.add(overlap1)

        attr2 = v5
        line2 = Line(self.reportName + "-" + "FPS帧率", width=1200, height=400)
        line2.add("ROKI_line",
                  attr2,
                  v6,
                  is_stack=True,
                  is_label_show=True,
                  is_smooth=False,
                  is_more_utils=True,
                  is_datazoom_show=False,
                  yaxis_formatter="fps",
                  mark_point=["max", "min"],
                  mark_line=["average"])
        bar2 = Bar()
        bar2.add("ROKI_bar", attr2, v6)
        overlap2 = Overlap(width=1200, height=400)
        overlap2.add(line2)
        overlap2.add(bar2)
        page.add(overlap2)

        page.render(AppPerCon.report_path + self.dev + "_" + self.pack + "_" +
                    self.flag + "_" + "report.html")
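The three metric blocks above repeat the same Line + Bar + Overlap pattern; a small helper, hypothetical and not part of the original class, could factor it out:

# Sketch only: one overlay per metric, mirroring the calls used in createReport.
from pyecharts import Bar, Line, Overlap

def metric_overlap(title, attr, values, unit):
    line = Line(title, width=1200, height=400)
    line.add("ROKI_line", attr, values, is_stack=True, is_label_show=True,
             is_more_utils=True, yaxis_formatter=unit,
             mark_point=["max", "min"], mark_line=["average"])
    bar = Bar()
    bar.add("ROKI_bar", attr, values)
    overlap = Overlap(width=1200, height=400)
    overlap.add(line)
    overlap.add(bar)
    return overlap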
Example #25
df1 = pd.read_csv(fn)
df1['datetime'] = df1['date'] + ' ' + df1['time']

#print(df1.head())
dt_list =  list(df1['datetime'])
#print(dt_list)
k_plot_value = df1.apply(lambda record: [record['open'], record['close'], record['low'], record['high']], axis=1).tolist()
#print(k_plot_value)

kline = Kline("K 线图示例")
kline.add("日K", dt_list, k_plot_value, is_datazoom_show=True, )
#kline.render()


df_p = pd.read_csv('points1.csv')
line = Line()
line.add('test',df_p.datetime,df_p.value, line_type='dashed')
# #line.render()

#2019-08-14,21:04:00,5546
#2019-08-14,22:22:00,5530
#2019-08-14,22:50:00,5536.

overlap = Overlap()
overlap.add(kline)
overlap.add(line)
overlap.render()

# if __name__ == '__main__':
#     pass
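If the marker points are already in memory, the dashed overlay can be built without points1.csv; a minimal sketch, assuming the columns the code reads (df_p.datetime and df_p.value) and datetime strings that match entries of dt_list so the points land on the K-line x-axis:

# Hypothetical in-memory replacement for the points1.csv overlay above.
import pandas as pd
from pyecharts import Line

df_p = pd.DataFrame({
    'datetime': ['2019-08-14 21:04:00', '2019-08-14 22:22:00', '2019-08-14 22:50:00'],
    'value': [5546, 5530, 5536],
})
line = Line()
line.add('test', df_p.datetime, df_p.value, line_type='dashed')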
Example #26
# Line + Kline
from pyecharts import Grid, Line, Kline

attr_2 = ["周一", "周二", "周三", "周四", "周五", "周六", "周日"]
v3 = [11, 11, 15, 13, 12, 13, 10]
v4 = [1, -2, 2, 5, 3, 2, 0]
line = Line('折线图示例')
line.add('最高气温', attr_2, v3, mark_point=['max', 'min'], mark_line=['average'])
line.add('最低气温',
         attr_2,
         v4,
         mark_point=['max', 'min'],
         mark_line=['average'],
         legend_pos='20%')

v1 = [
    [2320.26, 2320.26, 2287.3, 2362.94],
    [2300, 2291.3, 2288.26, 2308.38],
    [2295.35, 2346.5, 2295.35, 2345.92],
    [2347.22, 2358.98, 2337.35, 2363.8],
    [2360.75, 2382.48, 2347.89, 2383.76],
    [2383.43, 2385.42, 2371.23, 2391.82],
    [2377.41, 2419.02, 2369.57, 2421.15],
    [2425.92, 2428.15, 2417.58, 2440.38],
    [2411, 2433.13, 2403.3, 2437.42],
    [2432.68, 2334.48, 2427.7, 2441.73],
    [2430.69, 2418.53, 2394.22, 2433.89],
    [2416.62, 2432.4, 2414.4, 2443.03],
    [2441.91, 2421.56, 2418.43, 2444.8],
    [2420.26, 2382.91, 2373.53, 2427.07],
    [2383.49, 2397.18, 2370.61, 2397.94],
Example #27
def overlap_charts():
    page = Page()

    chart_init = {
        "width": WIDTH,
        "height": HEIGHT,
    }

    attr = ['A', 'B', 'C', 'D', 'E', 'F']
    v1 = [10, 20, 30, 40, 50, 60]
    v2 = [38, 28, 58, 48, 78, 68]
    bar = Bar("折线图-柱状图叠加", **chart_init)
    bar.add("bar", attr, v1)
    line = Line()
    line.add("line", attr, v2)
    chart = Overlap()
    chart.add(bar)
    chart.add(line)
    page.add(chart)

    v1 = [10, 20, 30, 40, 50, 60]
    v2 = [30, 30, 30, 30, 30, 30]
    v3 = [50, 50, 50, 50, 50, 50]
    v4 = [10, 10, 10, 10, 10, 10]
    es = EffectScatter("散点图-动态散点图叠加", **chart_init)
    es.add("es", v1, v2)
    scatter = Scatter()
    scatter.add("scatter", v1, v3)
    es_1 = EffectScatter()
    es_1.add("es_1", v1, v4, symbol='pin', effect_scale=5)
    chart = Overlap()
    chart.add(es)
    chart.add(scatter)
    chart.add(es_1)
    page.add(chart)

    v1 = [[2320.26, 2320.26, 2287.3, 2362.94],
          [2300, 2291.3, 2288.26, 2308.38],
          [2295.35, 2346.5, 2295.35, 2345.92],
          [2347.22, 2358.98, 2337.35, 2363.8],
          [2360.75, 2382.48, 2347.89, 2383.76],
          [2383.43, 2385.42, 2371.23, 2391.82],
          [2377.41, 2419.02, 2369.57, 2421.15],
          [2425.92, 2428.15, 2417.58, 2440.38],
          [2411, 2433.13, 2403.3, 2437.42],
          [2432.68, 2334.48, 2427.7, 2441.73],
          [2430.69, 2418.53, 2394.22, 2433.89],
          [2416.62, 2432.4, 2414.4, 2443.03],
          [2441.91, 2421.56, 2418.43, 2444.8],
          [2420.26, 2382.91, 2373.53, 2427.07],
          [2383.49, 2397.18, 2370.61, 2397.94],
          [2378.82, 2325.95, 2309.17, 2378.82],
          [2322.94, 2314.16, 2308.76, 2330.88],
          [2320.62, 2325.82, 2315.01, 2338.78],
          [2313.74, 2293.34, 2289.89, 2340.71],
          [2297.77, 2313.22, 2292.03, 2324.63],
          [2322.32, 2365.59, 2308.92, 2366.16],
          [2364.54, 2359.51, 2330.86, 2369.65],
          [2332.08, 2273.4, 2259.25, 2333.54],
          [2274.81, 2326.31, 2270.1, 2328.14],
          [2333.61, 2347.18, 2321.6, 2351.44],
          [2340.44, 2324.29, 2304.27, 2352.02],
          [2326.42, 2318.61, 2314.59, 2333.67],
          [2314.68, 2310.59, 2296.58, 2320.96],
          [2309.16, 2286.6, 2264.83, 2333.29],
          [2282.17, 2263.97, 2253.25, 2286.33],
          [2255.77, 2270.28, 2253.31, 2276.22]]
    attr = ["2017/7/{}".format(i + 1) for i in range(31)]
    kline = Kline("K 线图-折线图叠加", **chart_init)
    kline.add("日K", attr, v1)
    line_1 = Line()
    line_1.add("line-1", attr, [random.randint(2400, 2500) for _ in range(31)])
    line_2 = Line()
    line_2.add("line-2", attr, [random.randint(2400, 2500) for _ in range(31)])
    chart = Overlap()
    chart.add(kline)
    chart.add(line_1)
    chart.add(line_2)
    page.add(chart)

    attr = ["{}月".format(i) for i in range(1, 13)]
    v1 = [2.0, 4.9, 7.0, 23.2, 25.6, 76.7, 135.6, 162.2, 32.6, 20.0, 6.4, 3.3]
    v2 = [2.6, 5.9, 9.0, 26.4, 28.7, 70.7, 175.6, 182.2, 48.7, 18.8, 6.0, 2.3]
    v3 = [2.0, 2.2, 3.3, 4.5, 6.3, 10.2, 20.3, 23.4, 23.0, 16.5, 12.0, 6.2]
    bar = Bar("多 Y 轴叠加", **chart_init)
    bar.add("蒸发量", attr, v1)
    bar.add("降水量", attr, v2, yaxis_formatter=" ml", yaxis_max=250)
    line = Line()
    line.add("平均温度", attr, v3, yaxis_formatter=" °C")
    chart = Overlap()
    chart.add(bar)
    chart.add(line, yaxis_index=1, is_add_yaxis=True)
    page.add(chart)

    return page
Example #28
File: iVIX.py  Project: quan8tum/ivix
    vixDate = datetime.strptime(vixDate, '%Y/%m/%d')
    T_near = (near - vixDate).days / 365.0
    T_next = (nexts - vixDate).days / 365.0
    # the forward index prices
    nearPriceDiff = getStrikeMinCallMinusPutClosePrice(optionsNearTerm)
    nextPriceDiff = getStrikeMinCallMinusPutClosePrice(optionsNextTerm)
    near_F = nearPriceDiff[0] + np.exp(T_near * R_near) * nearPriceDiff[1]
    next_F = nextPriceDiff[0] + np.exp(T_next * R_next) * nextPriceDiff[1]
    # contribution of options with different expiries to the VIX
    near_sigma = calSigmaSquare(optionsNearTerm, near_F, R_near, T_near)
    next_sigma = calSigmaSquare(optionsNextTerm, next_F, R_next, T_next)

    # Using the VIX contributions sig1 and sig2 of options with the two different
    # expiries, together with their remaining times to expiry T1 and T2,
    # interpolate to obtain and return the VIX index (%).
    w = (T_next - 30.0 / 365.0) / (T_next - T_near)
    vix = T_near * w * near_sigma + T_next * (1 - w) * next_sigma
    return 100 * np.sqrt(abs(vix) * 365.0 / 30.0)


ivix = []
for day in tradeday['DateTime']:
    ivix.append(calDayVIX(day))

from pyecharts import Line
attr = true_ivix[u'日期'].tolist()
line = Line(u"中国波指")
line.add("中证指数发布", attr, true_ivix[u'收盘价(元)'].tolist(), mark_point=["max"])
line.add("手动计算", attr, ivix, mark_line=["max", 'average'])
line.render()
Example #29
                print(earlest_msg_node)

            latest_reply = msg_list[len(msg_list) - 1][0]
            latest_reply_msg = msg_list[len(msg_list) - 1][1]
            content_match = regex.search(latest_reply_msg[1])
            if content_match:
                latest_time_node = "最晚一条发言:%s %s,来自:%s群" % (latest_reply_msg[0], latest_reply, content_match.group(1))
                print(latest_time_node)
                latest_msg_node = '"%s" 说 「%s」' % (content_match.group(2), content_match.group(3))
                print(latest_msg_node)

    page = Page()

    # line
    item_name_list, item_num_list = counter2list(dict2sorted_by_key(date_msg_counter))
    line = Line("群心情走势图", "截至日期:%s" % item_name_list[len(item_name_list) - 1], title_text_size=30,
                subtitle_text_size=18, title_pos='center')
    line.add("", item_name_list, item_num_list, mark_point=["max"], legend_pos='65%',
             xaxis_interval=2, xaxis_rotate=27, xaxis_label_textsize=20, yaxis_label_textsize=20, yaxis_name_pos='end',
             yaxis_pos="%50", is_label_show=True)
    page.add(line)

    # pie
    attr = ["总发言数", "日均发言", "发言最多", "发言最少"]
    v1 = [len(msg_list), math.floor(len(msg_list) / len(date_msg_counter)), most_msg_count[1],
          dict_sorted_by_value[0][1]]
    pie = Pie("群聊数据统计", title_pos='center')
    pie.add("", attr, v1, radius=[40, 75], label_text_color=None, is_label_show=True, legend_orient='vertical',
            legend_pos='left')
    page.add(pie)

    # bar
Example #30
regr=linear_model.LinearRegression()
regr.fit(x,y)
print('Intercept:{}'.format(regr.intercept_))
print('Coefficient:{}'.format(regr.coef_))
plt.plot(x,regr.predict(x),linewidth=10,color='blue')
'''
# scatter plot
plt.scatter(x,y,color='black')
plt.xlabel('专业课成绩')
plt.ylabel('数学成绩')


# use the 考生编号 (candidate ID) column as the index
computer_top_ten.set_index('考生编号')
#print(computer_top_ten)
line1=Line("计算机前十名")
i=computer_top_ten['考生编号']
j=computer_top_ten['总分']
attr1=list(map(str,i))
v=list(j)
line1.add("",attr1,v,is_smooth=True,mark_line=["max","average"])
page.add(line1)



'''
Plot the score distribution histogram
'''
computer.set_index('考生编号')