def v_positions_history(self, end=yesterdaydash(), rendered=True):
    """
    river chart visualization of positions ratio history
    use text size to avoid legend overlap in some sense, eg. legend_text_size=8
    """
    start = self.totcftable.iloc[0].date
    times = pd.date_range(start, end)
    tdata = []
    for date in times:
        sdata = sorted(
            [
                (
                    date,
                    fob.briefdailyreport(date).get("currentvalue", 0),
                    fob.name,
                )
                for fob in self.fundtradeobj
            ],
            key=lambda x: x[1],
            reverse=True,
        )
        tdata.extend(sdata)

    tr = ThemeRiver()
    tr.add(
        series_name=[foj.name for foj in self.fundtradeobj],
        data=tdata,
        label_opts=opts.LabelOpts(is_show=False),
        singleaxis_opts=opts.SingleAxisOpts(type_="time", pos_bottom="10%"),
    )
    if rendered:
        return tr.render_notebook()
    else:
        return tr
def v_positions_history(self, end=yesterdaydash(), **vkwds):
    """
    river chart visualization of positions ratio history
    use text size to avoid legend overlap in some sense, eg. legend_text_size=8
    """
    start = self.totcftable.iloc[0].date
    times = pd.date_range(start, end)
    tdata = []
    for date in times:
        sdata = sorted(
            [
                (date, fob.briefdailyreport(date).get("currentvalue", 0), fob.aim.name)
                for fob in self.fundtradeobj
            ],
            key=lambda x: x[1],
            reverse=True,
        )
        tdata.extend(sdata)

    tr = ThemeRiver()
    tr.add(
        [foj.aim.name for foj in self.fundtradeobj],
        tdata,
        is_datazoom_show=True,
        is_label_show=False,
        legend_top="0%",
        legend_orient="horizontal",
        **vkwds
    )
    return tr
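# Both versions of the method above feed ThemeRiver the same data shape: a flat
# list of (date, value, series-name) triples plus the list of series names.
# A minimal standalone sketch of that shape against the pyecharts v1 API follows;
# the fund names, dates, and values below are made-up placeholders, not output of
# the original code.
import pandas as pd
from pyecharts import options as opts
from pyecharts.charts import ThemeRiver

names = ["fund_a", "fund_b"]  # hypothetical series names
tdata = []
for date in pd.date_range("2020-01-01", "2020-01-03"):
    day = date.strftime("%Y-%m-%d")
    tdata.append([day, 100, "fund_a"])  # [date, current value, series name]
    tdata.append([day, 60, "fund_b"])

tr = ThemeRiver()
tr.add(
    series_name=names,
    data=tdata,
    label_opts=opts.LabelOpts(is_show=False),
    singleaxis_opts=opts.SingleAxisOpts(type_="time", pos_bottom="10%"),
)
tr.render("positions_river.html")  # or tr.render_notebook() inside Jupyter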
with open("news_top_title.json", "r") as f: top_titles = json.load(f)[topic] with open("news_emotion_count.json", "r") as f: news_ec = json.load(f) emotions = ["负面", "中性", "正面"] with open("cov_cnt.json", "r") as f: cc_data = json.load(f) theme_river = ThemeRiver( init_opts=opts.InitOpts(width="1200px", height="600px")) theme_river.add( series_name=tr_series[topic], data=tr_data[topic], label_opts=opts.LabelOpts(is_show=False), singleaxis_opts=opts.SingleAxisOpts(pos_top="50", pos_bottom="50", type_="time"), ) theme_river.set_global_opts(tooltip_opts=opts.TooltipOpts( trigger="axis", axis_pointer_type="line"), legend_opts=opts.LegendOpts(pos_top="5%", is_show=True)) wc_tl = Timeline(init_opts=opts.InitOpts(width="1200px", height="600px")) idx = 0 for date, tmp_data in wc_data: wc = WordCloud() wc.add(series_name="", data_pair=tmp_data) wc.set_global_opts( title_opts=opts.TitleOpts(title="词频统计",
dim=7, name="等级", type_="category", data=["优", "良", "轻度污染", "中度污染", "重度污染", "严重污染"], ), ] parallel = Parallel() # 添加坐标轴和数据 parallel.add_schema(schema=schema).add("", data) parallel.render_notebook() # %% [markdown] # ### Radar -- 雷达图 radar = Radar() radar.add_schema(schema=[ opts.RadarIndicatorItem(name=_k, max_=200) for _k in list("ABCDFG") ]) radar.add("Expectation", [Faker.values()]).add("Reality", [Faker.values()]) radar.render_notebook() # %% [markdown] # ### ThemeRiver -- 流量图 themeriver = ThemeRiver() with open("data/themeriver.json") as j: data = json.load(j) cats = list(set([i[-1] for i in data])) themeriver.add(cats, data, singleaxis_opts=opts.SingleAxisOpts(type_="time")) themeriver.render_notebook()
    columns=['year'],
    index=df_year['year'].unique(),
)
df_river['year'] = df_river['year'].apply(str)
df_river['count'] = df_year[df_year[g] == 1].groupby(by=['year'])[g].sum()
df_river['genre'] = np.full(len(df_river), g)
df_river['count'] = df_river['count'].fillna(0)
data_river.extend(df_river.values.tolist())

river = ThemeRiver(
    init_opts=opts.InitOpts(width="2000px", height="600px", theme=ThemeType.LIGHT)
)
river.add(
    series_name=genres,
    data=data_river,
    label_opts=opts.LabelOpts(font_size=10),
    singleaxis_opts=opts.SingleAxisOpts(
        pos_top="50",
        pos_bottom="50",
        type_="time",
    ),
)
river.set_global_opts(
    tooltip_opts=opts.TooltipOpts(trigger="axis", axis_pointer_type="line"),
    title_opts=opts.TitleOpts(
        title="1901-2020 曲風流變",  # evolution of genres, 1901-2020
        subtitle="1901-2020",
        pos_bottom="85%",
        pos_right="80%",
    ),
)
river.set_series_opts(label_opts=opts.LabelOpts(is_show=False))
# themeriver().load_javascript()
river.render_notebook()