def update_graph(crime, race, rates):
    """Build an animated USA choropleth of crime counts normalised by population.

    Parameters
    ----------
    crime : str
        Crime category to filter the global ``data`` frame on.
    race : str
        Race to filter on, or 'All' for every race.
    rates : str
        'race_pop' to normalise by race-specific population totals
        (only meaningful when a specific race is selected).

    Returns
    -------
    plotly Figure animated by year.
    """
    # Pick the row subset and the population column used as the denominator.
    if race == 'All':
        subset = data[data['Crime'] == crime]
        denominator = subset['Population']
    else:
        subset = data[(data['Crime'] == crime) & (data['Race'] == race)]
        denom_col = 'Population_Totals' if rates == 'race_pop' else 'Population'
        denominator = subset[denom_col]

    fig = px.choropleth(subset,
                        locations="Code",
                        color=subset['Count'] / denominator,
                        locationmode="USA-states",
                        scope="usa",
                        animation_frame='Year')
    # Dark-theme styling for the surrounding dashboard.
    fig.update_layout(font=dict(color='white'),
                      paper_bgcolor='rgba(42,161,152,0)',
                      plot_bgcolor='rgba(0,0,0,0)',
                      legend_title_text='Race')
    return fig
def test_choropleth_plot(self):
    """Smoke test: building an animated gapminder choropleth must not raise.

    Fix: removed the unused ``gapminder2007`` intermediate — it was queried
    but never referenced by the plot call.
    """
    gapminder = px.data.gapminder()
    px.choropleth(gapminder,
                  locations="iso_alpha",
                  color="lifeExp",
                  hover_name="country",
                  animation_frame="year",
                  color_continuous_scale=px.colors.sequential.Plasma,
                  projection="natural earth")
def create_region_map2(express_bubble_data):
    """Build three animated world choropleths (confirmed, growth, deaths).

    Fix: the three maps were constructed with three nearly identical,
    copy-pasted ``px.choropleth`` calls; they now share one helper so the
    common geography/animation settings live in a single place.

    Parameters
    ----------
    express_bubble_data : DataFrame
        Must contain 'region', 'date' and the three metric columns.

    Returns
    -------
    tuple of three dcc.Graph components (confirmed, growth, deaths).
    """
    def _region_choropleth(metric):
        # One map per metric; everything but the colour column is shared.
        figure = px.choropleth(express_bubble_data,
                               scope="world",
                               locations="region",
                               color=metric,
                               locationmode="country names",
                               hover_name="region",
                               animation_frame="date",
                               color_continuous_scale=px.colors.sequential.Plasma,
                               projection='robinson')
        return dcc.Graph(figure=figure)

    return (_region_choropleth("confirmed"),
            _region_choropleth("growth"),
            _region_choropleth("deaths"))
def page_3(option_slctds, catnat_select):
    """Build the three figures for page 3 of the dashboard.

    Parameters
    ----------
    option_slctds :
        Selected year, matched against the 'Année' column.
    catnat_select :
        Selected disaster type, matched against 'Type_de_catnat'.

    Returns
    -------
    (yearly bar chart, Europe choropleth, per-type bar chart)
    """
    # Natural disasters for the selected year only.
    disasters_year = df_catnat.copy()
    disasters_year = disasters_year[disasters_year["Année"] == option_slctds]

    fig_cat_annee = px.bar(catnat_annee,
                           title='Nombre de catastrophes naturelles par année',
                           x='Année',
                           y='nombre_de_catnat')

    fig_map = px.choropleth(disasters_year,
                            title='Nombre de catastrophes naturelles chaque année',
                            locations='id',
                            geojson=counties,
                            color='nb_catnat',
                            scope='europe')
    fig_map.update_geos(fitbounds="locations", visible=False)

    # Disasters of the selected type across all years.
    disasters_type = typeYears.copy()
    disasters_type = disasters_type[disasters_type["Type_de_catnat"] == catnat_select]
    fig_type = px.bar(disasters_type,
                      title='Nombre de catastrophes par type',
                      x='Année',
                      y='Nombre_catnat')

    return fig_cat_annee, fig_map, fig_type
def happiness_map(year_range, active_tab):
    """Build a world choropleth coloured by happiness score.

    Only renders when the "Summary View" tab is selected; otherwise an
    empty dict is returned so the callback short-circuits cheaply.

    Parameters
    ----------
    year_range : list
        Two-element [start, end] list of years to filter on.
    active_tab : str
        Name of the active tab in the content area.

    Returns
    -------
    plotly.express Figure
        Choropleth of happiness score by country, animated over years.
    """
    # Short circuit when the summary tab is not showing.
    if active_tab != "summary_view":
        return {}

    # Keep every country (empty filter lists); restrict only by year range.
    frame = filter_df(summary_df, [], [], year_range).sort_values(by="year")

    fig = px.choropleth(
        data_frame=frame,
        # locationmode="ISO-3",
        locations="country_code",
        hover_name="country",
        color="happiness_score",
        animation_frame="year",
        animation_group="country",
        color_continuous_scale=px.colors.sequential.Sunset_r,
    )

    # Nudge the animation slider and play button away from the map.
    fig.layout.sliders[0].pad.t = 10
    fig.layout.updatemenus[0].pad.t = 10
    fig.update_layout(
        title_text="Happiness Score Worldwide on 10 Point Scale",
        geo=dict(showframe=False,
                 showcoastlines=False,
                 projection_type="equirectangular"),
        margin=dict(l=0, r=0, t=50, b=0),
    )
    return fig
def build_US_votemap():
    """Build and display a USA choropleth of 2020 presidential vote counts by state.

    Reads the president-by-state results and a state/code lookup from the
    local ``archive`` directory, merges them on state name, and shows the
    figure. Returns None.

    Fixes: the CSV paths used an invalid ``.../archive`` prefix — corrected
    to ``../archive`` to match the sibling ``build_County_heatmap``; the
    title's "Presiential" typo is corrected.
    """
    set_pandas()
    df_pres = pd.read_csv("../archive/president_state.csv")
    states = pd.read_csv(
        "../archive/latitude-and-longitude-for-every-country-and-state.csv")
    # Only the name/abbreviation columns are needed for the merge.
    states = states[['usa_state', 'usa_state_code']]
    merged = df_pres.merge(states, left_on='state', right_on='usa_state')
    fig = px.choropleth(merged,
                        locations="usa_state_code",
                        color="total_votes",
                        locationmode="USA-states",
                        hover_name="state",
                        range_color=[0, 10000000],
                        scope="usa",
                        title="2020 USA Presidential Election Vote Counts")
    fig.show()
    return
def main(df, name):
    """Draw a city-level max-temperature choropleth for one Chinese province.

    Parameters
    ----------
    df : DataFrame
        Weather table consumed by ``select_province``.
    name : str
        Short province name (e.g. '广西'); expanded to the official full
        name used by the geojson files under ``city/``.

    Returns
    -------
    plotly Figure fitted to the province boundary.
    """
    province = select_province(df, name)

    # Short names whose official full form is not simply name + '省'.
    full_names = {
        '广西': '广西壮族自治区',
        '内蒙古': '内蒙古自治区',
        '宁夏': '宁夏回族自治区',
        '西藏': '西藏自治区',
        '新疆': '新疆维吾尔自治区',
        '上海': '上海市',
        '重庆': '重庆市',
        '北京': '北京市',
        '天津': '天津市',
    }
    name = full_names.get(name, name + '省')

    with open(f"city/{name}.json", 'r', encoding='utf-8') as fp:
        geo = json.load(fp)

    # One (name, adcode) record per city feature in the geojson.
    city_records = [{'name': feature['properties']['name'],
                     'adcode': feature['properties']['adcode']}
                    for feature in geo['features']]
    city_code = pd.DataFrame(city_records)

    # Attach each city's adcode so locations can be matched to the geojson.
    df = province.join(city_code.set_index('name'), on='城市')
    fig = px.choropleth(df,
                        geojson=geo,
                        locations='adcode',
                        featureidkey='properties.adcode',
                        color='最高气温',
                        hover_data=['城市', '最高气温', '最低气温'],
                        title=f'{name}')
    fig.update_geos(fitbounds="locations", visible=False)
    return fig
def CreateMap(map_data):
    """Render a choropleth of inhabitants-per-doctor for French departments.

    Parameters
    ----------
    map_data : DataFrame
        Must contain 'departement', 'nom departement' and
        'habitants par medecin' columns.

    Returns
    -------
    The configured plotly figure (750 px tall, fitted to the departments).
    """
    # Department boundaries come from the remote geojson repository.
    departements_geojson = requests.get(geojson_repo_rul).json()

    doctor_density_map = px.choropleth(
        map_data,
        geojson=departements_geojson,
        locations="departement",
        featureidkey="properties.code",
        color_continuous_scale="magma",
        color="habitants par medecin",
        scope="europe",
        hover_data=["nom departement", "departement", "habitants par medecin"])

    # Hide everything but the departments themselves and zoom to fit them.
    doctor_density_map.update_geos(showcountries=False,
                                   showcoastlines=False,
                                   showland=False,
                                   fitbounds="locations")
    doctor_density_map["layout"]["height"] = 750
    return doctor_density_map
def build_County_heatmap():
    """Build and display a USA choropleth of the percent voting Democratic by state.

    Fix: the title previously contained the literal text ``+todays_date+``
    inside the string instead of interpolating a date; it now embeds today's
    ISO date. The previously empty docstring is filled in.
    """
    from datetime import date

    # set_pandas restrictions from utils
    set_pandas()

    # Candidate-level results; keep only the two major parties.
    df_pres = pd.read_csv("../archive/president_county_candidate.csv")
    rep = df_pres['party'] == 'REP'
    dem = df_pres['party'] == 'DEM'
    df_pres = df_pres[rep | dem]

    # Group by state & party, then pivot parties into columns.
    df_pres = df_pres.groupby(['state', 'party']).sum()
    df_pres = df_pres.unstack()
    df_pres = df_pres.reset_index()

    # State name -> postal-code lookup.
    states = pd.read_csv(
        "../archive/latitude-and-longitude-for-every-country-and-state.csv")
    states = states[['usa_state', 'usa_state_code']]

    # unstack() produces tuple column names; flattening them back to strings
    # (e.g. ('total_votes', 'DEM') -> 'total_votesDEM') avoids key errors below.
    merged = df_pres.merge(states, left_on='state', right_on='usa_state')
    merged.rename(columns=''.join, inplace=True)

    merged['percent_democrat'] = merged['total_votesDEM'] * 100 / (
        merged['total_votesREP'] + merged['total_votesDEM'])

    todays_date = date.today().isoformat()
    fig = px.choropleth(
        merged,
        locations="usa_state_code",
        color="percent_democrat",
        locationmode='USA-states',
        hover_name="state",
        range_color=[25, 75],
        color_continuous_scale='RdBu',  # red/blue diverging scale
        scope="usa",
        title=f'2020 USA Election: Percent of Population Voting for the Democratic Party (as of {todays_date})'
    )
    fig.show()
    return
def update_figure(year_slider, month_slider, day_slider, media_dropdown,
                  mfm_dropdown, year_dropdown):
    """Dash callback: rebuild the four dashboard figures for the selected filters.

    Parameters mirror the dashboard controls: a calendar day (year/month/day
    sliders), a media type, a metric prefix for the vacation-zone map, and a
    year for that map.

    Returns
    -------
    (fig1, fig2, fig3, fig4) : histograms, channel bar chart, zone choropleth.

    Fix: the map values were written with chained assignment
    (``sf_zone['value'][i] = ...``), which pandas warns about and which can
    silently write to a copy; replaced with explicit ``.loc`` writes.
    """
    # Rows for the selected calendar day.
    dg = df.loc[(df["year"] == year_slider) & (df["month"] == month_slider)
                & (df["day"] == day_slider)]
    fig1 = px.histogram(
        dg,
        x="female_percent",
        color="media_type",
    )
    fig2 = px.histogram(
        dg,
        x="male_percent",
        color="media_type",
    )
    fig3 = px.bar(dg.loc[(dg["media_type"] == media_dropdown)],
                  x="channel_name",
                  y=["female_percent", "male_percent", "music_percent"])

    # One value per vacation zone (A/B/C) for the choropleth.
    # NOTE(review): assumes sf_zone has a default RangeIndex with labels
    # 0, 1, 2 for the three zones — TODO confirm against its construction.
    sf_zone.loc[0, 'value'] = df_zone["Zone_A_" + mfm_dropdown][year_dropdown]
    sf_zone.loc[1, 'value'] = df_zone["Zone_B_" + mfm_dropdown][year_dropdown]
    sf_zone.loc[2, 'value'] = df_zone["Zone_C_" + mfm_dropdown][year_dropdown]
    fig4 = px.choropleth(sf_zone,
                         geojson=sf_zone.geometry,
                         locations=sf_zone.index,
                         color='value',
                         scope="europe",
                         title="Proportions par zones de vacances",
                         hover_name="nom")
    fig4.update_geos(fitbounds="locations", visible=False)
    return (fig1, fig2, fig3, fig4)
def serve_layout():
    """Fetch the latest JHU CSSE COVID-19 data and build the full Dash layout.

    Downloads the global confirmed/deaths time series and the most recent
    daily report, derives the summary frames, builds four figures (time
    series, global choropleth, US county choropleth, most-affected bars)
    and returns the page as an html.Div tree.
    """
    # Links to time series datasets on github:
    url_confirmed = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv'
    url_deaths = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv'
    # Create dataframes from datasets:
    df_confirmed = pd.read_csv(url_confirmed)
    df_deaths = pd.read_csv(url_deaths)
    # Replace null values with zeroes (columns 4+ are the per-date counts):
    df_confirmed[df_confirmed.columns[4:]] = df_confirmed[
        df_confirmed.columns[4:]].fillna(0, downcast='infer')
    df_deaths[df_deaths.columns[4:]] = df_deaths[df_deaths.columns[4:]].fillna(
        0, downcast='infer')
    # Try today's date. If not yet updated use yesterday's date for daily reports:
    # NOTE(review): the bare `except:` also hides network/parse errors, not
    # just the missing-file case — consider narrowing it.
    try:
        date = datetime.now().strftime('%m-%d-%Y')
        url_daily_reports = f'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_daily_reports/{date}.csv'
        df_daily_reports = pd.read_csv(url_daily_reports,
                                       dtype={'FIPS': object})
        # Zero-pad FIPS codes to 5 digits so they match the geojson ids.
        df_daily_reports['FIPS'] = df_daily_reports['FIPS'].str.zfill(5)
    except:
        date = (datetime.now() - timedelta(days=1)).strftime('%m-%d-%Y')
        url_daily_reports = f'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_daily_reports/{date}.csv'
        df_daily_reports = pd.read_csv(url_daily_reports,
                                       dtype={'FIPS': object})
        df_daily_reports['FIPS'] = df_daily_reports['FIPS'].str.zfill(5)

    # Subsets of confirmed cases:
    df_china = df_confirmed[df_confirmed['Country/Region'] == 'China']
    df_other = df_confirmed[df_confirmed['Country/Region'] != 'China']

    # Add ISO3 codes to daily updating df; country_index holds names the
    # generic mapper does not resolve.
    mapper = country(from_key='name', to_key='iso3')
    country_index = {}
    country_index['West Bank and Gaza'] = 'PSE'
    country_index['Taiwan*'] = 'TWN'
    country_index['Timor-Leste'] = 'TLS'
    country_index['Holy See'] = 'VAT'
    country_index['Republic of the Congo'] = 'COG'
    country_index['Congo (Brazzaville)'] = 'COG'
    country_index['Congo (Kinshasa)'] = 'COD'
    df_confirmed['ISO3'] = df_confirmed['Country/Region'].apply(
        lambda x: country_index.get(x, mapper(x)))

    # Reformat for global choropleth:
    df_global = df_confirmed.groupby(['ISO3',
                                      'Country/Region']).sum().reset_index()
    # Convert date columns to rows:
    df_global = pd.melt(df_global,
                        id_vars=['ISO3', 'Country/Region', 'Lat', 'Long'],
                        value_vars=list(
                            df_global.select_dtypes(include='int64')),
                        var_name='Date',
                        value_name='Confirmed Cases')

    # Setup df containing states with most cases:
    df_us = df_daily_reports[df_daily_reports['Country_Region'] == 'US']
    leading_states = df_us.groupby('Province_State')['Confirmed'].sum(
    ).sort_values(ascending=False)[0:10].index
    df_us_leading_states = df_us[df_us['Province_State'].isin(
        leading_states)].groupby('Province_State').sum().sort_values(
            by=['Confirmed'], ascending=False).reset_index()
    df_us_leading_states[
        'Active'] = df_us_leading_states['Confirmed'] - df_us_leading_states[
            'Recovered'] - df_us_leading_states['Deaths']

    # Setup df containing states with most deaths:
    leading_states_deaths = df_us.groupby('Province_State')['Deaths'].sum(
    ).sort_values(ascending=False)[0:10].index
    df_us_leading_states_deaths = df_us[df_us['Province_State'].isin(
        leading_states_deaths)].groupby('Province_State').sum().sort_values(
            by=['Deaths'], ascending=False).reset_index()

    # Setup df containing countries with most cases:
    leading_countries = df_daily_reports.groupby(
        'Country_Region')['Confirmed'].sum().sort_values(
            ascending=False)[0:10].index
    df_leading_countries = df_daily_reports[
        df_daily_reports['Country_Region'].isin(leading_countries)].groupby(
            'Country_Region').sum().sort_values(by=['Confirmed'],
                                                ascending=False).reset_index()
    df_leading_countries[
        'Active'] = df_leading_countries['Confirmed'] - df_leading_countries[
            'Recovered'] - df_leading_countries['Deaths']

    # Setup df containing countries with most deaths:
    leading_countries_deaths = df_daily_reports.groupby(
        'Country_Region')['Deaths'].sum().sort_values(
            ascending=False)[0:10].index
    df_leading_countries_deaths = df_daily_reports[df_daily_reports[
        'Country_Region'].isin(leading_countries_deaths)].groupby(
            'Country_Region').sum().sort_values(by=['Deaths'],
                                                ascending=False).reset_index()

    # df for US choropleth:
    df_us_choro = df_us.groupby('Province_State').sum().reset_index()
    # Add dict for state abbreviations for US choropleth:
    us_state_abbrev = {
        'Alabama': 'AL',
        'Alaska': 'AK',
        'Arizona': 'AZ',
        'Arkansas': 'AR',
        'California': 'CA',
        'Colorado': 'CO',
        'Connecticut': 'CT',
        'Delaware': 'DE',
        'District of Columbia': 'DC',
        'Florida': 'FL',
        'Georgia': 'GA',
        'Hawaii': 'HI',
        'Idaho': 'ID',
        'Illinois': 'IL',
        'Indiana': 'IN',
        'Iowa': 'IA',
        'Kansas': 'KS',
        'Kentucky': 'KY',
        'Louisiana': 'LA',
        'Maine': 'ME',
        'Maryland': 'MD',
        'Massachusetts': 'MA',
        'Michigan': 'MI',
        'Minnesota': 'MN',
        'Mississippi': 'MS',
        'Missouri': 'MO',
        'Montana': 'MT',
        'Nebraska': 'NE',
        'Nevada': 'NV',
        'New Hampshire': 'NH',
        'New Jersey': 'NJ',
        'New Mexico': 'NM',
        'New York': 'NY',
        'North Carolina': 'NC',
        'North Dakota': 'ND',
        'Northern Mariana Islands': 'MP',
        'Ohio': 'OH',
        'Oklahoma': 'OK',
        'Oregon': 'OR',
        'Palau': 'PW',
        'Pennsylvania': 'PA',
        'Puerto Rico': 'PR',
        'Rhode Island': 'RI',
        'South Carolina': 'SC',
        'South Dakota': 'SD',
        'Tennessee': 'TN',
        'Texas': 'TX',
        'Utah': 'UT',
        'Vermont': 'VT',
        'Virgin Islands': 'VI',
        'Virginia': 'VA',
        'Washington': 'WA',
        'West Virginia': 'WV',
        'Wisconsin': 'WI',
        'Wyoming': 'WY',
    }
    df_us_choro['Abbrev'] = df_us_choro['Province_State'].map(
        us_state_abbrev).fillna(df_us_choro['Province_State'])
    # Drop rows that did not map to a 2-letter state abbreviation.
    df_us_choro = df_us_choro[df_us_choro['Abbrev'].apply(
        lambda x: len(x) < 3)]

    ## TIME SERIES
    fig_time = go.Figure()
    # Confirmed cases in mainland China
    fig_time.add_trace(
        go.Scatter(
            x=[i[:-3] for i in list(df_other.select_dtypes(include='int64'))],
            y=list(df_china.select_dtypes(include='int64').sum()),
            name='China',
            line_color='#7f7f7f'))
    # Confirmed cases for the rest of the world
    fig_time.add_trace(
        go.Scatter(
            x=[i[:-3] for i in list(df_other.select_dtypes(include='int64'))],
            y=list(df_other.select_dtypes(include='int64').sum()),
            name='Rest of World',
            line_color='#ff7f0e'))
    # Worldwide deaths
    fig_time.add_trace(
        go.Scatter(
            x=[i[:-3] for i in list(df_other.select_dtypes(include='int64'))],
            y=list(df_deaths.select_dtypes(include='int64').sum()),
            name='Worldwide Deaths',
            line_color='#d62728'))
    for trace in fig_time.data:
        trace.hovertemplate = '%{x}<br>%{y}'
    fig_time.update_yaxes(hoverformat=',f')
    fig_time.update_layout(
        title_text='Coronavirus over Time',
        legend={
            'x': 0.02,
            'y': 0.55
        },
        legend_bgcolor='rgba(0,0,0,0.1)',
        height=350,
        margin={
            'r': 10,
            't': 50,
            'l': 10,
            'b': 70
        },
        annotations=[
            dict(xshift=10,
                 yshift=-10,
                 x=0,
                 y=1.0,
                 showarrow=False,
                 text='Total Cases: ' +
                 f'{sum(df_daily_reports["Confirmed"]):,}' +
                 '<br>Total Deaths: ' +
                 f'{sum(df_daily_reports["Deaths"]):,}',
                 xref='paper',
                 yref='paper',
                 font=dict(size=16, color='#ffffff'),
                 align='left',
                 bordercolor='rgba(0,0,0,0.1)',
                 borderwidth=2,
                 borderpad=4,
                 bgcolor='#ff7f0e')
        ])

    ## GLOBAL CHOROPLETH
    fig_global = px.choropleth(
        df_global,
        locations='ISO3',
        color='Confirmed Cases',
        hover_name='Country/Region',
        hover_data=['Date'],
        projection='natural earth',
        animation_frame='Date',
        range_color=(0, df_global['Confirmed Cases'].max()),
        # Log-like manual scale: each stop is 10x the previous fraction.
        color_continuous_scale=[
            [0, 'rgb(250, 250, 250)'],  #0
            [1 / 10000, 'rgb(250, 175, 100)'],  #10
            [1 / 1000, 'rgb(250, 125, 0)'],  #100
            [1 / 100, 'rgb(200, 100, 0)'],  #1000
            [1 / 10, 'rgb(250, 50, 50)'],  #10000
            [1, 'rgb(100, 0, 0)'],  #100000
        ])
    # Must loop though traces AND frames to format hovertemplate
    for trace in fig_global.data:
        trace.hovertemplate = '<b>%{hovertext}</b> (%{customdata[0]})<br>%{z:,f}'
    for frame in fig_global.frames:
        frame.data[
            0].hovertemplate = '<b>%{hovertext}</b> (%{customdata[0]})<br>%{z:,f}'
    # Animation speed and slider/button locations
    fig_global.layout.updatemenus[0].buttons[0].args[1]['frame'][
        'duration'] = 50
    fig_global.layout.updatemenus[0].pad = {'l': 10, 't': 0}
    fig_global.layout.sliders[0].pad = {'b': 10, 't': -20, 'l': 10}
    fig_global.layout.sliders[0].currentvalue = {'prefix': 'Date = '}
    fig_global.layout.coloraxis.colorbar.title.text = 'Confirmed<br>Cases'
    fig_global.update_layout(
        title='Global Time Series',
        margin={
            'r': 0,
            't': 50,
            'l': 0,
            'b': 10
        },
    )

    ## US CHOROPLETH
    fig_us = px.choropleth(
        df_daily_reports,
        geojson=counties,
        locations='FIPS',
        scope='usa',
        color='Confirmed',
        hover_name='Admin2',
        hover_data=['Province_State'],
        range_color=(0,
                     df_daily_reports[df_daily_reports['Country_Region'] ==
                                      'US']['Confirmed'].max()),
        color_continuous_scale=[
            [0, 'rgb(250, 250, 250)'],  #0
            [1 / 10000, 'rgb(250, 175, 100)'],  #10
            [1 / 1000, 'rgb(250, 125, 0)'],  #100
            [1 / 100, 'rgb(200, 100, 0)'],  #1000
            [1 / 10, 'rgb(250, 50, 50)'],  #10000
            [1, 'rgb(100, 0, 0)'],  #100000
        ])
    for trace in fig_us.data:
        trace.hovertemplate = '<b>%{hovertext}</b> (%{customdata[0]})<br>%{z:,f}'
    fig_us.layout.coloraxis.colorbar.title.text = 'Confirmed<br>Cases'
    fig_us.update_traces(marker_line_width=0.1)
    fig_us.update_layout(
        title=f'US Counties ({date})',
        margin={
            'r': 0,
            't': 50,
            'l': 0,
            'b': 30
        },
    )

    ## MOST AFFECTED
    # Confirmed traces start visible; deaths traces are toggled in via the
    # updatemenu buttons below.
    trace_glob_c = go.Bar(x=df_leading_countries['Country_Region'],
                          y=df_leading_countries['Confirmed'],
                          marker={'color': 'rgb(250, 175, 100)'},
                          visible=True)
    trace_glob_d = go.Bar(x=df_leading_countries_deaths['Country_Region'],
                          y=df_leading_countries_deaths['Deaths'],
                          marker={'color': 'rgb(250, 50, 50)'},
                          visible=False)
    trace_us_c = go.Bar(x=df_us_leading_states['Province_State'],
                        y=df_us_leading_states['Confirmed'],
                        marker={'color': 'rgb(250, 175, 100)'},
                        visible=True)
    trace_us_d = go.Bar(x=df_us_leading_states_deaths['Province_State'],
                        y=df_us_leading_states_deaths['Deaths'],
                        marker={'color': 'rgb(250, 50, 50)'},
                        visible=False)
    fig_most_affected = make_subplots(rows=1, cols=2)
    fig_most_affected.append_trace(trace_glob_c, 1, 1)
    fig_most_affected.append_trace(trace_glob_d, 1, 1)
    fig_most_affected.append_trace(trace_us_c, 1, 2)
    fig_most_affected.append_trace(trace_us_d, 1, 2)
    for trace in fig_most_affected.data:
        trace.name = ''
        trace.hovertemplate = '%{x}<br>%{y}'
    fig_most_affected.update_yaxes(hoverformat=',f')
    fig_most_affected.update_layout(
        title=f'Leading Countries and US States ({date})',
        showlegend=False,
        height=350,
        margin={
            'r': 10,
            't': 50,
            'l': 40,
            'b': 10
        },
        updatemenus=[
            dict(pad={
                'r': 10,
                't': 10
            },
                 x=1.0,
                 y=1.0,
                 active=0,
                 buttons=list([
                     dict(label='Confirmed',
                          method='update',
                          args=[{
                              'visible': [True, False, True, False]
                          }]),
                     dict(label='Deaths',
                          method='update',
                          args=[{
                              'visible': [False, True, False, True]
                          }]),
                 ]))
        ])

    # Assemble the page: header bar, two figure rows, footer markdown.
    return html.Div(children=[
        html.Div(
            [
                html.H3('Coronavirus Dashboard'),
                html.Div([
                    html.P(f'Updated: {date}', style={'font-style':
                                                      'italic'}),
                ],
                         style={'display': 'inline-block'}),
                html.Div(
                    [
                        dcc.Markdown(
                            '''Source: [Johns Hopkins University CSSE](https://github.com/CSSEGISandData/COVID-19)''',
                            style={'font-style': 'italic'})
                    ],
                    style={
                        'display': 'inline-block',
                        'float': 'right',
                        'color': '#ff7f0e'
                    }),
            ],
            style={
                'color': 'white',
                'paddingLeft': '10px',
                'background':
                'linear-gradient(to right, #ff7f0e 25%, 50%, white)'
            }),
        html.Div(children=[
            html.Div([dcc.Graph(figure=fig_time, )],
                     style={'margin': '0'},
                     className='five columns'),
            html.Div([dcc.Graph(figure=fig_most_affected, )],
                     style={'margin': '0'},
                     className='seven columns'),
        ],
                 className='twelve columns'),
        html.Div(children=[
            html.Div([dcc.Graph(figure=fig_us, )],
                     style={'margin': '0'},
                     className='six columns'),
            html.Div([dcc.Graph(figure=fig_global, )],
                     style={'margin': '0'},
                     className='six columns')
        ],
                 className='twelve columns'),
        html.Div([
            html.Div([
                dcc.Markdown(
                    '''If you find this dashboard helpful, please share it and consider donating to a charity on the frontlines of COVID-19, such as [Doctors Without Borders](https://donate.doctorswithoutborders.org/onetime.cfm). \nCreated and maintained by [John Larson](https://www.linkedin.com/in/johnlarson2016/).'''
                ),
            ],
                     style={
                         'paddingLeft': '10px',
                         'paddingTop': '20px'
                     }),
        ],
                 className='twelve columns')
    ])
#!/usr/bin/env python # coding: utf-8 # In[1]: import plotly_express as px # In[3]: gapminder = px.data.gapminder() # In[4]: px.choropleth(gapminder, locations="iso_alpha", color="lifeExp", hover_name="country", animation_frame="year", color_continuous_scale=px.colors.sequential.Plasma) # In[ ]:
def get_plot(self, parameters):
    """Query an aggregate metric per country/month and render it as an
    animated world choropleth; returns the figure as an HTML <div> string.

    ``parameters`` supplies: 'range' (start/end), 'country_id',
    'actor_type' (1 or 2), and 'target_type' (1-4, selecting the SQL
    aggregate and display name below).
    """
    # target_type -> SQL aggregate expression interpolated into the query.
    target_dict = {
        1: "COUNT(*)",
        2: "AVG(AvgTone)",
        3: "Sum(NumMentions)",
        4: "AVG(GoldsteinScale)"
    }
    qe = QueryExecutor()
    query = self.query.format(
        start=parameters['range'].value[0],
        end=parameters['range'].value[1],
        country=parameters['country_id'].value,
        role_1=parameters['actor_type'].value,
        # The "other" actor role: 3 - 1 = 2, 3 - 2 = 1.
        role_2=(3 - parameters['actor_type'].value),
        target_type=target_dict[parameters['target_type'].value],
    )
    df = qe.get_result_dataframe(query)
    # FIPS -> ISO-3 codes for px.choropleth; unmapped countries are dropped.
    df['country_iso'] = df['Country'].map(Utils().get_fips_iso_mapping())
    df.dropna(inplace=True)
    # target_type -> human-readable column/legend name.
    target_name_dict = {
        1: 'Event Count',
        2: 'Average Tone',
        3: 'Sum of Mentions',
        4: 'Average Goldstein scale'
    }
    target_col_name = target_name_dict[parameters['target_type'].value]
    # Counts and mention sums are heavy-tailed, so plot them on a log scale.
    if parameters['target_type'].value in [1, 3]:
        target_col_name = "Log " + target_col_name
        df[target_col_name] = np.log10(df['Target'])
    else:
        df[target_col_name] = df['Target']
    # MonthYear is an integer like 202001; format it as "2020-1".
    df['Date'] = df['MonthYear'].apply(
        lambda date: str(int(date / 100)) + "-" + str(int(date % 100)))
    # Right-join against the full country x date grid so every animation
    # frame has a row for every country (missing values become 0).
    columns = ['country_iso', 'Date']
    filler = pd.DataFrame(list(
        product(df['country_iso'].unique(), df['Date'].unique())),
                          columns=columns)
    filled = df.join(filler.groupby(columns).count(),
                     on=columns,
                     how='right')[columns + [target_col_name]]
    filled[target_col_name].fillna(0.0, inplace=True)
    # Fix the colour range across all frames.
    color_bounds = filled[target_col_name].min(
    ), filled[target_col_name].max()
    fig = px.choropleth(
        filled,
        locations='country_iso',
        locationmode='ISO-3',
        color=target_col_name,
        animation_frame='Date',
        range_color=color_bounds,
        color_continuous_scale=px.colors.sequential.Aggrnyl,
        title="{} for each months between {} as Actor {} and other countries"
        .format(
            target_name_dict[parameters['target_type'].value],
            Utils().get_fips_country_id_to_name_mapping()[
                parameters['country_id'].value],
            parameters['actor_type'].value))
    return plot(fig, include_plotlyjs=True, output_type='div')
title_str_1 = "Change in child poverty from a $100 child allowance" title_str_2 = "By Lower Legislative District" title_str_3 = "By Upper Legislative District" title_str_1_CA = "Change in child poverty from a $100 child allowance in California" title_str_2_CA = "By Lower Legislative District" title_str_3_CA = "By Upper Legislative District" pov_child_assembly_map = px.choropleth( assembly_pov_names, color="child_pct_change_100", geojson=j_file_assembly, locations="ind", labels={ "child_pct_change_100": "% change in child poverty", "child_poverty_base_100": "Base child poverty rate", "child_poverty_reform_100": "Reform child poverty rate", "SLDLST": "Assembly District Number", }, color_continuous_scale="blues_r", scope="usa", hover_data={ "child_pct_change_100", "child_poverty_base_100", "child_poverty_reform_100", }, ) pov_child_assembly_map.update_layout(hoverlabel=dict( bgcolor="white", font_size=16, )) pov_child_assembly_map.update_layout(title_text=title_str_1 + "<br>" + title_str_2,
def get_plot(self, parameters):
    """Cluster countries by their pairwise average tone toward other
    countries and draw the cluster assignments on a world choropleth;
    returns the figure embedded as an HTML <div> string.

    ``parameters`` supplies: 'range', 'n_clusters', 'method'
    ('agglomerative' | 'britch' | 'kmeans'), and 'actor_type' (1 clusters
    rows / Actor1, otherwise columns / Actor2).
    """
    qe = QueryExecutor()
    query = self.query.format(
        start=parameters['range'].value[0],
        end=parameters['range'].value[1],
    )
    df = qe.get_result_dataframe(query)
    # Keep only countries with sufficient event volume.
    countries_to_leave = Utils().get_valid_fips_countries(25000)
    df = df[(df['Actor1Geo'].isin(countries_to_leave))
            & (df['Actor2Geo'].isin(countries_to_leave))]
    df.sort_values(['Actor1Geo', 'Actor2Geo'], inplace=True)
    # Reindex onto the full country x country grid so the matrix below is
    # square; missing pairs get tone 0.0.
    index = pd.MultiIndex.from_product(
        (countries_to_leave, countries_to_leave),
        names=['Actor1Geo', 'Actor2Geo'])
    df.index = pd.MultiIndex.from_arrays(
        (df['Actor1Geo'], df['Actor2Geo']),
        names=['Actor1Geo', 'Actor2Geo'])
    df.drop(['Actor1Geo', 'Actor2Geo'], axis=1, inplace=True)
    df = df.reindex(index, fill_value=0.0).reset_index()
    df.sort_values(['Actor1Geo', 'Actor2Geo'], inplace=True)
    # One row per Actor1 country, one column per Actor2 country.
    dist_matrix = df['AvgTone'].values.reshape(
        (-1, df.groupby('Actor1Geo')['Actor2Geo'].count().unique()[0]))
    # Clip the 1st/99th percentile tails to limit outlier influence.
    down_limit, up_limit = np.percentile(dist_matrix, (1, 99))
    dist_matrix = np.clip(dist_matrix, down_limit, up_limit)
    labels_idx = pd.Series(df['Actor1Geo'].unique(), name='country_id')
    n_clusters = parameters['n_clusters'].value
    if parameters['method'].value == 'agglomerative':
        model = AgglomerativeClustering(
            n_clusters=n_clusters,
            affinity='precomputed',
            linkage='complete',
            compute_full_tree=True,
        )
    elif parameters['method'].value == 'britch':
        model = Birch(branching_factor=5, n_clusters=n_clusters)
    elif parameters['method'].value == 'kmeans':
        model = KMeans(n_clusters=n_clusters)
    # NOTE(review): if 'method' is none of the values above, `model` is
    # unbound and fit_predict below raises NameError — confirm the caller
    # restricts the choices.
    if parameters['actor_type'].value == 1:
        clusters = model.fit_predict(dist_matrix)
    else:
        # Cluster by Actor2 profile instead: transpose the matrix.
        clusters = model.fit_predict(dist_matrix.T)
    cluster_df = pd.concat(
        [pd.Series(clusters, name='cluster_id'), labels_idx], axis=1)
    # Right-join so countries without data still appear (cluster_id -1).
    cluster_df = cluster_df.join(
        Utils().get_fips_iso_mapping(),
        on=['country_id'],
        how='right',
    ).fillna(-1)
    cluster_df['country_name'] = cluster_df['country_id'].map(
        Utils().get_fips_country_id_to_name_mapping())
    cluster_df.rename({'ISO': 'country_iso'}, axis=1, inplace=True)
    # Uncomment to write result to csv
    # cluster_df.groupby('cluster_id')['country_name'].apply(lambda countries: '; '.join(countries)).to_csv('clustering_result.csv')
    fig = px.choropleth(
        cluster_df,
        locations='country_iso',
        locationmode='ISO-3',
        color='cluster_id',
        hover_name='country_name',
        hover_data=['cluster_id'],
        labels={
            'country_name': 'Country Name',
            'cluster_id': 'Cluster ID'
        },
        color_continuous_scale=px.colors.qualitative.Alphabet,
    )
    return plot(fig, include_plotlyjs=True, output_type='div')
df_q1_agg_country[ 'Couverture médiatique'] = df_q1_agg_country.numArticles / df_q1_agg_country.numEvent st.dataframe(df_q1) if country1 != "" and language1 != "": st.markdown("Nombre d'articles selon le nombre d'événement") fig = px.scatter(df_q1, x="numArticles", y="numEvent") st.plotly_chart(fig) if country1 == "": st.markdown("**Couverture médiatique:**") fig = px.choropleth(df_q1_agg_country, locations="iso", color="Couverture médiatique", range_color=[4.5, 7], color_continuous_scale="RdYlGn") st.plotly_chart(fig) st.markdown("**Top 10 pays:**") fig = px.bar( df_q1_agg_country[df_q1_agg_country['iso'] != ''].sort_values( "numArticles", ascending=False)[:10], x="iso", y="numArticles") st.plotly_chart(fig) st.markdown("**Top 10 langues:**") fig = px.bar(df_q1_agg_lang.sort_values("numArticles", ascending=False)[:10],
'SP': 'ESP' }) campaigns_df = df[[ 'Country_Codes', 'AcceptedCmp1', 'AcceptedCmp2', 'AcceptedCmp3', 'AcceptedCmp4', 'AcceptedCmp5', 'Response' ]].melt(id_vars='Country_Codes', var_name='Campaign', value_name='% accepted') campaigns_df = pd.DataFrame( campaigns_df.groupby(['Country_Codes', 'Campaign'])['% accepted'].mean() * 100).reset_index(drop=False) campaigns_map = px.choropleth( campaigns_df, locationmode='ISO-3', color='% accepted', facet_col='Campaign', facet_col_wrap=3, projection='natural earth', locations='Country_Codes', title='Marketing Campaign Percentage Success Rate by Country') campaigns_map.write_image(savepath + 'marketing_campaign_success_by_country.png', width=700, height=500, scale=3) campaigns_map.show() # plot illustrating relation between income and spending # 'range_x' upper limit set to 200000 to leave out outliers that detrimentally affect shape of graph fig = px.scatter( data_frame=df,
def get_plot(self, parameters):
    """Cluster countries by their average tone across (Type1, Type2) actor
    type pairs and draw the assignments on a world choropleth; returns the
    figure embedded as an HTML <div> string.

    ``parameters`` supplies: 'range', 'n_clusters', and 'method'
    ('agglomerative' | 'britch' | 'kmeans' | 'affinity_prop').
    """
    qe = QueryExecutor()
    query = self.query.format(
        start=parameters['range'].value[0],
        end=parameters['range'].value[1],
    )
    df = qe.get_result_dataframe(query)
    # Keep only countries with sufficient event volume.
    countries_to_leave = Utils().get_valid_fips_countries(25000)
    df = df[(df['ActorGeo'].isin(countries_to_leave))]
    # Reindex onto the full country x Type1 x Type2 grid so every country
    # has the same number of rows; missing combinations become 0.
    multi_index = pd.MultiIndex.from_product(
        [
            df['ActorGeo'].unique(), df['Type1'].unique(),
            df['Type2'].unique()
        ],
        names=['ActorGeo', 'Type1', 'Type2'])
    df.index = pd.MultiIndex.from_arrays(
        [df['ActorGeo'], df['Type1'], df['Type2']],
        names=['ActorGeo', 'Type1', 'Type2'])
    df.drop(['ActorGeo', 'Type1', 'Type2'], axis=1, inplace=True)
    df = df.reindex(multi_index).reset_index()
    df.fillna(0., inplace=True)
    df.sort_values(['ActorGeo', 'Type1', 'Type2'], inplace=True)
    df.index = np.arange(df.shape[0])
    # NOTE(review): `&` binds tighter than `!=` in Python, so this parses as
    # ((df.Type1 != 0.0) & df.Type2) != 0.0 — the second comparison likely
    # needs its own parentheses: (df.Type1 != 0.0) & (df.Type2 != 0.0).
    df = df[(df.Type1 != 0.0) & df.Type2 != 0.0]
    # One row of tones per country, one column per (Type1, Type2) pair.
    types_no = np.unique(df.groupby('ActorGeo')['AvgTone'].count())[0]
    data = df['AvgTone'].values.reshape((-1, types_no))
    labels = df['ActorGeo'].unique()
    # Standardise each country's tone profile (zero mean, unit std per row).
    norm_data = (data - np.mean(data, axis=1)[:, np.newaxis]) / np.std(
        data, axis=1)[:, np.newaxis]
    n_clusters = parameters['n_clusters'].value
    if parameters['method'].value == 'agglomerative':
        model = AgglomerativeClustering(
            n_clusters=n_clusters,
            affinity='precomputed',
            linkage='complete',
            compute_full_tree=True,
        )
    elif parameters['method'].value == 'britch':
        model = Birch(branching_factor=5, n_clusters=n_clusters)
    elif parameters['method'].value == 'kmeans':
        model = KMeans(n_clusters=n_clusters)
    elif parameters['method'].value == 'affinity_prop':
        model = AffinityPropagation()
    # NOTE(review): `model` is unbound for any other method value.
    clusters = model.fit_predict(norm_data)
    cluster_df = pd.DataFrame({
        'country_id': labels,
        'cluster_id': clusters
    })
    # Right-join so countries without data still appear (cluster_id -1).
    cluster_df = cluster_df.join(
        Utils().get_fips_iso_mapping(),
        on=['country_id'],
        how='right',
    ).fillna(-1)
    cluster_df.rename({'ISO': 'country_iso'}, axis=1, inplace=True)
    cluster_df['country_name'] = cluster_df['country_id'].map(
        Utils().get_fips_country_id_to_name_mapping())
    # Uncomment to write result to csv
    # cluster_df.groupby('cluster_id')['country_name'].apply(lambda countries: '; '.join(countries)).to_csv('clustering_result.csv')
    fig = px.choropleth(
        cluster_df,
        locations='country_iso',
        locationmode='ISO-3',
        color='cluster_id',
        hover_name='country_name',
        hover_data=['cluster_id'],
        labels={
            'country_name': 'Country Name',
            'cluster_id': 'Cluster ID'
        },
        color_continuous_scale=px.colors.qualitative.Alphabet,
    )
    return plot(fig, include_plotlyjs=True, output_type='div')
def create_real_map(mask=False):
    """Build a USA state map from the global ``df_map`` frame.

    mask=False: continuous red choropleth of daily cases per 1M.
    mask=True (any non-False value): discrete map of mask-mandate status.
    Returns the styled figure.
    """
    if mask is False:
        # Continuous case-count map via graph_objects.
        fig = go.Figure(data=go.Choropleth(
            locations=df_map['abbr'],
            z=df_map['case'],
            text=df_map['text'],  # hover info
            locationmode='USA-states',
            colorscale='Reds',
            autocolorscale=False,
            marker_line_color='white',  # line markers between states
            # colorbar_title="Daily Cases per 1M",
            colorbar=dict(title='Daily Cases per 1M', thickness=10, len=0.5),
            hovertemplate='%{text}' + '<extra></extra>',
        ))
    else:
        # Discrete mask-status map: fixed colour per status category.
        color_list = [px.colors.qualitative.Vivid[i]
                      for i in [3, 2, 10]]  # [3, 2, 10, 6]
        mask_status_list = ['Mandatory', 'Sometimes Required', 'Not Required']
        color_dict = {p: c for c, p in zip(color_list, mask_status_list)}
        fig = px.choropleth(
            data_frame=df_map,
            locations=df_map['abbr'],
            color=df_map['mask'],
            scope="usa",
            locationmode="USA-states",
            color_discrete_map=color_dict,
            # hover_data=['text_phase'],
            hover_data={
                'abbr': False,
                'state': True,
                'mask': True,
            },
            labels={'mask': 'Masks'})
        # NOTE(review): placed inside the mask branch based on reading of the
        # collapsed source — confirm it should not also apply to the case map.
        fig.update_traces(marker_line_width=0.2, marker_opacity=0.9)
    # Shared layout for both variants.
    fig.update_layout(
        title={
            'text': '',  # f'Latest Daily Information({current_date})'
            # "yref": "paper",
            'y': 0.98,
            'x': 0.01,
        },
        geo=dict(
            scope='usa',
            projection=go.layout.geo.Projection(type='albers usa'),
            showlakes=True,  # lakes
            lakecolor='rgb(255, 255, 255)'),
        # paper_bgcolor='#F7FBFE',  # canvas color
        # plot_bgcolor='#F7FBFE',  # plot color #D8D991 #F6EEDF #FFF8DE
        # hoverlabel={'namelength': -1},
        # autosize=False,
        height=700,
        # autosize = True,
        margin={
            # 'autoexpand': True,
            'l': 0,
            'r': 0,
            't': 10,  # 20
            'b': 10,
        },
    )
    return fig
#definition of the diferent graphs that will be plot in the dashboard page fig1 = px.histogram( dg, x="female_percent", color="media_type", ) fig2 = px.histogram( dg, x="male_percent", color="media_type", ) fig3 = px.bar(dg, x="channel_name", y=["female_percent", "male_percent", "music_percent"]) fig4 = px.choropleth(df_zone, geojson=sf) fig4.update_geos(fitbounds="locations", visible=False) #creation of the different layout of the page with sliders,dropdowns and graphs app.layout = html.Div( style={'textAlign': 'center'}, children=[ navbar, html.Label('Année'), dcc.Slider( id='year-slider', min=2001, max=2018, step=1, value=Year, marks={str(year): str(year)
# Animated choropleth of Canadian provincial COVID-19 case counts, bucketed
# into discrete categories.

# Red shades, light to dark, one per case-count bucket; the dict's insertion
# order doubles as the legend/category order.
case_palette = {
    '0': '#fffcfc',
    '1 - 1,000': '#ffdbdb',
    '1,001 - 5,000': '#ffbaba',
    '5,001 - 10,000': '#ff9e9e',
    '10,001 - 30,000': '#ff7373',
    '30,001 - 50,000': '#ff4d4d',
    '50,001 and higher': '#ff0d0d',
}
bucket_order = list(case_palette)

fig = px.choropleth(
    df,
    locations="cartodb_id",
    geojson=mp,
    featureidkey="properties.cartodb_id",
    color="category",
    color_discrete_map=case_palette,
    category_orders={'category': bucket_order},
    animation_frame="timeframe",
    scope='north america',
    title='<b>COVID-19 cases in Canadian provinces</b>',
    labels={
        'cases': 'Number of Cases',
        'category': 'Category'
    },
    hover_name='province',
    hover_data={
        'cases': True,
        'cartodb_id': False
    },
    locationmode='geojson-id',
)
df2.groupby(['#date'])['Infected person(per day)'].sum()) df2['Infected person(per day)'] = df2['Infected person(per day)'].diff() df2.dropna(subset=['Infected person(per day)'], inplace=True) df2["Date"] = df2.index.map(f_strftime) df2["Country"] = country_list[i] df2["iso_alpha"] = dict_country[country_list[i]] df2.reset_index(inplace=True, drop=True) dataframe_list.append(df2) # すべてのデータフレームを縦に繋ぐ df_all = pd.concat(dataframe_list, axis=0) # 感染者数を対数変換する df_all["log(Infected person(per day))"]=\ np.log10(df_all["Infected person(per day)"]+1.0e-10) fig = px.choropleth(df_all, locations="iso_alpha", color="log(Infected person(per day))",\ hover_name="Country", animation_frame="Date", range_color=[0, 4], width=1000, height=800) app = dash.Dash( __name__, external_stylesheets=["https://codepen.io/chriddyp/pen/bWLwgP.css"]) app.layout = html.Div([ html.H1("COVID-19:Visualization of the number of cases by Plotly Express"), dcc.Graph(figure=fig) ]) if __name__ == '__main__': app.run_server(debug=True)
# In[214]: # In[215]: import matplotlib.pyplot as plt from matplotlib import rcParams from matplotlib.cm import rainbow import plotly_express as px # In[217]: fig = px.choropleth(dh_df2, locations='Code', color='Cardiovascular diseases', scope='world', color_continuous_scale=px.colors.sequential.GnBu, range_color=(10, 50), title='Cardiovascular diseases', height=700, animation_frame='Year') fig.show() sns.boxplot(dh_df2['Cardiovascular diseases']) # **Above we introduce new libraries and functions and map the heart diesease chart according to year and color rate** # **Below we used same functions but on Mental disorders** # In[221]: fig = px.choropleth(dh_df2, locations='Code',