import dash_core_components as dcc import dash_html_components as html from urllib.request import urlopen import json df = pd.read_csv('language_data.csv') # Geojson (the country borders for the choropleth map) with urlopen( 'https://raw.githubusercontent.com/datasets/geo-countries/master/data/countries.geojson' ) as response: geojson_countries = json.load(response) app = dash.Dash(__name__) server = app.server px.set_mapbox_access_token(open(".mapbox_token.txt").read()) df['Value'] = 'Not Spoken' languages_list = [{ 'label': 'English', 'value': 'English' }, { 'label': 'French', 'value': 'French' }, { 'label': 'Russian', 'value': 'Russian' }, { 'label': 'Albanian', 'value': 'Albanian' }, {
import plotly.graph_objs as go import json import scipy.optimize as optim from fbprophet import Prophet from app import app def func_logistic(t, a, b, c): return c / (1 + a * np.exp(-b * t)) # load data and geojson df = pd.read_csv('apps/data/covid_19_india.csv') px.set_mapbox_access_token( "pk.eyJ1IjoiNDRoaW1hbnNodTQ0NCIsImEiOiJjazh5azlnNGgwMnJwM2xxcDJyNzFhMG05In0.srzRed7pv2bC2PSGEuRaNg" ) path = "apps/states2.json" f = open(path, "r") state_geo = json.loads(f.read()) states_list = [{'label': i, 'value': i} for i in df['states'].unique()] duration_list = [{ 'label': 'Tomorrow', 'value': 1 }, { 'label': 'Next week', 'value': 7 }, { 'label': 'Next month',
"#10523e", ] ADOPTION_RATES = [ 15, 25, 50 ] #change this based on LP output scenarios that will be displayed DEFAULT_OPACITY = 0.5 VALID_USERNAME_PASSWORD_PAIRS = get_logins( os.path.join(APP_PATH, os.path.join(".secret", "login_credentials.json"))) #set CSS style sheets styles = {'pre': {'border': 'thin lightgrey solid', 'overflowX': 'scroll'}} #mapbox variables mapbox_style = "mapbox://styles/butlerbt/ckhma6w7n12ic19pghqpyanfq" mapbox_access_token = "pk.eyJ1IjoiYnV0bGVyYnQiLCJhIjoiY2s2aDJqNzl2MDBqdDNqbWlzdWFqYjZnOCJ9.L4RJNdK2aqr6kHcHZxksXw" px.set_mapbox_access_token(mapbox_access_token) #login for use in development - to be removed when launched live auth = dash_auth.BasicAuth(app, VALID_USERNAME_PASSWORD_PAIRS) #html layout of app app.layout = html.Div(children=[ html.Div( id="root", children=[ html.Div( id="header", children=[ html.Img(id="logo", src=app.get_asset_url("rmi-logo.svg")), html.H4(children="Los Angeles EV TNC Modeling"), ],
import plotly.graph_objs as go
import plotly.express as px
import config, pipeline as pp

#----------------------------------------------------------------------------------
# S3 client built from environment credentials (AWS_ID / AWS_KEY must be set).
client = boto3.client('s3',
                      aws_access_key_id=os.environ['AWS_ID'],
                      aws_secret_access_key=os.environ['AWS_KEY'])

# Trained model stored as a pickle object in the processing bucket.
# NOTE(review): pickle.loads on downloaded bytes is only safe if the bucket
# contents are fully trusted.
model_obj = client.get_object(Bucket="nyc-taxi-trip-processing-data",
                              Key="data/model.pickle")
model = pickle.loads(model_obj['Body'].read())

# Sample of taxi trips used for the map visualisation (CSV decoded from S3).
df_map_obj = client.get_object(Bucket="nyc-taxi-trip-processing-data",
                               Key="data/taxi_trip_samples_2.csv")
df_map = pd.read_csv(StringIO(df_map_obj['Body'].read().decode('utf-8')))

# Mapbox token comes from the environment; build the animated scatter map
# (one frame per pickup hour) once, at import time.
px.set_mapbox_access_token(os.environ["MAPBOX"])
fig = px.scatter_mapbox(df_map,
                        lat="latitude",
                        lon="longitude",
                        color="type",
                        opacity=0.7,
                        size="dist_mile",
                        size_max=12,
                        zoom=10,
                        color_continuous_scale=px.colors.cyclical.IceFire,
                        animation_frame="pickup_hour")
fig.update_layout(width=900, height=500, font_family="Verdana")

# JSON-serialised figure handed to the front-end template.
graphJSON = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)

#----------------------------------------------------------------------------------
# Flask instance
app = Flask(__name__)
def updatemap(input_data, day):
    """Build a crime scatter map for the selected categories and weekdays.

    Parameters
    ----------
    input_data : list[str]
        Crime categories selected by the user.
    day : list[str]
        Days of the week to include (matched against ``dff['DayOfWeek']``).

    Returns
    -------
    plotly.graph_objs.Figure
        Scatter-mapbox figure where each (X, Y) location is colored by the
        id of the most frequent selected crime there and sized by its count.

    Notes
    -----
    Reads the module-level dataframe ``dff`` (the full crime-record table,
    defined elsewhere in this file).
    """
    px.set_mapbox_access_token(
        "pk.eyJ1Ijoic21taXNocmEiLCJhIjoiY2s1eGF0aGNtMWJvczNxbXg2dDJiaG1kaiJ9.UOxmGAsTrKH_m4wCZTgrNg"
    )

    # Keep only the rows matching the selected days and categories.
    filtered_df = dff[[
        'Category', 'PdDistrict', 'Resolution', 'Time', 'X', 'Y'
    ]][(dff['DayOfWeek'].isin(day) & dff['Category'].isin(input_data))]

    # Assign each present category a stable integer id (used as color value).
    crime_dict = {
        crime: i
        for i, crime in enumerate(filtered_df['Category'].unique())
    }

    # For every (X, Y) location find the selected category with the highest
    # record count there: colorDict maps location -> best count, and
    # crimeDict maps location -> best category name.
    colorDict = {}
    crimeDict = {}
    for index in filtered_df.index:
        x = float(filtered_df['X'][index])
        y = float(filtered_df['Y'][index])
        crime = input_data[0]
        for i in input_data:
            # Count records of category i at exactly this coordinate.
            tempCount = filtered_df[(filtered_df.X == x)
                                    & (filtered_df.Y == y)
                                    & (filtered_df.Category == i)].shape[0]
            if colorDict.get((x, y), 0) > tempCount:
                continue
            # Ties resolve to the later category, matching the original scan.
            colorDict[(x, y)] = tempCount
            crime = i
        crimeDict[(x, y)] = crime

    # Flatten the per-location results into parallel columns.
    x_list = [xy[0] for xy in colorDict]
    y_list = [xy[1] for xy in colorDict]
    num_crime_list = [colorDict[xy] for xy in colorDict]
    crime_list = [crime_dict[crimeDict[xy]] for xy in crimeDict]

    # New frame: one row per distinct location.
    new_df = pd.DataFrame(data={
        'X': x_list,
        'Y': y_list,
        'Crime': crime_list,
        'NumOfCrime': num_crime_list
    })

    fig = px.scatter_mapbox(new_df,
                            lat="Y",
                            lon="X",
                            color="Crime",
                            size="NumOfCrime",
                            color_continuous_scale=px.colors.cyclical.IceFire,
                            size_max=15,
                            zoom=10)
    return fig
'r': 2, 't': 5, 'b': 5 }) #### Animated Map #### #get mapbox API key for plotting path = '/Users/mitchellkrieger/.secret/mapbox_api.json' with open(path) as f: api = json.load(f) api_key = api['api_token'] px.set_mapbox_access_token(api_key) #plot animap = px.scatter_mapbox(animation_data, lat="_lat", lon="_long", animation_frame='dt', animation_group='id', color="percent_full", size="avail_bikes", color_continuous_scale=px.colors.cyclical.IceFire, size_max=15, zoom=10, width=600, height=650)
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
import plotly.express as px

from emp.app import app
from emp.data.datasets import earthquakes
import emp.regressor as regressor

logger = logging.getLogger(__name__)

# Defaults are the center of the contiguous US
default_latitude = 39.8283
default_longitude = -98.5795

# Token is read from the environment (None is passed through when unset).
px.set_mapbox_access_token(os.getenv('MAPBOX_ACCESS_TOKEN'))

# Base earthquake scatter map, built once at import time.
# NOTE(review): opacity is given a per-row Series here; plotly express
# documents `opacity` as a single float -- confirm this renders as intended
# on the installed plotly version.
scattermap = px.scatter_mapbox(
    center={
        'lat': default_latitude,
        'lon': default_longitude
    },
    color=earthquakes.magnitude,
    data_frame=earthquakes,
    height=500,
    lat=earthquakes.latitude,
    lon=earthquakes.longitude,
    # Setting the size & opacity like this allows the larger earthquakes to
    # stand out on the map (magnitude**5 exaggerates the size differences).
    opacity=earthquakes.magnitude / 10,
    size=earthquakes.magnitude ** 5,
    size_max=25,
    zoom=3)
# Shrink the margins (defaults 80) on the top and bottom to 5px
import gc import requests import pandas as pd import flask import plotly.express as px from ncov19_dash.utils import STATES_COORD from ncov19_dash import config px.set_mapbox_access_token(config.MAPBOX_ACCESS_TOKEN) # TODO: Make Drive-thru testing center API def get_drive_thru_testing_centers(): try: drive_thru_df = pd.read_csv(config.DRIVE_THRU_URL) drive_thru_df["Street Address"] = \ drive_thru_df["Street Address"].fillna("") except ValueError as ex: print(f'[ERROR] get_drive_thru_testing_center error, {ex}') drive_thru_df = pd.DataFrame() return drive_thru_df ################################################################################ def confirmed_scatter_mapbox(state="United States"): """Displays choroplepth map for the data. For the whole US, the map is divided by state. :return card: A dash boostrap component Card object with a dash component
# coding: utf-8 # In[1]: import plotly.express as px import csv from datetime import datetime import pandas as pd px.set_mapbox_access_token(open(".\\mapbox_token").read()) # In[7]: data_ini = -30 data_fim = 0 data_aux = '1900-01-01 00:00:00' strings = [] with open('C:\\Users\\rique\\Desktop\\fires2.csv') as csvfile: readCSV = csv.reader(csvfile, delimiter=',') group = 0 for row in readCSV: if (row[4] == 'True'): row[0] = (float(row[0])**(1 / 3)) row[1] = float(row[1]) row[2] = float(row[2]) group += 1 str_date = row[5] data_aux = datetime.strptime(str_date, '%Y-%m-%d').date() row.append(0) row.append(group) strings.append(row)
def time_series_map(self, start_date, end_date, kw_list, geo):
    """Animate month-by-month city-level search interest on a mapbox map.

    Builds one interest-by-city frame per consecutive month pair between
    ``start_date`` and ``end_date``, enriches each city with a population
    estimate from the OpenDataSoft ``worldcitiespop`` dataset, and shows an
    animated ``scatter_mapbox`` figure (one animation frame per month).

    Parameters: ``start_date``/``end_date`` (parsable by ``pd.date_range``),
    ``kw_list`` (search keywords; first keyword is used in error messages),
    ``geo`` (market code, e.g. 'US' or 'GB-ENG').
    Raises ``ValueError`` when no interest data is returned at all.
    Displays the figure via ``fig.show()``; returns None.
    """
    # NOTE(review): a __name__ guard inside a method only fires when the
    # module runs as a script, and the created instance is never used --
    # looks like leftover scaffolding; confirm before removing.
    if __name__ == "__main__":
        pytrend = Time_Series()
    ## Initial dates
    date1 = start_date  # input start date
    date2 = end_date  # input end date
    # First day of every month in [date1, date2].
    month_list = [
        i.strftime("%Y-%m-%d")
        for i in pd.date_range(start=date1, end=date2, freq='MS')
    ]
    ## Pair consecutive months into "start end" timeframe strings.
    month_pair = list()
    for i, j in zip(month_list, month_list[1:]):
        month_pair.append(i + ' ' + j)
    ## Create and populate df: one interest-by-city frame per month pair.
    # NOTE(review): DataFrame.append was removed in pandas 2.0 -- this
    # requires pandas < 2.
    in_df = pd.DataFrame(columns=['geoName']).set_index('geoName')
    for i in month_pair:
        self.build_payload(kw_list=kw_list, geo=geo, timeframe=i)
        df = self.interest_by_region(resolution='CITY',
                                     inc_low_vol=True,
                                     inc_geo_code=True)
        df['time'] = i
        in_df = in_df.append(df)
    if len(in_df) == 0:
        raise ValueError(
            f'not enough searches for keyword "{kw_list[0]}" in the {geo} market'
        )
    ## Append lat & lng to lists
    # 'coordinates' rows are dict-like with 'lat'/'lng' entries.
    lat = list()
    lng = list()
    for row in range(len(in_df['coordinates'])):
        lat.append(in_df['coordinates'].iloc[row]['lat'])
        lng.append(in_df['coordinates'].iloc[row]['lng'])
    ## Create region df from lists
    city_df = pd.DataFrame([lat, lng]).T
    city_df.columns = ['lat', 'long']
    ## DF transformations
    in_df.reset_index(inplace=True)
    try:
        city_df['name'] = in_df['geoName']
    except:
        # reset_index may have produced an 'index' column instead of
        # 'geoName'; recover the city names from it.
        in_df['geoName'] = in_df['index']
        city_df['name'] = in_df['geoName']
    # Columns 2 and 3 of in_df hold the keyword interest values.
    city_df[in_df.columns[2]] = in_df[in_df.columns[2]]
    city_df[in_df.columns[3]] = in_df[in_df.columns[3]]
    city_df['time'] = in_df['time']
    city_df[['start', 'finish']] = city_df['time'].str.split(' ', expand=True)

    ## Fetch OpenDataSoft API
    def get_city_opendata(city, country):
        # Query the public 'worldcitiespop' dataset for the most populous
        # record matching (city, country); raises on empty result sets.
        tmp = 'https://public.opendatasoft.com/api/records/1.0/search/?' \
            'dataset=worldcitiespop&q=%s&sort=population&facet=country&refine.country=%s'
        cmd = tmp % (city, country)
        res = requests.get(cmd)
        dct = json.loads(res.content)
        out = dct['records'][0]['fields']
        return out

    ## Calculate city pop: map the Google Trends geo code onto the
    ## OpenDataSoft country code.
    if in_df['country'].iloc[0] == 'US':
        country_opendata = 'us'
    elif in_df['country'].iloc[0] == 'GB-ENG':
        country_opendata = 'gb'
    else:
        country_opendata = in_df['country'].iloc[0].lower()
    city_dict = dict()
    for city in set(city_df['name']):
        try:
            city_dict[city] = get_city_opendata(
                city, country_opendata)['population']
        except:
            # Fall back to a nominal population when the lookup fails.
            city_dict[city] = 5000
    ## Map to city_df
    city_df['pop.'] = city_df['name'].map(city_dict)
    ## Preview DF: blank out zero interest and move 'pop.' to column 3.
    city_df.replace(0, np.nan, inplace=True)
    pop_col = city_df.pop('pop.')
    city_df.insert(3, 'pop.', pop_col)
    ## Center of Map based on country
    # NOTE(review): map_center/zoom are only assigned for 'US' and 'GB-ENG';
    # any other market raises NameError below -- confirm intended scope.
    if in_df['country'].iloc[0] == 'US':
        map_center = {'lat': 39.00, 'lon': -98.00}
        zoom = 3
    elif in_df['country'].iloc[0] == 'GB-ENG':
        map_center = {'lat': 53.0, 'lon': -1.1743}
        zoom = 5.6
    ## Plot average interest: bubbles sized by population, colored by the
    ## interest column, animated over the month-pair start dates.
    import plotly.express as px
    px.set_mapbox_access_token(
        'pk.eyJ1IjoiY2hlZXNldWdseSIsImEiOiJja2JqZmR5YXIwb2hoMzBycDBiNHN1MWZrIn0.RNzuuAyOAtuPcwYIZRskEQ'
    )
    fig = px.scatter_mapbox(city_df,
                            lat="lat",
                            lon="long",
                            color=city_df[city_df.columns[5]],
                            size=city_df['pop.'],
                            color_continuous_scale=px.colors.sequential.Blues,
                            size_max=40,
                            zoom=zoom,
                            hover_name=city_df['name'],
                            animation_frame="start",
                            animation_group='name',
                            range_color=(0, 100),
                            center=map_center)
    # Slow the play animation down and pad the slider/buttons off the map.
    fig.layout.updatemenus[0].buttons[0].args[1]["frame"]["duration"] = 700
    fig.layout.updatemenus[0].buttons[0].args[1]["transition"][
        "duration"] = 700
    fig.layout.sliders[0].pad.t = 10
    fig.layout.updatemenus[0].pad.t = 10
    fig.update_layout(template="plotly_white",
                      mapbox_style="dark",
                      hovermode='closest',
                      title=f"Average Interest per {geo} City",
                      width=950,
                      height=900,
                      showlegend=False)
    fig.show()
- [email protected] """ #%% cd "~/Works/Python/graphics/Plotly" #%% import numpy as np import pandas as pd pd.set_option('display.max_columns', 150) pd.set_option('display.max_rows', 150) #%% import plotly.express as px px.set_mapbox_access_token('pk.eyJ1IjoibWFybGVuYWR1ZGEiLCJhIjoiY2tjYW1kY2g0MXU5ZjJzcXA0MWN4dWIwYSJ9.ZSaNpYw_pWxdaY9gMNNTpQ') import plotly.io as pio pio.renderers #%% us_cities = pd.read_csv("https://raw.githubusercontent.com/plotly/datasets/master/us-cities-top-1k.csv") us_cities = us_cities.query("State in ['New York', 'Ohio']") us_cities.head() #%% using Plotly Express fig = px.line_mapbox(us_cities, lat="lat", lon="lon", color="State", zoom=3, height=500) fig.update_layout(mapbox_style="stamen-terrain", mapbox_zoom=4, mapbox_center_lat = 41, margin={"r":0,"t":0,"l":0,"b":0})
def interest_by_city(self, data, geo_location):
    """Plot per-city search interest for up to five keywords on a mapbox map.

    Copies ``data`` (an interest-by-city frame with a 'coordinates' column of
    lat/lng dicts), extracts coordinates and up to six keyword columns,
    enriches each city with a population estimate from the OpenDataSoft
    ``worldcitiespop`` dataset, then shows a ``scatter_mapbox`` figure with
    one trace per keyword and toggle buttons to switch between them.

    Parameters: ``data`` (pd.DataFrame as above), ``geo_location`` (market
    label used in the figure title). Shows the figure; returns None.
    """
    temp_df = data.copy()
    ## Append lat & lng to lists
    lat = list()
    lng = list()
    for row in range(len(temp_df['coordinates'])):
        lat.append(temp_df['coordinates'].iloc[row]['lat'])
        lng.append(temp_df['coordinates'].iloc[row]['lng'])
    ## Create region df from lists
    city_df = pd.DataFrame([lat, lng]).T
    city_df.columns = ['lat', 'long']
    ## Transform original DF
    temp_df.reset_index(inplace=True)
    city_df['name'] = temp_df['geoName']
    # Copy the first keyword column, then up to five more when present;
    # missing columns are simply skipped (hence the bare try/excepts).
    city_df[temp_df.columns[2]] = temp_df[temp_df.columns[2]]
    try:
        city_df[temp_df.columns[3]] = temp_df[temp_df.columns[3]]
    except:
        pass
    try:
        city_df[temp_df.columns[4]] = temp_df[temp_df.columns[4]]
    except:
        pass
    try:
        city_df[temp_df.columns[5]] = temp_df[temp_df.columns[5]]
    except:
        pass
    try:
        city_df[temp_df.columns[6]] = temp_df[temp_df.columns[6]]
    except:
        pass
    try:
        city_df[temp_df.columns[7]] = temp_df[temp_df.columns[7]]
    except:
        pass
    ## Fetch OpenDataSoft API
    import requests
    import json

    def get_city_opendata(city, country):
        # Most populous 'worldcitiespop' record matching (city, country);
        # raises on empty result sets (caught by the caller below).
        tmp = 'https://public.opendatasoft.com/api/records/1.0/search/?' \
            'dataset=worldcitiespop&q=%s&sort=population&facet=country&refine.country=%s'
        cmd = tmp % (city, country)
        res = requests.get(cmd)
        dct = json.loads(res.content)
        out = dct['records'][0]['fields']
        return out

    ## Calculate city pop: map the Trends geo code to OpenDataSoft's code.
    if temp_df['country'].iloc[0] == 'US':
        country_opendata = 'us'
    elif temp_df['country'].iloc[0] == 'GB-ENG':
        country_opendata = 'gb'
    else:
        country_opendata = temp_df['country'].iloc[0].lower()
    city_dict = dict()
    for city in set(city_df['name']):
        try:
            city_dict[city] = get_city_opendata(
                city, country_opendata)['population']
        except:
            # Nominal fallback population when the lookup fails.
            city_dict[city] = 5000
    ## Map to city_df
    city_df['pop.'] = city_df['name'].map(city_dict)
    ## Preview DF: blank out zero interest and move 'pop.' to column 3.
    city_df.replace(0, np.nan, inplace=True)
    pop_col = city_df.pop('pop.')
    city_df.insert(3, 'pop.', pop_col)
    ## Center of Map based on country
    # NOTE(review): map_center/zoom are only assigned for 'US' and 'GB-ENG';
    # other markets raise NameError at the px.scatter_mapbox call -- confirm.
    if temp_df['country'].iloc[0] == 'US':
        map_center = {'lat': 39.00, 'lon': -98.00}
        zoom = 3
    elif temp_df['country'].iloc[0] == 'GB-ENG':
        map_center = {'lat': 53.0, 'lon': -1.1743}
        zoom = 5.6
    ## Plot average interest: base trace colored by the first keyword column.
    import plotly.express as px
    px.set_mapbox_access_token(
        'pk.eyJ1IjoiY2hlZXNldWdseSIsImEiOiJja2JqZmR5YXIwb2hoMzBycDBiNHN1MWZrIn0.RNzuuAyOAtuPcwYIZRskEQ'
    )
    fig = px.scatter_mapbox(city_df,
                            lat="lat",
                            lon="long",
                            color=city_df.columns[5],
                            size='pop.',
                            color_continuous_scale=px.colors.sequential.Reds,
                            size_max=40,
                            zoom=zoom,
                            hover_name=city_df['name'],
                            range_color=(0, 100),
                            center=map_center)
    # Overlay one extra trace per additional keyword column when present;
    # a missing column makes the scatter_mapbox call raise, which skips it.
    try:
        fig2 = px.scatter_mapbox(city_df,
                                 lat="lat",
                                 lon="long",
                                 color=city_df.columns[6],
                                 size='pop.',
                                 color_continuous_scale=px.colors.sequential.Greens,
                                 size_max=40,
                                 zoom=3,
                                 hover_name=city_df['name'],
                                 range_color=(0, 100))
        fig.add_trace(fig2.data[0])
    except:
        pass
    try:
        fig3 = px.scatter_mapbox(city_df,
                                 lat="lat",
                                 lon="long",
                                 color=city_df.columns[7],
                                 size='pop.',
                                 color_continuous_scale=px.colors.sequential.Greens,
                                 size_max=40,
                                 zoom=3,
                                 hover_name=city_df['name'],
                                 range_color=(0, 100))
        fig.add_trace(fig3.data[0])
    except:
        pass
    try:
        fig4 = px.scatter_mapbox(city_df,
                                 lat="lat",
                                 lon="long",
                                 color=city_df.columns[8],
                                 size='pop.',
                                 color_continuous_scale=px.colors.sequential.Greens,
                                 size_max=40,
                                 zoom=3,
                                 hover_name=city_df['name'],
                                 range_color=(0, 100))
        fig.add_trace(fig4.data[0])
    except:
        pass
    try:
        fig5 = px.scatter_mapbox(city_df,
                                 lat="lat",
                                 lon="long",
                                 color=city_df.columns[9],
                                 size='pop.',
                                 color_continuous_scale=px.colors.sequential.Greens,
                                 size_max=40,
                                 zoom=3,
                                 hover_name=city_df['name'],
                                 range_color=(0, 100))
        fig.add_trace(fig5.data[0])
    except:
        pass
    ## Got it but wrong color
    fig.update_layout(template="plotly_white",
                      mapbox_style="dark",
                      hovermode='closest',
                      title=f"Average Interest per {geo_location} City",
                      width=950,
                      height=900,
                      showlegend=False,
                      coloraxis_colorbar=dict(title=""))
    # Add visibility-toggle buttons, one per keyword trace; the number of
    # keyword columns (city_df.columns[4:-1]) selects the button layout.
    if len(city_df.columns[4:-1]) == 1:
        fig.show()
    elif len(city_df.columns[4:-1]) == 2:
        fig.update_layout(updatemenus=[
            dict(
                type="buttons",
                direction="right",
                active=0,
                x=0.60,
                y=1.05,
                buttons=list([
                    dict(label=city_df.columns[5],
                         method='update',
                         args=[{
                             'visible': [True, False]
                         }]),
                    dict(label=city_df.columns[6],
                         method='update',
                         args=[{
                             'visible': [False, True]
                         }]),
                ]),
                # fonts and border
                bgcolor='white',
                bordercolor='lightgrey',
                font=dict(size=10))
        ])
        fig.show()
    elif len(city_df.columns[4:-1]) == 3:
        fig.update_layout(updatemenus=[
            dict(
                type="buttons",
                direction="right",
                active=0,
                x=0.60,
                y=1.05,
                buttons=list([
                    dict(label=city_df.columns[5],
                         method='update',
                         args=[{
                             'visible': [True, False, False]
                         }]),
                    dict(label=city_df.columns[6],
                         method='update',
                         args=[{
                             'visible': [False, True, False]
                         }]),
                    dict(label=city_df.columns[7],
                         method='update',
                         args=[{
                             'visible': [False, False, True]
                         }]),
                ]),
                # fonts and border
                bgcolor='white',
                bordercolor='lightgrey',
                font=dict(size=10))
        ])
        fig.show()
    elif len(city_df.columns[4:-1]) == 4:
        fig.update_layout(updatemenus=[
            dict(
                type="buttons",
                direction="right",
                active=0,
                x=0.60,
                y=1.05,
                buttons=list([
                    # NOTE(review): this first button uses method='restyle'
                    # while every other button uses 'update' -- with only
                    # 'visible' in args the effect is the same, but confirm
                    # the inconsistency is intentional.
                    dict(label=city_df.columns[5],
                         method='restyle',
                         args=[{
                             'visible': [True, False, False, False]
                         }]),
                    dict(label=city_df.columns[6],
                         method='update',
                         args=[{
                             'visible': [False, True, False, False]
                         }]),
                    dict(label=city_df.columns[7],
                         method='update',
                         args=[{
                             'visible': [False, False, True, False]
                         }]),
                    dict(label=city_df.columns[8],
                         method='update',
                         args=[{
                             'visible': [False, False, False, True]
                         }]),
                ]),
                # fonts and border
                bgcolor='white',
                bordercolor='lightgrey',
                font=dict(size=10))
        ])
        fig.show()
    elif len(city_df.columns[4:-1]) == 5:
        fig.update_layout(updatemenus=[
            dict(
                type="buttons",
                direction="right",
                active=0,
                x=1.00,
                y=1.05,
                buttons=list([
                    dict(label=city_df.columns[5],
                         method='update',
                         args=[{
                             'visible': [True, False, False, False, False]
                         }]),
                    dict(label=city_df.columns[6],
                         method='update',
                         args=[{
                             'visible': [False, True, False, False, False]
                         }]),
                    dict(label=city_df.columns[7],
                         method='update',
                         args=[{
                             'visible': [False, False, True, False, False]
                         }]),
                    dict(label=city_df.columns[8],
                         method='update',
                         args=[{
                             'visible': [False, False, False, True, False]
                         }]),
                    dict(label=city_df.columns[9],
                         method='update',
                         args=[{
                             'visible': [False, False, False, False, True]
                         }]),
                ]),
                # fonts and border
                bgcolor='white',
                bordercolor='lightgrey',
                font=dict(size=10))
        ])
        fig.show()
import dash import dash_core_components as dcc import dash_html_components as html import dash_table import pandas as pd import plotly.express as px from dash.dependencies import Input, Output df = pd.read_csv("data/kitakyushu_hinanjo.csv", encoding="shift-jis") # mapboxのアクセストークンを読み込む px.set_mapbox_access_token("< your-token >") app = dash.Dash(__name__) app.layout = html.Div([ html.Div( [ html.H1("北九州避難所マップ", style={"textAlign": "center"}), # ➊ テーブルの作成 dash_table.DataTable( id="kitakyushu-datatable", style_cell={ "textAlign": "center", "maxWidth": "80px", "whiteSpace": "normal", "minWidth": "80px", }, fixed_rows={ "headers": True, "data": 0 },
excesos_velocidad = excesos_velocidad0 #excesos_velocidad = alarmas_anomalas[alarmas_anomalas["EventName"] == 'Excesos de velocidad'] excesos_velocidad["month"].unique() coordenadas_velocidad = excesos_velocidad.groupby( ["longitude", "latitude"])["idCompany"].count().sort_values(ascending=False) coordenadas_velocidad = coordenadas_velocidad.reset_index().rename( {"idCompany": "count"}, axis=1) ################################################### # Generating the map figure ################################################### px.set_mapbox_access_token( "pk.eyJ1IjoiZHM0YXRlYW00NCIsImEiOiJja2d5ZjlkdTcwMXUwMnJybmJpNWpkZzZiIn0.NxhXaQUcuSz6VbMEFdR58A" ) fig_map = px.scatter_mapbox(coordenadas_velocidad, lat="latitude", lon="longitude", color="count", size="count", color_continuous_scale=px.colors.cyclical.Edge, title="Speeding Alarm Locations", size_max=12, zoom=4.5) ################################################### # Generating the speeding locations map predicted ###################################################
import dash import dash_core_components as dcc import dash_html_components as html from dash.dependencies import Input, Output import numpy as np import pandas as pd from datetime import datetime as dt import plotly.express as px px.set_mapbox_access_token( r'pk.eyJ1IjoiZGF2ZTcwNTUiLCJhIjoiY2s1cGw2eDE5MDJ0ZjNucW9saWxvcXNxbCJ9.M4GmLRvRQ-ehFt4G7TZNYw' ) username_passwords = [['admin', 'password']] # Load and process data external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css'] risk_data = pd.read_pickle('risk_data.pickle') # Build Dashboard app = dash.Dash(__name__, external_stylesheets=external_stylesheets) server = app.server # Defining Dash Core Components placetypes = dcc.Dropdown(id='placetypes', options=[{ 'label': place_type, 'value': place_type
def draw_observation(data, date_obj, map_region):
    """
    Draw observation maps (rain, temperature extremes, visibility, wind)
    with plotly scatter-mapbox figures.

    Parameters
    ----------
    data : pd.DataFrame
        Station observations with columns 'Lon', 'Lat', 'PRE_Time_0808',
        'TEM_Max', 'TEM_Min', 'VIS_Min', 'WIN_S_Max'.  NOTE: mutated in
        place -- helper columns are added and 'VIS_Min' is rescaled to km.
    date_obj : datetime-like
        Observation date, formatted into each figure title.
    map_region : sequence
        Region bounds; indexing implies (lon_min, lon_max, lat_min, lat_max)
        -- confirm against the caller.

    Returns
    -------
    collections.OrderedDict
        Figure per hazard key ('Rainfall', 'Max_temperature', ...); a hazard
        is only included when at least 2 stations fall in its bins.
    """
    # set mapbox token
    px.set_mapbox_access_token(CONFIG.CONFIG['MAPBOX']['token'])

    # create figures; map center is the midpoint of the region bounds
    map_center = {
        'lat': (map_region[2] + map_region[3]) * 0.5,
        'lon': (map_region[0] + map_region[1]) * 0.5
    }
    figs = collections.OrderedDict()

    # draw precipitation: bin accumulated 08-08 rain into warning classes
    bins = [0.1, 10, 25, 50, 100, 250, 1200]
    keys = ['0.1~10', '10~25', '25~50', '50~100', '100~250', '>=250']
    cols = [
        'lightgreen', 'yellow', 'lightskyblue', 'blue', 'magenta', 'maroon'
    ]
    cols_map = dict(zip(keys, cols))
    data['rain'] = pd.cut(data['PRE_Time_0808'], bins=bins, labels=keys)
    # Hover label: "[lon,lat]: value"
    data['Rainfall'] = '['+data['Lon'].round(2).astype(str) + ',' + data['Lat'].round(2).astype(str) + ']: ' + \
        data['PRE_Time_0808'].astype(str)
    # Offset by the mean so small totals still get a visible marker.
    data['rain_size'] = data['PRE_Time_0808'] + data['PRE_Time_0808'].mean()
    df = data[data['rain'].notna()]
    if df.shape[0] >= 2:
        figs['Rainfall'] = px.scatter_mapbox(
            df,
            lat="Lat",
            lon="Lon",
            color="rain",
            category_orders={'rain': keys},
            color_discrete_map=cols_map,
            hover_data={
                'Rainfall': True,
                'Lon': False,
                'Lat': False,
                'rain': False,
                'rain_size': False
            },
            mapbox_style='satellite-streets',
            size="rain_size",
            center=map_center,
            size_max=10,
            zoom=4,
            title='Accumulated precipitation ({})'.format(
                date_obj.strftime("%Y%m%d 08-08")),
            width=900,
            height=700)

    # draw maximum temperature (heat-warning classes, >= 35 degrees)
    bins = [35, 37, 40, 60]
    keys = ['35~37', '37~40', '>=40']
    cols = ['rgb(255,191,187)', 'rgb(250,89,0)', 'rgb(230,0,8)']
    cols_map = dict(zip(keys, cols))
    data['max_temp_warning'] = pd.cut(data['TEM_Max'], bins=bins, labels=keys)
    data['max_temp'] = '['+data['Lon'].round(2).astype(str) + ',' + data['Lat'].round(2).astype(str) + ']: ' + \
        data['TEM_Max'].astype(str)
    df = data[data['max_temp_warning'].notna()]
    if df.shape[0] >= 2:
        figs['Max_temperature'] = px.scatter_mapbox(
            df,
            lat="Lat",
            lon="Lon",
            color="max_temp_warning",
            category_orders={'max_temp_warning': keys},
            color_discrete_map=cols_map,
            hover_data={
                'max_temp': True,
                'Lon': False,
                'Lat': False,
                'max_temp_warning': False,
                'TEM_Max': False
            },
            mapbox_style='satellite-streets',
            size="TEM_Max",
            center=map_center,
            size_max=10,
            zoom=4,
            title='Maximum temperature ({})'.format(
                date_obj.strftime("%Y%m%d 08-08")),
            width=900,
            height=700)

    # draw minimum temperature (cold-warning classes, <= 0 degrees)
    bins = [-120, -40, -30, -20, -10, 0]
    keys = ['<=-40', '-40~-30', '-30~-20', '-20~-10', '-10~0']
    cols = [
        'rgb(178,1,223)', 'rgb(8,7,249)', 'rgb(5,71,162)', 'rgb(5,109,250)',
        'rgb(111,176,248)'
    ]
    cols_map = dict(zip(keys, cols))
    data['min_temp_warning'] = pd.cut(data['TEM_Min'], bins=bins, labels=keys)
    data['min_temp'] = '['+data['Lon'].round(2).astype(str) + ',' + data['Lat'].round(2).astype(str) + ']: ' + \
        data['TEM_Min'].astype(str)
    df = data[data['min_temp_warning'].notna()]
    if df.shape[0] >= 2:
        # NOTE: dict key 'Min_temprature' keeps the original (misspelled)
        # key -- callers look it up by this exact string.
        figs['Min_temprature'] = px.scatter_mapbox(
            df,
            lat="Lat",
            lon="Lon",
            color="min_temp_warning",
            category_orders={'min_temp_warning': keys},
            color_discrete_map=cols_map,
            hover_data={
                'min_temp': True,
                'Lon': False,
                'Lat': False,
                'min_temp_warning': False,
                'TEM_Min': False
            },
            mapbox_style='satellite-streets',
            # Negate so colder stations (more negative) get larger markers.
            size=-1.0 * df["TEM_Min"],
            center=map_center,
            size_max=10,
            zoom=4,
            title='Minimum temperature ({})'.format(
                date_obj.strftime("%Y%m%d 08-08")),
            width=900,
            height=700)

    # draw low visibility (VIS_Min rescaled from meters to kilometers here)
    data['VIS_Min'] /= 1000.0
    bins = [0, 0.05, 0.2, 0.5, 1]
    keys = ['<=0.05', '0.05~0.2', '0.2~0.5', '0.5~1']
    cols = [
        'rgb(0,82,77)', 'rgb(0,153,160)', 'rgb(0,210,204)', 'rgb(95,255,252)'
    ]
    cols_map = dict(zip(keys, cols))
    data['min_vis_warning'] = pd.cut(data['VIS_Min'], bins=bins, labels=keys)
    # Invert so lower visibility gets a larger marker.
    data['VIS_Min_size'] = 2.0 - data["VIS_Min"]
    data['min_vis'] = '['+data['Lon'].round(2).astype(str) + ',' + data['Lat'].round(2).astype(str) + ']: ' + \
        data['VIS_Min'].astype(str)
    df = data[data['min_vis_warning'].notna()]
    if df.shape[0] >= 2:
        figs['Low_visibility'] = px.scatter_mapbox(
            df,
            lat="Lat",
            lon="Lon",
            color="min_vis_warning",
            category_orders={'min_vis_warning': keys},
            color_discrete_map=cols_map,
            hover_data={
                'min_vis': True,
                'Lon': False,
                'Lat': False,
                'min_vis_warning': False,
                'VIS_Min_size': False
            },
            mapbox_style='satellite-streets',
            size="VIS_Min_size",
            center=map_center,
            size_max=10,
            zoom=4,
            title='Low visibility ({})'.format(
                date_obj.strftime("%Y%m%d 08-08")),
            width=900,
            height=700)

    # draw high wind (maximum wind-speed classes, m/s)
    bins = [10.8, 13.9, 17.2, 20.8, 24.5, 28.5, 32.7, 37.0, 120]
    keys = [
        '10.8~13.8', '13.9~17.1', '17.2~20.7', '20.8~24.4', '24.5~28.4',
        '28.5~32.6', '32.7~36.9', '>=37.0'
    ]
    cols = [
        'rgb(0,210,244)', 'rgb(0,125,255)', 'rgb(253,255,0)',
        'rgb(247,213,0)', 'rgb(255,141,0)', 'rgb(251,89,91)', 'rgb(255,3,0)',
        'rgb(178,1,223)'
    ]
    cols_map = dict(zip(keys, cols))
    data['max_win_warning'] = pd.cut(data['WIN_S_Max'], bins=bins, labels=keys)
    data['max_win'] = '['+data['Lon'].round(2).astype(str) + ',' + data['Lat'].round(2).astype(str) + ']: ' + \
        data['WIN_S_Max'].astype(str)
    df = data[data['max_win_warning'].notna()]
    if df.shape[0] >= 2:
        figs['High_wind'] = px.scatter_mapbox(
            df,
            lat="Lat",
            lon="Lon",
            color="max_win_warning",
            category_orders={'max_win_warning': keys},
            color_discrete_map=cols_map,
            hover_data={
                'max_win': True,
                'Lon': False,
                'Lat': False,
                'max_win_warning': False,
                'WIN_S_Max': False
            },
            mapbox_style='satellite-streets',
            size="WIN_S_Max",
            center=map_center,
            size_max=10,
            zoom=4,
            title='Maximum wind speed ({})'.format(
                date_obj.strftime("%Y%m%d 08-08")),
            width=900,
            height=700)

    return figs
dbc.CardHeader('Sunburst Graph of University Features'), dbc.CardBody(dcc.Graph(id='sunburst', figure=sunburst, responsive=True)) ]) # heat map heatmap = px.imshow(df1[available_indicators].corr(), color_continuous_scale='RdYlBu_r') heatmap_graph = dbc.Card([ dbc.CardHeader('Correlation among variables'), dbc.CardBody(dcc.Graph(id='heatmap', figure=heatmap, responsive=True)) ]) #map # map px.set_mapbox_access_token( 'pk.eyJ1IjoieXV4dWFuMDgzMCIsImEiOiJja2ZjcmJoeGMxaWR1MnhycXhiejRweHk3In0.EeR8LofkaGjZqQ3itwaWyA' ) usamap = px.scatter_mapbox(df1, lat="lat", lon="lng", hover_name="institution", animation_frame='Control', color='Plan', zoom=10, color_discrete_sequence=px.colors.qualitative.T10) usamap.update_layout(geo_scope='usa', mapbox_zoom=3, mapbox_center={ "lat": df1['lat'].mean(), "lon": df1['lng'].mean()
def app():
    """Streamlit page: explore ongoing COVID-19 clinical trials in the US.

    Renders sidebar filters (state, phase, intervention type, drug/biologic),
    a scatter-mapbox of per-institution trial aggregates, and an optional
    data table with a CSV download link.
    """
    # NOTE(review): hard-coded Mapbox token — consider moving to st.secrets.
    px.set_mapbox_access_token("pk.eyJ1Ijoib2VuYWNoZSIsImEiOiJjazM2NWVwcmUxZnc3M2JvcXVvbjJiN2dpIn0.WZidyL9W3mlaLbM0TvAVXQ")

    # Methods to load and change data
    @st.cache()
    def load_datasets():
        """
        Loads dataset of US trial information only; this is a portion of the
        global trial information, with additional location information added
        using `geopy`.

        Returns:
        - all_us_data (pd.DataFrame): information for all ongoing COVID19
          trials in the US (rows with any missing values dropped)
        """
        # All US clinical trials
        data_df = pd.read_csv("https://media.githubusercontent.com/media/oena/bios823_final_project/master/dashboard/dashboard_data/cleaned_us_covid_studies_with_geo_092020.tsv", sep="\t")
        all_us_data = data_df.dropna()
        return all_us_data

    @st.cache()
    def load_filtering_options(all_us_data):
        """
        Loads filtering options for sidebar based on current dataset being
        used.

        Parameters:
        - all_us_data (pd.DataFrame): current (possibly pre-filtered) dataset

        Returns:
        - filter_options (dict): contains filter options as keys and available
          choices as values. Each list gets an "All ..." sentinel prepended so
          the selectboxes default to no filtering.
        """
        filter_options = {}

        # states represented
        states_list = list(all_us_data["Location_City_or_State"].unique())
        states_list.sort()
        states_list.insert(0, "All available states")
        filter_options["by_state"] = states_list

        # Phase
        phase_list = list(all_us_data["Phases"].unique())
        phase_list.sort()
        phase_list.insert(0, "All phases")
        filter_options["by_phase"] = phase_list

        # interventions represented
        intervention_type_list = list(all_us_data["Intervention Type"].unique())
        intervention_type_list.sort()
        intervention_type_list.insert(0, "All available interventions")
        filter_options["by_intervention_type"] = intervention_type_list

        # drugs represented: "Interventions" entries look like "DRUG: name" or
        # "BIOLOGICAL: name"; keep only the name part for those two prefixes.
        drug_list = [i.split(": ")[1] for i in list(all_us_data["Interventions"].unique()) if i.split(": ")[0] in ["DRUG", "BIOLOGICAL"]]
        drug_list.sort()
        drug_list.insert(0, "All available drugs & biologics")
        filter_options["by_drug"] = drug_list

        return filter_options

    def filter_dataset(all_us_data, drug_selection, map_display, output_type):
        """
        Filters dataset of US trials by choices selected.

        Parameters:
        - all_us_data (pd.DataFrame): information for all ongoing COVID19
          trials in the US (possibly filtered)
        - drug_selection (str): specific drug or biologic selected by user
        - map_display (str): user selection for what information to display on
          US map
        - output_type (str; must be "data" or "count"): what type of output to
          return

        Returns:
        - pd.DataFrame: the filtered rows ("data"), or per-institution
          aggregates keyed by Institution/Latitude/Longitude ("count")
        """
        # Filter if needed
        # NOTE(review): this filters a "Drug Type" column, but the sidebar
        # options were built from "Interventions" — confirm the column name.
        if drug_selection != "All available drugs & biologics":
            all_us_data = all_us_data[all_us_data["Drug Type"] == drug_selection]

        print(all_us_data.shape)  # NOTE(review): debug output; remove for release

        if output_type == "data":
            return all_us_data
        else:
            # Count of clinical trials by institution, for map
            if map_display == "Number of ongoing trials":
                out_df = pd.DataFrame(all_us_data.groupby(["Location_Institution", "lat", "lon"])["NCT Number"].nunique())
                out_df.reset_index(inplace=True)
                out_df.columns = ['Institution', 'Latitude', 'Longitude', "Number of ongoing trials"]
            elif map_display == "Number of interventions":
                out_df = pd.DataFrame(all_us_data.groupby(["Location_Institution", "lat", "lon"])["Interventions"].nunique())
                out_df.reset_index(inplace=True)
                out_df.columns = ['Institution', 'Latitude', 'Longitude', "Number of interventions"]
            elif map_display == "Trial enrollment status":
                out_df = pd.DataFrame(all_us_data.groupby(["Location_Institution", "lat", "lon", "Enrollment"])["Status"].agg(["unique"]))
                out_df.reset_index(inplace=True)
                out_df.columns = ['Institution', 'Latitude', 'Longitude', "Enrollment", "Trial enrollment status"]
                # each group's "unique" is an array; keep the first status only
                out_df["Trial enrollment status"] = [i[0] for i in out_df["Trial enrollment status"].values]
            return out_df

    def download_link(object_to_download, download_filename, download_link_text):
        """
        Generates a link to download the given object_to_download.

        Parameters:
        - object_to_download (str, pd.DataFrame): The object to be downloaded.
        - download_filename (str): filename and extension of file. e.g.
          mydata.csv, some_txt_output.txt
        - download_link_text (str): Text to display for download link.

        Returns:
        - A download link (HTML anchor string with a base64 data URI) for the
          object specified.
        """
        if isinstance(object_to_download,pd.DataFrame):
            object_to_download = object_to_download.to_csv(index=False)

        # some strings <-> bytes conversions necessary here
        b64 = base64.b64encode(object_to_download.encode()).decode()

        return f'<a href="data:file/txt;base64,{b64}" download="{download_filename}">{download_link_text}</a>'

    # Load data initially
    data_load_state = st.text("Loading data...")
    us_study_data = load_datasets()
    filter_options = load_filtering_options(us_study_data)
    data_load_state.text("")

    # Page title
    st.title("What kinds of COVID-19 trials are happening in the US?")
    st.header("Explore ongoing clinical trial efforts in US")
    with st.beta_expander("Click here to expand more details about this page"):
        st.markdown("This page displays some practical information about trials currently happening in the United States. Feel free to filter information on location, trial phase, and other features relevant to you; when you're done (and if you'd like to follow up on any trials), you can download your results to csv.")
        st.subheader("Some potential caveats to note:")
        st.markdown("- Adding geospatial data to some study locations was not possible in an automated fashion, "
                    "so they are not included in the plot below. \n "
                    "- Geospatial data was added automatically by institute (mostly hospital) location name using `geopy`, "
                    "so it may not be accurate for all locations.")

    # Sidebar to switch between study locations and latest covid rates.
    # Each applied filter re-derives the remaining options from the narrowed
    # dataset, so later selectboxes only show still-valid choices.
    st.sidebar.subheader("Filter trial information:")
    state_value = st.sidebar.selectbox("Filter trials by state:", filter_options["by_state"])
    if state_value != "All available states":
        us_study_data = us_study_data[us_study_data["Location_City_or_State"] == state_value]
        filter_options = load_filtering_options(us_study_data)

    phase_value = st.sidebar.selectbox("Filter trials by phase:", filter_options["by_phase"])
    if phase_value != "All phases":
        us_study_data = us_study_data[us_study_data["Phases"] == phase_value]
        filter_options = load_filtering_options(us_study_data)

    intervention_type = st.sidebar.selectbox("Find trials by intervention type:", filter_options["by_intervention_type"])
    if intervention_type != "All available interventions":
        us_study_data = us_study_data[us_study_data["Intervention Type"] == intervention_type]
        filter_options = load_filtering_options(us_study_data)

    intervention = st.sidebar.selectbox("Find trials by drugs/biologics being studied: ", filter_options["by_drug"])
    st.sidebar.write("Note. A biologic (aka biological) is a drug made from living organisms (or components thereof).")
    show_data_table = st.sidebar.checkbox("Show study information fulfilling above criteria")

    # Main plot
    with st.beta_container():
        # Map title
        st.subheader("Count of ongoing COVID clinical trials by institute")

        # Map layout
        c1, c2 = st.beta_columns([8,2])

        # Select box for map -- changes displayed colors on map
        c2.markdown("**Change map display:**")
        radio_display = c2.radio("Choose one of:", options=["Number of ongoing trials", "Number of interventions", "Trial enrollment status"])

        # Plot map
        filtered_count_df = filter_dataset(us_study_data, intervention, radio_display, "count")

        # Color palette type needs to change depending on what's displayed:
        # enrollment status is categorical (size by Enrollment), the other two
        # are numeric counts (size by the count itself).
        if radio_display == "Trial enrollment status":
            count_map = px.scatter_mapbox(filtered_count_df,
                                          lat="Latitude",
                                          lon="Longitude",
                                          color=radio_display,
                                          size="Enrollment",
                                          hover_name="Institution",
                                          mapbox_style="light",
                                          center={"lat": 38, "lon": -95},
                                          zoom=2)
        else:
            count_map = px.scatter_mapbox(filtered_count_df,
                                          lat="Latitude",
                                          lon="Longitude",
                                          color=radio_display,
                                          size=radio_display,
                                          #color_discrete_sequence=color_palette,
                                          hover_name="Institution",
                                          mapbox_style="light",
                                          center={"lat": 38, "lon": -95},
                                          zoom=2)
        count_map.update_layout(width=1100, height=600)
        c1.plotly_chart(count_map, use_container_width=True)

    # Data table of studies
    if show_data_table:
        with st.beta_container():
            st.subheader("Summary table of key trial information")
            cols_to_keep = ["NCT Number", "Title", "Phases", "Status", "Enrollment", "Location_City_or_State", "Location_Institution", "Address", "URL"]
            filtered_data = filter_dataset(us_study_data, intervention, radio_display, "data")
            filtered_data_display = filtered_data[cols_to_keep].drop_duplicates()
            st.write(filtered_data_display)
            if st.button('Download Dataframe as CSV'):
                tmp_download_link = download_link(filtered_data_display, 'covid_trials_information.csv', 'Click here to download your data!')
                st.markdown(tmp_download_link, unsafe_allow_html=True)
""" import dash import dash_core_components as dcc import dash_html_components as html import pandas as pd import plotly.graph_objs as go from plotly.subplots import make_subplots #from dash.dependencies import Input, Output import plotly.express as px #import base64 import json #from textwrap import dedent as d px.set_mapbox_access_token( 'pk.eyJ1IjoiZXdpbGxpYW1zMjAyMCIsImEiOiJja2FpdTIxOXMwM2wzMnFtbmVmb3IzZDJ6In0.TVsQ-iu8bN4PQLkBCr6tQQ' ) external_stylesheets = [ 'https://codepen.io/chriddyp/pen/bWLwgP.css', 'https://raw.githubusercontent.com/elimywilliams/sc_covid19/master/header2.css', 'https://github.com/plotly/dash-app-stylesheets/blob/master/dash-oil-and-gas.css' ] #from PIL import Image #image_filename = 'http://www.southerncrossinc.com/wp-content/uploads/2019/02/SC-logo-website.png' #image = Image.open(image_filename) #image.show() #from PIL import Image #import requests
conn = cx_Oracle.connect(user=config.DATABASE_5['user'], password=config.DATABASE_5['password'], dsn=dsn_tns) elif dbname == None: raise ValueError("Couldn't not find DB with given name") return conn # Get some example data (a list of dicts with {name: country name, latlng: position tuple, ...}) marker_data = requests.get( "https://gist.githubusercontent.com/erdem/8c7d26765831d0f9a8c62f02782ae00d/raw" "/248037cd701af0a4957cce340dabb0fd04e38f4c/countries.json").json() px.set_mapbox_access_token( "pk.eyJ1Ijoibml0aW5rdW1hcjEyMzQiLCJhIjoiY2tkaWQxdjBxMDNzMDJ6bzBnNDB4aTkyZyJ9.9NmlWUlLadztPucWTBSRzA" ) def fetch_data(q, environ): conn = connect_db(environ) cur = conn.cursor() cur.execute( '''alter session set nls_date_format = 'YYYY/MM/DD HH24:MI:SS' ''') result = pd.read_sql(sql=q, con=conn) cur.close() conn.close() return result def get_results(environ):
import os
import pandas as pd
import plotly.graph_objs as go
import cufflinks
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
from dash.dependencies import Input, Output
import plotly.express as px
import colorcet as cl

# Mapbox token comes from the TOKEN environment variable (None if unset).
px.set_mapbox_access_token(os.environ.get('TOKEN'))

# af: reference list of countries; df: latest per-country COVID snapshot.
af = pd.read_csv('data/africa.csv')
df = pd.read_csv('data/latest.csv')

# Normalize column names used downstream and drop location-detail columns
# that are not needed for country-level plots.
df.rename(columns={
    'Country_Region':'Country',
    'Long_': 'Long',
    'Case_Fatality_Ratio': 'C.F.R'
}, inplace=True)
df.drop([
    'FIPS','Admin2','Combined_Key','Province_State'
], axis=1, inplace=True)

# Keep only the countries listed in africa.csv.
y = list(af['Country'])
df = df[df['Country'].isin(y)]

# The ten rows with the fewest recoveries.
small = df.nsmallest(10, columns='Recovered')
import pandas as pd import plotly.express as px from apis.utils.job_search.utils import params from config import Config px.set_mapbox_access_token(Config.mapbox_token) def time_series(df: pd.DataFrame, color: str): df = df.sort_values('Date', ascending=True).reset_index(drop=True) if color == 'All': df['count'] = df.index return px.line(df, x='Date', y='count') df['count'] = df.assign(count=1).groupby(color)['count'].cumsum() df = df[~df[color].isna()] return px.line(df, x='Date', y='count', color=color, template='plotly_dark', labels={'count': 'Number of Applications'}) def map_figure(df: pd.DataFrame, color: str): df = df[~df['Latitude'].isna() & ~df['Longitude'].isna()] return px.scatter_mapbox(df, lat='Latitude', lon='Longitude', color=color) class PlotCallbacks(object): @staticmethod def figures(df, color): df = pd.read_json(df) if df.shape[0] == 0: return params['empty-figure'], params['empty-figure']
def parse_config(config_path: str) -> dict:
    """Load and return the JSON configuration stored at *config_path*."""
    with open(config_path) as handle:
        return json.load(handle)


if __name__ == "__main__":
    settings = parse_config("config.json")

    # Fetch the traffic-camera listing straight from the API endpoint.
    cameras = pandas.read_json(settings["api_base"].format(
        settings["app_id"], settings["api_format"]))

    # Each Geometry entry holds several encodings; keep only the WGS84 WKT
    # string, then parse it into geometry objects.
    # Refer https://geopandas.readthedocs.io/en/latest/gallery/create_geopandas_from_pandas.html
    cameras["Geometry"] = [entry["WGS84"] for entry in cameras["Geometry"]]
    cameras["Geometry"] = cameras["Geometry"].apply(wkt.loads)
    camera_frame = geopandas.GeoDataFrame(cameras, geometry="Geometry")

    # Plot each camera at its point location, linking to its image URL.
    plotlyex.set_mapbox_access_token(settings["mapbox_token"])
    camera_map = plotlyex.scatter_mapbox(camera_frame,
                                         lat=camera_frame.geometry.y,
                                         lon=camera_frame.geometry.x,
                                         hover_name="CameraImageUrl",
                                         zoom=1)
    camera_map.show()
STATE = 'START' data = pd.read_csv("https://www.fire.ca.gov/imapdata/mapdataall.csv") table_data = pd.DataFrame() data=data[data['incident_acres_burned'].notnull()] # Dropping Nulls data=data[data['incident_dateonly_extinguished'].notnull()] # Dropping Nulls data['date'] = pd.to_datetime(data['incident_dateonly_extinguished']) data['date_start'] = pd.to_datetime(data['incident_dateonly_created']) data = data[data.date > datetime.strptime('2010-01-01', '%Y-%m-%d')] # Dropping wrong/invalid dates min_date = min(data.date) max_date = max(data.date) # Set Mapbox Access Token if path.exists('src/.mapbox_token'): px.set_mapbox_access_token(open("src/.mapbox_token").read()) elif 'MAPBOX_TOKEN' in environ.keys(): px.set_mapbox_access_token(environ['MAPBOX_TOKEN']) else: print("TOKEN not found in env or local dir") # Create App app = dash.Dash(__name__, suppress_callback_exceptions=True) # Navbar navbar = html.Div(id='navbar', className='topnav' ,children=[ html.A('Home', id='home-page-nav', className="home-page", href='home'), html.A('California Incident Map', id='cali-map-nav', className="non-home", href='app1'), html.A('County Incident Map', id='county-map-nav', className="non-home", href='app2'),
import plotly.express as px

# NOTE(review): hard-coded Mapbox token — consider loading from env/config.
px.set_mapbox_access_token(
    "pk.eyJ1IjoiZXJhbmdyaW4iLCJhIjoiY2thanE2Mnc2MGN6aDJycGV0MHl4MXI3bCJ9.hAQ0SAX1JGgeM_9lBZtC2g"
)

# Plotly's bundled car-share demo dataset (centroid coords + usage columns).
df = px.data.carshare()
print(df)  # quick inspection of the frame before plotting

# One marker per centroid: colored by peak hour, sized by hours of car use.
fig = px.scatter_mapbox(df,
                        lat="centroid_lat",
                        lon="centroid_lon",
                        color="peak_hour",
                        size="car_hours",
                        color_continuous_scale=px.colors.cyclical.IceFire,
                        size_max=15,
                        zoom=10)
fig.show()
st.altair_chart(c3, use_container_width=True) with st.beta_expander("See explanation"): st.markdown(""" Notice that in US, the confirmed cases is extremly high which need us to make more attention to this.Though the death cases is not very high, the active cases is still a lot which means it is hard to fully recovered. """) if session == "Map": st.sidebar.subheader("Pick A Map") mapplots = st.sidebar.selectbox("Maps", ["Daily updated", "Spread of COVID-19"]) current_time = datetime.now() - timedelta(days=1) if current_time.hour < 13: current_time = datetime.now() - timedelta(days=2) today = current_time.strftime("%m-%d-%Y") px.set_mapbox_access_token( 'pk.eyJ1IjoibW9sdW9zaXJpdXMiLCJhIjoiY2toOTlvenZrMGEzNzJwcjFzbjNuaG42eSJ9.glsH-yMcr1tmNxlzHDsCpQ' ) ##spread animated if mapplots == "Spread of COVID-19": ##data cleaning and merge confirmed_global = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv' death_global = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv' recover_global = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_recovered_global.csv' confirmed = pd.read_csv(confirmed_global) death = pd.read_csv(death_global) recover = pd.read_csv(recover_global) dates = confirmed.columns[4:] confirmed_df = confirmed.melt( id_vars=['Province/State', 'Country/Region', 'Lat', 'Long'], value_vars=dates,
def set_display_children(
    location_selected_feature,
    location_selected_option,
    health_selected_feature,
    health_selected_option,
):
    """Build the Dominican Republic household-health scatter-mapbox figure.

    Parameters:
    - location_selected_feature (str): column to filter location by
      ('City' or 'Community').
    - location_selected_option: value of that column to display.
    - health_selected_feature (str): health column used to color markers
      (e.g. 'Water Access').
    - health_selected_option (list): selected values of that column; when
      empty, or when nothing matches, markers are drawn fully transparent so
      the map stays centered on the selected area.

    Returns:
    - plotly Figure: styled scatter-mapbox of the selected households.
    """
    # BUG FIX: the original called os.getenv(<token literal>), which looks up
    # an environment variable *named* the token (always None) and never used
    # the result — the call has been dropped.
    px.set_mapbox_access_token(
        "pk.eyJ1IjoibXN1YXJlejkiLCJhIjoiY2ttZ3F1cjZ0MDAxMjJubW5tN2RsYzI2bCJ9.l7Ht-cO4Owt7vgiAY3lwsQ"
    )

    dff = df[df[location_selected_feature] == location_selected_option]
    avg_lat = mean(dff["Latitude"])
    avg_lon = mean(dff["Longitude"])

    # Community view zooms in tighter than city view. BUG FIX: the original
    # left zoom_level undefined (NameError) for any other feature value;
    # default to the city-level zoom instead.
    zoom_level = 15 if location_selected_feature == 'Community' else 13

    if (health_selected_option == [] or len(
            dff[dff[health_selected_feature].isin(health_selected_option)]) == 0):
        # Nothing selected / no matches: plot the households invisibly so the
        # viewport still centers on the chosen location.
        fig = px.scatter_mapbox(
            data_frame=dff,
            lat=dff["Latitude"],
            lon=dff["Longitude"],
            zoom=zoom_level,
            hover_data={
                "Water Access": False,
                "Clinic Access": False,
                "Floor Condition": False,
                "Roof Condition": False,
                "Latrine or Bathroom Access": False,
                "Longitude": False,
                "Latitude": False,
            },
        )
        fig.update_traces(marker_opacity=0)
    else:
        dff = dff[dff[health_selected_feature].isin(health_selected_option)]
        fig = px.scatter_mapbox(
            data_frame=dff,
            lat=dff["Latitude"],
            lon=dff["Longitude"],
            color=dff[health_selected_feature],
            color_discrete_map=color_map[health_selected_feature],
            hover_name="Community",
            hover_data={
                "Water Access": True,
                "Clinic Access": True,
                "Floor Condition": True,
                "Roof Condition": True,
                "Latrine or Bathroom Access": True,
                "Longitude": False,
                "Latitude": False,
            },
            zoom=zoom_level,
            opacity=0.8,
            # keep legend/category order stable across selections
            category_orders={
                health_selected_feature:
                    all_health_options[health_selected_feature]
            },
            custom_data=[
                "Community",
                "Water Access",
                "Clinic Access",
                "Floor Condition",
                "Roof Condition",
                "Latrine or Bathroom Access",
            ],
        )

    # Shared styling (the original issued update_traces(marker_size=15) twice
    # with duplicated commented-out blocks; deduplicated here — idempotent).
    fig.update_traces(marker_size=15)
    fig.update_layout(hoverlabel=dict(
        bgcolor="white", font_size=16, font_family="Roboto"))
    fig.update_layout(
        autosize=True,
        title=
        "<b>Dominican Republic Health Data by Household</b><br>(Hover over map for details)",
        title_font_color='black',
        title_font_family="Roboto",
        font_family="Roboto",
        geo_scope="world",
        geo=dict(
            projection_scale=1000000,  # this is kind of like zoom
            center=dict(lat=avg_lat, lon=avg_lon),
        ),  # this will center on the point
    )
    fig.update_layout(
        mapbox_style="mapbox://styles/msuarez9/ckmp4rt7e0qf517o1md18w9d1")
    fig.update_layout(
        legend_title=dict(font=dict(family="Roboto", size=20, color="black")),
        legend=dict(
            font_family="Roboto",
            font=dict(family="Roboto", size=18, color="black"),
            orientation="h",
            yanchor="bottom",
            xanchor="left",
            y=-0.09,
        ),
    )
    # Transparent plot/paper backgrounds so the page styling shows through.
    fig.update_layout({
        'plot_bgcolor': 'rgba(0, 0, 0, 0)',
        'paper_bgcolor': 'rgba(0, 0, 0, 0)'
    })
    return fig
def xception():
    """Flask view: classify a public Instagram profile's images with Xception,
    then map Google Places matches for the predicted objects near the
    requested departure address(es).

    GET renders the form page; POST runs the scrape -> classify -> geocode ->
    nearby-search pipeline and returns the resulting map as an HTML div.
    """
    if request.method == "POST":
        instaid = request.form["instaid"]

        # --- Scrape image URLs from the profile page with a real browser ---
        url = "https://www.instagram.com/" + instaid
        browser = Browser('chrome')
        browser.visit(url)
        sleep(1)  # let the page render before grabbing its HTML
        bs = BeautifulSoup(browser.html, 'html.parser')

        # finds all the images in website and puts url in df
        images = bs.find_all('img', {'src': re.compile('.jpg')})
        image_urls = []
        for image in images:
            image_urls.append(str(image['src']))
        image_df = pd.DataFrame({"image": image_urls})

        # Heavy ML dependencies are imported lazily so only POST pays the cost.
        # (The original's unused `import tensorflow as tf` was removed.)
        import numpy as np
        from tensorflow.keras.preprocessing import image
        from tensorflow.keras.applications.xception import (
            Xception, preprocess_input, decode_predictions)
        from PIL import Image
        import requests
        from io import BytesIO

        model = Xception(include_top=True, weights='imagenet')

        # --- Classify each image; keep the top-1 label and its confidence ---
        prds = []
        pcts = []
        for i in image_urls:
            response = requests.get(i)
            img = Image.open(BytesIO(response.content))
            img = img.resize((299, 299), Image.NEAREST)  # Xception input size
            x = image.img_to_array(img)
            x = np.expand_dims(x, axis=0)
            x = preprocess_input(x)
            predictions = model.predict(x)
            prds.append(decode_predictions(predictions, top=1)[0][0][1])
            pcts.append(decode_predictions(predictions, top=1)[0][0][2])
        image_df["predictions"] = prds
        image_df["%"] = pcts
        image_df.sort_values("%", ascending=False, inplace=True)
        image_df.reset_index(drop=True, inplace=True)

        # SECURITY(review): hard-coded Google API key checked into source —
        # move to configuration/environment and rotate the key.
        gkey = "AIzaSyA-Rjp6nOeJp6815Xt1Kkuxc5XKMiKl_yA"
        depart = request.form["depart"]
        target_radius = 1000
        records = pd.DataFrame()
        # one search term per distinct predicted object
        target_list = image_df.drop_duplicates(subset="predictions",
                                               keep="first")["predictions"]
        targets = str(depart).split(",")
        for target in targets:
            # Geocode the departure address to lat/lng coordinates.
            target_url = (f'https://maps.googleapis.com/maps/api/geocode/json?address={target}&key={gkey}')
            geo_data = requests.get(target_url).json()
            target_adr = geo_data["results"][0]["formatted_address"]
            lat = geo_data["results"][0]["geometry"]["location"]["lat"]
            lng = geo_data["results"][0]["geometry"]["location"]["lng"]
            target_coordinates = str(lat) + "," + str(lng)
            target_type = ""

            # Nearby-search each predicted object around the departure point.
            for target_search in target_list:
                params = {
                    "location": target_coordinates,
                    "keyword": target_search,
                    "radius": target_radius,
                    "type": target_type,
                    "key": gkey
                }
                base_url = "https://maps.googleapis.com/maps/api/place/nearbysearch/json"
                places_data = requests.get(base_url, params=params).json()

                # Collect one record per returned place; optional fields fall
                # back to "NA" when absent.
                n = 0
                while int(n) < len(places_data["results"]):
                    try:
                        price = places_data["results"][int(n)]["price_level"]
                    except KeyError:
                        price = "NA"
                    try:
                        link = places_data["results"][int(n)]["place_id"]
                    except KeyError:
                        link = "NA"
                    try:
                        score = places_data["results"][int(n)]["rating"]
                    except KeyError:
                        score = "NA"
                    try:
                        reviews = places_data["results"][int(n)]["user_ratings_total"]
                    except KeyError:
                        reviews = "NA"
                    try:
                        lat = places_data["results"][int(n)]["geometry"]["location"]["lat"]
                        lon = places_data["results"][int(n)]["geometry"]["location"]["lng"]
                    # BUG FIX: the original wrote `except IndexError or
                    # TimeoutError:` which evaluates to `except IndexError:`
                    # and never caught TimeoutError; a tuple catches both.
                    except (IndexError, TimeoutError):
                        pass  # keep the previous lat/lon values for this row

                    content = pd.DataFrame({
                        "depart": target_adr,
                        "poi": target_search,
                        "name": [places_data["results"][int(n)]["name"]],
                        "score": score,
                        "reviews": reviews,
                        "price": price,
                        "link": link,
                        "address": [places_data["results"][int(n)]["vicinity"]],
                        "lat": lat,
                        "lon": lon})
                    records = records.append(content)
                    n += 1
        records.reset_index(drop=True, inplace=True)

        # --- Plot all found places, colored by the predicted object ---
        records["size"] = 10  # uniform marker size
        px.set_mapbox_access_token("pk.eyJ1IjoidGl2bWU3IiwiYSI6ImNrMWEwZDVtNDI4Zm4zYm1vY3o3Z25zejEifQ._yTPkj3nXTzor72zIevLCQ")
        fig = px.scatter_mapbox(records, lat="lat", lon="lon", color="poi",
                                size="size", hover_name="name", zoom=13)
        fig.update_layout(autosize=True, width=1500, height=750)
        return (py.offline.plot(fig, output_type="div"))
    return render_template("xception.html")
import dash_bootstrap_components as dbc import dash_html_components as html import dash_core_components as dcc import dash # For Plotly Graphs and Charts import plotly.graph_objects as go import plotly.express as px import pandas as pd # Style for Scatterfig (Crime Pie Chart) style = "mapbox://styles/kshizi/ckkgih80b043k17p0krxnbssu" px.set_mapbox_access_token(open("data/.mapbox_token").read()) # Data Frame df = pd.read_csv('data/Raleigh_Police_Incidents.csv') # Data Wrangling list_in_dist = df['district'] count = list_in_dist.value_counts(dropna=True) count_tolist = list(count) index_tolist = count.index.tolist() # Wrangled Data Passed into Pie Chart labels = index_tolist values = count_tolist # Crime Pie Chart Figure fig = go.Figure(data=[ go.Pie(labels=labels, values=values,
### Dash App Imports import dash import dash_core_components as dcc import dash_html_components as html from dash.dependencies import Input, Output, State ### Mapping Imports import plotly.graph_objects as go import plotly.express as px ### MAPBOX CREDENTIALS ### # with open('./credentials.json', 'r') as f: # credentials = json.load(f) # MAPBOX_TOKEN = credentials['token'] px.set_mapbox_access_token('pk.eyJ1IjoidHFyYWhtYW4iLCJhIjoiY2l0bmh2dnU2MDRvZzJ6bDQ4OWFheXU3NCJ9.bY7m05QGUHV1jQvwwHX-FA') mapbox_access_token = 'pk.eyJ1IjoidHFyYWhtYW4iLCJhIjoiY2l0bmh2dnU2MDRvZzJ6bDQ4OWFheXU3NCJ9.bY7m05QGUHV1jQvwwHX-FA' #mapbox_access_token = 'pk.eyJ1IjoiamFja2x1byIsImEiOiJjajNlcnh3MzEwMHZtMzNueGw3NWw5ZXF5In0.fk8k06T96Ml9CLGgKmk81w' # Reading in the data isabela = pd.read_csv("isabela_duck_deployment.csv") ### PREPROCESSING ### # Getting the first duck_id from path isabela['duck_id'] = isabela['path'].apply(lambda x: x[:12]) # Creating a list of unique duck_id's duck_list = isabela['duck_id'].unique().tolist()