def chart(data, data_rolling, countries, by_million_inh=False, align_curves=False, last_d=15, offset_name='offset_confirmed', type_ppl="confirmed cases", name_fig="", since=False, min_rate=0, log=False, new=""):
    """Build (and optionally upload/show/export) a COVID-19 country-comparison chart.

    Parameters
    ----------
    data : DataFrame-like
        Raw daily series, one column per country, plus 'date' (and 'date_int'
        when ``since`` is used).  # assumes a default RangeIndex so that
        data[c][len(data) - 1] is the last row — TODO confirm.
    data_rolling : DataFrame-like
        Rolling-mean version of ``data``, same columns.
    countries : dict
        Per-country metadata: 'pop', 'color', and the ``offset_name`` key.
    by_million_inh : bool
        Normalise values by population (millions).
    align_curves : bool
        Shift each country's curve by its precomputed offset.
    last_d : int
        Number of most recent days to plot (ignored when ``since`` is set).
    since : bool
        Plot against days since each country crossed ``min_rate``.
    log : bool
        Use a log y-axis (also enables the doubling-rate guide lines).
    new : str
        Column-name suffix selecting an alternative series (e.g. daily new).

    Relies on module-level globals: ``colors``, ``upload``, ``show``,
    ``export``, ``py``, ``plotly``, ``go``, ``np``, ``random``,
    ``SymbolValidator``, ``datetime``.

    Returns the plotly Figure.
    """
    today = datetime.now().strftime("%Y-%m-%d %H:%M")
    ### Symbols (shuffled marker symbols; currently not applied to traces)
    symbols = []
    for i in range(35):
        symbols.append(SymbolValidator().values[i])
    random.shuffle(symbols)
    ###
    fig = go.Figure()
    # i/j are loop counters reset modulo len(colors)/40; x_an/y_an are unused here.
    i = 0
    j = 0
    x_an = np.array([])
    y_an = np.array([])
    # Sort countries by their latest value (optionally per-capita), descending,
    # so the legend lists the worst-affected countries first.
    countries_last_val = []
    countries_array = []
    for c in countries:
        if by_million_inh:
            val = data[c][len(data) - 1] / countries[c]['pop']
        else:
            val = data[c][len(data) - 1]
        countries_last_val.append(val)
        countries_array.append(c)
    ind = np.argsort(countries_last_val)
    countries_array = np.array(countries_array)
    countries_array = countries_array[ind][::-1]
    for c in countries_array:
        # offset  : shift applied when aligning curves between countries
        # offset2 : end bound of the x slice (None = up to the last point)
        # offset3 : start-shift used for the y slice in "since" mode
        if align_curves:
            offset = countries[c][offset_name]
            offset2 = -offset
        else:
            offset = 0
        if offset == 0:
            offset2 = None
        if by_million_inh:
            pop = countries[c]['pop']
        else:
            pop = 1
        date = 'date'
        offset3 = 0
        since_str = ""
        since_str_leg = ""
        if since:
            # Re-anchor the x axis at the first day the series exceeds min_rate.
            date = 'date_int'
            res = list(map(lambda i: i > min_rate, data[c + new].values / pop))
            offset2 = 0
            if True in res:
                ind = res.index(True)
                offset2 = -ind
                since_str_leg = " [since {} days]".format(len(data) - ind)
            offset3 = offset2
            last_d = 0
            offset = 0
            since_str = " [since {}]".format(min_rate)  #, type_ppl
            if by_million_inh:
                since_str = since_str[:-1] + "/1M inh.]"
        x = data[date][-last_d - offset:offset2]
        y = data[c + new][-last_d - offset3:] / pop
        if offset != 0:
            name_legend = '{} [delayed by {} days]'.format(c, -offset)
        else:
            name_legend = '{} {}'.format(c, since_str_leg)
        # txt labels the last point with the country name; currently unused by
        # the traces below — NOTE(review): confirm whether text= was intended.
        txt = ["" for i in range(len(data_rolling[c][-last_d - offset3:]))]
        txt[-1] = c
        # Raw data as markers…
        fig.add_trace(
            go.Scatter(
                x=x,
                y=y,
                mode='markers',
                marker_color=colors[countries[c]['color']],
                legendgroup=c,
                marker_symbol=countries[c]['color'],
                marker_size=9,
                #marker_line_width=2,
                opacity=1,
                showlegend=True,
                name=name_legend))
        # …and the rolling mean as a line, sharing the same legend group.
        fig.add_trace(
            go.Scatter(x=data_rolling[date][-last_d - offset:offset2],
                       y=data_rolling[c + new][-last_d - offset3:] / pop,
                       mode='lines',
                       marker_color=colors[countries[c]['color']],
                       opacity=1,
                       legendgroup=c,
                       showlegend=False,
                       line=dict(width=2),
                       name=name_legend))
        i += 1
        j += 1
        if i >= len(colors):
            i = 0
        if j >= 40:
            j = 0
        # Doubling-rate guide lines, drawn once (keyed on "Italy") in log+since mode.
        if log and since and c == "Italy":
            date_start = data_rolling['date_int'].values[-last_d - offset]
            x = data_rolling["date_int"][-last_d - offset:offset2]
            max_values = 15
            for (rate, rate_str) in [(2**(1 / 10), "x2 every 10 days"),
                                     (2**(1 / 7), "x2 every 7 days"),
                                     (2**(1 / 3), "x2 every 3 days"),
                                     (2**(1 / 2), "x2 every 2 days"),
                                     (2**(1 / 5), "x2 every 5 days")]:
                # Exponential reference curve anchored at min_rate on day 0.
                y = rate**(
                    data_rolling["date_int"][-last_d - offset:offset2].values -
                    date_start) * min_rate
                fig.add_trace(
                    go.Scatter(
                        x=x[:max_values + 1],
                        y=y[:max_values + 1],
                        mode='lines+text',
                        marker_color="grey",
                        opacity=1,
                        #text = rate_str,
                        textposition="bottom right",
                        legendgroup="Tendance",
                        showlegend=False,
                        line=dict(width=1, dash='dot'),
                        name="Tendance"))
                # Separate text-only trace placing the rate label at the line's end.
                fig.add_trace(
                    go.Scatter(x=[
                        data_rolling["date_int"]
                        [-last_d - offset:offset2].values[max_values]
                    ],
                               y=[(rate**(data_rolling["date_int"]
                                          [-last_d - offset:offset2].values -
                                          date_start) * min_rate)[max_values]],
                               mode='text',
                               marker_color="grey",
                               opacity=1,
                               text=rate_str,
                               textposition="bottom right",
                               legendgroup="Tendance",
                               showlegend=False,
                               name="Tendance"))
    ### END LOOP ###
    # Title / axis strings assembled from the options.
    align_str = ""
    if align_curves:
        align_str = " [aligned]"
    million_str = ""
    million_str_ax = ""  # NOTE(review): built but never used below.
    if by_million_inh:
        million_str = " for 1M inhabitants"
        million_str_ax = "/ nb of inhabitants (million)"
    delayed = ""
    if align_curves:
        delayed = "— delayed for some countries"
    if since:
        delayed = "— since {} {} {}".format(min_rate, type_ppl, million_str)
    fig.update_annotations(
        dict(xref="x", yref="y", showarrow=True, arrowhead=7))
    log_str = "linear"
    if log:
        log_str = "log"
    # NOTE(review): since_str is only bound inside the country loop — this
    # raises NameError if `countries` is empty; confirm callers never pass {}.
    fig.update_layout(
        showlegend=True,
        title={
            'text': "COVID-19 <b>{}{}</b>{}{}".format(type_ppl, million_str,
                                                      align_str, since_str),
            'y': 0.95,
            'x': 0.5,
            'xanchor': 'center',
            'yanchor': 'top'
        },
        xaxis_title="Day {} {}".format(delayed, ''),
        yaxis_type=log_str,
        yaxis_title="Total {} {}".format(type_ppl, million_str),
        titlefont=dict(size=28),
        annotations=[
            dict(
                xref='paper',
                yref='paper',
                x=0,
                y=1.05,
                showarrow=False,
                text=
                'Last update: {} ; Last data: {} ; Data: CSSE ; Author: @guillaumerozier'
                .format(today,
                        str(data['date'].values[-1])[:10]))
        ])
    fig.update_xaxes(nticks=last_d)
    print("> graph built")
    # Side-effect outputs gated by module-level flags.
    if upload:
        py.plot(fig, filename=name_fig, auto_open=False)
        print("> graph uploaded")
    if show:
        fig.show()
        print("> graph showed")
    if export:
        path_log = ""
        if log:
            path_log = "log_yaxis/"
        fig.write_image("images/charts/{}{}.png".format(path_log, name_fig),
                        scale=3,
                        width=1100,
                        height=700)
        fig.write_image("images/charts_sd/{}{}.png".format(path_log, name_fig),
                        scale=0.5)
        plotly.offline.plot(fig,
                            filename='images/html_exports/{}{}.html'.format(
                                path_log, name_fig),
                            auto_open=False)
        print("> graph exported\n")
    return fig
animation_frame="year", animation_group="country", # fig = px.scatter(px.data.gapminder(), x="gdpPercap", y="lifeExp", animation_frame="year", animation_group="country", size="pop", color="country", hover_name="country", log_x=True, size_max=100, range_x=[100, 100000], range_y=[25, 90]) fig.update_layout(height=650) st.write(fig) elif ops == "stocks": df = pd.read_csv("datasets/stocks.csv") fig = go.Figure() fig.add_trace(go.Scatter(x=df['date'], y=df['AAPL'], name="Apple")) fig.add_trace(go.Scatter(x=df['date'], y=df['AMZN'], name="Amazon")) fig.add_trace(go.Scatter(x=df['date'], y=df['FB'], name="Facebook")) fig.add_trace(go.Scatter(x=df['date'], y=df['GOOG'], name="Google")) fig.add_trace(go.Scatter(x=df['date'], y=df['NFLX'], name="Netflix")) fig.add_trace(go.Scatter(x=df['date'], y=df['MSFT'], name="Microsoft")) fig.layout.update(title_text='Time Series data with Rangeslider', xaxis_rangeslider_visible=True) st.write(fig) elif ops == "iris": df = pd.read_csv("datasets/iris.csv") y1 = df['sepal_length'] x1 = df['sepal_width'] y2 = df['petal_length'] x2 = df['petal_width'] color = df['species']
noise_var=5.0,standardized=True,correlated=True) SLR = StandardizedLinearRegression(X, y, beta) beta_estim = SLR.solve_linear_regression() r2_prob = SLR.r2() net_effects = SLR.net_effect() fig = make_subplots(rows=2, cols=1, shared_xaxes=True, vertical_spacing=0.05, subplot_titles=("Net Effects", "Predictor Estimated Coefficients")) fig.add_trace(go.Scatter(x=np.arange(n), y=net_effects, mode='lines+markers', name='NEF'), row=1, col=1) fig.add_trace(go.Scatter(x=np.arange(n), y=beta_estim, mode='lines+markers', name='LS Coeff'), row=2, col=1) fig.update_layout(title={ 'text': "R2 = " + str(r2_prob), }, height=950, font=dict(family="Courier New, monospace",
# Plot the weight distribution of the class together with the mean and the
# 1/2/3 standard-deviation boundaries, then report how much of the data falls
# inside each band.
df = pd.read_csv("class108.csv")
weights = df["Weight(Pounds)"].tolist()

mean = statistics.mean(weights)
median = statistics.median(weights)
mode = statistics.mode(weights)
deviation = statistics.stdev(weights)

# (start, end) of the k-th standard-deviation band around the mean, k = 1..3.
bands = [(mean - k * deviation, mean + k * deviation) for k in (1, 2, 3)]

fig = ff.create_distplot([weights], ["Weight"], show_hist=False)
fig.add_trace(
    go.Scatter(x=[mean, mean], y=[0, 0.17], mode="lines", name="mean"))
for k, (band_start, band_end) in enumerate(bands, start=1):
    label = "Standard Deviation {}".format(k)
    fig.add_trace(
        go.Scatter(x=[band_start, band_start], y=[0, 0.17], mode="lines",
                   name=label))
    fig.add_trace(
        go.Scatter(x=[band_end, band_end], y=[0, 0.17], mode="lines",
                   name=label))
fig.show()

# Percentage of samples strictly inside each band.
for k, (band_start, band_end) in enumerate(bands, start=1):
    inside = [w for w in weights if band_start < w < band_end]
    print("{}% of data lies within standard deviation {}".format(
        len(inside) * 100 / len(weights), k))
# Build one cumulative speed-vs-race scatter frame per race, colored by accuracy.

# CSV_PATH: (string) path to the csv file containing the typing stats.
# Example: /Users/kondavarsha/Documents/Python/type-racer-graph-pretty/output.csv
CSV_PATH = ''
df = pandas.read_csv(CSV_PATH)

# IMAGE_PATH: (string) path to folder where the images will be output.
# Example : /Users/kondavarsha/Documents/Python/type-racer-graph-pretty/images/
# NOTE(review): IMAGE_PATH is never written to below — confirm whether each
# frame was meant to be saved via fig.write_image().
IMAGE_PATH = ''

# FIX: the loop bound was range(1, len(df['Race #'] + 1)).  `df['Race #'] + 1`
# adds 1 elementwise and leaves the length unchanged, so the final cumulative
# frame was silently skipped and row 0's date was never parsed.  Iterate to
# len(df) inclusive and read the date of the newest race in each frame (i - 1).
for i in range(1, len(df['Race #']) + 1):
    # Parsed for validation of the date column; currently unused by the figure
    # — presumably intended for a per-frame title, TODO confirm.
    datetime_obj = datetime.strptime(df["Date"][i - 1], "%m/%d/%y")
    fig = go.Figure(data=go.Scatter(
        y=df['Speed'][0:i],
        x=df['Race #'][0:i],
        mode='markers',
        marker=dict(
            size=8,
            color=df['Accuracy'],  # set color equal to a variable
            colorscale='Magma',  # one of plotly colorscales
            showscale=True,
            colorbar=dict(title="Accuracy"))))
    fig.update_layout({
        "title": {
            "text": "Speed in Races over time (2016-2019)"
        },
        "xaxis": {
            "title": "Race #"
        },
        "yaxis": {
            "title": "Speed (WPM)"
        }
    })
def pca(l_cluster):
    """Project char-level song embeddings onto 2D with PCA and plot the clusters.

    Reads song names/strings from 'datasetParsingDEF.csv' (appending to the
    module-level lists ``song_Strings``/``song_Names``), vectorizes them with
    a pretrained chars2vec model, reduces to 2 components with PCA, and draws
    both a plotly scatter and a matplotlib scatter colored by cluster.

    Parameters
    ----------
    l_cluster : sequence
        Cluster label per song, parallel to the rows of the CSV.

    Relies on module-level globals: ``song_Strings``, ``song_Names``,
    ``embedding``, ``n_clusterString``, ``algo``, ``label_color_Final``, and
    the helpers ``trasformLabelColor``/``modificaLabelColor`` (which populate
    and adjust ``label_color_Final`` as a side effect — semantics not visible
    here, TODO confirm).
    """
    # Parse the CSV: column 1 holds the song filename (".xxx" suffix stripped
    # via [:-4]), column 2 the lyrics/string to embed.
    with open('datasetParsingDEF.csv') as csv_file:
        csv_reader = csv.reader(csv_file, delimiter=',')
        line_count = 0
        for row in csv_reader:
            if line_count == 0:
                # First row is the header.
                print(f'Column names are {", ".join(row)}')
                line_count += 1
            else:
                print('WORD', row[1][:-4])
                song_Strings.append(row[2])
                song_Names.append(row[1][:-4])
                line_count += 1
        print(f'Processed {line_count} lines.')
    # Load Inutition Engineering pretrained model
    # Models names: 'eng_50', 'eng_100', 'eng_150' 'eng_200', 'eng_300'
    c2v_model = chars2vec.load_model(embedding)
    # Create word embeddings
    word_embeddings = c2v_model.vectorize_words(song_Strings)
    # Project embeddings on plane using the PCA
    projection_2d = sklearn.decomposition.PCA(
        n_components=2).fit_transform(word_embeddings)
    # Draw words on plane (matplotlib figure used by the loop at the end)
    f = plt.figure(figsize=(8, 6))
    plt.title("KMean - Divisione : " + n_clusterString + ' Cluster - Embedding : ' + embedding)
    #label_color = [LABEL_COLOR_MAP[l] for l in l_cluster]
    print(song_Names)
    # Populate/adjust the global per-point color list from the cluster labels.
    trasformLabelColor(l_cluster)
    print(label_color_Final)
    i = 0
    print(len(l_cluster))
    modificaLabelColor()
    print(label_color_Final)
    print(len(projection_2d))
    # Split the 2D projection into x/y coordinate lists for plotly.
    assex = []
    assey = []
    for j in range(0, len(projection_2d)):
        assex.append(projection_2d[j, 0])
        assey.append(projection_2d[j, 1])
    fig = go.Figure(data=go.Scatter(
        x=assex,
        y=assey,
        mode='markers',
        text=song_Names,
        marker=dict(
            size=16,
            color=label_color_Final,  # set color equal to a variable
            showscale=True,
        )))
    fig.update_xaxes(showgrid=False)
    fig.update_yaxes(showgrid=False)
    fig.update_layout(title_text=algo + ' ' + embedding + ' ' + n_clusterString,
                      plot_bgcolor='rgb(236,241,243)')
    fig.show()
    # Duplicate of the scatter on the matplotlib figure created above.
    for j in range(len(projection_2d)):
        print(j)
        plt.scatter(projection_2d[j, 0],
                    projection_2d[j, 1],
                    marker=('$' + 'o' + '$'),
                    s=30,
                    label=j,
                    c=label_color_Final[j])
        i = i + 1
def compute_os(stats):
    """Compute per-experiment "overshoot" percentages from batch statistics.

    For every batch ``b`` and experiment ``e`` in ``stats["batches"]``, the
    per-DIV means of each metric (mfr, mbr, mfib, mburdur, mnrs) are sorted by
    DIV number; the overshoot is the percentage excess of the metric's peak
    over its steady-state value (mean of the last 3 DIVs).  Results are stored
    in ``os_stats["batches"][b][e]["perc_<metric>"]`` and plotted as a bar
    chart ("overshoot_mean.pdf").  Finally a cubic polynomial is fitted to the
    min-max-normalised MFR values across all batches and plotted.

    Parameters
    ----------
    stats : dict
        Nested as stats["batches"][batch][experiment][div][metric]["mean"/"sem"],
        where div keys look like "DIV12".

    Side effects: prints diagnostics, shows two figures, writes
    "overshoot_mean.pdf".  Relies on module-level ``np``, ``px``, ``go``.
    """
    mfr_all = []
    div_all = []
    os_stats = {"batches": {}}
    for b in stats["batches"].keys():
        print("##############")
        print("##############")
        print(b)
        if b not in os_stats["batches"].keys():
            os_stats["batches"][b] = {}
        for e in stats["batches"][b].keys():
            if e not in os_stats["batches"][b].keys():
                os_stats["batches"][b][e] = {}
            # Per-metric accumulators, filled in DIV iteration order.
            mfr_mean = []
            mfr_sem = []
            mbr_mean = []
            mbr_sem = []
            mfib_mean = []
            mfib_sem = []
            mburdur_mean = []
            mburdur_sem = []
            mnrs_mean = []
            mnrs_sem = []
            # FIX: materialise the keys — the original kept the dict_keys view,
            # and `divs[i]` on a view raises TypeError in Python 3.
            divs = list(stats["batches"][b][e].keys())
            divs_num = [int(i.lower().replace("div", "")) for i in divs]
            crr_mfr = []
            crr_div = []
            for d in divs:
                crr_div_int = int(d.lower().replace("div", ""))
                crr_mfr.append(stats["batches"][b][e][d]["mfr"]["mean"])
                crr_div.append(crr_div_int)
                mfr_mean.append(stats["batches"][b][e][d]["mfr"]["mean"])
                mfr_sem.append(stats["batches"][b][e][d]["mfr"]["sem"])
                mbr_mean.append(stats["batches"][b][e][d]["mbr"]["mean"])
                mbr_sem.append(stats["batches"][b][e][d]["mbr"]["sem"])
                mfib_mean.append(stats["batches"][b][e][d]["mfib"]["mean"])
                mfib_sem.append(stats["batches"][b][e][d]["mfib"]["sem"])
                mburdur_mean.append(
                    stats["batches"][b][e][d]["mburdur"]["mean"])
                mburdur_sem.append(stats["batches"][b][e][d]["mburdur"]["sem"])
                mnrs_mean.append(stats["batches"][b][e][d]["mnrs"]["mean"])
                mnrs_sem.append(stats["batches"][b][e][d]["mnrs"]["sem"])
            # Min-max normalise this experiment's MFR for the global fit.
            min_val = min(crr_mfr)
            max_val = max(crr_mfr)
            print(min_val)
            print(max_val)
            crr_mfr_n = [(x - min_val) / (max_val - min_val) for x in crr_mfr]
            print(crr_mfr_n)
            for i in range(len(crr_mfr)):
                mfr_all.append(crr_mfr_n[i])
                div_all.append(crr_div[i])
            # Reorder everything by ascending DIV number.
            sort_index = np.argsort(np.array(divs_num))
            divs = [divs[i] for i in sort_index]
            mfr_mean = [mfr_mean[i] for i in sort_index]
            mfr_sem = [mfr_sem[i] for i in sort_index]
            mbr_mean = [mbr_mean[i] for i in sort_index]
            mbr_sem = [mbr_sem[i] for i in sort_index]
            mfib_mean = [mfib_mean[i] for i in sort_index]
            mfib_sem = [mfib_sem[i] for i in sort_index]
            mburdur_mean = [mburdur_mean[i] for i in sort_index]
            mburdur_sem = [mburdur_sem[i] for i in sort_index]
            mnrs_mean = [mnrs_mean[i] for i in sort_index]
            mnrs_sem = [mnrs_sem[i] for i in sort_index]
            # Peak index and steady-state value (mean of the last 3 DIVs) per metric.
            idx_os = np.argmax(mfr_mean)
            mfr_ss_mean = np.nanmean(np.array(mfr_mean[-3:]), dtype=np.float64)
            idx_os_mbr = np.argmax(mbr_mean)
            mbr_ss_mean = np.nanmean(np.array(mbr_mean[-3:]), dtype=np.float64)
            idx_os_mfib = np.argmax(mfib_mean)
            mfib_ss_mean = np.nanmean(np.array(mfib_mean[-3:]),
                                      dtype=np.float64)
            idx_os_mburdur = np.argmax(mburdur_mean)
            mburdur_ss_mean = np.nanmean(np.array(mburdur_mean[-3:]),
                                         dtype=np.float64)
            idx_os_mnrs = np.argmax(mnrs_mean)
            mnrs_ss_mean = np.nanmean(np.array(mnrs_mean[-3:]),
                                      dtype=np.float64)
            print("--------")
            print(e)
            print(divs)
            # Overshoot: % excess of the peak over the steady-state mean.
            perc_mfr = 100 * (mfr_mean[idx_os] - mfr_ss_mean) / mfr_ss_mean
            perc_mbr = 100 * (mbr_mean[idx_os_mbr] - mbr_ss_mean) / mbr_ss_mean
            perc_mfib = 100 * (mfib_mean[idx_os_mfib] -
                               mfib_ss_mean) / mfib_ss_mean
            perc_mburdur = 100 * (mburdur_mean[idx_os_mburdur] -
                                  mburdur_ss_mean) / mburdur_ss_mean
            perc_mnrs = 100 * (mnrs_mean[idx_os_mnrs] -
                               mnrs_ss_mean) / mnrs_ss_mean
            os_stats["batches"][b][e]["perc_mfr"] = perc_mfr
            os_stats["batches"][b][e]["perc_mbr"] = perc_mbr
            os_stats["batches"][b][e]["perc_mfib"] = perc_mfib
            os_stats["batches"][b][e]["perc_mburdur"] = perc_mburdur
            os_stats["batches"][b][e]["perc_mnrs"] = perc_mnrs
    #pprint.pprint(os_stats)
    # Flatten the MFR overshoots into a bar chart, one bar per batch-experiment.
    perc_mfr_all_x = []
    perc_mfr_all_y = []
    for b in os_stats["batches"]:
        for eidx, e in enumerate(os_stats["batches"][b].keys()):
            perc_mfr_all_y.append(os_stats["batches"][b][e]["perc_mfr"])
            perc_mfr_all_x.append(b + "-" + str(eidx))
    fig = px.bar(x=perc_mfr_all_x,
                 y=perc_mfr_all_y,
                 labels={
                     'x': 'Batch names',
                     'y': '%DeltaMFR'
                 })
    fig.show()
    fig.write_image("overshoot_mean.pdf")
    # Cubic fit of normalised MFR vs DIV across all batches.
    sort_index = np.argsort(div_all)
    div_ord = [div_all[i] for i in sort_index]
    mfr_ord = [mfr_all[i] for i in sort_index]
    # calculate polynomial
    z = np.polyfit(div_ord, mfr_ord, 3)
    f = np.poly1d(z)
    # FIX: was the Python 2 statement `print f`, a SyntaxError under Python 3.
    print(f)
    # calculate new x's and y's
    x_new = np.linspace(div_ord[0], div_ord[-1], 50)
    y_new = f(x_new)
    fig = go.Figure()
    fig.add_traces(
        go.Scatter(x=div_all, y=mfr_all, type="scatter", mode="markers"))
    fig.add_traces(go.Scatter(x=x_new, y=y_new, type="scatter", mode="lines"))
    fig.show()
def index():
    """Flask view: sync the logged-in user's glucose readings and render them.

    Flow: look up the user's account and device, pull that device's feed from
    ThingSpeak, append any records newer than what is already in the MySQL
    ``glucose_value`` table, then build a plotly line chart and table of all
    readings and hand their JSON to the template.  Unauthenticated users are
    redirected to the login page.

    Relies on module/app globals: ``session``, ``mysql``, ``MySQLdb``,
    ``requests``, ``json``, ``go``, ``plotly``, ``params``,
    ``render_template``, ``redirect``, ``url_for``.
    """
    # Check if user is loggedin
    if 'loggedin' in session:
        cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)
        cursor.execute('SELECT * FROM accounts WHERE id = %s',
                       (session['id'], ))
        account = cursor.fetchone()  # Fetch the first row only
        #Grab sensor value from current user accounts!!!
        device_id = account['dev_id']
        # device_id = session['dev_id']
        #Tells the number of records already present in table for specific device id
        cursor.execute("SELECT COUNT(id) FROM glucose_value WHERE sensor = %s",
                       (device_id, ))
        old_rowcount = cursor.fetchone()["COUNT(id)"]
        print("Number of records present in glucose_value Table for",
              device_id, "=", old_rowcount)
        #Now specifing channels for each sensor alloted to user
        # NOTE(review): every product currently queries the SAME ThingSpeak
        # channel (1077581) — confirm whether each product should have its own
        # channel id.  Also: device ids outside product1..5 leave
        # thingspeak_cha unbound (NameError below).
        if device_id == "product1":
            thingspeak_cha = requests.get(
                'https://api.thingspeak.com/channels/1077581/feeds.json')
        elif device_id == "product2":
            thingspeak_cha = requests.get(
                'https://api.thingspeak.com/channels/1077581/feeds.json')
        elif device_id == "product3":
            thingspeak_cha = requests.get(
                'https://api.thingspeak.com/channels/1077581/feeds.json')
        elif device_id == "product4":
            thingspeak_cha = requests.get(
                'https://api.thingspeak.com/channels/1077581/feeds.json')
        elif device_id == "product5":
            thingspeak_cha = requests.get(
                'https://api.thingspeak.com/channels/1077581/feeds.json')
        print(thingspeak_cha)
        dic_thingspeak = json.loads(thingspeak_cha.text)
        #Printing Dictionary
        print(dic_thingspeak)
        #Showing type as dictionary
        print(type(dic_thingspeak))
        # Actually it's a dict with two keys "channel" and "feeds".
        # The first one has another dict for value, and the second a list of dicts.
        list_of_dic_feeds = dic_thingspeak["feeds"]
        # Printing list of Dictionaries having channel data
        print(list_of_dic_feeds)
        # Showing Type as list
        print(type(list_of_dic_feeds))
        new_rowcountjson = len(list_of_dic_feeds)
        print("Number of records present in thingspeak channel =",
              new_rowcountjson)
        # Slicing list of data present in channel to get only the new records,
        # that are to be appended in MySQL glucose_value table
        new_newlist = list_of_dic_feeds[old_rowcount:]
        print("Number of new records present in thingspeak channel =",
              len(new_newlist))
        # Iterate over the list, for inserting new data of thingspeak channel into MySQL glucose_value Table:
        # NOTE(review): commits once per row — a single commit after the loop
        # would be cheaper; confirm before changing.
        for entry in new_newlist:
            # Getting the value for the specific keys present in list of dic
            n_id = entry["entry_id"]
            n_glucose = entry["field1"]
            n_date = entry["created_at"]
            insert_stmt = (
                "INSERT INTO glucose_value (glucose, timestamp, sensor) "
                "VALUES (%s, %s, %s)")
            data = (n_glucose, n_date, device_id)
            cursor.execute(insert_stmt, data)
            mysql.connection.commit()
        # Now Retrieving data from MYSQL glucose_value Table to plot graph and Table using Plotly library
        cursor.execute("SELECT * FROM glucose_value WHERE sensor = %s",
                       (device_id, ))
        tupleofdic = cursor.fetchall()
        listofdic = list(tupleofdic)
        # Parallel lists: glucose values, timestamps, and row ids.
        glucolist = []
        datelist = []
        SNolist = []
        for dic in listofdic:
            glucose_ = dic['glucose']
            timestamp_ = dic['timestamp']
            id_ = dic['id']
            datelist.append(timestamp_)
            glucolist.append(glucose_)
            SNolist.append(id_)
        print(glucolist)
        print(datelist)
        print(SNolist)
        #For Starting S.No from 1 in the Table:
        print(len(SNolist))
        S_No_list = [*range(1, (len(SNolist) + 1), 1)]
        # Human-readable date and time columns for the table.
        formatteddate_list = []
        for x in datelist:
            formatteddate_list.append(x.strftime("%b %d, %Y"))
        print(formatteddate_list)
        formattedtime_list = []
        for x in datelist:
            formattedtime_list.append(x.strftime("%H:%M:%S"))
        print(formattedtime_list)
        # For Finding id of last element present in glucose_value table of specific sensor,
        # for displaying the last updated date and glucose value
        cursor.execute('SELECT MAX(id) FROM glucose_value WHERE sensor = %s',
                       (device_id, ))
        max_id_dic = cursor.fetchone()
        max_id = max_id_dic["MAX(id)"]
        print(max_id)
        cursor.execute('SELECT * FROM glucose_value WHERE id = %s',
                       (max_id, ))
        glcodata = cursor.fetchone()  # Fetch the first row only
        print(glcodata)
        #Ploting graph using plotly format
        fig = go.Figure(data=go.Scatter(x=datelist, y=glucolist))
        fig.update_layout(xaxis_title='Date', yaxis_title='Glucose (mg/dL)')
        # Range slider plus quick-zoom buttons (1h .. all).
        fig.update_xaxes(
            rangeslider_visible=True,
            rangeselector=dict(buttons=list([
                dict(count=1, label="1h", step="hour", stepmode="backward"),
                dict(count=1, label="1d", step="day", stepmode="backward"),
                dict(count=7, label="1w", step="day", stepmode="backward"),
                dict(count=1, label="1m", step="month", stepmode="backward"),
                dict(count=6, label="6m", step="month", stepmode="backward"),
                dict(count=1, label="YTD", step="year", stepmode="todate"),
                dict(count=1, label="1y", step="year", stepmode="backward"),
                dict(step="all")
            ])))
        # fig.show()
        # Serialize the figure for the template.
        graphJSON = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)
        scatter = graphJSON
        #Ploting Table using plotly format
        headerColor = '#636EFA'
        rowEvenColor = 'rgb(179,205,227)'
        rowOddColor = 'white'
        fig1 = go.Figure(data=[
            go.Table(
                header=dict(values=[
                    '<b>S.No</b>', '<b>Date</b>', '<b>Time</b>',
                    '<b>Glucose (mg/dL)</b>'
                ],
                            line_color='darkslategray',
                            fill_color=headerColor,
                            align=['left', 'center'],
                            font=dict(color='white', size=22)),
                cells=dict(
                    values=[
                        S_No_list,
                        formatteddate_list,
                        formattedtime_list,
                        glucolist,
                    ],
                    line_color='darkslategray',
                    # 2-D list of colors for alternating rows
                    fill_color='rgb(203,213,232)',
                    # fill_color=[[rowOddColor, rowEvenColor, rowOddColor, rowEvenColor, rowOddColor] * 5],
                    align=['left', 'center'],
                    font=dict(color='black', size=14),
                    height=30))
        ])
        # fig1.show()
        graphJSON = json.dumps(fig1, cls=plotly.utils.PlotlyJSONEncoder)
        table = graphJSON
        #
        return render_template('index.html',
                               account=account,
                               params=params,
                               plot=scatter,
                               glcodata=glcodata,
                               plot1=table)
    # User is not loggedin redirect to login page
    return redirect(url_for('newlogin'))
def main():
    """Streamlit entry point for the Twitter sentiment-analysis app.

    Two activities: "Analyze Tweets" (scrape tweets for one or two keywords,
    classify their sentiment and render scatter/pie/bar charts) and "About".

    Relies on module-level helpers/globals: ``st``, ``time``, ``queryTweet``,
    ``load_models``, ``getTweets``, ``predict``, ``make_subplots``, ``go``.
    """
    # page title
    st.title('Twitter Sentiment Analysis')
    activities = ['Analyze Tweets', 'About']
    choice = st.sidebar.selectbox('Select Activity', activities)
    #Loading Models
    if choice == "Analyze Tweets":
        # flag: compare a second keyword alongside the first.
        flag = st.sidebar.checkbox('Add Keyword')
        st.subheader('Input a tweet query')
        # user query
        user_input = st.text_input("Keyword", "Type Here.")
        if flag:
            user_input2 = st.text_input("Another Keyword", "Type Here.")
        count = st.sidebar.slider("Number of Tweets",
                                  min_value=10,
                                  max_value=1000,
                                  value=100,
                                  step=10)
        bar = st.progress(0)
        if st.button("Submit"):
            with st.spinner('Wait for it...'):
                start = time.time()
                text_query = user_input
                queryTweet(text_query)
                bar.progress(10)
                # Load the TF-IDF vectorizer and classifier.
                vect, model = load_models()
                bar.progress(30)
                # First keyword: fetch, classify, and show the raw table.
                tw1 = getTweets(user_input, count)
                tw1_pred = predict(vect, model, tw1["Tweets"].tolist())
                tw1_pred["Date"] = tw1["Date"]
                st.subheader(user_input)
                st.dataframe(tw1_pred)
                bar.progress(60)
                if (flag):
                    # Second keyword: same pipeline.
                    tw2 = getTweets(user_input2, count)
                    tw2_pred = predict(vect, model, tw2["Tweets"].tolist())
                    tw2_pred["Date"] = tw2["Date"]
                    st.subheader(user_input2)
                    st.dataframe(tw2_pred)
                # tdf["Date"]=df["Date"]
                if (flag):
                    # Two-keyword comparison charts.
                    # scatter plot
                    st.subheader("Scatter Plot")
                    fig = make_subplots(rows=1, cols=2)
                    fig.add_trace(
                        go.Scatter(x=tw1_pred["Date"],
                                   y=tw1_pred["Sentiment"],
                                   name=user_input),
                        row=1,
                        col=1)
                    fig.add_trace(
                        go.Scatter(x=tw2_pred["Date"],
                                   y=tw2_pred["Sentiment"],
                                   name=user_input2),
                        row=1,
                        col=2)
                    st.plotly_chart(fig)
                    # pie chart
                    st.subheader(user_input)
                    val = tw1_pred["Sentiment"].value_counts().values
                    fig = go.Figure()
                    fig.add_trace(
                        go.Pie(labels=['Positive', 'Negative'],
                               values=val,
                               name=user_input))
                    st.plotly_chart(fig)
                    st.subheader(user_input2)
                    val2 = tw2_pred["Sentiment"].value_counts().values
                    fig = go.Figure()
                    fig.add_trace(
                        go.Pie(labels=['Positive', 'Negative'],
                               values=val2,
                               name=user_input2))
                    st.plotly_chart(fig)
                    # bar chart
                    st.subheader("Bar Chart")
                    fig = go.Figure()
                    fig.add_trace(
                        go.Bar(x=['Negative', 'Positive'],
                               y=val,
                               name=user_input))
                    fig.add_trace(
                        go.Bar(x=['Negative', 'Positive'],
                               y=val2,
                               name=user_input2))
                    fig.update_layout(title="{} v {}".format(
                        user_input, user_input2),
                                      title_x=0.5,
                                      xaxis_title='Sentiment',
                                      yaxis_title='Number of Tweets')
                    st.plotly_chart(fig)
                else:
                    # Single-keyword charts.
                    # plot
                    st.subheader("Scatter Plot")
                    fig = go.Figure()
                    fig.add_trace(
                        go.Scatter(x=tw1_pred["Date"],
                                   y=tw1_pred["Sentiment"],
                                   name=user_input))
                    st.plotly_chart(fig)
                    # pie chart
                    st.subheader("Pie Chart")
                    val = tw1_pred["Sentiment"].value_counts().values
                    fig = go.Figure()
                    fig.add_trace(
                        go.Pie(labels=['Positive', 'Negative'],
                               values=val,
                               name='First Tweet'))
                    st.plotly_chart(fig)
                    # bar chart
                    st.subheader("Bar Chart")
                    fig = go.Figure()
                    fig.add_trace(
                        go.Bar(x=['Negative', 'Positive'],
                               y=val,
                               name=user_input))
                    # fig.add_trace(
                    #     go.Bar(x=['Negative', 'Positive'], y=val2, name=user_input2))
                    fig.update_layout(title=user_input,
                                      title_x=0.5,
                                      xaxis_title='Sentiment',
                                      yaxis_title='Number of Tweets')
                    st.plotly_chart(fig)
                bar.progress(100)
                st.balloons()
                end = time.time()
                print("Total Time: ", end - start)
    elif choice == "About":
        st.subheader("Orientation Project for Team Rigel")
        st.info("Twitter Sentiment Classifier trained on Sentiment 140 Dataset. Tweets preprocessed and TF-IDF computed with ngram=(1,3) and 10k words . Best performing model was Support Vector Classifier with 80% Accuracy. GetOldTweets is used for twitter scraping.")
        st.markdown(
            "Built by [Paul](https://github.com/talentmavingire/)"
            " ,"
            " [Asad](https://github.com/AsadAliDD/)"" ,and"
            " [Maaz](https://github.com/maazzzzz/)")
def phaseCorrect_report(inFID, outFID, hdr, position, ppmlim=(2.8, 3.2), html=None):
    """ Generate report for phaseCorrect.

    Plots the unphased input spectrum, the search region used to find the
    phasing point, the selected maximum, and the phased output spectrum.
    Optionally writes a standalone HTML report.

    :param inFID: FID before phase correction
    :param outFID: FID after phase correction
    :param hdr: header used to construct MRS objects from both FIDs
    :param position: index of the maximum within the ppmlim spectrum slice,
        or None to re-estimate it here
    :param ppmlim: (low, high) ppm search range
    :param html: None, an output directory, or a path ending in '.html'
    :return: plotly Figure
    :raises ValueError: if ``html`` is neither a directory nor an .html path
        inside an existing directory
    """
    # from matplotlib import pyplot as plt
    from fsl_mrs.core import MRS
    import plotly.graph_objects as go
    from fsl_mrs.utils.preproc.reporting import plotStyles, plotAxesStyle

    # Turn input FIDs into mrs objects
    toMRSobj = lambda fid: MRS(FID=fid, header=hdr)
    plotIn = toMRSobj(inFID)
    plotOut = toMRSobj(outFID)

    # Full display range in ppm (wider than the search region).
    widelimit = (0, 6)

    # Fetch line styles
    lines, colors, _ = plotStyles()

    # Make a new figure
    fig = go.Figure()

    # Add lines to figure
    def addline(fig, mrs, lim, name, linestyle):
        # Real part of the spectrum over the given ppm range.
        trace = go.Scatter(x=mrs.getAxes(ppmlim=lim),
                           y=np.real(mrs.getSpectrum(ppmlim=lim)),
                           mode='lines',
                           name=name,
                           line=linestyle)
        return fig.add_trace(trace)

    fig = addline(fig, plotIn, widelimit, 'Unphased', lines['in'])
    fig = addline(fig, plotIn, ppmlim, 'Search region', lines['emph'])

    if position is None:
        # re-estimate here.
        position = np.argmax(np.abs(plotIn.getSpectrum(ppmlim=ppmlim)))

    # Mark the point used for phasing with an 'x'.
    axis = [plotIn.getAxes(ppmlim=ppmlim)[position]]
    y_data = [np.real(plotIn.getSpectrum(ppmlim=ppmlim))[position]]
    trace = go.Scatter(x=axis,
                       y=y_data,
                       mode='markers',
                       name='max point',
                       marker=dict(color=colors['emph'], symbol='x', size=8))
    fig.add_trace(trace)

    fig = addline(fig, plotOut, widelimit, 'Phased', lines['out'])

    # Axes layout
    plotAxesStyle(fig, widelimit, title='Phase correction summary')

    # Axes
    if html is not None:
        from plotly.offline import plot
        from fsl_mrs.utils.preproc.reporting import figgroup, singleReport
        from datetime import datetime
        import os.path as op
        # Resolve html to a concrete file path: a directory gets a
        # timestamped report name; an explicit .html path is used as-is.
        if op.isdir(html):
            filename = 'report_' + datetime.now().strftime(
                "%Y%m%d_%H%M%S%f")[:-3] + '.html'
            htmlfile = op.join(html, filename)
        elif op.isdir(op.dirname(html)) and op.splitext(html)[1] == '.html':
            htmlfile = html
        else:
            raise ValueError('Report html path must be file or directory. ')

        opName = 'Phase correction'
        timestr = datetime.now().strftime("%H:%M:%S")
        datestr = datetime.now().strftime("%d/%m/%Y")
        headerinfo = 'Report for fsl_mrs.utils.preproc.phasing.phaseCorrect.\n'\
            + f'Generated at {timestr} on {datestr}.'
        # Figures
        div = plot(fig, output_type='div', include_plotlyjs='cdn')
        figurelist = [
            figgroup(
                fig=div,
                name='',
                foretext=
                f'Phase correction of spectra based on maximum in the range {ppmlim[0]} to {ppmlim[1]} ppm.',
                afttext=f'')
        ]
        singleReport(htmlfile, opName, headerinfo, figurelist)
        return fig
    else:
        return fig
def generate_candlestick_graph(pair: str, data: pd.DataFrame,
                               trades: pd.DataFrame = None,
                               indicators1: List[str] = None,
                               indicators2: List[str] = None) -> go.Figure:
    """
    Generate the graph from the data generated by Backtesting or from DB.
    Volume will always be plotted in row 2, so rows 1 and 3 are at our
    disposal for custom indicators.
    :param pair: Pair to display on the graph
    :param data: OHLCV DataFrame containing indicators and buy/sell signals
    :param trades: All trades created
    :param indicators1: List containing Main plot indicators
    :param indicators2: List containing Sub plot indicators
    :return: plotly Figure with candles, signals, volume, and indicators
             (FIX: docstring previously claimed ``None``)
    """
    # FIX: the indicator parameters defaulted to mutable ``[]`` literals, which
    # Python shares across calls.  Default to None and normalise here instead;
    # callers passing a list are unaffected.
    indicators1 = [] if indicators1 is None else indicators1
    indicators2 = [] if indicators2 is None else indicators2

    # Define the graph: price (tall), volume, and a free indicator row.
    fig = make_subplots(
        rows=3, cols=1,
        shared_xaxes=True,
        row_width=[1, 1, 4],
        vertical_spacing=0.0001,
    )
    fig['layout'].update(title=pair)
    fig['layout']['yaxis1'].update(title='Price')
    fig['layout']['yaxis2'].update(title='Volume')
    fig['layout']['yaxis3'].update(title='Other')
    fig['layout']['xaxis']['rangeslider'].update(visible=False)

    # Common information
    candles = go.Candlestick(
        x=data.date,
        open=data.open,
        high=data.high,
        low=data.low,
        close=data.close,
        name='Price'
    )
    fig.add_trace(candles, 1, 1)

    # Buy signals as green up-triangles on the price row.
    if 'buy' in data.columns:
        df_buy = data[data['buy'] == 1]
        if len(df_buy) > 0:
            buys = go.Scatter(
                x=df_buy.date,
                y=df_buy.close,
                mode='markers',
                name='buy',
                marker=dict(
                    symbol='triangle-up-dot',
                    size=9,
                    line=dict(width=1),
                    color='green',
                )
            )
            fig.add_trace(buys, 1, 1)
        else:
            logger.warning("No buy-signals found.")

    # Sell signals as red down-triangles on the price row.
    if 'sell' in data.columns:
        df_sell = data[data['sell'] == 1]
        if len(df_sell) > 0:
            sells = go.Scatter(
                x=df_sell.date,
                y=df_sell.close,
                mode='markers',
                name='sell',
                marker=dict(
                    symbol='triangle-down-dot',
                    size=9,
                    line=dict(width=1),
                    color='red',
                )
            )
            fig.add_trace(sells, 1, 1)
        else:
            logger.warning("No sell-signals found.")

    # Bollinger bands: transparent lines with a filled area between them
    # (the upper band's fill="tonexty" shades down to the lower band).
    if 'bb_lowerband' in data and 'bb_upperband' in data:
        bb_lower = go.Scattergl(
            x=data.date,
            y=data.bb_lowerband,
            name='BB lower',
            line={'color': 'rgba(255,255,255,0)'},
        )
        bb_upper = go.Scattergl(
            x=data.date,
            y=data.bb_upperband,
            name='BB upper',
            fill="tonexty",
            fillcolor="rgba(0,176,246,0.2)",
            line={'color': 'rgba(255,255,255,0)'},
        )
        fig.add_trace(bb_lower, 1, 1)
        fig.add_trace(bb_upper, 1, 1)

    # Add indicators to main plot
    fig = add_indicators(fig=fig, row=1, indicators=indicators1, data=data)

    fig = plot_trades(fig, trades)

    # Volume goes to row 2
    volume = go.Bar(
        x=data['date'],
        y=data['volume'],
        name='Volume'
    )
    fig.add_trace(volume, 2, 1)

    # Add indicators to separate row
    fig = add_indicators(fig=fig, row=3, indicators=indicators2, data=data)

    return fig
def detail(request):
    """Django view: accept an FPGA configuration upload and plot recent ADC data.

    On POST, stores the uploaded configuration file and invokes the external
    ``FPGA-writeConfig`` tool on it; on GET, presents an empty upload form.
    Always renders the last 100 ADC readings as a plotly line chart together
    with the list of stored configuration files.

    :param request: Django HttpRequest
    :return: rendered ``BoardInteraction/DisplayTemplate.html`` response
    :raises Http404: if the configuration-file or ADC models raise DoesNotExist
    """
    # Handle file upload for the FPGA configuration file
    if request.method == 'POST':
        form = DocumentForm(request.POST, request.FILES)
        if form.is_valid():
            # Upload the File to the Database
            newdoc = FPGAconfFiles(docfile=request.FILES['docfile'])
            newdoc.save()
            # Write FPGA Configuration.
            # SECURITY FIX: the previous code built a shell string from the
            # uploaded file's URL with shell=True, allowing shell injection via
            # a crafted filename.  Pass an argument list (no shell) instead.
            call(['FPGA-writeConfig', '-f',
                  settings.BASE_DIR + "/" + newdoc.docfile.url])
    else:
        form = DocumentForm()
    # Load all stored FPGA configuration files
    try:
        FPGAconfigFiles = FPGAconfFiles.objects.all()
    # FIX: previously caught ADCSensorReading.DoesNotExist around an
    # FPGAconfFiles query — the wrong model's exception.
    except FPGAconfFiles.DoesNotExist:
        raise Http404("FPGA Configuration data does not exist")
    # Load the ADC Value database
    try:
        adcChvalue = ADCSensorReading.objects.all()
    except ADCSensorReading.DoesNotExist:
        raise Http404("ADC data does not exist"
                      )  # In case of an Error display an Error 404 Screeen
    ### Plot the ADC Values #####
    # We want to show the last 100 messages, ordered most-recent-last
    adcData = adcChvalue.order_by('-timestamp')[:100]
    y_data = []
    x_data = []
    for b in adcData:
        y_data.append(b.reading)
        x_data.append(b.timestamp)
    fig = go.Figure()
    # Create and style traces
    fig.add_trace(
        go.Scatter(x=x_data,
                   y=y_data,
                   name='Sensor Voltage',
                   line=dict(color='royalblue', width=4, dash='dashdot')))
    # Edit the layout
    fig.update_layout(
        title='Plot of recorded ADC data from a Soft IP-interface',
        xaxis_title='Time (UTC) [HH:MM:SS]',
        yaxis_title='ADC Voltage (V)')
    # store the plot object
    plot_div = plot(fig, output_type='div', include_plotlyjs=False)
    # render the HTML template with all values
    return render(
        request,
        "BoardInteraction/DisplayTemplate.html",
        context={
            'plot_div': plot_div,  # Plot object
            'obj': adcData,  # ADC raw data
            'documents': FPGAconfigFiles,  # FPGA Configuration files
            'form': form  # Upload File form
        })
def summary(data, what, st):
    """Build a 4x5 grid of per-region subplots for the selected metric.

    Parameters
    ----------
    data : mapping
        Region name -> per-region object with attributes used below
        (terapia_intensiva, nuovi_positivi, tamponi, deceduti, popolazione).
    what : str
        One of 'Terapie Intensive', 'Nuovi Positivi',
        'Percentuale tamponi positivi', 'Deceduti'.
    st : unused here  # NOTE(review): parameter accepted but never referenced.

    Uses module-level ``make_subplots``, ``go``, ``itertools``,
    ``get_matplotlib_cmap``, ``UNITA``.  Returns the plotly Figure.
    """
    # The two autonomous provinces are skipped (they are part of
    # Trentino-Alto Adige) both in the titles and in the grid placement.
    titles = [
        title for title in data
        if title not in ['P.A. Bolzano', 'P.A. Trento']
    ]
    fig = make_subplots(4,
                        5,
                        shared_xaxes='all',
                        shared_yaxes='all',
                        subplot_titles=titles,
                        vertical_spacing=.08)
    # minus counts skipped regions so grid positions stay contiguous.
    minus = 0
    PALETTE = itertools.cycle(get_matplotlib_cmap('tab10', bins=8))
    maxs = []
    for i, name in enumerate(data):
        col = (i - minus) % 5 + 1
        row = (i - minus) // 5 + 1
        region = data[name]
        if name in ['P.A. Bolzano', 'P.A. Trento']:
            minus += 1
            continue
        # 7-day rolling series for the chosen metric.
        # NOTE(review): yscale is assigned in every branch but never applied
        # to the axes below — confirm whether log scaling was intended.
        if what == 'Terapie Intensive':
            plot_data = region.terapia_intensiva.rolling(
                7).mean() / region.popolazione * UNITA
            title = "Terapie Intensive per 100.000 abitanti"
            yscale = 'log'
        elif what == 'Nuovi Positivi':
            plot_data = region.nuovi_positivi.rolling(
                7).mean() / region.popolazione * UNITA
            title = "Nuovi positivi per 100.000 abitanti"
            yscale = 'log'
        elif what == 'Percentuale tamponi positivi':
            plot_data = region.nuovi_positivi.rolling(
                7).mean() / region.tamponi.diff().rolling(7).mean() * 100
            title = "Percentuale tamponi positivi."
            yscale = 'linear'
        elif what == 'Deceduti':
            # NOTE(review): divides by daily swabs although the title says
            # "per 100.000 abitanti" — confirm the intended denominator.
            plot_data = region.deceduti.diff().rolling(
                7).mean() / region.tamponi.diff().rolling(7).mean() * 100
            title = "Deceduti giornalieri per 100.000 abitanti."
            yscale = 'log'
        # Track each region's 90-day maximum so all y-axes share one range.
        maxs.append(plot_data.values[-90:].max())
        fig.add_trace(
            go.Scatter(x=plot_data.index[-90:],
                       y=plot_data.values[-90:],
                       showlegend=False,
                       name=title,
                       marker=dict(color=next(PALETTE)),
                       fill='tozeroy'), row, col)
    fig.update_xaxes(showgrid=True,
                     gridwidth=1,
                     tickangle=45,
                     gridcolor='LightGrey')
    fig.update_yaxes(showgrid=True,
                     gridwidth=1,
                     gridcolor='LightGrey',
                     range=[0, max(maxs)])
    # title here is whatever the last loop iteration assigned (same value for
    # all branches of a given `what`).
    fig.update_layout(
        title=title,
        plot_bgcolor="white",
        margin=dict(t=50, l=10, b=10, r=10),
        # width=1300,
        height=500,
        autosize=True,
    )
    # Restart the palette so subplot-title colors match the trace colors.
    PALETTE = itertools.cycle(get_matplotlib_cmap('tab10', bins=8))
    for i in fig['layout']['annotations']:
        i['font'] = dict(size=15, color=next(PALETTE))
    return fig
import pandas_datareader.data as web return web.DataReader(name=company, data_source='stooq') df = fetch() df.reset_index(inplace=True) df = df[:30] min_val = min(len(df), 30) app.layout = html.Div([ html.H4('Notowania spółki Amazon'), html.Table([html.Tr([html.Th(col) for col in df.columns])] + [ html.Tr([html.Td(df.iloc[i][col]) for col in df.columns]) for i in range(min_val) ]), dcc.Graph(figure=go.Figure(data=[ go.Scatter( x=df.Date, y=df.Close, mode='lines', fill='tozeroy', name='Amazon') ], layout=go.Layout( yaxis_type='log', title_text='Wykres cen Amazon', height=300, showlegend=True, ))), dcc.Graph( #graf figure=go.Figure( #figura data=[ #podajemy dane do wykresu go.Bar( #okreslamy rodzaj wykresu x=df.Date, y=df.Volume, name='Volume', marker_color='red',
def update_graph(stock_dropdown, regressor_dropdown, on, window):
    '''
    Dash callback: build the actual-vs-predicted line chart, the error
    histogram and the prediction info card for one stock/regressor/window.

    Inputs:
        stock_dropdown      str; filtered stock ticker
        regressor_dropdown  str; filtered regressor
        on                  bool; when True, overlay the absolute-error trace
        window              str/int; prediction window in working days
    Outputs (same 4-tuple repeated once per window output slot):
        card_header   str; header for the window card
        line_chart    plotly figure comparing actual vs. predicted prices
        pred_card     dbc.CardBody with prediction / error statistics
        histogram     plotly figure of relative prediction errors
    '''
    # Load & prepare data for graphing.
    # window_pred_data columns = y_pred, y_test, y_pred_last, date_arr
    window_pred_data = get_window_pred_data(stock_dropdown,
                                            regressor_dropdown, window)
    # y_pred array structure is different for three regressors:
    # these return 1-D arrays, the others nested [value] rows.
    if regressor_dropdown in [
            'Support Vector Regression', 'LASSO Regression',
            'Elastic Net Regression'
    ]:
        y_pred = window_pred_data[0].tolist()
        y_pred_last = window_pred_data[2].tolist()[0]
    else:
        y_pred = [element[0] for element in window_pred_data[0].tolist()]
        y_pred_last = window_pred_data[2].tolist()[0][0]
    y_test = [element[0] for element in window_pred_data[1].tolist()]
    x_dates = [str(element) for element in window_pred_data[3][0]]

    # Calculate date for future prediction (window counts working days,
    # so weekends are added back in: roughly 2 extra days per 5).
    if int(window) > 1:
        window_delta = int(window) * 2
        window_delta = window_delta + 2 * (window_delta // 5)
    else:
        window_delta = int(window)
    future_pred_date = datetime.strptime(
        x_dates[-1][:10], '%Y-%m-%d') + timedelta(days=window_delta)
    future_pred_date = datetime.date(future_pred_date)

    # Absolute and relative errors plus summary statistics.
    zip_err = zip(y_pred, y_test)
    err = [pred - test for pred, test in zip_err]
    zip_err_p = zip(err, y_test)
    err_perc = [err / test for err, test in zip_err_p]
    #err_perc = err / y_test
    err_mean = sum(err) / len(err)
    err_min = min(err)
    err_max = max(err)
    # Share of predictions within +/- 5% of the actual value.
    err_5_range = np.count_nonzero((np.array(err_perc) <= 0.05) & (
        np.array(err_perc) >= -0.05)) / len(err_perc)

    # Line Chart
    line_chart = go.Figure()
    line_chart.add_trace(
        go.Scatter(x=x_dates,
                   y=y_test,
                   name='Actual',
                   mode='lines',
                   line=dict(color='green')))
    line_chart.add_trace(
        go.Scatter(x=x_dates,
                   y=y_pred,
                   name='Predicted',
                   mode='lines',
                   line=dict(color='orange')))
    line_chart.update_layout(autosize=False, height=600, width=800)

    # Error switch: optionally overlay the absolute error on the line chart.
    if on == True:
        # Line Chart
        line_chart.add_trace(
            go.Scatter(x=x_dates,
                       y=err,
                       name='Error abs.',
                       mode='lines',
                       line=dict(color='red')))

    # Histogram
    # NOTE(review): source indentation was lost; the histogram is placed
    # outside the `on` branch because it is returned unconditionally —
    # confirm against the original layout.
    histogram = go.Figure()
    histogram.add_trace(go.Histogram(x=err_perc, histnorm='probability'))
    histogram.update_layout(autosize=False, height=600, width=800)

    # Card Output
    card_header = str(stock_dropdown) + ' | ' + str(regressor_dropdown)
    card_info_header = 'Prediction for ' + str(future_pred_date)
    card_info_1 = 'Adj. Close = ' + '{:.2f}'.format(y_pred_last)
    card_info_2 = 'Average Error = ' + '{:.2f}'.format(err_mean)
    card_info_3 = 'Min Error = ' + '{:.2f}'.format(err_min)
    card_info_4 = 'Max Error = ' + '{:.2f}'.format(err_max)
    card_info_5 = 'Errors within +/- 5% = ' + '{:.2%}'.format(err_5_range)
    pred_card = dbc.CardBody([
        html.H5(card_info_header, className='card-title'),
        html.P(card_info_1, className='card-text'),
        html.Hr(),
        html.P(card_info_2, className='card-text'),
        html.P(card_info_3, className='card-text'),
        html.P(card_info_4, className='card-text'),
        html.P(card_info_5, className='card-text')
    ])

    # Output: the same tuple for the 1-day, 5-day, 10-day and 20-day
    # window output slots of the Dash callback.
    return card_header, line_chart, pred_card, histogram, card_header, line_chart, pred_card, histogram, card_header, line_chart, pred_card, histogram, card_header, line_chart, pred_card, histogram
# In[ ]: airport_test_ma = moving_average(df['airport_test']) screening_test_ma = moving_average(df['screening_test']) # In[ ]: figb = go.Figure() #figb.add_trace(go.Scatter( # mode='markers', opacity=0.6, # x=df['date'], y=df['airport_test'], name='空港検疫陽性率')) figb.add_trace( go.Scatter(mode='markers', opacity=0.6, x=df['date'], y=df['screening_test'], name='スクリーニング陽性率')) #figb.add_trace(go.Scatter( # mode='lines', opacity=0.6, # x=df['date'], y=airport_test_ma, name='空港検疫陽性率移動平均')) figb.add_trace( go.Scatter(mode='lines', opacity=0.6, x=df['date'], y=screening_test_ma, name='陽性率移動平均')) figb.update_layout( title='Tokyo 2020 COVID-19 検査陽性率', template='plotly_dark', xaxis_title='date',
def write():
    """Writes content to the app.

    Streamlit "Detailed view" page: a pie chart of the worldwide age
    distribution, a Sankey diagram of asylum-application flows for the
    top-10 destination countries, and a full-width timeline with a year
    slider.  All data is pulled from CSVs in the project's GitHub repo.
    """
    #ast.shared.components.title_awesome("Detail")  # Awesome_Streamlit title
    # Page title
    st.title("Detailed view")
    # st.header('Hier kann ein Text rein')  (German: "a text could go here")
    # read CSV
    # CSV for Pie Chart
    df = pd.read_csv(
        'https://raw.githubusercontent.com/hannahkruck/VIS_Test1/Develop/piechart.csv',
        sep=';')

    #-----------------Markdown info-----------------
    # CSS for the hover tooltip shown next to the info icon below.
    st.markdown('''
<!-- https://www.w3schools.com/css/tryit.asp?filename=trycss_tooltip_transition & https://www.w3schools.com/css/tryit.asp?filename=trycss_tooltip_right-->
<style>
.tooltip {
  position: relative;
  display: inline-block;
  font-size:1.6rem;
}

.tooltip .tooltiptext {
  visibility: hidden;
  width: 50vw;
  background-color: #f1f3f7;
  color: #262730;
  text-align: justify;
  border-radius: 6px;
  padding: 5px;
  font-size:0.9rem;

  /* Position the tooltip */
  position: absolute;
  z-index: 1;
  top: -5px;
  left: 105%;

  opacity: 0;
  transition: opacity 0.8s;
}

.tooltip:hover .tooltiptext {
  visibility: visible;
  opacity: 1;
}
</style>
''',
                unsafe_allow_html=True)
    # The info icon plus its tooltip text describing both charts.
    st.markdown('''
<div class="tooltip">&#9432;
<span class="tooltiptext">
<b>Pie Chart</b><br>
The pie chart represents the age distribution worldwide for the selected year.
<br><br>
<b>Sankey Diagram</b><br>
The Sankey diagram shows the distribution of asylum applications from the different countries of origin (left) to the different countries of destination (right). Top 10 destination countries of a year are illustrated here.
<br><br>
It should be noted that due to the overview, unknown data as well as data on overseas countries and territories have been removed from the dataset. In addition, for a few countries only temporary data has been provided.
</span></div>
''',
                unsafe_allow_html=True)

    # Layout setting of the page: two columns plus a full-width container.
    c1, c2 = st.beta_columns((1, 1))
    container = st.beta_container()
    st.write(
        '<style>div.Widget.row-widget.stRadio > div{flex-direction:row;}</style>',
        unsafe_allow_html=True)

    #-------------------------Create Sankey diagram-------------------------------
    #https://www.geeksforgeeks.org/sankey-diagram-using-plotly-in-python/
    #https://coderzcolumn.com/tutorials/data-science/how-to-plot-sankey-diagram-in-python-jupyter-notebook-holoviews-and-plotly#2
    # Year shown in the Sankey diagram
    yearVar = 2019
    # read & filter the data
    show_df = pd.read_csv(
        'https://raw.githubusercontent.com/hannahkruck/VIS_Test1/Develop/Datensatz_Sankey_Diagramm_eng.csv',
        sep=';')
    #YEAR
    yearRows = show_df[show_df['Year'] != yearVar].index
    show_df.drop(yearRows, inplace=True)

    # Nodes & links & colors
    # Deduplicate consecutive source labels (data is grouped by source).
    label_souce = show_df['Label_Source'].dropna(axis=0, how='any')
    label_souce2 = []
    elementVar = ''
    for i in label_souce:
        if (i != elementVar):
            label_souce2.append(i)
            elementVar = i
    label_target = show_df['Label_Target'].dropna(axis=0, how='any')
    label = [*label_souce2, *label_target]
    source = show_df['Source'].dropna(axis=0, how='any')
    target = show_df['Target'].dropna(axis=0, how='any')
    value = show_df['Value'].dropna(axis=0, how='any')
    #color
    color_node = [
        #Source Syria, Afghanistan, Venezuela, Irak, Colombia, Pakistan, Türkei, Nigeria, Iran, Albania
        '#40bf77', '#93beec', '#1ff91f', '#cd8162', '#a6a6a6', '#80e5ff',
        '#b299e6', '#ff33ff', '#CDC037', '#ff6a6a',
        #Target
        '#0B2641', '#0B2641', '#0B2641', '#0B2641', '#0B2641', '#0B2641',
        '#0B2641', '#0B2641', '#0B2641', '#0B2641'
    ]
    # One lighter shade per source country, repeated for its 10 links.
    color_link = [
        '#b8e0b8', '#b8e0b8', '#b8e0b8', '#b8e0b8', '#b8e0b8', '#b8e0b8',
        '#b8e0b8', '#b8e0b8', '#b8e0b8', '#b8e0b8',
        '#bed8f4', '#bed8f4', '#bed8f4', '#bed8f4', '#bed8f4', '#bed8f4',
        '#bed8f4', '#bed8f4', '#bed8f4', '#bed8f4',
        '#bef4be', '#bef4be', '#bef4be', '#bef4be', '#bef4be', '#bef4be',
        '#bef4be', '#bef4be', '#bef4be', '#bef4be',
        '#e7c1b1', '#e7c1b1', '#e7c1b1', '#e7c1b1', '#e7c1b1', '#e7c1b1',
        '#e7c1b1', '#e7c1b1', '#e7c1b1', '#e7c1b1',
        '#cccccc', '#cccccc', '#cccccc', '#cccccc', '#cccccc', '#cccccc',
        '#cccccc', '#cccccc', '#cccccc', '#cccccc',
        '#80e5ff', '#80e5ff', '#80e5ff', '#80e5ff', '#80e5ff', '#80e5ff',
        '#80e5ff', '#80e5ff', '#80e5ff', '#80e5ff',
        '#c2adeb', '#c2adeb', '#c2adeb', '#c2adeb', '#c2adeb', '#c2adeb',
        '#c2adeb', '#c2adeb', '#c2adeb', '#c2adeb',
        '#ffccff', '#ffccff', '#ffccff', '#ffccff', '#ffccff', '#ffccff',
        '#ffccff', '#ffccff', '#ffccff', '#ffccff',
        '#ffec80', '#ffec80', '#ffec80', '#ffec80', '#ffec80', '#ffec80',
        '#ffec80', '#ffec80', '#ffec80', '#ffec80',
        '#ffcccc', '#ffcccc', '#ffcccc', '#ffcccc', '#ffcccc', '#ffcccc',
        '#ffcccc', '#ffcccc', '#ffcccc', '#ffcccc',
    ]

    # data to dict, dict to sankey
    link = dict(source=source, target=target, value=value, color=color_link)
    node = dict(label=label, pad=20, thickness=10, color=color_node)
    layout = dict(
        # (German: "Top 10 distribution of a country's asylum applications
        # among the various destination countries")
        #title= 'Top 10 Distribution of a Countries Asylum Applications among the various <br>Countries of Destination %s' % yearVar,
        height=800,
        font=dict(size=11),
    )
    data = go.Sankey(link=link, node=node)
    # Sankey diagram layout properties
    fig2 = go.Figure(data, layout=layout)

    #------------Create pie chart-------------------
    # Transfer data to list
    labels = df['year'].tolist()
    values = df['2019'].tolist()
    layout = dict(height=600,
                  font=dict(size=11)
                  #title='Age Distribution of Asylum Seekers Worldwide %s'
                  )
    data = go.Pie(labels=labels, values=values)
    # Create pie figure
    # NOTE(review): this first fig1 (with textinfo/insidetextorientation) is
    # immediately overwritten below, so those options never take effect —
    # confirm which variant is intended.
    fig1 = go.Figure(data=[
        go.Pie(
            labels=labels,
            values=values,
            textinfo='label+percent',
            insidetextorientation='radial',
        )
    ])
    # Pie chart layout properties
    fig1 = go.Figure(data, layout=layout)

    #------------Create Timeline Years V. 2.0-------------------
    # read CSV for the histogram graph
    df = pd.read_csv(
        "https://raw.githubusercontent.com/hannahkruck/VIS_Test1/Develop/Histogram_mini.csv",
        encoding="utf8",
        sep=";")
    # use years for the x-axis and the worldwide amount of asylum applications for the y-axis
    fig3 = go.Figure(
        go.Scatter(x=df['year'], y=df['asylum_applications_worldwide']))
    # customizing the graph
    fig3.update_layout(
        # customize width
        #autosize=False,
        width=1900,
        height=100,
        # hide labels
        yaxis={
            'visible': False,
            'showticklabels': False
        },
        # show every year as a label below
        xaxis={'type': 'category'},
        # create white background to match with initial background of streamlit
        plot_bgcolor='rgb(255,255,255)',
        # set all margins and padding to zero to create full width graph
        margin=go.layout.Margin(l=0, r=35, b=0, t=0, pad=0))

    #------------Create Slider Years V. 2.0-------------------
    year = st.slider("", (int(df["year"].min())), (int(df["year"].max())))
    selected_year = year
    # Delete all cells, except one year (both maps)
    indexNames = df[df['year'] != selected_year].index
    df.drop(indexNames, inplace=True)

    with c1:
        st.subheader('Asylum seekers by age in Europe in the year %s' %
                     selected_year)
        st.plotly_chart(fig1, use_container_width=True)
    with c2:
        st.subheader(
            'Top 10 Distribution of a Countries Asylum Applications among the various Countries of Destination %s'
            % selected_year)
        st.plotly_chart(fig2, use_container_width=True)
    with container:
        st.plotly_chart(fig3, use_container_width=True)
async def ohlcv(request: Request, ticker: str, resolution: str, title="OHLCV"):
    """Fetch one year of candles from Finnhub and render price/volume charts.

    request    : FastAPI/Starlette Request, forwarded to the template
    ticker     : stock symbol (upper-cased before the API call)
    resolution : Finnhub candle resolution (upper-cased, e.g. 'D', 'W')
    title      : page title passed to the template
    Returns a TemplateResponse for "ohlcv.html" with three plotly divs.
    """
    token = settings.fh_key
    # Request window: now back to 364 days ago, as unix timestamps.
    date_time = datetime.datetime.now()
    end = str(p_d.convert_to_unix(date_time))
    start = str(p_d.convert_to_unix(p_d.subtract_date(date_time, 364)))
    params = {
        "token": token,
        "from": start,
        "to": end,
        "resolution": resolution.upper(),
        "symbol": ticker.upper(),
    }
    r = requests.get("https://finnhub.io/api/v1/stock/candle", params=params)
    r_json = r.json()
    # NOTE(review): no handling for r_json['s'] == 'no_data' — in that case
    # the 't'/'o'/... keys are absent and the lookups below raise KeyError.
    dates = map(lambda x: p_d.format_time(x), r_json['t'])
    data = {
        "status": r_json['s'],
        "ticker": ticker,
        "resolution": resolution,
        "time": list(dates),
        "open": r_json['o'],
        "high": r_json['h'],
        "low": r_json['l'],
        "close": r_json['c'],
        "volume": r_json['v']
    }
    # Stand-alone price (line) and volume (bar) figures.
    fig_p = go.Figure(
        data=go.Scatter(x=data['time'], y=data['close'], mode='lines'))
    fig_v = go.Figure(data=go.Bar(x=data['time'], y=data['volume']))
    fig_p.update_xaxes(spikemode="across+toaxis",
                       spikedash='solid',
                       spikethickness=2,
                       spikecolor="lightblue")
    fig_p.update_layout(hovermode="x")
    fig_v.update_layout(hovermode="x")
    # Combined figure: price on the primary y-axis, volume on the secondary.
    fig_pv = make_subplots(specs=[[{"secondary_y": True}]])
    fig_pv.add_trace(go.Scatter(x=data['time'], y=data['close'], name="price"),
                     secondary_y=False)
    fig_pv.add_trace(
        go.Bar(x=data['time'], y=data['volume'], name="volume"),
        secondary_y=True,
    )
    fig_pv.update_yaxes(title_text="<b>Price</b>", secondary_y=False)
    fig_pv.update_yaxes(title_text="<b>Volume</b>", secondary_y=True)
    fig_pv.update_layout(hovermode="x")
    fig_pv.update_xaxes(spikemode="across+toaxis",
                        spikedash='solid',
                        spikethickness=1,
                        spikecolor="lightblue")
    # Render each figure to an embeddable HTML div (plotly.js loaded by page).
    fig_div_price = plot(fig_p,
                         output_type='div',
                         include_plotlyjs=False,
                         show_link=False,
                         link_text="")
    fig_div_volume = plot(fig_v,
                          output_type='div',
                          include_plotlyjs=False,
                          show_link=False,
                          link_text="")
    fig_div_pv = plot(fig_pv,
                      output_type='div',
                      include_plotlyjs=False,
                      show_link=False,
                      link_text="")
    return templates.TemplateResponse(
        "ohlcv.html",
        {
            "request": request,
            # NOTE(review): this passes the *builtin* id function — likely a
            # leftover; confirm what the template expects under "id".
            "id": id,
            "fig_div_price": fig_div_price,
            "fig_div_volume": fig_div_volume,
            "fig_div_pv": fig_div_pv,
            "title": title
        })
# title = name, xaxis_title="x", yaxis_title="Amplitude", # yaxis = dict(scaleanchor = "x", scaleratio = 1 ), # <|<|<|<|<|<|<|<|<|<|<|<| legend=dict(orientation='h', yanchor='top', xanchor='left', y=1.1), margin=dict(l=5, r=5, b=5, t=5), font=dict(family="Computer Modern", color="black", size=18)) fig.add_trace( go.Scatter( name="PC Avg", # <|<|<|<|<|<|<|<|<|<|<|<| # x=[item for sublist in pseudoCyclesX for item in sublist.tolist() + [None]], y=pseudoCyclesY_avg, # fill="toself", mode="lines", line=dict( width=4, color="black", # showscale=False ), # visible = "legendonly" )) fig.add_trace( go.Scatter( name="PC Avg mirror", # <|<|<|<|<|<|<|<|<|<|<|<| x=X + m - 1, y=pseudoCyclesY_avg, # fill="toself", mode="lines", line=dict(
import plotly.graph_objects as go

# The 24 per-hour price variables, listed in hour order (0-23).
_hourly_prices = (
    price, price1, price2, price3, price4, price5, price6, price7,
    price8, price9, price10, price11, price12, price13, price14,
    price15, price16, price17, price18, price19, price20, price21,
    price22, price23,
)

# Same figure as before: x plotted against the prices coerced to float.
fig = go.Figure(
    data=[go.Scatter(x=x, y=[float(p) for p in _hourly_prices])]
)
'% Receita Própria: {PercPropria}').format( Municipio=row['Município'], ReceitaTotal="{:,.2f}".format(row['ReceitaTotal']), HABITANTES="{:,.2f}".format(row['HABITANTES']), ReceitaPropria="{:,.2f}".format(row['ReceitaPropria']), PercPropria="{:.2%}".format(row['PercPropria'] / 100)).format()) bubble_size.append(row['ReceitaTotal']) df['text'] = hover_text df['size'] = bubble_size sizeref = 2. * max(df['size']) / (100**2) fig.add_trace( go.Scatter(x=df['HABITANTES'], y=df['PercPropria'], name='Porto', text=df['text'], marker_size=df['size'])) # Tune marker appearance and layout fig.update_traces(mode='markers', marker=dict(sizemode='area', color=df['PercPropria'], sizeref=sizeref, line_width=2, showscale=True)) fig.update_layout( title='% Receita Própria x População', xaxis=dict( title='População',
last_day = data_csv.tail(1) confirme = last_day['Confirme'] death = last_day['Death'] hospitalized = last_day['Hospitalized'] recovered = last_day['Recovered'] all_date = data_csv['Date'] all_confirme = data_csv['Confirme'] all_death = data_csv['Death'] date = last_day['Date'] # Bar-Graphic confermed fig = go.Figure( data=[go.Bar(x=all_date, y=all_confirme, text='Cas confirmés')], layout={'title': 'Résultat positif au COVID-19 (depuis le 24 février)'}) # Graph of death fig2 = go.Figure(data=[go.Scatter(x=all_date, y=all_death)], layout={'title': 'Décès à l\'hôpital'}) external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css'] app = dash.Dash(__name__, external_stylesheets=external_stylesheets) server = app.server app.title = 'Normandie.COVID-19' app.layout = html.Div([ html.Div([html.H2('Normandie.COVID-19')], className='header'), html.Div([ html.P([ dcc.Markdown(''' Source: [L’Agence régionale de santé de Normandie] (https://www.normandie.ars.sante.fr/) ''')
"Anzahl_y": "Kaufwahrscheinlichkeit" }) df_BCG["Kaufwahrscheinlichkeit in %"] = df_BCG["Kaufwahrscheinlichkeit"] * 100 df_BCG["Gewinn pro Verkauf in €"] = df_BCG["Gewinn"] / df_BCG["Anzahl"] # Scatter-Plot erstellen nach den Variablen "Kaufwahrscheinlichkeit in %" und "Gewinn pro Verkauf in €" pro Produkt fig = px.scatter(df_BCG, x=df_BCG["Kaufwahrscheinlichkeit in %"], y=df_BCG["Gewinn pro Verkauf in €"], color="Angebotenes Produkt") # Figure-Element mit den einzelnen Sektionen der BCG-MAtrix über den Scatter-Plot legen, um die Klassifizierung zu visualisieren fig.add_trace( go.Scatter(x=[12.5, 12.5], y=[900, 900], text=["<b>Poor Dogs</b>"], mode="text", showlegend=False)) fig.add_trace( go.Scatter(x=[12.5, 12.5], y=[1900, 1900], text=["<b>Questionmarks</b>"], mode="text", showlegend=False)) fig.add_trace( go.Scatter(x=[37.5, 37.5], y=[900, 900], text=["<b>Cash Cows</b>"], mode="text", showlegend=False)) fig.add_trace(
df_demanda = pd.read_csv('https://raw.githubusercontent.com/gustavoprietodaher/tpfinal/main/APP_VAR_MW_CABA_2017_2020.csv') L_Date = list(df_demanda['Date']) L_MW = list(df_demanda['MW']) L_Temp_avg = list(df_demanda['Temp_avg']) L_Temp_min = list(df_demanda['Temp_min']) L_Temp_max = list(df_demanda['Temp_max']) L_hPa = list(df_demanda['hPa']) L_Hum = list(df_demanda['Hum']) L_Wind_avg = list(df_demanda['Wind_avg']) L_Wind_max = list(df_demanda['Wind_max']) fig = make_subplots(rows=5, cols=1, shared_xaxes=True, vertical_spacing=0.01) fig.add_trace(go.Scatter(name='hPa', x=L_Date, y=L_hPa, line=dict(color='gold', width=1)), row=1, col=1) fig.add_trace(go.Line(name='Temp_avg', x=L_Date, y=L_Temp_avg, line=dict(color='lawngreen', width=2)), row=2, col=1) fig.add_trace(go.Scatter(name='Temp_min',x=L_Date, y=L_Temp_min, line=dict(color='deepskyblue', width=1, dash='dashdot')), row=2, col=1) fig.add_trace(go.Scatter(name='Temp_max',x=L_Date, y=L_Temp_max, line=dict(color='red', width=1, dash='dashdot')), row=2, col=1) fig.add_trace(go.Scatter(name='MW',x=L_Date, y=L_MW,line=dict(color='blue', width=1.5)), row=3, col=1) fig.add_trace(go.Scatter(name='Hum',x=L_Date, y=L_Hum,line=dict(color='deeppink', width=1.5)),
# (Commented-out per-axis random-walk helpers removed; recover from VCS if needed.)

external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']

app = dash.Dash(__name__, external_stylesheets=external_stylesheets)

# Scatter of the walk; points are colored by step index (Electric scale)
# so the path's progression is visible.
_walk_trace = go.Scatter(x=x_position,
                         y=y_position,
                         mode='markers',
                         name='I walk the line (many of them)',
                         marker=dict(color=nm.arange(500),
                                     size=8,
                                     colorscale='Electric',
                                     showscale=True))

app.layout = html.Div(children=[
    html.H1(children='Hello Dash'),
    html.Div(children='''
        Dash: A web application framework for Python.
    '''),
    dcc.Graph(id='Random Walk', figure=go.Figure(data=_walk_trace)),
])

if __name__ == '__main__':
    app.run_server(debug=True)
</body> """ st.markdown(html, unsafe_allow_html=True) st.markdown('<hr></hr>', unsafe_allow_html=True) fig = make_subplots(rows=4, cols=2, horizontal_spacing=0.07, vertical_spacing=0.05, x_title="Date", y_title="% Change", specs=[[{}, {}], [{}, {}], [{}, {}], [{}, {}]], subplot_titles=("All", "Analytical", "Anger", "Confident", "Fear", "Joy", "Neutral", "Sadness")) fig.add_trace(go.Scatter(x=list(ddl['Datetime']), y=list(ddl['Analytical']), line=dict(color=colors[0]), name="Analytical"), row=1, col=1) fig.add_trace(go.Scatter(x=list(ddl['Datetime']), y=list(ddl['Anger']), line=dict(color=colors[1]), name="Anger"), row=1, col=1) fig.add_trace(go.Scatter(x=list(ddl['Datetime']), y=list(ddl['Confident']), line=dict(color=colors[2]), name="Confident"), row=1, col=1)
pd.to_datetime("2017-08-02"), ], ) ) for i,row in df.iterrows(): if(pd.isnull(row['__target'])): continue fig.add_trace( go.Scatter( x=[ row['start_time'], row['end_time'], row['end_time'], row['start_time'], ], y=[0, 0, 1, 1], fill="toself", fillcolor="darkviolet", # marker={'size':0}, mode="lines", hoveron="points+fills", # select where hover is active line_color="darkviolet", showlegend=False, # line_width=0, opacity=0.5, text=str(row['__target']), hoverinfo="text+x+y", ) ) fig.show()
def plotPitch(): import plotly.graph_objects as go #pitch df = pointGrid() fig = go.Figure() #tout terrain fig.add_shape(type="rect", x0=0, y0=87, x1=80, y1=120, line=dict( color="#9fa6b7", width=1, ), fillcolor='rgba(0,0,0,0)', layer="below", ) #demi cercle surface réparation fig.add_shape(type="circle", xref="x", yref="y", x0=32, y0=98, x1=48, y1=111, line_color="#9fa6b7", line_width=1, layer="below" ) #surface réparation fig.add_shape(type="rect", x0=18, y0=102, x1=62, y1=120, line=dict( color="#9fa6b7", width=1, ), fillcolor='#171b26', layer="below", ) #6.50 fig.add_shape(type="rect", x0=30, y0=114, x1=50, y1=120, line=dict( color="#9fa6b7", width=1, ), fillcolor='rgba(0,0,0,0)', layer="below", ) #buts fig.add_shape(type="rect", x0=36, y0=120, x1=44, y1=122, line=dict( color="#9fa6b7", width=1, ), fillcolor='rgba(0,0,0,0)', layer="below", ) fig.add_trace(go.Scatter( x=[40, 36, 44], y=[109, 120, 120], fill="toself", fillcolor='rgba(0,0,0,0)', opacity=0.5, line_width=0, mode='none', showlegend = False, hoverinfo='none')) #grid fig.add_trace(go.Scatter(x=df['x'], y=df['y'], mode='markers', marker_color='rgba(0,0,0,0)', marker_size=2, showlegend = False, hoverinfo='none')) #add shot position (update with click) fig.add_trace(go.Scatter(x=[40], y=[109], mode='markers', marker_color='rgba(0,0,0,0)', marker_size=13, showlegend = False, hoverinfo='none', selectedpoints=[0], marker_symbol='hexagram')) fig.update_layout(clickmode='event+select') fig.update_layout(yaxis=dict(scaleanchor="x", scaleratio=1)) fig.update_layout(yaxis_range=[85,122], yaxis_visible=False, yaxis_showticklabels=False, xaxis_visible=False, xaxis_showticklabels=False) fig.update_layout( margin=dict(l=0, r=0, t=0, b=0), paper_bgcolor='rgba(0,0,0,0)', plot_bgcolor='rgba(0,0,0,0)' ) fig.update_xaxes(range=(-1,81)) return fig
"""creates plots of train vs val loss and train vs val accuracy INPUTS df, dataframe of training stats by epoch RETURNS None""" #plot for loss fig = go.Figure(layout=go.Layout( title="Loss by Training Epoch", xaxis = dict( title = "Training Epoch" ), yaxis=dict( title = "Loss" ) )) fig.add_trace(go.Scatter(x=df["epoch"], y=df["training_loss"], mode='lines', name='training', line_color = '#1B848E')) fig.add_trace(go.Scatter(x=df["epoch"], y=df["val_loss"], mode='lines', name='validation', line_color = '#33B8B5')) fig.show() # plot for accuracy fig = go.Figure(layout=go.Layout( title="Accuracy by Training Epoch", xaxis = dict( title = "Training Epoch" ),
def plot_cities_forecast(forecasts: Dict[str, pd.DataFrame]): """ Creates the plotly figure for the cities forecast """ # Get color scheme from seaborn city_color_dictionary = dict( zip(forecasts["city"].unique(), [f"rgb{c}" for c in sns.color_palette("deep")])) fig = go.Figure() for city in forecasts["city"].unique(): # Plot real values fig.add_trace( go.Scatter(x=forecasts[forecasts["city"] == city].index, y=forecasts.loc[forecasts["city"] == city, "real_values"], name=city, legendgroup=city, line=dict(color=city_color_dictionary[city]), mode='lines')) # Plot Predictions forecasting_df = forecasts.loc[(forecasts["city"] == city) & (forecasts["real_values"].isna()), :] fig.add_trace( go.Scatter(x=forecasting_df.index, y=forecasting_df["forecast"], name=city, legendgroup=city, mode='lines+markers', line=dict(color=city_color_dictionary[city], width=1, dash='dot'), error_y=dict(type='data', array=forecasting_df["error"], visible=True), showlegend=False)) # Add a vertical line to mark the forecasting period fig.add_shape(type='line', yref="paper", xref="x", x0=forecasts.loc[forecasts["city"] == city, "real_values"].dropna().index.max(), y0=0, x1=forecasts.loc[forecasts["city"] == city, "real_values"].dropna().index.max(), y1=1, line=dict(color='black', width=0.5, dash="dash")) # Add annoation on the forecast line fig.add_annotation(x=forecasts.loc[forecasts["city"] == city, "real_values"].dropna().index.max(), y=1, yref='paper', xanchor="right", yanchor="top", showarrow=False, textangle=-90, text="Forecast Start") fig.layout.title.text = "Monthly Profit per city." fig.layout.yaxis.title = "Profit (€)" fig.layout.xaxis.title = "Date" fig.update_xaxes(rangeslider_visible=True) fig.update_layout(legend_title_text='City') return fig