def render_graph(df: DataFrame) -> None:
    """
    Displays a graph for the current session state.

    The graph is rendered according to the given dataframe.

    :param df: Dataframe for which the graph is to be plotted
    :return: None; the graph is plotted and displayed as a side effect
    """
    if df.empty:
        st.title("No Data to display")
    else:
        if session_state.graph_type == "X-Y Plotter" and len(session_state.column) == 1:
            st.error("Please select at least two tags in Tag selection")
        if session_state.data_type != "Live":
            session_state.column_statistic = st.multiselect(
                "Select Tags For Statistics", all_columns_filtered())
            if not Enquiry(session_state.column_statistic):
                display_stats(df)
        if session_state.display_type == "Graph":
            if session_state.graph_type == "Bar Chart":
                bar_graph(df)
            elif session_state.graph_type == "Pie Chart":
                pie_graph(df)
            elif session_state.graph_type == "Doughnut Chart":
                doughnut_graph(df)
            elif session_state.graph_type == "Table":
                st.dataframe(df)
            elif session_state.graph_type == "Point Chart":
                point_chart(df)
            if session_state.data_type != "Live":
                if session_state.graph_type == "Trend Chart":
                    trend_line_chart(df)
                elif session_state.graph_type == "Area Chart":
                    area_chart(df)
                elif session_state.graph_type == 'X-Y Plotter':
                    x, y = st.beta_columns(2)
                    tag_list = all_columns_filtered()
                    session_state.column1 = x.selectbox("Choose X axis Tag", tag_list)
                    if session_state.column1 != "":
                        session_state.column2 = y.multiselect(
                            "Choose Y axis Tag",
                            [col for col in tag_list if col != session_state.column1])
                        if Enquiry(session_state.column2):
                            st.success("Choose Y Tag to begin")
                        else:
                            # x_y_graph(df, session_state.column1, session_state.column2)
                            x_y_plot(df, session_state.column1, session_state.column2)
            else:
                if session_state.graph_type in session_state.Live_exlude_graph:
                    st.success(session_state.graph_type + " cannot be plotted on the Live data type")
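# render_graph relies on an `Enquiry` helper that is not shown above. From its
# usage (`if not Enquiry(...)` before displaying stats, `if Enquiry(...)` before
# prompting for a Y tag) it appears to return a truthy value when the given list
# is empty. A minimal sketch under that assumption (hypothetical helper):
def Enquiry(selection: list) -> bool:
    """Return True when the selection list is empty."""
    return not selection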
def get_graph(graph: str, filter: int):
    """Retrieves the graph object."""
    if graph == "bar_graph":
        graph = graphs.bar_graph(filter)
    elif graph == "scatterplot":
        graph = graphs.scatterplot(filter)
    return graph
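# A minimal usage sketch for get_graph. It assumes the `graphs` module exposes
# bar_graph(filter) and scatterplot(filter) and that both return an object with
# a show() method; the filter value 10 is illustrative only.
chart = get_graph("scatterplot", filter=10)
chart.show()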
def driver():
    # Read the given file
    file_name_with_extension = get_file_name()
    file_name = file_split.search(file_name_with_extension)[1]
    text_to_analyze = read_file(file_name_with_extension)

    # Collect data
    date_dictionary, time_dictionary, person_dictionary, word_dictionary, number_of_messages = collect_data(
        text_to_analyze)

    # Sort all dictionaries
    word_dictionary = OrderedDict(word_dictionary.most_common(20))
    date_dictionary = sort_dictionary(date_dictionary)
    time_dictionary = sort_dictionary(time_dictionary, 'key')
    person_dictionary = sort_dictionary(person_dictionary)

    if not os.path.exists('output'):
        os.mkdir('output')

    # Generate graphs
    graphs.histogram(time_dictionary,
                     'Message Frequency Chart in ' + file_name,
                     'output/' + file_name + '-time_activity.png')
    graphs.bar_graph(word_dictionary, 20, 'Uses',
                     'Most used words in ' + str(number_of_messages) + ' messages in ' + file_name,
                     'output/' + file_name + '-word_frequency.png')
    graphs.bar_graph(date_dictionary, 20, 'Messages',
                     'Most Messages in ' + file_name,
                     'output/' + file_name + '-date_activity.png')
    graphs.bar_graph(person_dictionary, 20, 'Messages',
                     'Most active person in ' + file_name,
                     'output/' + file_name + '-person_activity.png')

    # Remove old data sheets
    output_file = 'output/' + file_name + '-data.xlsx'
    if os.path.isfile(output_file):
        os.unlink(output_file)

    # Add to Excel sheet
    to_xl(date_dictionary, 'Dates', 'Date', 'No. of Messages', output_file)
    to_xl(person_dictionary, 'People', 'Sender', 'No. of Messages', output_file)
    to_xl(time_dictionary, 'Times', 'Time', 'No. of Messages', output_file)
    to_xl(word_dictionary, 'Words', 'Word', 'No. of Occurrences', output_file)
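# The driver above depends on a `sort_dictionary` helper that is not shown.
# From its call sites (the default call ranks counts for the bar graphs, the
# 'key' variant keeps times in chronological order for the histogram), a
# plausible sketch is the following; the project's actual implementation may differ.
from collections import OrderedDict


def sort_dictionary(dictionary, sort_by='value'):
    """Return an OrderedDict sorted by value (descending) or by key (ascending)."""
    if sort_by == 'key':
        items = sorted(dictionary.items(), key=lambda item: item[0])
    else:
        items = sorted(dictionary.items(), key=lambda item: item[1], reverse=True)
    return OrderedDict(items)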
def driver():
    # Read the given file
    file_name_with_extension = get_file_name()
    with open(file_name_with_extension) as json_file:
        json_data = json.load(json_file)

    # Collect data
    json_data, discussion_name = preprocess_data(json_data)
    processed_data = process_data(json_data)

    # Sort all dictionaries
    word_dictionary = OrderedDict(processed_data['word_dictionary'].most_common(150))
    emoji_dictionary = OrderedDict(processed_data['emoji_dictionary'].most_common(20))
    person_dictionary = sort_dictionary(processed_data['person_dictionary'])
    date_dictionary = sort_dictionary(processed_data['date_dictionary'])
    inverse_date_dictionary = revert_dictionary(sort_dictionary(processed_data['date_dictionary']))
    number_of_messages = processed_data['number_of_messages']
    total_chars = processed_data['total_chars']
    total_call_data = processed_data['total_call_length']

    if not os.path.exists('output'):
        os.mkdir('output')

    graphs.bar_graph(
        word_dictionary, 25, 'Uses',
        'Most used words in ' + str(number_of_messages) + ' messages in ' + discussion_name,
        'output/' + discussion_name + '_word_frequency.png'
    )

    # Emojis do not render correctly on most systems because the font does not
    # support them, so the graph export is disabled for now.
    # graphs.bar_graph(
    #     emoji_dictionary, 20, 'Uses',
    #     'Most used emojis in ' + str(number_of_messages) + ' messages in ' + file_name,
    #     'output/' + file_name + 'emoji_frequency.png'
    # )

    # Print the most used emojis as an alternative to exporting a graph image
    for key, value in emoji_dictionary.items():
        print(key + '\t-> ' + str(value))

    # Total characters per user and average characters per message in the chat
    for key, value in total_chars.items():
        print(key + '\t-> total: ' + str(value) + ' avg: ' + str(value / person_dictionary[key]))

    print(str(datetime.timedelta(seconds=total_call_data)))

    graphs.bar_graph(
        person_dictionary, 20, 'Messages',
        'Most active person in ' + discussion_name,
        'output/' + discussion_name + '_person_activity.png'
    )

    graphs.bar_graph(
        date_dictionary, 20, 'Messages',
        'Most Messages with ' + discussion_name,
        'output/' + discussion_name + '_date_activity.png'
    )

    graphs.bar_graph(
        inverse_date_dictionary, 20, 'Messages',
        'Least Messages in ' + discussion_name,
        'output/' + discussion_name + '_inverse_date_activity.png'
    )

    graphs.histogram(
        processed_data['time_dictionary'],
        'Message Frequency Chart in ' + discussion_name,
        'output/' + discussion_name + '_time_activity.png'
    )
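# The `revert_dictionary` call above turns the most-active date ordering into a
# least-active one for the "Least Messages" graph. A minimal sketch, assuming it
# simply reverses the order of an already sorted OrderedDict (hypothetical helper):
from collections import OrderedDict


def revert_dictionary(dictionary):
    """Return a new OrderedDict with the entries in reverse order."""
    return OrderedDict(reversed(list(dictionary.items())))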
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
# print(score)
mfcc_accuracy_array1.append(score1)
mfcc_accuracy_array2.append(score2)
mfcc_accuracy_array3.append(score3)
# ---------------------------------------------------------------------------------------#

print("Done performing k-folds cross validation on MFCC data")
print("Generating bar graph of MFCC k-folds results...")

# Save graph of MFCC k-folds accuracies
bar_graph(mfcc_accuracy_array1, mfcc_accuracy_array2, mfcc_accuracy_array3,
          "MFCC K-Fold", os.path.join(figure_path, 'k_folds_results'))
print("Done generating bar graph of MFCC k-folds results")

print("Performing k-folds cross validation on Chroma data...")
# Perform k-folds cross validation on Chroma data
# Model 1 is a CNN with dropout
# Model 2 is a CNN without dropout
# Model 3 is a CNN with dropout and an increased kernel size
# ---------------------------------------------------------------------------------------#
print("???????????????????????????????????????????????????????????????????????")
print("data shape: ", chroma_data_arr.shape)
print("label shape: ", chroma_label_arr.shape)
print("???????????????????????????????????????????????????????????????????????")

chroma_accuracy_array1 = []
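# The bar_graph call above takes three per-fold accuracy lists, a title, and an
# output path. A hedged sketch of what such a helper might look like, using
# matplotlib grouped bars (the project's real implementation may differ):
import matplotlib.pyplot as plt
import numpy as np


def bar_graph(acc1, acc2, acc3, title, output_path):
    """Plot per-fold accuracies of three models as grouped bars and save the figure."""
    folds = np.arange(len(acc1))
    width = 0.25
    plt.figure(figsize=(10, 5))
    plt.bar(folds - width, acc1, width, label='Model 1 (dropout)')
    plt.bar(folds, acc2, width, label='Model 2 (no dropout)')
    plt.bar(folds + width, acc3, width, label='Model 3 (dropout, larger kernel)')
    plt.xlabel('Fold')
    plt.ylabel('Accuracy')
    plt.title(title)
    plt.legend()
    # With no extension in the path, matplotlib falls back to its default format (PNG).
    plt.savefig(output_path)
    plt.close()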
# print(df)
# df = df.loc[df["Timestamp"] != "Timestamp"]
# types = list(value for value in df["Timestamp"])
df = df.loc[(df['Timestamp'].astype(np.int64) >= start)
            & (df['Timestamp'].astype(np.int64) <= End)]
if start <= End:
    st.success('Start date: `%s %s`\n\nEnd date: `%s %s`' %
               (session_state.start_date, session_state.start_time,
                session_state.end_date, session_state.end_time))
else:
    st.error('Error: End date must fall after start date.')
if session_state.graph_type == "Bar Chart":
    bar_graph(data=df, st=st, go=go)
if session_state.graph_type == "Pie Chart":
    doughnut_graph(data=df)
if live.button("live"):
    session_state.data_type = "live"
if session_state.data_type == "live":
    df = select_data(st, hist=False, live=True)
    if session_state.graph_type == "Bar Chart":
        bar_graph(data=df, st=st, go=go)
    if session_state.graph_type == "Pie Chart":
        doughnut_graph(data=df)
    # st.rerun()
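# The filter above compares the Timestamp column (cast to int64) against `start`
# and `End`, so both bounds must be epoch integers in the same unit as the
# column. A hedged sketch of building nanosecond bounds from the session-state
# date and time pickers with pandas (the column's unit is an assumption):
import datetime

import pandas as pd

start = pd.Timestamp(datetime.datetime.combine(
    session_state.start_date, session_state.start_time)).value  # ns since epoch
End = pd.Timestamp(datetime.datetime.combine(
    session_state.end_date, session_state.end_time)).value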
def analytics():
    global EMAIL
    if EMAIL == "":
        return render_template("analyticsBlur.html", displayStatus="visible")
    else:
        user = db.find_one_or_404({"email": EMAIL})["memories"]
        scores = []
        times = []
        sentence = []
        sentence2 = []
        for i in user:
            sco = []
            tim = []
            sent = []
            sent2 = []
            newsent = i["new sentences"]
            for j in newsent:
                sco.append(j["score"])
                tim.append(round((j["time"] - i["time"]).seconds / 60))
                sent.append(j["sentence"])
                sent2.append(i["original sentence"])
            scores.append(sco)
            times.append(tim)
            sentence.append(sent)
            sentence2.append(sent2)

        jso = {
            "Scores": scores,
            "Times": times,
            "Old Sentence": sentence2,
            "New Sentence": sentence
        }
        df = pd.DataFrame(jso, columns=["Scores", "Times", "Old Sentence", "New Sentence"])
        df.to_csv("userdata.csv")
        # jso = {"Scores": scores, "Times": times}
        # df = pd.DataFrame(jso, columns=["Scores", "Times"])
        # df.to_csv("userdata.csv")
        print(scores)
        print(times)

        if len(scores) == 0 or len(scores) == 1:
            return render_template('analyticsNone.html')

        # temp = times[0][0]
        # flag = 0
        # for time in times:
        #     if time[0] != temp:
        #         flag = 1
        #         break
        # if flag == 0:
        #     return render_template('analyticsNone.html')

        id_memory = [[i] for i in range(1, len(scores) + 1)]
        # print(id_memory)
        memory_vs_time = svr_overtime(id_memory, scores).decode("utf-8")
        memory_vs_time_rf = rf_overtime(id_memory, scores).decode("utf-8")
        bargraph_vs_time = bg_overtime(id_memory, scores).decode("utf-8")

        svr_img = svr(times, scores).decode("utf-8")
        randomforest_img = random_forest(times, scores).decode("utf-8")
        bargraph_img = bar_graph(times, scores).decode("utf-8")

        exp_val = expected_value(scores)

        if len(scores) <= 10:
            return render_template('analyticsLess.html',
                                   bargraph_vs_time=bargraph_vs_time,
                                   memory_vs_time_rf=memory_vs_time_rf,
                                   memory_vs_time=memory_vs_time,
                                   exp_val=exp_val,
                                   svr_img=svr_img,
                                   randomforest_img=randomforest_img,
                                   bargraph_img=bargraph_img)

        cluster_imgs = analyse()
        cl1 = cluster_imgs[0].decode("utf-8")
        cl2 = cluster_imgs[1].decode("utf-8")
        cl3 = cluster_imgs[2].decode("utf-8")
        perfect_rate = cluster_imgs[3]
        forget_rate = cluster_imgs[4]
        type_rate = cluster_imgs[5]
        return render_template('analytics.html',
                               bargraph_vs_time=bargraph_vs_time,
                               memory_vs_time_rf=memory_vs_time_rf,
                               memory_vs_time=memory_vs_time,
                               exp_val=exp_val,
                               perfect_rate=perfect_rate,
                               forget_rate=forget_rate,
                               type_rate=type_rate,
                               cl1=cl1, cl2=cl2, cl3=cl3,
                               svr_img=svr_img,
                               randomforest_img=randomforest_img,
                               bargraph_img=bargraph_img)
    return render_template('analytics.html')
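# The analytics route embeds its plots by decoding the byte strings returned by
# helpers such as bar_graph(times, scores), which suggests they return
# base64-encoded PNG data that the template can embed via
# <img src="data:image/png;base64,{{ bargraph_img }}">. A hedged sketch of that
# pattern with matplotlib (the real helpers may differ; this one ignores `times`):
import base64
import io

import matplotlib.pyplot as plt


def bar_graph(times, scores):
    """Render a bar chart of the first memory's scores and return base64 PNG bytes."""
    plt.figure()
    plt.bar(range(len(scores[0])), scores[0])
    plt.xlabel('Attempt')
    plt.ylabel('Score')
    buffer = io.BytesIO()
    plt.savefig(buffer, format='png')
    plt.close()
    buffer.seek(0)
    return base64.b64encode(buffer.read())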