import pickle

import pandas as pd

import compare
import Scraper


def makeDashboards(symbol, sampleAmount):
    # Look up the company's name and sector from the symbol listing.
    companyList = pd.read_csv("companylist.csv")
    companyRow = companyList[companyList["Symbol"] == symbol]
    retrievedName = companyRow.iat[0, 1]
    retrievedSector = companyRow.iat[0, 7]
    origStock = Scraper.Stock(symbol, retrievedName, retrievedSector)

    # Get the comparisons; experiment() writes its results to output.csv.
    compare.experiment(sampleAmount, origStock)
    compareData = pd.read_csv("output.csv").fillna(0)

    # Get all of the compare stocks pickled by the experiment.
    with open("stocks.p", "rb") as stockFile:
        stockList = pickle.load(stockFile)

    # Output to static HTML fragments. The polarity and bias dashboards rank
    # stocks by Wikipedia similarity, most similar first.
    rankedData = compareData.sort_values(by=["WIKI_SIMILARITY"], ascending=False)
    polarityScript, polarityHtml = polarityDashboard(origStock, stockList, rankedData)
    biasScript, biasHtml = biasDashboard(origStock, stockList, rankedData)
    relScript, relHtml = relevanceDashboard(compareData)

    return polarityScript, polarityHtml, biasScript, biasHtml, relScript, relHtml
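
# A minimal usage sketch (not part of the original module): assuming the
# script/html pairs returned by makeDashboards are self-contained <script>/<div>
# fragments, they could be stitched into one static page like this. The helper
# name, template string, and output filename are illustrative only.
def writeDashboardPage(symbol, sampleAmount, outPath="dashboards.html"):
    pieces = makeDashboards(symbol, sampleAmount)
    scripts = pieces[0::2]  # polarityScript, biasScript, relScript
    divs = pieces[1::2]     # polarityHtml, biasHtml, relHtml
    page = "<html><head>{}</head><body>{}</body></html>".format(
        "\n".join(scripts), "\n".join(divs))
    with open(outPath, "w") as f:
        f.write(page)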
    # Tail of experiment(): wrap the test stock and compute its term-frequency
    # matrices against the shared vocabularies built from the sample stocks.
    test = [test_s]
    process_stocks(test)
    #voc_sum_test, word_to_index_sum_test, voc_news_test, word_to_index_news_test = create_vocab(test)

    stock_mats_test = {}  # stock symbol --> [mat_sum, mat_news, mat_ref]
    for s in test:
        stock_mats_test[s.symbol] = tf_calc(
            voc_sum, word_to_index_sum,
            voc_news, word_to_index_news,
            voc_ref, word_to_index_ref, s)

    # Turn the per-stock matrices into vector profiles for summaries, news,
    # and references.
    sums_test, news_test, refs_test = separate_stocks(stock_mats_test)
    sum_prof_test = make_vec_profs(sums_test, voc_sum)
    news_prof_test = make_vec_profs(news_test, voc_news)
    refs_prof_test = make_vec_profs(refs_test, voc_ref)
    t_profs = [sum_prof_test, news_prof_test, refs_prof_test]

    # Compare the test profiles against the sample profiles and write the
    # rows to output.csv.
    toCSV, keys = get_CSV(sum_prof, news_prof, ref_prof, t_profs, stocks, test[0])
    with open('output.csv', 'w') as output_file:
        dict_writer = csv.DictWriter(output_file, keys)
        dict_writer.writeheader()
        dict_writer.writerows(toCSV)


if __name__ == '__main__':
    experiment(10, Scraper.Stock("GOOG", "Alphabet Inc", "Technology"))
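
# Illustrative follow-up (an assumption, not in the original file): once
# experiment() has written output.csv, the ranking consumed by the dashboard
# code can be sanity-checked by sorting on the WIKI_SIMILARITY column:
#
#     import pandas as pd
#     ranked = pd.read_csv("output.csv").fillna(0)
#     print(ranked.sort_values(by=["WIKI_SIMILARITY"], ascending=False).head())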