/
market_sentimentalism2.py
111 lines (71 loc) · 2.79 KB
/
market_sentimentalism2.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
import datetime
import json
from os import path

from flask import Flask, render_template, request

import etl
import model
from facebook_etl import *
# Module-level WSGI application object used by all route decorators below.
app = Flask(__name__)
# url_for('static', filename='style.css')
@app.route('/sentiment/<ticker>/')
def get_sentiment(ticker=None):
    """Scrape Yahoo news for each day in [start_date, end_date), score the
    articles' sentiment, persist the per-day scores to a JSON file, and
    render them with the highcharts template.

    Query params:
        start_date: 'YYYY-MM-DD' (defaults to 2015-04-01).
        end_date:   'YYYY-MM-DD' (defaults to start_date + 1 day).

    NOTE(review): `ticker` is accepted from the route but never passed to
    the ETL layer — only logged. Confirm whether the search URL should be
    built per ticker.
    """
    score_dict = {}
    start_date = request.args.get('start_date', None)
    end_date = request.args.get('end_date', None)
    if not start_date:
        # Bug fix: the original used 04/01, which are octal literals in
        # Python 2 and a SyntaxError in Python 3. Plain integers are
        # equivalent and portable.
        start_date = datetime.datetime(2015, 4, 1)
    else:
        start_date = datetime.datetime.strptime(start_date, '%Y-%m-%d')
    if not end_date:
        # Default to a one-day window when no end date is supplied.
        end_date = start_date + datetime.timedelta(days=1)
    else:
        end_date = datetime.datetime.strptime(end_date, '%Y-%m-%d')
    urls_dict = etl.construct_search_url_yh(start_date, end_date)
    for day in urls_dict:
        articles = etl.news_scrape(urls_dict[day])
        score_dict[day] = {
            'articles': articles,
            'score': etl.aggregate_news_sentiment(articles),
        }
    file_path = path.join(path.dirname(__file__),
                          'data/sentiment_scores_daily_train.json')
    # Renamed handle: the original shadowed the builtin `file`.
    with open(file_path, 'w+') as out_file:
        json.dump(score_dict, out_file)
    if ticker:
        # %-formatting keeps the output identical under Python 2 and 3
        # (the original was a Python 2 print statement).
        print('ticker is %s' % ticker)
    return render_template('highcharts1.html',
                           dictionary=json.dumps(score_dict, ensure_ascii=False))
@app.route('/fb/<id_or_name>/')
def fb(id_or_name):
    """Fetch a Facebook page by id or name and render it as JSON.

    Requires an ``access_token`` query parameter; when it is missing an
    error payload is rendered instead of calling the Facebook API.
    """
    access_token = request.args.get('access_token', None)
    if access_token:
        payload = get_fb_page(id_or_name, access_token)
    else:
        payload = {'Error': 'No Access Token provided'}
    return render_template("default.html", data=json.dumps(payload))
@app.route('/tweets/<search_term>/')
def tw(search_term='AAPL'):
    """Collect historical tweets matching *search_term* for each day in
    [start_date, end_date) and render them as JSON.

    Query params:
        start_date: 'YYYY-MM-DD' (defaults to 2015-04-01).
        end_date:   'YYYY-MM-DD' (defaults to start_date + 1 day).
    """
    twits_bydate = {}
    start_date = request.args.get('start_date', None)
    end_date = request.args.get('end_date', None)
    if not start_date:
        # Bug fix: the original used 04/01, which are octal literals in
        # Python 2 and a SyntaxError in Python 3. Plain integers are
        # equivalent and portable.
        start_date = datetime.datetime(2015, 4, 1)
    else:
        start_date = datetime.datetime.strptime(start_date, '%Y-%m-%d')
    if not end_date:
        # Default to a one-day window when no end date is supplied.
        end_date = start_date + datetime.timedelta(days=1)
    else:
        end_date = datetime.datetime.strptime(end_date, '%Y-%m-%d')
    urls_by_date = etl.construct_search_url_tw(search_term, start_date, end_date)
    for dt in urls_by_date:
        twits_bydate[dt] = etl.collect_historical_tweets(urls_by_date[dt])
    return render_template('default.html', data=json.dumps(twits_bydate))
@app.route('/forecast/')
def test():
    """Run the linear-regression forecast over the prepared dataset and
    render the result as JSON in the default template."""
    prepared = etl.prepare_data_for_modeling()
    forecast = model.linear_reg(prepared)
    return render_template('default.html', data=json.dumps(forecast))
@app.route('/dashboard/')
def sentiment_dashboard():
    """Render the sentiment dashboard from the modeling dataset.

    Copies the frame's index into an explicit 'date' column so each record
    carries its date after the records-oriented conversion.
    """
    frame = etl.prepare_data_for_modeling()
    frame['date'] = frame.index
    records = frame.to_dict(orient='records')
    return render_template('highcharts1.html', dictionary=json.dumps(records))
# Start the Flask development server when this module is run directly.
if __name__ == '__main__':
    app.run()