Example #1
def getNewsFromCorpus2():
    """
    Creation with the second corpus (The big one)
    Return:
        List of News
    """
    news = []
    with open("Fake.csv", "r") as f:
        # Parse the CSV
        file = csv.reader(f, delimiter=',', quotechar='"')

        for index, line in enumerate(file):
            # if index % 100 == 0:
            #     print("article #" + str(index) + " from Fake")
            # Each row has the shape title,text,subject,date
            text = line[1]
            title = line[0]
            orientation = line[2]
            article = News("author", text, "links", orientation,
                           "mostly false", title)
            news.append(article)
    # Same for the 'True' file
    with open("True.csv", "r") as f:
        file = csv.reader(f, delimiter=',', quotechar='"')
        for index, line in enumerate(file):
            # if index % 100 == 0:
            #     print("article #" + str(index) + " from True")
            text = line[1]
            title = line[0]
            orientation = line[2]
            article = News("author", text, "links", orientation, "mostly true",
                           title)
            news.append(article)

    return news
Example #2
def GetNewsListEs(url, time=dt.datetime.min, label='', maxOverdue=5):
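    # Walk every paginated list page and keep articles dated on/after `time`.
    # Stop once maxOverdue older articles have been seen; returns (newsList, maxTime).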
    soup = News.GetSoup(url, 'lxml')

    maxPage = int(
        soup.body.select('div[id="pagerNoDiv"]')[0].select(
            'a[class="page-btn"]')[0].previous_sibling.text)
    newsList = []
    overdueCount = 0
    maxTime = dt.datetime.min
    for p in range(1, maxPage + 1):
        pageUrl = url[0:len(url) - 5] + '_' + str(p) + '.html'
        try:
            urlList = GetNewsUrlEs(pageUrl)
        except Exception as e:
            News.WriteLog(str(e) + '. url = ' + pageUrl)
            continue
        for newsUrl in urlList:
            try:
                news = GetNewsEs(newsUrl)
                news.label = label
                # if news.time <= time:
                if news.time.date() < time.date():
                    overdueCount += 1
                else:
                    newsList.append(news)
                    maxTime = news.time if news.time > maxTime else maxTime
                    print(news.url)
                    print(news.time)
                    print(news.title)
            except Exception as e:
                News.WriteLog(str(e) + ', url = ' + newsUrl)
                continue
            if overdueCount >= maxOverdue:
                return newsList, maxTime
    return newsList, maxTime
Example #3
def latlon(lat1, lon1, lat2, lon2):
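    # Map the corner coordinates through News.get_db_latlon, fetch the matching
    # country news, and return it as pretty-printed JSON.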
    lat1 = int(News.get_db_latlon(float(lat1)))
    lon1 = int(News.get_db_latlon(float(lon1)))
    lat2 = int(News.get_db_latlon(float(lat2)))
    lon2 = int(News.get_db_latlon(float(lon2)))
    news = News.get_country_news(lat1, lon1, lat2, lon2)
    return json.dumps(news, sort_keys=True, indent=4, separators=(',', ': '))
Example #4
    def getNews(self):

        # Construct the source URL (TODO: is this used?)
        source = self.news_input.text
        if ".com" not in source:
            source = source + ".com"
        source = "http://www." + source

        try:
            #Get the news
            articles = News.getTheNews(source)

            #Clear the panel
            self.top_articles.text = ""

            #Update the panel
            for i in range(5):
                self.top_articles.text += ("Title: " + articles[0][i] + "\n")
                for author in articles[1][i]:
                    self.top_articles.text += ("Author: " + author + " ")
                self.top_articles.text += "\n\n"

        except Exception:
            # Invalid input
            self.top_articles.text = "Please enter a news topic to search for."
Example #5
def GetNews():
    while len(url_set) != 0:
        try:
            # Get the next link
            url = url_set.pop()
            url_old.add(url)
            # Extract the article information
            article = News.News()
            article.url = url  # URL
            html = urllib.urlopen(article.url).read()
            soup = bs4.BeautifulSoup(html, 'html.parser')
            article.title = soup.find('title').get_text()  # Title
            keywords = 'keywords'
            res0 = re.compile(keywords)
            if soup.find('meta', {'name': res0})['name'] == "keywords":
                article.keywords = soup.find('meta', {'name': res0})['content']  # Keywords
            else:
                article.keywords = ""
            author = 'author'
            res = re.compile(author)
            if soup.find('meta', {'name': res})['name'] == "author":
                article.author = soup.find('meta', {'name': res})['content']  # Author
            else:
                article.author = ""
            published_time = 'publishdate'
            res1 = re.compile(published_time)
            if soup.find('meta', {'name': res1})['name'] == "publishdate":
                article.date = soup.find('meta', {'name': res1})['content']  # Publish date
            else:
                article.date = ""
            article.content = soup.find('div', {'class': 'main_text'}).get_text()
            SaveNews(article)
        except Exception as e:
            print(e)
            continue
Example #6
def news():
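    # Fetch the latest items and rebuild the text widget: one line per title,
    # followed by its link tagged "hlink".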
    theNews = newsFeed.newsPlease()
    newsdisp.delete(1.0, END)
    if len(theNews):
        for i in range(0, len(theNews)):
            newsdisp.insert(END, theNews[i]["title"] + "\n")
            newsdisp.insert(END, theNews[i]["link"], "hlink")
            newsdisp.insert(END, "\n\n")
Example #7
def predict(link, mode):
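    # Build a News object from a raw corpus entry, a downloaded URL (via the
    # Article scraper), or plain eval text, then classify the cleaned, joined
    # text with the loaded model session (sess).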

    if mode == "corpus":
        article = link
    if mode == "url":
        toi_article = Article(link, language="en")
        toi_article.download()
        toi_article.parse()
        toi_article.nlp()
        text = toi_article.text
        article = News("author", text, "links", "orientation", "unk", "title")
    if mode == "eval":
        article = News("author", link, "links", "orientation", "unk", "title")
    article.clean_text()
    text = article.getCleanedText()
    joined_Text = np.array([' '.join(text)])

    resList = []
    # Prediction for each model in our list

    resList = np.array(resList)
    output_name = sess.get_outputs()[0].name
    input_name = sess.get_inputs()[0].name
    pred = sess.run([output_name], {input_name: joined_Text})
    print(pred)

    display("prediction = " + str(pred[0]) + " % True", "yellow")
    if pred[0][0] == 'mostly false':
        return (0)
    return (1)
Example #8
def GetReportListEs(url, time=dt.datetime.min, label='', maxOverdue=5):
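    # Page through the JSON report listing, keeping report URLs newer than `time`
    # (stop after maxOverdue stale entries), then download each report and return
    # the list together with the newest timestamp seen.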
    maxPage = 1000
    newsList = []
    overdueCount = 0
    maxTime = dt.datetime.min
    urlList = []
    exitTraverse = False
    for i in range(1, maxPage + 1):
        jsonUrl = url.replace('&p=x&', '&p=' + str(i) + '&')
        try:
            urlSet = GetReportUrlEs(jsonUrl)
        except Exception as e:
            News.WriteLog(str(e) + '. url = ' + jsonUrl)
            continue
        for u in urlSet:
            newsTime = dt.datetime.strptime(u[len(u) - 19:len(u) + 1],
                                            '%Y-%m-%dT%H:%M:%S')
            if newsTime.date() < time.date():
                overdueCount += 1
            if overdueCount >= maxOverdue:
                exitTraverse = True
                break
            if newsTime > time:
                urlList.append(u)
                maxTime = newsTime if newsTime > maxTime else maxTime
        if exitTraverse:
            break

    for newsUrl in urlList:
        try:
            news = GetNewsEs(newsUrl)
            news.label = label
            news.time = dt.datetime.strptime(
                newsUrl[len(newsUrl) - 19:len(newsUrl) + 1],
                '%Y-%m-%dT%H:%M:%S')
            news.url = news.url[0:len(news.url) - 23]
            newsList.append(news)
            print(news.url)
            print(news.time)
            print(news.title)
        except Exception as e:
            News.WriteLog(str(e) + ', url = ' + newsUrl)
            continue

    return newsList, maxTime
Example #9
 def GET(self):
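     # Gather weather, news, calendar and homework data, build the audio
     # greeting, and render the index template with all of it.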
     weatherList = WeatherData.pullWeatherData()
     newsList = News.pullNews()
     scheduleList = GoogleCalendarEvents.getSchedule()
     homeworkList = GoogleCalendarEvents.getHomework()
     audioGreeting = AudioGreeting.greeting(weatherList[0],
                                            len(scheduleList))
     return render.index(weatherList, newsList, scheduleList, homeworkList,
                         audioGreeting)
Example #10
def get_news():

    titles = News.main()
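    # Print each headline; the commented-out lines translated the title and
    # sent it out via lineNotifyMessage.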
    for title in titles:
        print("US Top News: " + title)
        print()
        #title = translator.translate(title,dest='zh-tw').text
        #msg = " \r\n"+title
        #lineNotifyMessage(token,msg)
        time.sleep(2.5)
Example #11
def SetPara(path, paraDict):
    file = None
    try:
        file = open(path, 'w')
        for k, v in paraDict.items():
            file.write(k + ',' + v + os.linesep)
            file.flush()
    except Exception as e:
        News.WriteLog('Fail to open ' + path)
    finally:
        if file is not None:
            file.close()
Example #12
def GetNews(url):
    global NewsCount, MaxNewsCount  # Global counters for the number of news items
    while len(url_set) != 0:
        try:
            # Get the next link
            url = url_set.pop()
            url_old.add(url)

            # Fetch the page source
            html = urllib2.urlopen(url).read().decode('utf8')

            # Parse it
            soup = bs4.BeautifulSoup(html, 'html.parser')
            pattern = r'http://\w+\.baijia\.baidu\.com/article/\w+'  # Link matching pattern
            links = soup.find_all('a', href=re.compile(pattern))

            # Collect new article URLs
            for link in links:
                if link['href'] not in url_old:
                    url_set.add(link['href'])

                    # Extract the article information
                    article = News.News()
                    article.url = url  # URL
                    page = soup.find('div', {'id': 'page'})
                    article.title = page.find('h1').get_text()  # Title
                    info = page.find('div', {'class': 'article-info'})
                    article.author = info.find('a', {'class': 'name'}).get_text()  # Author
                    article.date = info.find('span', {'class': 'time'}).get_text()  # Date
                    article.about = page.find('blockquote').get_text()
                    pnode = page.find('div', {'class': 'article-detail'}).find_all('p')
                    article.content = ''
                    for node in pnode:  # Walk the article paragraphs
                        article.content += node.get_text() + '\n'  # Append the paragraph text

                    SaveNews(article)

                    print(NewsCount)
                    break
        except Exception as e:
            print(e)
            continue
        else:
            print(article.title)
            NewsCount += 1
        finally:
            # Stop once enough articles have been collected
            if NewsCount == MaxNewsCount:
                break
Example #13
def create_newsObject(titles, dates, details, company, links, newsItems):
    """
    Returns a list of news objects

    This method uses the titles, date, details, company and links
    to instantiate a news object and append it to the list newspieces
    """
    print("creating news objects list...")
    #create a news object and add it to the newsItems list
    newspiece = News.News(titles, dates, details, company,links)
    newsItems.append(newspiece)
    return newsItems
Example #14
def get_news(command_input):
    import News
    for token in command_input:
        if ".com" in token:
            source = token
    articles = News.getTheNews(source)
    articleText = ""
    for i in range(5):
        articleText += ("Title: " + articles[0][i] + "\n")
        for author in articles[1][i]:
            articleText += ("Author: " + author + " ")
        articleText += "\n\n"
    return articleText
Example #15
def GetNewsUrlEs(url):
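    # Collect the article URLs from the news list page; the title, info and time
    # are parsed from each summary block, but only the links are returned.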
    soup = News.GetSoup(url, 'lxml')

    newsListContent = soup.body.select('ul[id="newsListContent"]')[0]
    sumList = newsListContent.select('li')
    urlList = []
    for item in sumList:
        sumContent = item.select('div')[-1].select('p')
        title = sumContent[0].text.strip()
        info = sumContent[1].text.strip()
        time = sumContent[2].text.strip()
        pageUrl = sumContent[0].a['href']
        urlList.append(pageUrl)
    return urlList
Example #16
def GetNews():
    while len(url_set) != 0:
        try:
            # Get the next link
            url = url_set.pop()
            url_old.add(url)
            # Extract the article information
            article = News.News()
            article.url = url  # URL
            html = urllib.urlopen(article.url).read()
            html = html.decode('gbk')
            html = html.encode('utf8')
            soup = bs4.BeautifulSoup(html, 'html.parser')
            article.title = soup.find('title').get_text()  # Title
            keywords = 'KEYWords'
            res0 = re.compile(keywords)
            if soup.find('meta', {'name': res0})['name'] == "KEYWords":
                article.keywords = soup.find('meta', {'name': res0})['content']  # Keywords
            else:
                article.keywords = ""
            author = 'Author'
            res = re.compile(author)
            if soup.find('meta', {'name': res})['name'] == "Author":
                article.author = soup.find('meta', {'name': res})['content']  # Author
            else:
                article.author = ""
            published_time = 'publishdate'
            res1 = re.compile(published_time)
            if soup.find('meta', {'name': res1})['name'] == "publishdate":
                article.date = soup.find('meta', {'name': res1})['content']  # Publish date
            else:
                article.date = ""
            content = soup.select('.neirong')
            article.content = content[0].text
            SaveNews(article)
        except Exception as e:
            print(e)
            continue
Example #17
def getNewsFromCorpus3():
    news = []
    with open("Corpus4.csv", "r") as f:
        # Parse the CSV
        file = csv.reader(f, delimiter=',', quotechar='"')

        for index, line in enumerate(file):
            # if index % 100 == 0:
            #     print("article #" + str(index))
            # Each row is used as text,label
            text = line[0]
            title = ""
            orientation = ""
            article = News("author", text, "links", orientation, line[1],
                           title)
            news.append(article)
    return news
Example #18
def GetReportUrlEs(url):
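    # Read the report index JSON and rebuild each report's page URL from its
    # date string and info code.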
    jsonText = News.OpenUrl(url)
    jsonData = json.loads(jsonText)
    data = jsonData['data']
    urlList = []
    for d in data:
        if isinstance(d, dict):
            dtStr = d['datetime']
            infoCode = d['infoCode']
        else:
            strSplit = d.split(',')
            dtStr = dt.datetime.strptime(
                strSplit[1], '%Y/%m/%d %H:%M:%S').strftime('%Y-%m-%dT%H:%M:%S')
            infoCode = 'hy,' + strSplit[2]
        urlList.append('http://data.eastmoney.com/report/' + dtStr[0:4] +
                       dtStr[5:7] + dtStr[8:10] + '/' + infoCode +
                       '.html?dt=' + dtStr)
    return urlList
Example #19
def GetPara(path):
    paraDict = {}
    file = None
    try:
        file = open(path)
        lines = file.readlines()
        for line in lines:
            try:
                if line[-1] == os.linesep:
                    line = line[0:len(line) - 1]
                strSplit = line.split(',')
                paraDict[strSplit[0]] = strSplit[1]
            except Exception as e:
                continue
    except Exception as e:
        News.WriteLog('Fail to open ' + path)
    finally:
        if file is not None:
            file.close()

    return paraDict
Example #20
def news_command(bot, update):
    print("ok")
    try:
        user_text = update.message.text
        input_num = user_text.split(" ")
        limit_news = int(
            input_num[1]
        )  # Read the parameter from the input -> how many news items to crawl
        print(limit_news)
        news = News.GetNews(limit_news)
        print('news', news)
        for x in range(
                0, len(news)
        ):  # Deserialize the JSON data returned from News.py
            message = json.loads(news[x])
            print(message)
            update.message.reply_text(message['title'] + "\n" +
                                      message['link'] + "\n" +
                                      message['description'])
    except (IndexError, ValueError):
        update.message.reply_text('Vui lòng chọn số lượng tin hiển thị!!')
Example #21
def GetNews():
    while len(url_set) != 0:
        try:
            # Get the next link
            url = url_set.pop()
            url_old.add(url)
            # Extract the article information
            article = News.News()
            article.url = url  # URL
            html = urllib.urlopen(article.url).read()
            soup = bs4.BeautifulSoup(html, 'html.parser')
            article.title = soup.find('title').get_text()  # Title
            keywords = 'keywords'
            res0 = re.compile(keywords)
            if soup.find('meta', {'name': res0})['name'] == "keywords":
                article.keywords = soup.find('meta', {'name': res0})['content']  # Keywords
            else:
                article.keywords = ""
            content = 'Description'
            res1 = re.compile(content)
            if soup.find('meta', {'name': res1})['name'] == "Description":
                article.content = soup.find('meta', {'name': res1})['content']  # Description
            else:
                article.content = ""
            author = soup.select('.qq_editor')
            article.author = author[0].text
            SaveNews(article)
        except Exception as e:
            print(e)
            continue
Example #22
def getNewsFromXML(link):
    """
    Transform an article of the first news (xml) corpus into a news object.
    Parameters:
        link : Name of the file (must be in the articles folder)
    """
    # Transformation of the xml in python dictionary
    article = schema.to_dict(link)
    # creation of default values (I don't remember how to make a class with variable parameters)
    author = "Anonyme"
    mainText = "empty"
    hyperlink = []
    orientation = "default"
    # Possible values: mostly true / mixture of true and false / mostly false / no factual content
    # For training purposes, 'mixture of true and false' is mapped to 'mostly false'.
    veracity = "default"
    title = "default"

    # Pull every available field from the XML
    if 'author' in article:
        author = article['author']
    if 'mainText' in article:
        mainText = article['mainText']
    if 'hyperlink' in article:
        hyperlink = article['hyperlink']
    if 'orientation' in article:
        orientation = article['orientation']
    if 'veracity' in article:
        if article['veracity'] == 'mixture of true and false':
            veracity = "mostly false"
        else:
            veracity = article['veracity']
    if 'title' in article:
        title = article['title']
    # Creation of a News instance
    newsInstance = News(author, mainText, hyperlink, orientation, veracity,
                        title)
    return newsInstance
Example #23
def GetNews(url, i):
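    # Pull the title and body out of the page with regexes and write them to a
    # per-index text file under /tmp/luo.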
    response = requests.get(url)
    html = response.text
    article = News.News()
    try:
        article.title = re.findall(r'<h2 id=".*?">(.+?)</h2>', html)
        article.content = re.findall(r'<div class="article">([\w\W]*?)</div>',
                                     html)

        t = ""
        for j in article.title:
            t += str('标题:' + j + '\n')
        c = ""
        for m in article.content:
            c += str(m)
        article.content1 = ' ' + '\n'.join(c.split(' ')).strip()

        file = codecs.open('/tmp/luo/news ' + str(i) + '.txt', 'w+')
        file.write(t + "\t" + article.content1)
        file.close()
        print('ok')
    except Exception as e:
        print('Error1:', e)
    response.close()
Example #24
def get_article_list(country_name):
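    # Look up the articles for the given country and return them as pretty-printed JSON.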
    article_list = News.get_country_article_list(country_name)
    return json.dumps(article_list, sort_keys=True, indent=4, separators=(',', ': '))
Example #25
def get_article(article_id):
    article = News.get_article(article_id)
    return json.dumps(article, sort_keys=True, indent=4, separators=(',', ': '))
Example #26
import json
import os
import sys
path = os.path.dirname(os.path.realpath(__file__))+'/../backend/'
sys.path.append(path)

import News
import mysql
from flask import Flask, abort
app = Flask(__name__)

zoom_to_depth = {'4':'City', '3':'State', '2':'Country', '1':'International'}
zooms = ['1','2','3','4']
max_state_lat_lon_difference = News.get_db_latlon(20)
max_city_lat_lon_difference = News.get_db_latlon(5)

@app.route("/")
def hello():
    return "Hello World!"

@app.route("/article/<article_id>")
def get_article(article_id):
    article = News.get_article(article_id)
    return json.dumps(article, sort_keys=True, indent=4, separators=(',', ': '))

@app.route("/articlelist/<country_name>")
def get_article_list(country_name):
    article_list = News.get_country_article_list(country_name)
    return json.dumps(article_list, sort_keys=True, indent=4, separators=(',', ': '))

@app.route("/latlon/country/<lat1>/<lon1>/<lat2>/<lon2>")
Example #27
 def get(self):
   userInfo = getLoginInfo(self.request.uri)
   logging.info('Request params ' + str(self.request))
   if self.request.get('news'):
     logging.info('News search ...')
     resp = News.search(self.request.get('q'), self.request.get('p'))
     logging.info('after News search ...[' + resp + ']')
     self.response.out.write(resp)
     #return
   elif self.request.get('clear'):
     logging.info('Clear search ...')
     #cachedJson.clearData()
   elif self.request.get('email'):
     email = self.request.get('email')
     msg = '<p>Test for Html message <br/><ul> Some list <li> List 1</li><li>List 2</li></ul>'
     message = mail.EmailMessage(sender="*****@*****.**",
                           subject="SR Level broken ...")
     message.to = email
     message.body = """
     Dear Ali baba:
     """
     message.html = msg
     message.send()
     #logging.debug('Email has been send to [' + email + ']')
   # getting the history table of the patterns in JSON format
   elif self.request.get('c'):
     msg = cachedJson.checkSRLevels()
     if msg and len(msg) > 0:
       message = mail.EmailMessage(sender="Fx-Monitor Support <*****@*****.**>",
                           subject="SR Level broken ...")
       message.to = "Ali baba <*****@*****.**>"
       message.body = """
       Dear Ali baba:
       
       """ 
       
       message.html = msg
       message.send()
   elif self.request.get('sendEmailForPatternAlert'):
     cachedJson.sendEmailForPatternAlert() #cron to send emails to user
   elif self.request.get('checkPatterns'):
     cachedJson.chekEmailPatterns() # cron run once a day and record patterns
   elif self.request.get('cron'):
     cachedJson.loadData()
   elif self.request.get('pair'):
     #cachedJson.clearData()
     pair = self.request.get('pair')
     logging.info('Pair request ...')
     cachedJson.loadData()
     if pair == 'gbpusd':
       hist_json = memcache.get('h_gbp')
     elif pair == 'eurusd':
       hist_json = memcache.get('h_eur')
     elif pair == 'usdchf':
       hist_json = memcache.get('h_chf')
     elif pair == 'usdcad':
       hist_json = memcache.get('h_cad')
     elif pair == 'audusd':
       hist_json = memcache.get('h_aud')
     elif pair == 'eurgbp':
       hist_json = memcache.get('h_eurgbp')
     elif pair == 'eurjpy':
       hist_json = memcache.get('h_eurjpy')
     elif pair == 'gbpjpy':
       hist_json = memcache.get('h_gbpjpy')
   
     big_f = []
     if hist_json is not None and len(hist_json):
       cnt = 0
       resultJSON = json.loads(hist_json)
       for elem in resultJSON['hist']:
         f = []
         cnt += 1
         if cnt > 20: break # only 20 rows to collect for now.later must implement some filter
         i = 0
         for e in elem:
          s = urllib.unquote(e).decode("utf-8")
          if i == 2: # fromat time
           l = s.split('T')
           if len(l) > 0:
            s = '&nbsp;' + l[0] + '&nbsp;'
          f.append(s )
          i += 1
         
         big_f.append(f)
       
       pair_html = '<p> For price distribution price check ... <br /> '
       pair_html += '<a href="/?pair=gbpusd">GBPUSD</a>, '
       pair_html += '<a href="/?pair=eurusd">EURUSD</a>, '
       pair_html += '<a href="/?pair=usdchf">USDCHF</a>, '
       pair_html += '<a href="/?pair=usdcad">USDCAD</a>, '
       pair_html += '<a href="/?pair=audusd">AUDUSD</a>, '
       pair_html += '<a href="/?pair=eurgbp">EURGBP</a>, '
       pair_html += '<a href="/?pair=gbpjpy">GBPJPY</a>, '
       pair_html += '<a href="/?pair=eurjpy">EURJPY</a>'
       
       if resultJSON['chart'] and len(resultJSON['chart']) != 0:
         user = users.get_current_user()
         if user is None:
          template_values = {
            'pair_html' : pair_html,
            'chart' : resultJSON['chart'] ,
            'pair' : self.request.get('pair'),
            'html_table' : big_f,
            'url' : userInfo[0],
            'url_text' : userInfo[1],
            'user' : userInfo[2]
          }
         else:
            template_values = {
            'pair_html' : pair_html,
            'chart' : resultJSON['chart'] ,
            'pair' : self.request.get('pair'),
            'html_table' : big_f,
            'url' : userInfo[0],
            'url_text' : userInfo[1],
            'user' : userInfo[2],
            'GBPUSD' : cachedJson.getUserPatternSettings(user.email(),'GBPUSD'),
           'EURUSD' : cachedJson.getUserPatternSettings(user.email(),'EURUSD'),
           'USDCHF' : cachedJson.getUserPatternSettings(user.email(),'USDCHF'),
           'USDCAD' : cachedJson.getUserPatternSettings(user.email(),'USDCAD'),
           'AUDUSD' : cachedJson.getUserPatternSettings(user.email(),'AUDUSD'),
           'EURGBP' : cachedJson.getUserPatternSettings(user.email(),'EURGBP'),
           'GBPJPY' : cachedJson.getUserPatternSettings(user.email(),'GBPJPY'),
           'EURJPY' : cachedJson.getUserPatternSettings(user.email(),'EURJPY'),
           'USDJPY' : cachedJson.getUserPatternSettings(user.email(),'USDJPY')
          }
       else:
         user = users.get_current_user()
         if user is None:
          template_values = {
           'pair_html' : pair_html,
           'pair' : self.request.get('pair'),
           'html_table' : big_f,
           'url' : userInfo[0],
           'url_text' : userInfo[1],
           'user' : userInfo[2]
          }
         else:
          template_values = {
           'pair_html' : pair_html,
           'pair' : self.request.get('pair'),
           'html_table' : big_f,
           'url' : userInfo[0],
           'url_text' : userInfo[1],
           'user' : userInfo[2],
          'GBPUSD' : cachedJson.getUserPatternSettings(user.email(),'GBPUSD'),
           'EURUSD' : cachedJson.getUserPatternSettings(user.email(),'EURUSD'),
           'USDCHF' : cachedJson.getUserPatternSettings(user.email(),'USDCHF'),
           'USDCAD' : cachedJson.getUserPatternSettings(user.email(),'USDCAD'),
           'AUDUSD' : cachedJson.getUserPatternSettings(user.email(),'AUDUSD'),
           'EURGBP' : cachedJson.getUserPatternSettings(user.email(),'EURGBP'),
           'GBPJPY' : cachedJson.getUserPatternSettings(user.email(),'GBPJPY'),
           'EURJPY' : cachedJson.getUserPatternSettings(user.email(),'EURJPY'),
           'USDJPY' : cachedJson.getUserPatternSettings(user.email(),'USDJPY')
          }
     path = os.path.join(os.path.dirname(__file__), 'index1.html')
     self.response.out.write(template.render(path,template_values))
   elif self.request.get('r'):
     cachedJson.loadData()
     #url_link = "https://script.google.com/macros/s/AKfycbzFDj3RD57LI-W8ppcyHVhNq_3-_MQ-WUP9sttWZoO8ocvhF-Dh/exec?r=1"
     pair = None
     if self.request.get('p'):
       pair = self.request.get('p')
       #url_link += "&p=" + self.request.get('p')
     #urlfetch.set_default_fetch_deadline(45)
     #result = urlfetch.fetch(url_link)
     #logging.debug(url_link)
     #resultJSON = json.loads(result.content)
     resultJSON = json.loads(memcache.get('sr_lines'))
     #logging.debug(urllib.unquote(resultJSON['0']).decode("utf-8"))
     table = '<table border = 1><tr><td>Pair</td><td>price</td><td>time</td></tr>'
     j = 0
     for e in resultJSON:
       table += '<tr>'
       if pair is None or (pair and pair.upper() == resultJSON[str(j)]['name'].upper()):
         table += '<td>' + resultJSON[str(j)]['name'] + '</td><td>' + str(resultJSON[str(j)]['price']) + '</td><td>' + resultJSON[str(j)]['time'] + '</td>'
       #logging.debug(resultJSON[str(j)]['time'])
       j += 1
     '''
       #table += '<tr>'
       #table += '<td>' + e['name'] + '</td><td>' + e['price'] + '</td><td>' + e['time']  + '</td>'
       #table += '</tr>'
     '''
     table += '</table>'
     
     pair_html = '<p> For price distribution price check ... <br /> '
     pair_html += '<a href="/?r=1&p=gbpusd">GBPUSD</a>, '
     pair_html += '<a href="/?r=1&p=eurusd">EURUSD</a>, '
     pair_html += '<a href="/?r=1&p=usdchf">USDCHF</a>, '
     pair_html += '<a href="/?r=1&p=usdcad">USDCAD</a>, '
     pair_html += '<a href="/?r=1&p=audusd">AUDUSD</a>, '
     pair_html += '<a href="/?r=1&p=eurgbp">EURGBP</a>, '
     pair_html += '<a href="/?r=1&p=gbpjpy">GBPJPY</a>, '
     pair_html += '<a href="/?r=1&p=eurjpy">EURJPY</a>'
     
     if self.request.get('p'):
       user = users.get_current_user()
       if user is None:
        template_values = {
         'pair_html' : pair_html,
         'pair' : self.request.get('p'),
         'table' : table,
         'url' : userInfo[0],
         'url_text' : userInfo[1],
         'user' : userInfo[2]
        }
       else:
        template_values = {
         'pair_html' : pair_html,
         'pair' : self.request.get('p'),
         'table' : table,
         'url' : userInfo[0],
         'url_text' : userInfo[1],
         'user' : userInfo[2],
         'GBPUSD' : cachedJson.getUserPatternSettings(user.email(),'GBPUSD'),
           'EURUSD' : cachedJson.getUserPatternSettings(user.email(),'EURUSD'),
           'USDCHF' : cachedJson.getUserPatternSettings(user.email(),'USDCHF'),
           'USDCAD' : cachedJson.getUserPatternSettings(user.email(),'USDCAD'),
           'AUDUSD' : cachedJson.getUserPatternSettings(user.email(),'AUDUSD'),
           'EURGBP' : cachedJson.getUserPatternSettings(user.email(),'EURGBP'),
           'GBPJPY' : cachedJson.getUserPatternSettings(user.email(),'GBPJPY'),
           'EURJPY' : cachedJson.getUserPatternSettings(user.email(),'EURJPY'),
           'USDJPY' : cachedJson.getUserPatternSettings(user.email(),'USDJPY')
        }  
     else:
       user = users.get_current_user()
       if user is None:
        template_values = {
          'pair_html' : pair_html,
         'table' : table,
         'url' : userInfo[0],
         'url_text' : userInfo[1],
         'user' : userInfo[2]
        }
       else:
        template_values = {
         'pair_html' : pair_html,
         'pair' : self.request.get('p'),
         'table' : table,
         'url' : userInfo[0],
         'url_text' : userInfo[1],
         'user' : userInfo[2],
        'GBPUSD' : cachedJson.getUserPatternSettings(user.email(),'GBPUSD'),
           'EURUSD' : cachedJson.getUserPatternSettings(user.email(),'EURUSD'),
           'USDCHF' : cachedJson.getUserPatternSettings(user.email(),'USDCHF'),
           'USDCAD' : cachedJson.getUserPatternSettings(user.email(),'USDCAD'),
           'AUDUSD' : cachedJson.getUserPatternSettings(user.email(),'AUDUSD'),
           'EURGBP' : cachedJson.getUserPatternSettings(user.email(),'EURGBP'),
           'GBPJPY' : cachedJson.getUserPatternSettings(user.email(),'GBPJPY'),
           'EURJPY' : cachedJson.getUserPatternSettings(user.email(),'EURJPY'),
           'USDJPY' : cachedJson.getUserPatternSettings(user.email(),'USDJPY')
        }
       
     path = os.path.join(os.path.dirname(__file__), 'index1.html')
     self.response.out.write(template.render(path,template_values))
Example #28
def main(option):
    df = pd.read_csv('cities_location.csv')
    origin = (str(df.loc[0, "Latitude"]) + "," + str(df.loc[0, "Longitude"]))
    destination = (str(df.loc[option, "Latitude"]) + "," +
                   str(df.loc[option, "Longitude"]))
    print(option)
    if option == 0:
        # point out all location
        marker_list_flight = markerList(option)
        get_static_google_map("google_map_" + str(option),
                              zoom=2,
                              origin=origin,
                              option=option,
                              imgsize=(640, 640),
                              imgformat="png",
                              markers=marker_list_flight)
    elif option == len(df.index):
        # make map that shows all the markers
        marker_list_flight = markerListAll()
        get_static_google_map("google_map_" + str(option),
                              zoom=2,
                              origin=origin,
                              option=option,
                              imgsize=(640, 640),
                              imgformat="png",
                              markers=marker_list_flight)
    else:
        # marker_list_flight = markerList(option)
        # get_static_google_map("google_map_" + str(option), zoom=2, origin=origin,
        #                       option=option, imgsize=(640, 640), imgformat="png",
        #                       markers=marker_list_flight)
        getShortestPath(option)
        cities = nw.readCities()

        fname = cities[option - 1] + 'path.csv'
        with open(fname, "w", newline='') as writeFile:
            writeFile.truncate()
            writer = csv.writer(writeFile)
            for i in range(len(path_list)):
                for j in range(len(path_list[i])):
                    writer.writerow(path_list[i][j])
        writeFile.close()
        # print("path list >>\n", path_list)
        sort_list = []
        for i in range(len(path_list)):
            for j in range(len(path_list[i])):
                sort_list.append(path_list[i][j])
        quicksort(sort_list, 0, len(sort_list) - 1)
        # sort_list contains the shortest paths to the destination
        fname = cities[option - 1] + '_sortedpath.csv'
        with open(fname, "w", newline='') as writeFile:
            writeFile.truncate()
            writer = csv.writer(writeFile)
            writer.writerow([
                "origin", "first city", "second city", "third city",
                "fourth city", "distance", "political", "points",
                "probabilityDistribution"
            ])
            writer.writerows(sort_list)
        writeFile.close()

        marker_list_flight = markerList(option)
        get_static_google_map("google_map_" + str(9),
                              zoom=2,
                              origin=origin,
                              option=option,
                              imgsize=(640, 640),
                              imgformat="png",
                              markers=marker_list_flight)
        return Image.open("google_map_9.png")
    return Image.open("google_map_" + str(option) + ".png")
Example #29
    # print("==>", pin)
    location = geolocator.geocode(pin)
    lati.append(location.latitude)
    longi.append(location.longitude)
    # print(location.address)
    # print((location.latitude, location.longitude))

# Add 'Latitude' and 'Longitude' columns to the data frame from the collected lists
df['Latitude'] = lati
df['Longitude'] = longi

df.to_csv('cities_location.csv',
          index=None,
          header=['City', 'Latitude', 'Longitude'])

nw.newsMain()


def distance_to_all():
    # Calculate the distance between every pair of cities and save it in distance_list
    distance_list.clear()
    df = pd.read_csv("cities_location.csv")
    for i in range(len(df.index)):
        city = []
        for j in range(len(df.index)):
            to = []
            to.append(df.iat[i, 0])
            to.append(df.iat[j, 0])
            to.append(
                getDistance(df.iat[i, 1], df.iat[i, 2], df.iat[j, 1],
                            df.iat[j, 2]))
Example #30
# Install libraries
#!pip install -U pandas_datareader

import os, sys
sys.path.append('collect')

import Finance as f
import News as n

# Initialization
pricesStartDate = '2000-01-01'
pricesEndDate = '2018-01-31'
pricesFile = 'data/historicPrices/prices.csv'
actionsFile = 'data/historicActions/actions.csv'
articlesFile = 'data/historicArticles/'
articleStartYear = 2011
articleEndYear = 2018
articleStartMonth = 4
articleEndMonth = 2

# Read in companies listed on NYSE and NASDAQ
companies = f.loadCompanies()

f.saveHistoricalPrices(companies, pricesStartDate, pricesEndDate, pricesFile)

f.saveHistoricalActions(companies[825:].reset_index(), pricesStartDate, pricesEndDate, actionsFile)

n.saveHistoricalNews(articleStartYear, articleStartMonth, articleEndYear, articleEndMonth, articlesFile)
Example #31
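    # Compare the current news MOTDs with the stored copy; on a change,
    # regenerate the news video and optionally upload it to Twitter.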
    FortniteGame = requests.get(
        "https://fortnitecontent-website-prod07.ol.epicgames.com/content/api/pages/fortnite-game",
        headers={
            'Accept-Language': language.lower()
        }).json()["battleroyalenews"]["news"]["motds"]

    if os.path.isfile(f"StoredNews.json") == False:
        with open("StoredNews.json", "w+") as file:
            file.write(json.dumps(FortniteGame))
    else:
        with open("StoredNews.json") as f:
            StoredNews = json.loads(f.read())

        if FortniteGame != StoredNews:
            print("Cambiamenti")
            News.GetBRNews(Language=language.lower())
            if twitterEnabled is True:
                try:
                    twitter = Twython(twitter_consumer_key,
                                      twitter_consumer_secret,
                                      twitter_access_token,
                                      twitter_access_token_secret)
                    twitter.verify_credentials()
                except:
                    print("Authentication Failed")

                os.system(
                    'ffmpeg -i NewsBR.mp4 -vcodec libx264 NewsBRTwitter.mp4')
                video = open('NewsBRTwitter.mp4', 'rb')
                response = twitter.upload_video(media=video,
                                                media_type='video/mp4',
Example #32
def main():
	global news

	if msg == "hi" or msg == "hey" or msg == "hello":
		me.SayTo(activator, "\nHello {0}, I am {1}.\nI can change news in various locations -- like Brynknot, Clearhaven, etc. Full list can be found on sign next to me.\nList of commands:\n^location NEWLOCATION^: Change location for adding/removing messages.\n^messages^: Show messages for chosen location.\n^revert^: Make me forget everything you have written for a message.\n^add MESSAGE^: Add a new message. You can use this multiple times to make longer messages. Use ~<nl>~ to indicate newline.\n^preview^: Preview what your message would look like.\n^save^: Save your complete message as a new message for the chosen location. You can also specify a comma-separated string of locations to save the message into (for example, ~save Brynknot, Greyton~).\n^remove ID^: Remove #ID message from chosen location.\n^remove all^: Remove all messages from chosen location.".format(activator.name, me.name))

	# Change current location.
	elif text[0] == "location" and len(text) > 1:
		location = WhatIsMessage().strip()[9:]
		info.slaying = location
		me.SayTo(activator, "\nChanged location I'm managing for you to '{0}'.".format(location))

	# Show messages.
	elif msg == "messages":
		if not info.slaying:
			me.SayTo(activator, "\nFirst select location you want to get messages for.")
		else:
			messages = news.get_messages()

			if messages:
				me.SayTo(activator, "\nThere are the following messages:\n")

				for i, message in enumerate(messages):
					activator.Write("#{0}: {1}: {2}".format(i + 1, message["time"], message["message"]), COLOR_NAVY)
			else:
				me.SayTo(activator, "\nNo messages in that location.")

	# Make the NPC forget the message that was made so far.
	elif msg == "revert":
		info.msg = ""
		me.SayTo(activator, "\nI have removed everything you have written.")

	# Add a message, replacing "<nl>" with actual newline character.
	elif text[0] == "add" and len(text) > 1:
		news_message = WhatIsMessage().strip()

		if news_message.lower().find("endmsg") == -1:
			info.msg += news_message[4:].replace("<nl>", "\n")
			me.SayTo(activator, "\nI have added your message.")
		else:
			activator.Write("Trying to cheat, are we?", COLOR_RED)
			LOG(llevInfo, "CRACK: Player {0} tried to write bogus message using news changer.\n".format(activator.name))

	# Preview what the new message would look like.
	elif msg == "preview":
		me.SayTo(activator, "\nText that would appear on a sign in chosen location:\n{0}: {1}".format(news.get_time(), info.msg))

	# Save a message.
	elif text[0] == "save":
		if not info.slaying and len(msg) <= 4:
			me.SayTo(activator, "\nFirst select location you want to save the message for.")
		else:
			if len(msg) > 5:
				locations = WhatIsMessage()[5:].strip().split(",")

				news.db.close()
				news = None

				for location in locations:
					try:
						news = News(location.strip())
						news.add_message(info.msg)
					finally:
						news.db.close()
						news = None
			else:
				news.add_message(info.msg)

			info.msg = ""
			me.SayTo(activator, "\nDone! I have added your message.")

	# Remove a message -- either all, or a specified message.
	elif text[0] == "remove" and len(text) > 1:
		if not info.slaying:
			me.SayTo(activator, "\nFirst select location you want to remove message from.")
		elif text[1] == "all":
			news.remove_all_messages()
			me.SayTo(activator, "\nRemoved all messages.")
		elif text[1].isdigit():
			id = int(text[1])

			if id > 0 and id <= len(news.get_messages()):
				news.remove_message(id - 1)
				me.SayTo(activator, "\nRemoved message #{0}.".format(id))