Exemplo n.º 1
0
def server_run():
    """Fetch the StopGame news page, build the article report, and render it.

    Crawls https://stopgame.ru/news, extracts the articles via
    find_articles(), writes the report (StopGame.json) via publish_report(),
    then reloads that JSON and renders it with the 'news.html' template.

    Returns:
        The rendered 'news.html' template with the parsed articles.
    """
    url3 = "https://stopgame.ru/news"
    resp = requests.get(url3)
    top = find_articles(url3, resp)
    publish_report(top)  # writes StopGame.json as a side effect
    # FIX: removed the redundant explicit close() inside the `with` block —
    # the context manager closes the file on exit.
    with codecs.open("StopGame.json", "r", encoding="utf-8") as report_file:
        jdata = json.load(report_file)
    return render_template('news.html', articles=jdata)
Exemplo n.º 2
0
 def test_url(self):
     """Integration test: crawl the news page and validate StopGame.json.

     Fetches the live page, runs the parser pipeline, then asserts that the
     stored URL matches and that every article field is non-empty.
     """
     url3 = "https://stopgame.ru/news"
     # BUG FIX: urllib.urlretrieve is Python-2-only; in Python 3 it lives in
     # urllib.request and returns a (filename, headers) tuple, not a URL.
     # Passing that tuple as the URL broke both get_html_page() and the
     # equality check below — keep the plain URL string and download the
     # local snapshot as a separate step.
     urllib.request.urlretrieve(url3, "test.txt")
     resp3 = get_html_page(url3)
     top = find_articles(url3, resp3)
     publish_report(top)  # writes StopGame.json as a side effect
     with codecs.open("StopGame.json", "r", encoding="utf-8") as json_data:
         jdata = json.load(json_data)
         self.assertEqual(jdata['url'], url3)
         for article in jdata["articles"]:
             for art in article.values():
                 # every stored field (title, link, ...) must be non-empty
                 self.assertTrue(art)
     # FIX: removed the redundant close() that followed the `with` block —
     # the context manager already closed the file.
Exemplo n.º 3
0
 def test_url(self):
     """Integration test: crawl the news page and validate StopGame.json.

     Runs the full pipeline (fetch, parse, publish), then asserts the first
     stored URL matches the crawled URL and every article field is non-empty.
     """
     url3 = "https://stopgame.ru/news"
     resp3 = get_html_page(url3)  # fetch the page (status-checked helper)
     top = find_articles(url3, resp3)  # parse the article list
     publish_report(top)  # write StopGame.json
     with codecs.open("StopGame.json", "r",
                      encoding="utf-8") as json_data:
         jdata = json.load(json_data)
         # first stored URL must equal the URL we crawled
         self.assertEqual(jdata['url'][0], url3)
         for article in jdata["articles"]:
             for art in article.values():
                 # every field (title, link, ...) in the JSON must be non-empty
                 self.assertTrue(art)
     # FIX: removed the redundant close() that followed the `with` block —
     # the context manager already closed the file.
Exemplo n.º 4
0
from lab_1.Crawler.Finder import get_html_page, find_articles, publish_report

# Run the crawler pipeline end-to-end: fetch the StopGame news listing,
# parse the articles from it, and publish the report.
# NOTE(review): these module-level names (url3, resp3, top) may be imported
# elsewhere — do not rename without checking callers.
url3 = "https://stopgame.ru/news"
resp3 = get_html_page(url3)
top = find_articles(url3, resp3)
publish_report(top)