def set_top_page(url):
    """Render the blog top page.

    Reads the base page template from *url*, renders up to 10 articles into
    the long article template, and splices the result into the base page.
    """
    # One <li> entry per article; %(...) placeholders are substituted by render().
    article_template = (
        '<li>'
        '<article>'
        '<div class="container">'
        '<p class="time-stamp">%(time-stamp)</p>'
        '<ul class="tags">'
        '%(tags)'
        '</ul>'
        '<h2><i class="fas fa-square-full"></i>%(title)</h2>'
        '<p class="article">%(body)</p>'
        '<a class="continue" href="/blog/%(title)"><i class="fas fa-angle-right"></i>続きを読む</a>'
        '</div>'
        '</article>'
        '</li>'
    )
    with open(url) as fp:
        base = fp.read()
    article_list = articles.get_articles(count=10)
    print(article_list)
    rendered = ""
    for article in article_list:
        # Truncate long bodies to a 50-char teaser with an ellipsis.
        if len(article.body) > 50:
            article.body = article.body[:50]
            article.body += "..."
        print(article.body)
        substitutions = {
            '%(time-stamp)': article.created_at,
            '%(title)': article.title,
            '%(body)': article.body_for_html(),
        }
        rendered += render(article_template, substitutions)
        rendered = set_tags(rendered, article.tags)
    rendered = render(base, {"%(article)": rendered})
    return set_latest(rendered)
def index():
    """Return index page"""
    # Fetch articles (including invisible ones) formatted per site config.
    fetched = articles.get_articles(
        db=DB,
        invisible=True,
        timeformat=CONFIG["system"]["time_format"],
        url=CONFIG["blog"]["url"],
    )
    return render_template(
        "index.tmpl",
        blog=CONFIG["blog"],
        articles=fetched,
    )
def index():
    """Render the first page of articles with a cacheable response.

    Returns a response for 'index.html' showing the first PER_PAGE
    articles; `more` tells the template whether further pages exist.
    """
    all_articles = articles.get_articles()
    display = all_articles[:PER_PAGE]
    # Bug fix: the original used `a > display`, a lexicographic list
    # comparison that only worked by accident because `display` is a
    # prefix of `a`. Compare lengths explicitly instead.
    more = len(all_articles) > PER_PAGE
    response = make_response(
        render_template(
            'index.html',
            articles=display,
            more=more,
            debug=DEBUG
        )
    )
    # Allow clients/proxies to cache the page for an hour.
    response.headers['Cache-Control'] = 'max-age=3600'
    return response
def get_articles_fake_tests():
    """Smoke test: scraping nonexistent subreddits must yield no articles."""
    fake_subreddits = ['djwfojwi', '12312312', 'sdjfkla']
    failed = False
    for name in fake_subreddits:
        url = "https://www.reddit.com/r/{}".format(name)
        print("Scraping: {}...".format(name))
        if len(articles.get_articles(url)) != 0:
            failed = True
            print("TEST FAILED!")
    # Summarize the overall outcome.
    print("[FAIL] Fake subreddits test" if failed else "[PASS] Fake subreddits test")
def do_get(self, request): super().do_get(request) # print("self.path", self.path) # if self.path in ["blog", "blog/", "/", ""]: # self.response = Response(main.protocolVersion, States.OK) # self.response.body = replace_engine.set_top_page(main.DOCUMENT_ROOT + "/blog_top.html") # self.ext = "html" # return self.response head, tail = os.path.split(self.path) print("tail",tail) self.root, self.ext = os.path.splitext(tail) self.ext = self.ext.lstrip(".") print(self.ext) print(bool(self.ext)) if self.ext: normal = NormalController() return normal.do_get(request) if tail and not tail == "blog": print("in article") article = articles.get_articles(tail) if article: engine = TemplateEngine(article, "template") self.response = Response(main.protocolVersion, States.OK) self.response.body = engine.render() self.ext = "html" else: self.response = self.not_found() else: print("in blog_top") self.response = Response(main.protocolVersion, States.OK) self.response.body = replace_engine.set_top_page(os.path.join(main.DOCUMENT_ROOT, "blog_top.html")) self.ext = "html" # return self.response # self.response = Response(main.protocolVersion, States.Not_Found) # self.response.body = os.path.join(main.DOCUMENT_ROOT, "blog.html") # self.ext = "html" return self.response
def articles_list(page_num=1):
    """Render page *page_num* of the article listing.

    Page 1 permanently redirects to '/'; an empty page aborts with 404.
    """
    page_num = int(page_num)
    if page_num == 1:
        # The first page lives at the site root.
        return redirect('/', 301)
    all_articles = articles.get_articles()
    start = (page_num - 1) * PER_PAGE
    display = all_articles[start:start + PER_PAGE]
    if not display:
        abort(404)
    # Total number of pages, rounding up the last partial page.
    page_count = int(ceil(len(all_articles) / float(PER_PAGE)))
    response = make_response(
        render_template(
            'index.html',
            articles=display,
            current_page=page_num,
            pages=range(1, page_count + 1),
            debug=DEBUG
        )
    )
    # Cacheable for one hour.
    response.headers['Cache-Control'] = 'max-age=3600'
    return response
def get_articles_legit_tests():
    """Smoke test: scraping real subreddits must yield exactly 25 articles each."""
    test_failed = False
    legit_subreddits = [
        'videos',
        'technology',
        'askreddit',
        'programming',
        'science',
        'Muse',
        'nekoatsume'
    ]
    for s in legit_subreddits:
        url = "https://www.reddit.com/r/{}".format(s)
        print("-- Scraping: {}".format(s))
        arts = articles.get_articles(url)
        # Reddit listing pages return 25 entries per page.
        if len(arts) != 25:
            test_failed = True
            print("TEST FAILED!")
    if test_failed:
        print("[FAIL] get_articles legit test")
    else:
        # Bug fix: the success message previously said "fake test"
        # (copy-paste from the fake-subreddits test), mislabeling results.
        print("[PASS] get_articles legit test")
def entry():
    """Pick the best-scoring valid article for the posted event and return it as JSON.

    Expects a JSON body with at least "event", "location", and "date".
    Returns an empty JSON object when no valid article is found.
    """
    info = json.loads(request.get_data().decode('utf-8'))
    print(info)
    response = {}
    high = -1
    articles = get_articles(info["event"])
    for article in articles:
        score, valid = analyze(info, article)
        if valid and score > high:
            # Bug fix: `high` was never updated, so any valid article with
            # score > -1 overwrote the response and the LAST article won
            # instead of the highest-scoring one.
            high = score
            response = {
                "event": info["event"],
                "location": info["location"],
                "summary": article["title"],
                "url": article["url"],
                "date": info["date"]
            }
    print(response)
    return jsonify(response)
def post(self):
    '''Return the articles as JSON (nb: uses POST due to token inclusion).

    Reads an identity token from the request; if it is missing, corrupted,
    or unknown, a fresh standard user is created from the client IP and a
    new token is issued. Writes {"content": ..., "token": ...} as JSON.
    '''
    logging.debug('%s'%self.request)
    content = articles.get_articles()
    self.response.headers['Content-Type'] = 'application/json'
    # Token arrives URL-encoded; a valid token contains a '|' separator.
    id_token = urllib.unquote(self.request.get('token'))
    user = None
    if id_token and '|' in id_token:
        user = authentication.get_user(id_token)
        if not user:
            # token has been tampered with or corrupted
            # try panopticlick-style stuff with ip and headers
            user = authentication.new_user(type='standard', ip=self.request.remote_addr)
            id_token = user.get_hash()
            #user.blackmark()
            # Send new token? If the user does not update the token,
            # then this step will repeat and we end up with lots of fake users :(
    else:
        # No (or malformed) token supplied — issue a brand-new identity.
        user = authentication.new_user(type='standard', ip=self.request.remote_addr)
        id_token = user.get_hash()
    # Echo the (possibly refreshed) token back with the content.
    response = {"content": content, "token": id_token}
    self.write(json.dumps(response))
def article_list():
    """Render the article listing page from the first page of results."""
    # Renamed local so it no longer shadows the `articles`/get_articles naming.
    rows = get_articles(conn, 1)
    return render_template('articles.html', articles=rows)