def publish(request):
    """Handle article publishing: show the form on GET, save on valid POST.

    Unauthenticated users are sent through the registration flow instead.
    (Python-2 style debug ``print`` statements from the original removed.)
    """
    if not request.user.is_authenticated():
        # Not logged in: hand off to the registration view.
        return register(request)
    if request.method == "POST":
        form = PublishForm(request.POST)
        if form.is_valid():
            art = Article(
                title=form.cleaned_data['title'],
                text=form.cleaned_data['text'],
                user=request.user,
            )
            art.save()
        # Redirect home whether or not validation succeeded (original behavior).
        return redirect('/')
    form = PublishForm()
    return render_to_response(
        "write.html",
        {'form': form},
        context_instance=RequestContext(request),
    )
def save_to_db(self):
    """Create or update an Article from this form's data and persist it.

    If ``self.id.data`` is set the existing article is edited in place;
    otherwise a new one is created. Missing tags are created on the fly.
    """
    if self.id.data:  # edit existing article
        article = Article.query.get(int(self.id.data))
    else:
        article = Article()
    article.title = self.title.data
    article.short_description = self.short_description.data
    article.body = self.body.data
    article.published = self.published.data
    article.category_id = self.category.data
    # Comprehension instead of map(lambda ...) for readability.
    article.authors = [Member.query.get(int(member_id))
                       for member_id in self.authors.data]
    article.tags = []
    for tag_name in self.tags.data:
        tag_record = Tag.query.filter_by(name=tag_name).first()
        if not tag_record:
            # Create missing tags; a single commit at the end replaces the
            # original's commit-per-tag, which did one transaction per tag.
            tag_record = Tag(name=tag_name)
            db.session.add(tag_record)
        article.tags.append(tag_record)
    if self.article_cover.data:
        article.cover = upload(self.article_cover.data)
    # Debug print removed.
    db.session.add(article)
    db.session.commit()
def print_hot():
    """Print the top-10 hot visitor URLs with their article names."""
    items = VisitorLog.query_hot()

    def _keep(o):
        # Drop landing/tool/album pages and local development hosts.
        if o.url in ('https://wxnacy.com/',
                     'https://wxnacy.com/tool/',
                     'https://wxnacy.com/album/'):
            return False
        # startswith accepts a tuple: one call instead of an `or` chain.
        if o.url.startswith(('http://local.wxnacy.com', 'http://localhost')):
            return False
        return True

    # Pass the predicate directly instead of wrapping it in a lambda.
    for i in list(filter(_keep, items))[:10]:
        if 'vim.wxnacy.com' in i.url:
            print('VIM 练级手册: ', i.url)
            continue
        art = Article.query_item(url=i.url)
        if not art:
            art = Article.crawler(url=i.url)
        url = i.url
        if url.startswith('https://wxnacy.com'):
            url = url[len('https://wxnacy.com'):]
        print('{}: {}'.format(art.name, url))
def search_article(page, type, keyword):
    """Render the admin article list filtered by category and/or keyword.

    Falls back to form fields when ``type``/``keyword`` are not supplied.
    ``type`` shadows the builtin but the parameter name is kept so existing
    callers (including keyword-argument calls) keep working. Debug prints
    from the original removed.
    """
    categorys = Category.query.all()
    if not type:
        type = request.form.get('type')
    if not keyword:
        keyword = request.form.get('keyword')
    has_keyword = keyword not in (None, "")
    if type == "所有类型":
        if not has_keyword:
            # Nothing to filter on: show the plain article list.
            return redirect(url_for('list_article'))
        article = Article.search_page(page=page, keyword=keyword)
        return render_template("admin/listarticle.html", article=article,
                               keyword=keyword, categorys=categorys)
    if not has_keyword:
        article = Article.search_page(page=page, category=type)
        return render_template("admin/listarticle.html", article=article,
                               type=type, categorys=categorys)
    article = Article.search_page(page=page, keyword=keyword, category=type)
    return render_template("admin/listarticle.html", article=article,
                           keyword=keyword, type=type, categorys=categorys)
def run(self, page=0):
    """Crawl pages starting at *page*, storing unseen articles.

    Loops while ``self.continued`` is truthy; stops when a page yields no
    contents.
    """
    self.page = page
    while self.continued:
        self.load_page()
        page_contents = self.load_page_contents()
        if not page_contents:
            # No more content: report completion (Korean "완료" = done) and stop.
            print(f'{self} 완료')
            return
        for url in page_contents:
            self.load_content(url)
            content = self.parse_content()
            if content is None:
                continue
            session = create_session()
            # Only persist titles we have not stored before.
            article = (session.query(Article).filter(
                Article.title == content.title).first())
            if not article:
                article = Article()
                article.title = content.title
                article.body = content.text
                article.source = self.name
                session.add(article)
                session.commit()
            # Random 2-3 second delay to be polite to the target server.
            time.sleep(random.random() * 1 + 2)
        # Korean: "page N exploration complete".
        print(f'{self.page}페이지 탐색 완료')
def add_article(user, category, title, author, content):
    """Persist a new Article and bump its category's article count."""
    new_article = Article(
        user=user,
        category=category,
        title=title,
        author=author,
        content=content,
    )
    new_article.save()
    # Keep the denormalized per-category article counter in sync (+1).
    category.count += 1
    category.save()
    return new_article
def add_news_content():
    """
    添加文章数据 — import article rows from an uploaded Excel file.

    Expects columns: title, cover url, content, read count, extra count;
    the first row is treated as a header and skipped.
    :return: JSON ``{'code': 200}``
    """
    import xlrd
    file = request.files.get('file')
    with NamedTemporaryFile() as f:
        f.write(file.read())
        f.seek(0)
        sheet = xlrd.open_workbook(f.name).sheet_by_index(0)
        # Iterate rows directly instead of materializing the whole sheet
        # into an intermediate `dataset` list first.
        for row_idx in range(1, sheet.nrows):
            title, url, content, read_count, extra_count = sheet.row_values(row_idx)[:5]
            Article.create(article_type_id=6,
                           title=title,
                           contents=content,
                           cover_url=url,
                           extra_add_count=extra_count,
                           real_use_count=read_count)
    return jsonify({'code': 200})
def test_to_dict(self): """ Test of the "to_dict" method """ test_user = User(username="******", email="dummy data") db.session.add(test_user) db.session.commit() test_article = Article(title="Test 1", synthesis="Synthèse", user_id=test_user.id) db.session.add(test_article) db.session.commit() data = {} data = { "id": 1, "title": "Test 1", "synthesis": "Synthèse", } returned_data = test_article.to_dict() self.assertEqual(returned_data["id"], data["id"]) self.assertEqual(returned_data["title"], data["title"]) self.assertEqual(returned_data["synthesis"], data["synthesis"])
def index(page=1): """ 主页显示组别 """ if request.method == 'POST': # 获取前端请求数据 if not request.json or not 'group_id' in request.json: return jsonify({'code': 0, 'message': 'Missing Parameters'}) group_id = request.get_json().get('group_id') try: articles = Article.query.filter_by(group_id=group_id, is_delete=0) urls = [Article.to_dict(article) for article in articles] if len(urls) < 1: return jsonify({'code': 0, 'message': 'Data Not Found'}) return jsonify({'code': 1, 'urls': urls, 'message': 'Success'}) except: return jsonify({'code': 0, 'message': 'Database query failed'}) else: # 显示所有组别以及默认返回第一组的文章链接 current_page = request.args.get('current_page', page) g_count = Config.GROUPCOUNT group_count = request.args.get('group_count', g_count) # 默认一页显示10组 try: pagination = Group.query.filter_by(is_delete=0).paginate(current_page, group_count, error_out=False) groups = pagination.items groups = [Group.to_dict(group) for group in groups] count = Group.query.filter_by(is_delete=0).count() group_id = (page - 1) * g_count + 1 # 比如点击第一页的时候默认显示第一组的文章链接,点击第二页的时候默认显示第十一组的文章链接 articles = Article.query.filter_by(group_id=group_id, is_delete=0) urls = [Article.to_dict(article) for article in articles] if len(urls) < 1: urls = 'Data Not Found' return jsonify({'code': 1, 'groups': groups, 'count': count, 'urls': urls, 'message': 'Success'}) except: return jsonify({'code': 0, 'message': 'Database query failed'})
def deploy(deploy_type):
    """Upgrade the database schema, then seed data according to *deploy_type*."""
    from flask_migrate import upgrade
    from app.models import BlogInfo, User, Source, BlogView

    # Bring the schema up to the latest migration first.
    upgrade()

    if deploy_type == 'product':
        # Seed the minimum records a production instance needs.
        BlogInfo.insert_blog_info()
        User.insert_admin(email='*****@*****.**', username='******',
                          password='******')
        Source.insert_sources()
        BlogView.insert_view()
    elif deploy_type == 'test_data':
        # You must run `python manage.py deploy(product)` before running
        # `python manage.py deploy(test_data)`.
        Article.generate_fake(20)
def crawl():
    """Run the article crawler, then refresh the Constant table, logging progress."""
    log = current_app.logger.info
    log(time.ctime() + ':Crawler begin.')
    Article.update()
    log(time.ctime() + ':Crawler done.')
    log(time.ctime() + ':Update table Constant begin.')
    Constant.update()
    log(time.ctime() + ':Update table Constant done.')
def add_article(request): if request.method == "POST" and request.is_ajax(): # 获取post提交的数据 category_name = request.POST.get("category") title = request.POST.get("title") author = request.POST.get("author") content = request.POST.get("content") # print 'category:' + category_name + ' title:' + title + ' author:' + author + ' content:' + content #打印POST请求的内容 # 创建Article对象,保存文章 user = User.objects.get(username=request.user.username) category = Category.objects.get(name=category_name) add = request.POST.get("add") if add == "false": # 编辑文章 pass else: print request.POST.get("id") category.count = category.count + 1 category.save() article = Article(user=user, category=category, title=title, content=content, author=author) article.id = request.POST.get("id") # 如果是编辑文章将会获取到原文章的id, 如果是添加文章则获取到none article.save() category_list = Category.objects.filter(user=request.user) content_dict = {"categories": category_list} return HttpResponse("添加成功") else: category_list = Category.objects.filter(user=request.user) userprofile = UserProfile.objects.get(id=request.user.id) content_dict = {"categories": category_list, "userprofile": userprofile} return render(request, "admin/add_article.htm", content_dict)
def __init__(self):
    """Wire command names to handler callbacks.

    The lambdas that merely forwarded their argument to a bound method
    (``lambda callback: self.add(callback)``) or wrapped a zero-argument
    call added nothing; the callables are referenced directly. Lambdas that
    adapt an ``args`` list to positional parameters are kept.
    """
    self.env = {
        'add': self.add,
        'get': self.get,
        'remove': self.remove,
        'edit': self.edit,
        'hashpass': self.hashpass,
    }
    self.add_callbacks = {
        'category': self.new_category,
        'user': self.new_user,
        'article': self.new_article,
    }
    self.get_callbacks = {
        'category': lambda args: Category.get_category(args[0]),
        'categories': lambda args: Category.get_many_categories(),
        'user': lambda args: User.get_user_by_name(args[0]),
        'article': lambda args: Article.get_article(args[0]),
        'articles': lambda args: Article.get_many_articles_by_category(args[0]),
    }
    self.remove_callbacks = {
        'user': lambda args: User.delete_user(args[0]),
    }
    self.edit_callbacks = {
        'article': lambda args: Article.edit_article(args[0], args[1], args[2]),
    }
def push_to_breitbot(self):
    """Upload this article once: record it, extract, upload, then commit.

    Raises FileExistsError if an uploaded entry with the same headline
    already exists; re-raises ClientError from the upload after rolling
    back the database session.
    """
    # The original wrapped this query in a try/except that only re-raised;
    # that wrapper was a no-op and is removed.
    is_exist = db.session.query(Article_Entry).filter(
        (Article_Entry.headline == self.headline),
        (Article_Entry.uploaded == True)).first()
    if is_exist:
        raise FileExistsError
    session_object = Article_Entry(
        headline=self.headline,
        publish_date=self.pub_date.date(),
    )
    try:
        db.session.add(session_object)
        # Once the entry is flushed to the database, the unique id is
        # exposed; name the upload using that id.
        db.session.flush()
        self.target_name = "{}.pdf".format(session_object.id)
        session_object.target_name = self.target_name
        self._extract()
        self._upload()
        self._update(session_object)
        db.session.commit()
    except IntegrityError:
        db.session.rollback()
    except ClientError:
        db.session.rollback()
        # BUG FIX: re-raise the caught exception instead of raising the
        # bare class (which discarded the original error details).
        raise
class TestArticle(unittest.TestCase):
    """Unit tests for the Article model."""

    def setUp(self):
        # Fresh fixture before each test.
        self.new_article = Article("title",
                                   "https://image.tmdb.org/t/ptfukyy",
                                   "article", '2020', 'news')

    def tearDown(self):
        '''
        tearDown method that does clean up after each test case has run.
        '''
        Article.all_articles = []

    def test_instance(self):
        self.assertTrue(isinstance(self.new_article, Article))

    def test_check_instance_variables(self):
        # assertEquals is a deprecated alias; use assertEqual.
        self.assertEqual(self.new_article.title, 'title')
        self.assertEqual(self.new_article.image, "https://image.tmdb.org/t/ptfukyy")
        self.assertEqual(self.new_article.description, 'article')
        self.assertEqual(self.new_article.date, '2020')
        self.assertEqual(self.new_article.article, 'news')

    def test_save_article(self):
        '''
        test_save_article test case to test if the article object is saved
        into the article list
        '''
        self.new_article.save_article()  # saving the new article
        self.assertEqual(len(Article.all_articles), 1)
def get_article(article_id):
    """Show one article, incrementing its read counter; 404 when absent."""
    # BUG FIX: .first() may return None and the original then crashed with
    # AttributeError; first_or_404() turns a missing id into a clean 404.
    # Also use the class-level Article.query instead of instantiating.
    article = Article.query.filter_by(article_id=article_id).first_or_404()
    article.article_read_cnt += 1
    db.session.add(article)
    db.session.commit()
    articles = Article.query.limit(8)
    return render_template('article.html', article=article, articles=articles)
def get_article_json(id: IntLike, only_from_cache=False, process_json=True) -> Union[dict, None]:
    """
    Get article json by id

    `only_from_cache` is for multiget_article_json()

    Returns None when the id is unknown (or, with only_from_cache=True,
    when the id is not in Redis). With process_json=False the raw
    serialized dict is returned without Article.process_json().
    """
    key = Keys.article_json.format(id)
    data = rd.get(key)  # none is returned if not exists
    if data is not None:
        json_article = json.loads(data.decode())
        # Refresh the TTL so frequently-read articles stay cached.
        rd.expire(key, Keys.article_json_expire)
        if not process_json:
            return json_article
        return Article.process_json(json_article)
    if only_from_cache:
        return None
    # Cache miss: load from the database and repopulate the cache.
    article = Article.query.get(id)
    if article is None:
        return None
    json_article = article.to_json(cache=True)
    cache_article_json(json_article)
    if not process_json:
        return json_article
    return Article.process_json(json_article)
def put(self, args):
    """Update the article identified by args[constants.article_id].

    NOTE(review): ``args.get(constants.article_type)`` is passed twice —
    as both the first and the fourth constructor argument. Given the
    type/des/content pattern, the fourth was probably meant to be a
    different field; confirm against Article's constructor before
    relying on this.
    """
    data = Article.put_one(
        args[constants.article_id],
        Article(args.get(constants.article_type),
                args.get(constants.article_des),
                args.get(constants.article_content),
                args.get(constants.article_type)))
    return data
def test_save_source(self):
    '''Testcase method to test Article if object is saved'''
    sample = Article(
        'Arsenal run riots',
        'Soll Campell',
        'Arsenal start off their Premeire league campaign with a comfortablewin against newly promoted side Fulham',
        'www.skysport.com',
        'skysport.com',
        '2020-09-12T15:57:00Z')
    sample.save_article()
    # Exactly one article should now be stored.
    self.assertEqual(len(Article.all_articles), 1)
def post(self):
    """Rename an article: read id/title from the JSON body, update, return JSON."""
    payload = request.json
    target_id = payload["id"]
    Article.objects(id=target_id).update_one(set__title=payload["title"])
    # Return the freshly-updated document.
    return Article.objects.get(id=target_id).to_json()
def init_db():
    """Drop and recreate all tables, then seed the base data."""
    # Typo fix in the log message: "initilize" -> "initialize".
    current_app.logger.info('Database initialize begin...')
    db.drop_all()
    db.create_all()
    Author.init()
    Article.init()
    Constant.init()
    current_app.logger.info('initialize database done!')
def article_add(): art = Article(title="这是一个标题2", content="这是文章内容2") art2 = Article(title="这是标题3", content="这是内容3") # 将数据保存到数据库中 db.session.add(art) db.session.add(art2) db.session.commit() return '返回值:%s' % art.id
def setUp(self):
    """
    run before each test — builds a sample Article fixture.

    NOTE(review): the url argument below begins with a stray '"' inside
    the string literal ('"http://...'); this looks like a paste error —
    confirm whether the quote is intentional fixture data before fixing.
    """
    self.new_article = Article(
        'bbc news',
        'Two hurt as car strikes protest in Seattle',
        'The women are seriously injured as the vehicle careers into protesters on a closed highway',
        'https://ichef.bbci.co.uk/news/1024/branded_news/8600/production/_113240343_gettyimages-1223820095.jpg',
        '"http://www.bbc.co.uk/news/world-us-canada-53291289',
        'bbc')
def datainit():
    """Seed categories, roles, and fake users/articles for development."""
    from app.models import Role, User, Article, Category

    print('Category init')
    Category.insert_category()
    print('Role init')
    Role.insert_roles()
    print('User and Article generate')
    User.generate_fake(50)
    Article.generate_fake(50)
def articles_write(): form = ArticleWriteForm() # 글 쓰기 요청 if form.validate_on_submit(): title, url, content = form.title.data, form.url.data, form.content.data # URL 링크 형태의 글 일경우 중복되는 URL이 있는지 확인합니다. if url: check_duplicate_url = db.session.query(Article).filter_by(url=url) # 중복되는 글이 있으면 해당 글로 이동시켜줍니다. if check_duplicate_url.count() >= 1: return redirect(url_for("article", id=check_duplicate_url.first().id)) # 부하가 큰 HTML 다운로드를 여러번 하지 않기 위해 미리 DB에 추가시켜줍니다. article = Article(user_id=g.user.id, url=url, content=content) db.session.add(article) # URL은 있지만 title이 없을 경우 크롤링하여 제목을 가져옵니다. if url and not title: # User-Agent를 확인하는 웹사이트를 위해 User-Agent를 바꿔줍니다. headers = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11'} # HTML을 다운로드 합니다. parsed = BeautifulSoup(urlopen(Request(url, headers=headers)).read()) # 먼저 <meta property='og-title'/>를 찾습니다. element = parsed.find(property="og:title") if element: title = element["content"][:80] # 찾지 못했으면 <title></title>를 찾습니다. else: element = parsed.title if element: title = parsed.title.string[:80] # 모두 찾지 못했으면 URL을 제목으로 합니다. else: title = url[:80] # 아무 URL도 없을 경우 if not title: title = "Untitled Document" article.title = title db.session.commit() # URL 링크 형태의 글이지만 내용이 있을 경우 댓글로 올려줍니다. if url and content: article.comment += 1 db.session.add(Comment(article_id=article.id, user_id=g.user.id, content=content)) db.session.commit() return redirect(url_for("index")) return render_template("articles-write.html", form=form)
def _load_mc_stories(rows=None): pull_freq = app.config["PULL_FREQ"] fetched_stories = mc.storyList( query, solr_filter=mc.dates_as_query_clause( datetime.datetime.now() - datetime.timedelta(seconds=pull_freq) - datetime.timedelta(days=3), datetime.datetime.now() - datetime.timedelta(days=3)), rows=rows) stories = {"query": query, "stories": []} for story in fetched_stories: if Article.query.filter_by(title=story["title"]).all(): print(f"Skipping Duplicate: {story['title']}") continue story_json = {} story_json["source_name"] = story["media_name"] story_json["source_url"] = story["media_url"] story_json["publish_date"] = story["publish_date"] story_json["title"] = story["title"] story_json["url"] = story["url"] article = article_api(story["url"]) article.download() soup = BeautifulSoup(article.html, 'html.parser') description = soup.find("meta", property="og:description")['content'] story_json["description"] = description favicon = favicon = soup.find("link", rel="icon")['href'] story_json["favicon"] = favicon article.parse() story_json["read_time"] = math.ceil( len(re.findall(r'\w+', article.text)) / 250) story_json["image"] = article.top_image story_json["article_text"] = article.text tags = [] story_tags = story['story_tags'] for tag in story_tags: one_tag = tag['tags_id'] tag_label = mc.tag(one_tag)["label"] if tag_label and "_" not in tag_label: tags.append(tag_label) story_json["tags"] = tags stories["stories"].append(story_json) new_article = Article() new_article.populate_from_mc(story_json) new_article.get_twitter_metadata() new_article.run_political_sentiment() new_article.match_source() db.session.add(new_article) db.session.commit() print(f"Added story: {story_json['title']}\n") return jsonify(fetched_stories)
def search():
    """Search view: guides the user through the scaffolded search dialogue."""
    if 'username' not in session or session['username'] is None:
        # BUG FIX: the redirect was computed but never returned, so
        # anonymous users fell straight through into the view.
        return redirect(url_for('login'))
    user = {'username': session['username']}
    submit = request.args.get('submit')
    if 'seen' not in session or 'state' not in session:
        session['state'] = '0'
        session['seen'] = ''
    if submit == 'Refresh':
        # Reset the dialogue state and remove any stale word cloud image.
        user = {'username': session['username']}
        session['state'] = '0'
        session['seen'] = ''
        wordcloud_path = os.path.join(os.getcwd(), 'app/static/wordcloud.png')
        if os.path.exists(wordcloud_path):
            os.remove(wordcloud_path)
        return render_template(
            'search.html',
            title=('Search'),
            article=[],
            user=user,
            sysout=
            "Let's start...\nQ1: Name a digital world issue that interests you and why it interests you?"
            "\nYou can be as descriptive as you wish. "
            "There is no word limit. You can provide suggestive words, write in note form, or write full sentences."
        )
    articles = []
    articlemodel = Article()
    state = session['state']
    if submit == 'Rephrase':
        # Step back one state so the same question is asked again.
        state = str(int(session['state']) - 1)
    articles, state, sysout = articlemodel.search(query=g.search_form.q.data,
                                                  state=state,
                                                  seen=session['seen'])
    session['state'] = state
    if state == 0:
        session['seen'] = ''
    elif int(state) % 2 == 0:
        # Remember which article ids the user has already been shown.
        seen = ''
        for artcl in articles:
            seen += '"' + artcl['id'] + '",'
        session['seen'] = session['seen'] + seen
    return render_template('search.html', title=('Search'), articles=articles,
                           user=user, sysout=sysout,
                           rephrase=(int(state) % 2 == 0))
def get(self):
    """Demo view: build a user-authored article with two tags, render home."""
    author = User(username='******', email='*****@*****.**')
    article = Article(title='邪神', content='威武雄壮')
    article.author = author
    # Attach both tags in one pass.
    for tag_name in ('Java', 'Python'):
        article.tags.append(Tag(name=tag_name))
    db.session.add(article)
    return render_template('home/index.html')
def setUp(self):
    '''
    Sets up the before all tests

    NOTE(review): user_id is given the User *object* itself, not its id —
    confirm whether Article expects an object here or whether this is a
    latent bug in the fixture.
    '''
    self.user_admin = User(username='******', password_hash='admin',
                           email='*****@*****.**')
    self.new_article = Article(article='article1', category='technology',
                               user_id=self.user_admin)
def create_article(): """ View function for a User to create an article :return: the view to be displayed :rtype: str """ if current_user.is_guest and g.number_of_articles >= 5: return redirect(url_for("main.index")) tmp_article = Article.query.filter_by(title = "TMP").first() if tmp_article: references = tmp_article.references.all() else: references = [] form = CreateArticle() if form.validate_on_submit(): tmp_article = Article.query.filter_by(title = "TMP").first() if not tmp_article: tmp_article = Article(title = "TMP", synthesis = "tmp", user_id = int(current_user.id)) db.session.add(tmp_article) db.session.commit() tmp_article = Article.query.filter_by(title = "TMP").first() tmp_article.title = form.title.data tmp_article.synthesis = form.synthesis.data db.session.commit() flash("L'article a bien été ajouté") return redirect(url_for("main.user_articles_list")) return render_template("main/create_article.html", title = "Créer un article", form = form, user_id = current_user.id, current_article_id = -1, references = references, submit_button_title = "Ajouter")
def setUp(self):
    '''
    Set up method to run before each test case
    '''
    # Fixture mirroring a single NewsAPI-style article record:
    # (source, title, image, description, url, published-at).
    fixture = (
        'the-next-web',
        'PlayStation\'s $30 PS4 gamepad for kids is totally adorable',
        'https://cdn0.tnwcdn.com/wp-content/blogs.dir/1/files/2017/10/PS4-Mini-gamepad-social.jpg',
        'Sony\'s teamed up with Hori on its new $30 Mini Wired Gamepad, which is designed for younger PS4 players with smaller hands.',
        'https://thenextweb.com/gaming/2017/10/19/playstations-30-ps4-gamepad-for-kids-is-totally-adorable/',
        '2017-10-19T13:00:00Z',
    )
    self.new_article = Article(*fixture)
def index():
    """Home page: full article list plus a 3-per-page pagination."""
    # Use the class-level Article.query (instantiating Article just to
    # reach .query was unnecessary); debug prints removed.
    page = request.args.get('page', 1, type=int)
    pagination = Article.query.paginate(page, per_page=3, error_out=False)
    posts = pagination.items
    articles = Article.query.all()
    return render_template('main/index.html', articles=articles, posts=posts,
                           pagination=pagination)
def new_article():
    """Render the article form; on a valid authenticated POST, save and go home."""
    form = article_form(request.form)
    if request.method == "POST":
        if form.validate_on_submit() and current_user.is_authenticated:
            author = User.objects(username=current_user.username).first()
            Article(content=form.content.data,
                    author=author,
                    author_name=current_user.username,
                    title=form.title.data,
                    create_time=datetime.utcnow()).save()
            flash('Created successfully.')
            return redirect(url_for('main.index'))
    return render_template("article/new_article.html", form=form)
def get_others():
    """List articles of type '其他' ("other"), paginated 3 per page."""
    articles = Article.query.filter_by(article_type='其他').all()
    page = request.args.get('page', 1, type=int)
    # BUG FIX: the pagination query filtered on 'java' while the full list
    # filtered on '其他' — an apparent copy-paste slip from a sibling view;
    # both queries now use '其他'. Also Article.query instead of Article().query.
    pagination = Article.query.filter_by(article_type='其他').paginate(
        page, per_page=3, error_out=False)
    posts = pagination.items
    return render_template('main/index.html', articles=articles, posts=posts,
                           pagination=pagination)
def success(request):
    """Persist an article from POSTed fields, stamping today's date."""
    today = time.strftime('%Y-%m-%d', time.localtime())
    article = Article(aname=request.POST.get('aname'),
                      acontent=request.POST.get('acontent'),
                      aauthor=request.POST.get('author'),
                      atime=today)
    article.save()
    return render(request, 'success.html')
def post(self):
    """Create an article from a JSON payload with title/source/site fields."""
    data = request.get_json() or {}
    if not all(field in data for field in ['title', 'source', 'site']):
        abort(400, message='You must have 3 fields: title, source and site.')
    # BUG FIX: the duplicate check compared the title column against
    # data['source']; it should compare against the submitted title.
    if Article.query.filter_by(title=data['title']).first():
        abort(400, message='This article already exists!')
    article = Article()
    article.from_dict(data)
    db.session.add(article)
    db.session.commit()
    return article.to_dict(), 201
def fetch_news(): """ 1) Parses the search term from the url 2) Queries the Guardian API using the term 3) Adds a selection of the results to API """ # 1) # Parse the search term from the url term = request.args.get('term') term = sp_to_plus(term) # 2) # Set up the query_url query_url = GUARDIAN_ROOT + term + '&' + GUARDIAN_FIELDS + '&' + GUARDIAN_PAGE_SIZE # Query the Guadian API try: the_request = Request(query_url) except HTTPError as e: print e.code print e.read except URLError as e: print 'We failed to reach a server.' print 'Reason: ', e.readon else: opener = build_opener() f = opener.open(the_request) json_obj = simplejson.load(f) # Parse the returned json_obj to get the first 5 resp = json_obj["response"] results = resp["results"] # 3) # Loop through a number of the results objects for result in results: # Get the fields json object fields = result["fields"] # Create and save a new article document article = Article(title=result["webTitle"], image_url=fields["thumbnail"], terms=[term]) article.save() return "Stored %d documents containing the search term: '%s'" % (len(results), term)
def add_article(article, feed):
    """Attach *article* to *feed* and persist it, unless already stored."""
    if Article.added(article):
        # Already in the database: nothing to do.
        return None
    article.feed_id = feed.id
    db.session.add(article)
    db.session.commit()
    return article
def get(self):
    """Search endpoint: return articles whose terms contain the query term."""
    term = request.args.get('term')
    # Without a term there is nothing to search for.
    if term is None:
        return "You need to specify a search term...you fool."
    normalized = sp_to_plus(term)
    matches = Article.objects(terms__icontains=normalized)
    return matches.to_json()
def teste_2():
    """Demo of the Article↔Publication many-to-many relationship."""
    from app.models import Publication, Article

    publications = []
    for pub_title in ('The Python Journal', 'Science News', 'Science Weekly'):
        pub = Publication(title=pub_title)
        pub.save()
        publications.append(pub)
    p1, p2, p3 = publications

    a1 = Article(headline='Django lets you build Web apps easily')
    # You can't associate an Article with a Publication until it's been saved.
    a1.save()
    a1.publications.add(p1)

    a2 = Article(headline='NASA uses Python')
    a2.save()
    a2.publications.add(p1, p2)

    # Emit the same seven blank lines the original printed.
    for _ in range(7):
        print()
def deploy(deploy_type):
    """Upgrade the database, then seed either production or test data."""
    from flask.ext.migrate import upgrade
    # BUG FIX: Menu and Article are used below but were missing from the
    # original import list (NameError at runtime on the test_data path).
    from app.models import BlogInfo, User, ArticleTypeSetting, Source, \
        ArticleType, Plugin, BlogView, Comment, Menu, Article

    # upgrade database to the latest version
    upgrade()

    if deploy_type == 'product':
        # step_1: insert basic blog info
        BlogInfo.insert_blog_info()
        # step_2: insert admin account
        User.insert_admin(email='*****@*****.**', username='******',
                          password='******', userlevel='admin')
        # step_3: insert system default setting
        ArticleTypeSetting.insert_system_setting()
        # step_4: insert default article sources
        Source.insert_sources()
        # step_5: insert default articleType
        ArticleType.insert_system_articleType()
        # step_6: insert system plugin
        Plugin.insert_system_plugin()
        # step_7: insert blog view
        BlogView.insert_view()

    # You must run `python manage.py deploy(product)` before you run
    # `python manage.py deploy(test_data)`
    if deploy_type == 'test_data':
        # step_1: insert navs
        Menu.insert_menus()
        # step_2: insert articleTypes
        ArticleType.insert_articleTypes()
        # step_3: generate random articles
        Article.generate_fake(100)
        # step_4: generate random comments
        Comment.generate_fake(300)
        # step_5: generate random replies
        Comment.generate_fake_replies(100)
        # BUG FIX: a duplicated trailing Comment.generate_fake(300)
        # (mislabelled "step_4") generated 300 extra comments; removed.
def news_index(page=1):
    """News listing (5 per page) plus admin-only article posting.

    GET renders the paginated list; POST (admin only) validates the form
    and creates a new Article owned by the current user.
    """
    articles_query = Article.query.order_by('id desc')
    articles_paginate = articles_query.paginate(page, 5, False)
    articles = articles_paginate.items
    article_form = ArticleForm()
    if request.method == 'GET':
        # Requesting a page past the end bounces back to /news.
        if articles_paginate.pages != 0 and articles_paginate.pages < page:
            return redirect('/news')
        return render_template('news.html', article_form=article_form,
                               articles=articles)
    else:
        # POST: only a logged-in admin may create articles.
        if logged_in() and admin():
            if article_form.validate_on_submit():
                user = current_user()
                new_article = Article(request.form['title'], request.form['body'])
                new_article.user_id = user.id
                user.articles.append(new_article)
                db.session.add(new_article)
                db.session.commit()
                flash('Article posted successfully', 'success')
                return redirect('/news')
            else:
                flash_errors(article_form)
        return render_template('news.html', article_form=article_form,
                               articles=articles)
def write(request):
    """Admin-only article composer: save on POST, otherwise show the form."""
    if request.session.get('username') != 'admin':
        info = "你不是小张!不过可以<a href='../about/'>查看小张</a>"
        return HttpResponse(info)
    if request.method == 'POST':
        article = Article()
        article.title = request.POST['title']
        article.body = request.POST['body']
        article.author = request.POST['author']
        article.create_time = datetime.now()
        article.save()
        return render(request, 'index.html',
                      context_instance=RequestContext(request))
    return render(request, 'write.html',
                  context_instance=RequestContext(request))
def fetch_reddit(subreddits=('Foodforthought', 'YouShouldKnow', 'DepthHub', 'TrueReddit')):
    """Fetch new links from the given subreddits and store unseen articles.

    :param subreddits: iterable of subreddit names (default: curated list).

    The default is a tuple rather than the original mutable list to avoid
    the shared-mutable-default pitfall; any iterable still works.
    """
    api = requests.Session()
    api.headers.update({'User-Agent': USER_AGENT})
    for subreddit in subreddits:
        r = api.get('http://www.reddit.com/r/%s/new.json?sort=new' % subreddit)
        for link in r.json()['data']['children']:
            data = link['data']
            try:
                article = Article.objects.get(url=data['url'])
            except Article.DoesNotExist:
                # Unseen URL: fetch and store its readable content.
                article = Article()
                content = fetch_article(data['url'])
                if not content:
                    continue
                article.url = data['url']
                article.title = data['title']
                article.content = content
                article.save()
def add_link(link, liked):
    """Store a bookmarked *link* as an Article and mark it read/liked.

    NOTE(review): `user` is neither a parameter nor a local — this relies
    on a module-level `user` being in scope (or is a latent NameError);
    confirm at the call site.
    """
    try:
        article = Article.objects.get(url=link['url'])
    except Article.DoesNotExist:
        # Unseen URL: fetch and store its readable content.
        article = Article()
        content = fetch_article(link['url'])
        if not content:
            # Could not extract readable content: skip the link entirely.
            return
        article.url = link['url']
        article.title = link['title']
        article.content = content
        article.save()
    personal, created = PersonalArticle.objects.get_or_create(article=article,
                                                              user=user)
    if created:
        personal.read = True
        personal.liked = liked
        personal.save()
def import_pinboard(username, password, user):
    """Import all Pinboard bookmarks for *user*, storing each as an Article.

    :param username: Pinboard account name.
    :param password: Pinboard password (HTTP Basic auth).
    :param user: local user the imported articles are attached to.
    """
    # SECURITY FIX: use https — the original sent Basic-auth credentials
    # over plain http.
    api = requests.get('https://api.pinboard.in/v1/posts/all?format=json',
                       auth=requests.auth.HTTPBasicAuth(username, password))
    for bookmark in api.json():
        try:
            article = Article.objects.get(url=bookmark['href'])
        except Article.DoesNotExist:
            # Unseen URL: fetch and store its readable content.
            article = Article()
            content = fetch_article(bookmark['href'])
            if not content:
                continue
            article.url = bookmark['href']
            article.title = bookmark['description']
            article.content = content
            article.save()
        personal, created = PersonalArticle.objects.get_or_create(article=article,
                                                                  user=user)
        if created:
            # Pinboard marks still-unread items with toread == 'yes'.
            personal.read = bookmark['toread'] == 'no'
            personal.liked = True
            personal.save()
def search_page(request):
    """Render the search results table.

    (The original's unused local ``name = 'Rohit'`` is removed.)
    """
    t = get_template('search.html')
    nl_table = Article.search()
    html = t.render(Context({'nl_table': nl_table}))
    return HttpResponse(html)
# --- Fragment: parses one article record from `contents` and stores it.
# The enclosing definition is not visible here; `contents`, `index`,
# `title`, `short_title`, `body`, `short_body`, `type_name`, `category`,
# `content_container_short_title`, `minipodrecznik`, `paradoksy`, `blog`
# and `piskorski` come from the surrounding scope.
kontener = minipodrecznik
if content_container_short_title == "paradoksy":
    kontener = paradoksy
if content_container_short_title == "blog":
    kontener = blog
temp = ""
index += 1
# Accumulate lines until the "*-*-" delimiter: the page-number field.
while contents[index] != "*-*-\n":
    temp = temp + contents[index]
    index += 1
page_number = temp
temp = ""
index += 1
# Next delimited field: the article date in dd/mm/yyyy format.
while contents[index] != "*-*-\n":
    temp = temp + contents[index]
    index += 1
date_time = strptime(temp.strip(), '%d/%m/%Y')
# Convert from struct_time to datetime, which SQLAlchemy accepts.
date_time = datetime(*date_time[:6])
temp = ""
article = Article(title=title, short_title=short_title, body=body,
                  type_name=ArticleType(type_name=type_name),
                  category=Category(category_name=category),
                  page_number=int(page_number), content_container=kontener,
                  date_time = date_time, short_body=short_body)
article.user = piskorski
db.session.add(article)
db.session.commit()
def get(self, object_id):
    """Return the article with primary key *object_id* as JSON."""
    return Article.objects(pk=object_id).to_json()
def test_str(self):
    """Check that str(Article) renders a complete BibTeX @article entry.

    Builds an Article field-by-field from a real IEEE record and compares
    its string form against the expected BibTeX output, fields in
    alphabetical order.
    """
    article = Article()
    article.entry_number = '7399422'
    article.author = 'R. Atia and N. Yamada'
    article.journal = 'IEEE Transactions on Smart Grid'
    article.title = 'Sizing and Analysis of Renewable Energy and Battery Systems in Residential Microgrids'
    article.year = '2016'
    article.volume = '7'
    article.number = '3'
    article.pages = '1204-1213'
    article.abstract = 'Accelerated development of eco-friendly technologies such as renewable energy smart grids and electric transportation will shape the future of electric power generation and supply. Accordingly the power consumption characteristics of modern power systems are designed to be more flexible which impact the system sizing. However integrating these considerations into the design stage can be complex. Under these terms this paper presents a novel model based on mixed integer linear programming for the optimization of a hybrid renewable energy system with a battery energy storage system in residential microgrids in which the demand response of available controllable appliances is coherently considered in the proposed optimization problem with reduced calculation burdens. The model takes into account the intrinsic stochastic behavior of renewable energy and the uncertainty involving electric load prediction and thus proper stochastic models are considered. This paper investigates the effect of load flexibility on the component sizing of the system for a residential microgrid in Okinawa. Also under consideration are different operation scenarios emulating technical limitations and several uncertainty levels.'
    article.keyword = 'battery storage plants;demand side management;distributed power generation;hybrid power systems;integer programming;linear programming;load forecasting;renewable energy sources;smart power grids;Okinawa;battery energy storage system;battery systems;demand response;eco-friendly technologies;electric load prediction;electric power generation;electric transportation;hybrid renewable energy system;load flexibility;mixed integer linear programming;power systems;residential microgrids;smart grids;Batteries;Home appliances;Load modeling;Microgrids;Optimization;Renewable energy sources;Stochastic processes;Design optimization;demand response;hybrid power systems;microgrids;performance analysis'
    article.doi = '10.1109/TSG.2016.2519541'
    article.issn = '1949-3053'
    # Expected BibTeX rendering: key = entry_number, fields alphabetized.
    benchmark = '@article{7399422,\n' \
        ' abstract = {Accelerated development of eco-friendly technologies such as renewable energy smart grids and electric transportation will shape the future of electric power generation and supply. Accordingly the power consumption characteristics of modern power systems are designed to be more flexible which impact the system sizing. However integrating these considerations into the design stage can be complex. Under these terms this paper presents a novel model based on mixed integer linear programming for the optimization of a hybrid renewable energy system with a battery energy storage system in residential microgrids in which the demand response of available controllable appliances is coherently considered in the proposed optimization problem with reduced calculation burdens. The model takes into account the intrinsic stochastic behavior of renewable energy and the uncertainty involving electric load prediction and thus proper stochastic models are considered. This paper investigates the effect of load flexibility on the component sizing of the system for a residential microgrid in Okinawa. Also under consideration are different operation scenarios emulating technical limitations and several uncertainty levels.},\n' \
        ' author = {R. Atia and N. Yamada},\n' \
        ' doi = {10.1109/TSG.2016.2519541},\n' \
        ' issn = {1949-3053},\n' \
        ' journal = {IEEE Transactions on Smart Grid},\n' \
        ' keyword = {battery storage plants;demand side management;distributed power generation;hybrid power systems;integer programming;linear programming;load forecasting;renewable energy sources;smart power grids;Okinawa;battery energy storage system;battery systems;demand response;eco-friendly technologies;electric load prediction;electric power generation;electric transportation;hybrid renewable energy system;load flexibility;mixed integer linear programming;power systems;residential microgrids;smart grids;Batteries;Home appliances;Load modeling;Microgrids;Optimization;Renewable energy sources;Stochastic processes;Design optimization;demand response;hybrid power systems;microgrids;performance analysis},\n' \
        ' number = {3},\n' \
        ' pages = {1204-1213},\n' \
        ' title = {Sizing and Analysis of Renewable Energy and Battery Systems in Residential Microgrids},\n' \
        ' volume = {7},\n' \
        ' year = {2016}\n' \
        '}\n\n'
    self.assertEqual(str(article), benchmark)
def list_article(page):
    """Render the admin article list for *page* with all categories."""
    return render_template("admin/listarticle.html",
                           article=Article.all_page(page),
                           keyword=None,
                           categorys=Category.query.all())
def remove_article(article):
    """Delete *article* from the database if it is known to the store."""
    if not Article.added(article):
        # Unknown article: nothing to delete.
        return
    db.session.delete(article)
    db.session.commit()
import app
from app.models import Article

# WARNING: destructive — removes the entire Article MongoDB collection.
# Used to reset the article data store.
Article.drop_collection()