def setUp(self):
    """Build the shared test fixture: 2 authors, 7 articles, 3 tags."""
    self.au1 = Author(name='Author 1')
    self.au1.save()
    self.au2 = Author(name='Author 2')
    self.au2.save()
    # (attribute, headline, pub_date, author) for each Article, saved in order.
    specs = [
        ('a1', 'Article 1', datetime(2005, 7, 26), self.au1),
        ('a2', 'Article 2', datetime(2005, 7, 27), self.au1),
        ('a3', 'Article 3', datetime(2005, 7, 27), self.au1),
        ('a4', 'Article 4', datetime(2005, 7, 28), self.au1),
        ('a5', 'Article 5', datetime(2005, 8, 1, 9, 0), self.au2),
        ('a6', 'Article 6', datetime(2005, 8, 1, 8, 0), self.au2),
        ('a7', 'Article 7', datetime(2005, 7, 27), self.au2),
    ]
    for attr, headline, when, who in specs:
        article = Article(headline=headline, pub_date=when, author=who)
        article.save()
        setattr(self, attr, article)
    # Tags and the articles each one covers.
    self.t1 = Tag(name='Tag 1')
    self.t1.save()
    self.t1.articles.add(self.a1, self.a2, self.a3)
    self.t2 = Tag(name='Tag 2')
    self.t2.save()
    self.t2.articles.add(self.a3, self.a4, self.a5)
    self.t3 = Tag(name='Tag 3')
    self.t3.save()
    self.t3.articles.add(self.a5, self.a6, self.a7)
def edit_book(id):
    """Edit an existing book's title and author list.

    Redirects to the book list when the id is unknown.  On a valid POST,
    renames the book (refusing a title already used by a different book)
    and replaces its author list with the existing authors named in the
    form; unknown author names are silently dropped, as before.

    Fixes: 'is None' instead of '== None'; removed Python 2 debug prints
    and the commented-out dead code.
    """
    book = Book.query.get(id)
    if book is None:
        flash("Book is not found")
        return redirect(url_for('books'))
    form = EditBook(book=book)
    if form.validate_on_submit():
        name = form.data['title']
        existing = Book.query.filter_by(name=name).first()
        if existing and id != existing.id:
            flash("Can't change name to existing one.")
            return redirect(url_for('edit_book', id=id))
        book.name = name
        # Resolve each submitted name to its Author row, deduplicating.
        authors = list(set(Author.by_name(name=a)
                           for a in form.data['authors']
                           if Author.exists(name=a)))
        book.authors = authors
        db.session.commit()
        flash('Changes have been saved.')
        return redirect(url_for('edit_book', id=id))
    return render_template('edit_book.html', form=form)
def new_session(request):
    """Create an Author from GET parameters and cache it in the session.

    NOTE(review): the password is stored as given (plain text) and a GET
    request mutates state -- worth confirming upstream.
    """
    author = Author(name=request.GET['name'],
                    email=request.GET['email'],
                    password=request.GET['password'])
    author.save()
    request.session['author_id'] = author.id
    request.session['author_name'] = author.name
    request.session['author_email'] = author.email
    return render(request, '../../demo/templates/nsession.html', {"author": author})
def setUp(self):
    """Populate the database with the fixture shared by the tests."""
    self.au1 = Author(name="Author 1")
    self.au1.save()
    self.au2 = Author(name="Author 2")
    self.au2.save()
    # Articles: (attribute name, headline, publication date, author).
    article_rows = [
        ("a1", "Article 1", datetime(2005, 7, 26), self.au1),
        ("a2", "Article 2", datetime(2005, 7, 27), self.au1),
        ("a3", "Article 3", datetime(2005, 7, 27), self.au1),
        ("a4", "Article 4", datetime(2005, 7, 28), self.au1),
        ("a5", "Article 5", datetime(2005, 8, 1, 9, 0), self.au2),
        ("a6", "Article 6", datetime(2005, 8, 1, 8, 0), self.au2),
        ("a7", "Article 7", datetime(2005, 7, 27), self.au2),
    ]
    for name, headline, published, writer in article_rows:
        row = Article(headline=headline, pub_date=published, author=writer)
        row.save()
        setattr(self, name, row)
    # Tags, each linked to three of the articles above.
    self.t1 = Tag(name="Tag 1")
    self.t1.save()
    self.t1.articles.add(self.a1, self.a2, self.a3)
    self.t2 = Tag(name="Tag 2")
    self.t2.save()
    self.t2.articles.add(self.a3, self.a4, self.a5)
    self.t3 = Tag(name="Tag 3")
    self.t3.save()
    self.t3.articles.add(self.a5, self.a6, self.a7)
def update_books(books = get_books()): opener = urllib2.build_opener() opener.addheaders = [('User-agent', 'Mozilla/5.0')] for book in books: try: b = Book.objects.filter(title=book['title']).count() print '>>>', b if not b: b = Book() b.title = book['title'] author = book['author'] last_name = author.split(' ')[-1] first_name = ' '.join(author.split(' ')[:-1]) try: author = Author.objects.get(first_name=first_name, last_name=last_name) except: author = Author(first_name=first_name, last_name=last_name) author.save() b.author = author b.external_url = 'http://en.wikipedia.org'+book['link'] try: content = opener.open('http://en.wikipedia.org'+book['link']).read() s = Soup(content) info = s.find('table', {'class':'infobox'}) img = info.find('img') if img: b.image = 'http:'+img.get('src') except: print "IMAGE FAILED FOR", book b.save() except Exception, e: print e print "WOAH TOTAL FAILURE", book
def copyFromArticles(self, request):
    """Copy every legacy ``Articles`` entity into the new ``Article`` and
    ``Author`` kinds.

    Requires an authenticated endpoints user; raises UnauthorizedException
    otherwise.  Returns BooleanMessage(data=True) when the loop completes.
    """
    user = endpoints.get_current_user()
    if not user:
        raise endpoints.UnauthorizedException('Authorization required')
    for article in Articles().all():
        # Legacy rows sometimes hold a bare username; normalise to an email
        # (assumed to be a gmail address -- TODO confirm this convention).
        if '@' not in article.author:
            author_email = article.author + '@gmail.com'
        else:
            author_email = article.author
        # The email doubles as the Author key name.
        a_key = ndb.Key(Author, author_email)
        author = a_key.get()
        # create new Author if not there
        if not author:
            author = Author(
                key=a_key,
                authorID=str(Author.allocate_ids(size=1)[0]),
                displayName=author_email.split('@')[0],
                mainEmail=author_email,
            )
            author.put()
        self.copyArticlesKind(article, author)
    return BooleanMessage(data=True)
def insert_paper():
    """Create a Paper, its authors, and its DOI references from the JSON body.

    A paper whose DOI already exists is left untouched.  The request JSON is
    echoed back either way.
    """
    if request.method == 'POST':
        paper = db.session.query(Paper).filter_by(doi=request.json['doi']).first()
        if not paper:
            paper = Paper(year=request.json['year'],
                          title=request.json['title'],
                          abstract=request.json['abstract'],
                          user_id=g.user.id,
                          doi=request.json['doi'])
            db.session.add(paper)
            # flush so the paper gets a primary key before rows link to it
            db.session.flush()
            for author in request.json['authors']:
                paper_author = db.session.query(Author).filter_by(name=author).first()
                if paper_author:
                    paper_author.start_owning(paper)
                else:
                    paper_author = Author(name=author)
                    db.session.add(paper_author)
                    db.session.flush()
                    paper_author.start_owning(paper)
            db.session.commit()
            for doi in request.json['doi_refs']:
                ref_paper = db.session.query(Paper).filter_by(doi=doi).first()
                if ref_paper:
                    paper.start_referencing(ref_paper)
                else:
                    # Stub row carrying only the DOI; details filled in later.
                    ref_paper = Paper(doi=doi)
                    db.session.add(ref_paper)
                    db.session.flush()
                    paper.start_referencing(ref_paper)
            db.session.commit()
    return json.dumps(dict(data=request.json))
def make_author(author_link, commit=True):
    """Build an Author entity from an anchor tag, optionally persisting it.

    The numeric part of the link's href becomes the datastore key id and the
    link text becomes the username.
    """
    author_id = find_int(author_link['href'])
    name = unicode(author_link.string)
    entity_key = db.Key.from_path('Author', int(author_id))
    entity = Author(key=entity_key, username=name)
    if commit:
        entity.put()
    return entity
def testBasicModelPKCS7(self):
    """Sign a saved model with PKCS#7/S-MIME and verify the signature."""
    author = Author(name="Raymond E. Feist", title="MR")
    author.save()
    signed = self.c_cert.sign_model(author, self.c_pwd)
    self.assertTrue(self.c_cert.verify_smime(signed))
def update_author(author_id):
    """Partially update the author identified by ``author_id``.

    Fields present in the JSON body overwrite the stored ones; all other
    fields are carried over unchanged.
    """
    author = Author.objects(id=author_id).get()
    merged = dict(author.to_dict())
    merged.update(request.get_json())
    patched = Author(**merged)
    patched.save()
    return jsonify(patched.to_dict())
def addauthor(request):
    """Create an Author from POSTed fields, then re-render the form."""
    if request.POST:
        fields = request.POST
        Author(Name=fields["name"],
               Age=fields["age"],
               Country=fields["country"]).save()
    return render_to_response('addauthor.html',
                              context_instance=RequestContext(request))
def delete_project(project_id):
    """Delete the project with id ``project_id`` and detach it from its author."""
    proj = Project.objects(id=project_id).get()
    proj.delete()
    # Keep the author's project list consistent with the deletion.
    Author.objects(id=proj.author_id).update_one(pull__projects=proj.id)
    payload = proj.to_dict()
    return jsonify(payload)
def add_author(request):
    """Create an Author from POSTed fields; all four fields are required.

    FIX: corrected the grammar of the validation message ("Please full all
    information." -> "Please fill in all information.").
    """
    if request.POST:
        post = request.POST
        if post["AuthorID"] and post["Name"] and post["Age"] and post["Country"]:
            new_author = Author(AuthorID=post["AuthorID"],
                                Name=post["Name"],
                                Age=post["Age"],
                                Country=post["Country"])
            new_author.save()
        else:
            return HttpResponse("Please fill in all information.")
    return render_to_response("add_author.html")
def worker_authors(request):
    """Task-queue worker: scrape a repository's GitHub network metadata and
    register each distinct commit author as User/Author rows, queueing one
    follow-up task per author to resolve their email.

    Returns a plain-text HttpResponse; bad repositories are skipped.
    """
    r = Repository.get(db.Key(request.POST["repo"]))
    logging.info("processing repository: %s" % r.name)
    base_url = "http://github.com/%s/%s" % (r.owner.name, r.name)
    url = base_url + "/network_meta"
    logging.info(" downloading network_meta from: %s" % url)
    try:
        s = urllib2.urlopen(url).read()
    except urllib2.HTTPError:
        logging.info("Probably bad repo, skipping.")
        return HttpResponse("Probably bad repo, skipping.\n")
    logging.info(" network_meta loaded")
    try:
        data = simplejson.loads(s)
    except ValueError:
        logging.info("Probably bad repo, skipping.")
        return HttpResponse("Probably bad repo, skipping.\n")
    logging.info(" network_meta parsed")
    dates = data["dates"]
    nethash = data["nethash"]
    # Fetch every commit chunk in one request (start=0 .. end=len-1).
    url = "%s/network_data_chunk?nethash=%s&start=0&end=%d" % (base_url, nethash, len(dates)-1)
    logging.info(" downloading commits from: %s" % url)
    s = urllib2.urlopen(url).read()
    logging.info(" parsing commits...")
    data = simplejson.loads(s, encoding="latin-1")
    logging.info(" processing authors...")
    commits = data["commits"]
    # Map author name -> one representative commit sha (later wins).
    m = [(x["author"], x["id"]) for x in commits]
    m = dict(m)
    logging.info(m)
    authors = m.keys()
    authors = list(set(authors))
    authors.sort()
    logging.info(authors)
    queue = get_github_queue()
    for author in authors:
        q = User.gql("WHERE name = :1", author)
        u = q.get()
        if u is None:
            # Placeholder email; the queued task resolves the real one.
            u = User(name=author, email="None")
            u.save()
        task = taskqueue.Task(url="/hooks/worker/user_email/", params={
            'user': u.key(),
            'r_user_id': r.owner.name,
            'r_repository': r.name,
            'r_sha': m[u.name]
        })
        queue.add(task)
        # Link the user to this repository exactly once.
        q = Author.gql("WHERE user = :1 AND repo = :2", u, r)
        a = q.get()
        if a is None:
            a = Author(repo=r, user=u)
            a.save()
    logging.info(" done.")
    return HttpResponse("OK\n")
def dispatch_request(self):
    """Render the author-creation form; persist a new Author on valid POST."""
    form = AuthorForm()
    form.books.query = Book.query.all()
    # Short-circuit: validate_on_submit only runs for POSTs.
    if request.method == "POST" and form.validate_on_submit():
        author = Author()
        form.populate_obj(author)
        author.save()
        return redirect("/authors/")
    return render_template("author_add.html", form=form)
def author_add():
    """Create an author by name (query-string parameter) if it does not exist.

    BUG FIX: the original only bound ``author`` inside the creation branch,
    so rendering raised NameError whenever the name was missing or already
    taken.  The existing row (or None) is now passed to the template, and
    the Python 2 debug prints were removed.
    """
    name = request.args.get('author')
    author = Author.by_name(name) if name else None
    if name and author is None:
        author = Author(name)
        db.session.add(author)
        db.session.commit()
    return render_template('author.html', author=author)
def author_add(request):
    """Persist an Author built from the POSTed form, then show the form again."""
    if request.POST:
        fields = request.POST
        author = Author(AuthorID=fields["AuthorID"],
                        Name=fields["Name"],
                        Age=fields["Age"],
                        Country=fields["Country"])
        author.save()
    return render_to_response("add_author.html")
def create(request):
    """Create an author from the POSTed name and echo back its datastore id."""
    name = request.POST["name"]
    new_id = Author(name=name).put().id()
    return HttpResponse("Created an author: %s %s " % (name, new_id))
def submitauthor(request):
    """Save a new Author from the submitted form, then go to the author list."""
    if request.POST:
        form = request.POST
        author = Author(AuthorID=form["id"],
                        Name=form["name"],
                        Age=form["age"],
                        Country=form["country"])
        author.save()
    return HttpResponseRedirect('/authorlist/')
def add_author(request):
    """Add an author from POSTed name/age/country fields."""
    if request.POST:
        fields = request.POST
        record = Author(name=fields['name'],
                        age=fields['age'],
                        country=fields['country'])
        record.save()
    return render_to_response("add_author.html")
def dbSavePapersAndAuthors(papers, latestMailing=True):
    """Saves an array of paper information into the database.  Returns the
    numbers of new papers and new authors added.

    If latestMailing is true, the paper dates are set to either today or
    tomorrow regardless of the date from the arXiv: today if run before
    8pm ET, tomorrow otherwise.  The intent is that this runs nightly when
    the mailing goes out -- late in the day the mailing carries tomorrow's
    date; early in the day (a delayed run) it was sent out yesterday and is
    for today.
    """
    if latestMailing:
        latestMailingDate = datetime.date.today()
        now = datetime.datetime.now(pytz.timezone('US/Eastern'))
        cutoff = now.replace(hour=20, minute=0, second=0, microsecond=0)
        if now > cutoff:
            latestMailingDate += datetime.timedelta(days=+1)
    # note: the official mailing date is the day the email goes out, a few
    # hours after the paper was made available
    numNewPapersAdded = numNewAuthorsAdded = 0
    for paper in papers:
        authors = []
        for author in paper['authors']:
            authorsWithSameName = Author.objects.filter(name=author)
            if authorsWithSameName:
                # author with same name already exists -- don't add a duplicate;
                # there might already be duplicates, take the first (maybe fix later)
                a = authorsWithSameName[0]
            else:
                a = Author(name=author)
                a.save()
                numNewAuthorsAdded += 1
            authors.append(a)
        if Paper.objects.filter(arxivId=paper['arxivId']):
            # NOTE: if we make a mistake adding the paper the first time, this
            # keeps the code below from ever running again to fix it
            continue
        if latestMailing:
            mailing_date = latestMailingDate
        else:
            mailing_date = mailingDate(paper['datePublished'])
        p = Paper(
            arxivId=paper['arxivId'],
            title=paper['title'],
            abstract=paper['abstract'],
            date_published=paper['datePublished'],
            date_mailed=mailing_date,
            # authors = authors,  # ManyToManyField is set up later
            category=paper['category'],
            categories=paper['categories'],
            version=paper['version'],
            linkAbsPage=paper['linkAbsPage'],
            linkPdf=paper['linkPdf']
        )
        p.save()  # need to save before setting up the ManyToMany field of authors
        for author in authors:
            # alternatively, to clear a ManyToMany field, use p.authors.clear()
            p.authors.add(author)
        p.save()
        numNewPapersAdded += 1
    print "%d new papers, %d new authors added" % (numNewPapersAdded, numNewAuthorsAdded)
    return numNewPapersAdded, numNewAuthorsAdded
def addauthor(request):
    """On POST, store a new Author and redirect; otherwise show the form."""
    if request.POST:
        data = request.POST
        Author(AuthorID=data["authorID"],
               Name=data["name"],
               Age=data["age"],
               Country=data["country"]).save()
        return HttpResponseRedirect("/add/")
    return render_to_response('addauthor.html')
def authors():
    """List authors (paginated GET) or create one from the JSON body.

    GET accepts optional pagination parameters ``page`` and ``per_page``
    (defaulting to 1 and 10).  POST returns the new author with status 201.
    """
    if request.method == 'GET':
        return paginate(resource_name='authors', endpoint='authors',
                        objects=Author.objects)
    new_author = Author(**request.get_json())
    new_author.save()
    return jsonify(new_author.to_dict()), 201
def addauthor(req):
    """Create an Author on POST; render with the current author queryset."""
    if req.POST:
        data = req.POST
        author = Author(AuthorID=data["authorID"],
                        Name=data["name"],
                        Age=data["age"],
                        Country=data["country"],)
        author.save()
        count = Author.objects.all()
        return render_to_response("addsucess.html", {"count": count})
    count = Author.objects.all()
    return render_to_response("addauthor.html", {"count": count})
def authorinsert(request):
    """Insert a new author from POSTed form data."""
    if request.POST:
        data = request.POST
        Author(AuthorID=data["AuthorID"],
               Name=data["Name"],
               Country=data["Country"],
               Age=data["Age"]).save()
    return render_to_response('add_author.html',
                              context_instance=RequestContext(request))
def addAuthors(repo_id):
    """Scan the last 10 commits on master and register any unseen authors."""
    deployment = DeployA.objects.get(pk=repo_id)
    git_repo = Repo(deployment.repository.location)
    for commit in git_repo.iter_commits('master', max_count=10):
        # Skip authors we have already recorded.
        if Author.objects.filter(email=commit.author.email):
            continue
        author = Author()
        author.email = commit.author.email
        author.name = commit.author.name
        print(author.email)
        print(author.name)
        author.save()
        deployment.authors.add(author)
        deployment.save()
def add_author(request):
    """Store an Author when the POST carries an AuthorID; otherwise show the form."""
    if 'AuthorID' in request.POST and request.POST:
        data = request.POST
        Author(AuthorID=int(data['AuthorID']),
               Name=data['Name'],
               Age=data['Age'],
               Country=data['Country']).save()
        book_list = Book.objects.all()
        return render_to_response("AllInformation.html", {"book_list": book_list})
    save_author = Author.objects.all()
    return render_to_response("AddAuthor.html", {"save_author": save_author})
def new_author(request):
    """Create an author from GET parameters, rendering a feedback page.

    FIX: replaced the two bare ``except:`` clauses with explicit exception
    types so unrelated errors (SystemExit, KeyboardInterrupt, programming
    bugs) are no longer swallowed silently.  MultiValueDictKeyError is a
    KeyError subclass, so a missing 'name' parameter still follows the
    original "does not exist yet" path.
    """
    t = get_template('feedback.html')
    try:
        Author.objects.get(name=request.GET['name'])
        html = t.render(Context({'text': '作家已经存在'}))
    except (Author.DoesNotExist, KeyError):
        try:
            author = Author(name=request.GET['name'],
                            age=request.GET['age'],
                            country=request.GET['country'])
            author.save()
            html = t.render(Context({'text': '操作成功'}))
        except Exception:
            html = t.render(Context({'text': '操作失败'}))
    return HttpResponse(html)
def admin_changepass():
    """Change the admin (Author id=1) password after verifying the old one.

    BUG FIX: peewee's ``.where()`` takes expression arguments, not keyword
    arguments -- ``where(id=1)`` raised TypeError, so the password was never
    updated.  It is now ``where(Author.id == 1)``.
    """
    oldpass = request.forms.get('oldpass').decode('utf-8')
    pass1 = request.forms.get('newpass').decode('utf-8')
    pass2 = request.forms.get('newpass2').decode('utf-8')
    try:
        user = Author.get(id=1)
    except Author.DoesNotExist:
        # bottle's redirect() raises, aborting the handler here.
        redirect('/admin')
    if is_password(oldpass, user.password) and pass1 == pass2:
        newpass = hexpassword(pass1)
        Author.update(password=newpass).where(Author.id == 1).execute()
        redirect('/admin')
    redirect('/admin/settings')
def addauth(request):
    """Create an Author from the POSTed form and redirect to the add page.

    BUG FIX: ``Name`` was assigned from an undefined variable ``Gname``,
    which raised NameError on every POST; it now reads the posted "Name"
    field like the other capitalised keys in this form.
    """
    if request.POST:
        fields = request.POST
        author = Author(AuthorID=fields["AuthorID"],
                        Name=fields["Name"],
                        Age=fields["Age"],
                        Country=fields["Country"])
        author.save()
        List_Au[0] = fields["AuthorID"]
        return HttpResponseRedirect(reverse('BookDB_add'))
    return render_to_response('addauth.html', {'form': AddForm_Author()})
def before_request():
    # Resolve the current Author once per request and stash it on Flask's
    # request-global object for downstream handlers.
    g.user = Author.get_current()
def add_article():
    """List (GET) or upsert (POST) research publications for a given year.

    GET returns the year's publications (query param ``year``, defaulting to
    the current year) with their authors.  POST upserts a publication by
    scopus_id, along with its subject areas, affiliations, countries, and
    authors.

    BUG FIX: in the update branch, ``pub.cover_date = datetime...strptime(...),``
    had a trailing comma that assigned a 1-tuple instead of a datetime; the
    comma is removed.
    """
    current_year = request.args.get('year')
    if not current_year:
        current_year = datetime.datetime.today().year
    else:
        current_year = int(current_year)
    if request.method == 'GET':
        articles = []
        for ar in ResearchPub.query.filter(extract('year', ResearchPub.cover_date) == current_year)\
                .order_by(ResearchPub.cover_date.desc()):
            authors = []
            for au in ar.authors:
                authors.append({
                    'id': au.id,
                    'personal_info_id': au.personal_info_id,
                    'firstname': au.firstname,
                    'lastname': au.lastname
                })
            articles.append({
                'id': ar.scopus_id,
                'title': ar.title,
                'cover_date': ar.cover_date,
                'citedby_count': ar.citedby_count,
                'scopus_link': ar.scopus_link,
                'publication_name': ar.publication_name,
                'doi': ar.doi,
                'authors': authors,
                'abstract': ar.abstract,
            })
        return jsonify(articles)
    if request.method == 'POST':
        data = request.get_json()
        pub = ResearchPub.query.filter_by(scopus_id=data['scopus_id']).first()
        if not pub:
            pub = ResearchPub(scopus_id=data.get('scopus_id'),
                              citedby_count=data.get('citedby_count'),
                              title=data.get('title'),
                              cover_date=datetime.datetime.strptime(
                                  data.get('cover_date'), '%Y-%m-%d'),
                              abstract=data.get('abstract'),
                              doi=data.get('doi'),
                              scopus_link=data.get('scopus_link'),
                              publication_name=data.get('publication_name'))
        else:
            # update the citation number and cover date because they can change
            pub.citedby_count = data.get('citedby_count')
            pub.cover_date = datetime.datetime.strptime(
                data.get('cover_date'), '%Y-%m-%d')
        for subj in data['subject_areas']:
            s = SubjectArea.query.get(subj['code'])
            if not s:
                s = SubjectArea(id=subj['code'],
                                area=subj['area'],
                                abbr=subj['abbreviation'])
                db.session.add(s)
            pub.areas.append(s)
        for author in data['authors']:
            scopus_id = ScopusAuthorID.query.get(author.get('author_id'))
            personal_info = StaffPersonalInfo.query\
                .filter_by(en_firstname=author['firstname'],
                           en_lastname=author['lastname']).first()
            if author.get('afid'):
                affil = Affiliation.query.get(author.get('afid'))
            else:
                affil = None
            country = Country.query.filter_by(
                name=author.get('country', 'Unknown')).first()
            if not country:
                country = Country(name=author.get('country', 'Unknown'))
                db.session.add(country)
            if not affil and author.get('afid'):
                affil = Affiliation(id=author.get('afid'),
                                    name=author.get('afname', 'Unknown'),
                                    country=country)
                db.session.add(affil)
            if scopus_id:
                if author.get('afid'):
                    # update the current affiliation
                    scopus_id.author.affil_id = author.get('afid')
            else:
                scopus_id = ScopusAuthorID(id=author.get('author_id'))
                author_ = Author.query.filter_by(
                    firstname=author.get('firstname'),
                    lastname=author.get('lastname')).first()
                if not author_:
                    author_ = Author(firstname=author.get('firstname'),
                                     lastname=author.get('lastname'),
                                     affil_id=author.get('afid'),
                                     h_index=int(author.get('h_index'))
                                     if author.get('h_index') else None,
                                     personal_info=personal_info)
                scopus_id.author = author_
                scopus_id.author.h_index = int(
                    author.get('h_index')) if author.get('h_index') else None
            pub.authors.append(scopus_id.author)
            db.session.add(scopus_id)
        print('saving publication {}'.format(pub.title[:30]))
        db.session.add(pub)
        db.session.commit()
        return jsonify(data)
def copyArticlesKind(self, article, author):
    """Create new Article and Comment objects from an old Articles object.

    Skips the copy (returns None) when an entity already exists at the
    freshly allocated key.  Comments are stored pickled on the legacy row
    as (text, author_email, date) triples and are unpickled via loads().
    """
    article_id = Article.allocate_ids(size=1, parent=author.key)[0]
    article_key = ndb.Key(Article, article_id, parent=author.key)
    a = article_key.get()
    if a:
        return
    # copy ArticleForm/ProtoRPC Message into dict
    data = db.to_dict(article)
    data['key'] = article_key
    if 'comments' in data:
        for comment in data['comments']:
            # Create new Comment object; index 1 of the unpickled triple
            # is the comment author's email.
            comment_author_email = str(loads(str(comment))[1])
            a_key = ndb.Key(Author, comment_author_email or 'unknown')
            comment_author = a_key.get()
            # create new Author if not there
            if not comment_author:
                comment_author = Author(
                    key=a_key,
                    authorID=str(Author.allocate_ids(size=1)[0]),
                    displayName=comment_author_email.split('@')[0],
                    mainEmail=comment_author_email,
                )
                comment_author.put()
            comment_data = {
                'comment': loads(str(comment))[0],
                'authorName': comment_author.displayName if comment_author else 'unknown',
                'authorID': comment_author.authorID if comment_author else 'unknown',
                'dateCreated': loads(str(comment))[2]
            }
            comment_id = Comment.allocate_ids(size=1, parent=article_key)[0]
            comment_key = ndb.Key(Comment, comment_id, parent=article_key)
            comment_data['key'] = comment_key
            # create Comment
            Comment(**comment_data).put()
        del data['comments']
    if 'tags' in data:
        # Legacy tags are a comma-joined string; split back into a list.
        try:
            data['tags'] = str(data['tags']).split(', ')
        except UnicodeEncodeError:
            del data['tags']
    # An empty legacy string splits to [""] -- treat that as "no tags".
    if 'tags' in data and data['tags'] == [""]:
        del data['tags']
    if 'id' in data:
        del data['id']
    # Map the legacy view labels onto the new enum values.
    if data['view'] == None:
        del data['view']
    else:
        data['view'] = {
            'Publish': 'PUBLISHED',
            'Preview': 'NOT_PUBLISHED',
            'Retract': 'RETRACTED'
        }[str(data['view'])]
    data['legacyID'] = str(article.key().id())
    data['authorName'] = author.displayName
    del data['author']
    data['dateCreated'] = data['date']
    del data['date']
    # create Article
    Article(**data).put()
def get_authors():
    """Return every author as a JSON array."""
    all_authors = list(Author.select())
    payload = author_schema.dump(all_authors, many=True).data
    return jsonify(payload)
# Scratch/tutorial script: open a session, create an Author, and leave
# commented snippets demonstrating further ORM usage.
from db import Session
from models import Author

session = Session()

# NOTE(review): the credential literals look redacted ('******').
author = Author(username='******', password='******')
session.add(author)
session.commit()

# book = Book(author_id=author.id, title='Example Title 3')
# session.add(book)
# session.commit()

# Queries
# result = session.query(Author).order_by(Author.id.desc()).slice(2,4).all()
# print(result)
# author = session.query(Author).first()
# print(author.books)

# Update & Delete
# Relationship
# Modularization

session.close()
def add(search_query, author, title):
    """Search ADS for a paper, let the user pick one, persist it (with
    authors and keywords) in the local database, and download its arXiv PDF.
    """
    # Fields to request from the ADS search API.
    fl = [
        'id', 'author', 'first_author', 'bibcode', 'id', 'year', 'title',
        'abstract', 'doi', 'pubdate', "pub", "keyword", "doctype",
        "identifier", "links_data"
    ]
    if author:
        search_query += "author:" + author
    if title:
        search_query += "title:" + title
    papers = list(ads.SearchQuery(q=search_query, fl=fl))
    if len(papers) == 0:
        # NOTE(review): assigning the class itself and then exiting looks
        # like placeholder "no results" handling -- confirm intent.
        selection = ads.search.Article
        exit()
    elif len(papers) == 1:
        selection = papers[0]  # type:ads.search.Article
    else:
        # Show at most ten candidates and ask the user to choose one.
        # first_ten = itertools.islice(papers, 10)
        first_ten = papers[:10]
        single_paper: ads.search.Article
        for index, single_paper in enumerate(first_ten):
            print(index, single_paper.title[0], single_paper.first_author)
        selected_index = click.prompt('select paper', type=int)
        selection = papers[selected_index]  # type:ads.search.Article
    assert len(selection.doi) == 1
    doi = selection.doi[0]
    # Refuse duplicates (matched by DOI).
    try:
        paper = Paper.get(Paper.doi == doi)
        print("this paper has already been added")
        exit(1)
    except peewee.DoesNotExist:
        pass
    print("fetching bibcode")
    q = ads.ExportQuery([selection.bibcode])
    bibtex = q.execute()
    print("saving in db")
    paper = Paper()
    assert len(selection.title) == 1
    paper.doi = doi
    paper.title = selection.title[0]
    paper.abstract = selection.abstract
    paper.bibcode = selection.bibcode
    paper.year = selection.year
    paper.pubdate = selection.pubdate
    paper.pdf_downloaded = False
    paper.first_author = Author.get_or_create(name=selection.first_author)[0]
    paper.publication = Publication.get_or_create(name=selection.pub)[0]
    paper.doctype = Doctype.get_or_create(name=selection.doctype)[0]
    # e.g. "arXiv:1234.5678" -> "1234.5678"
    paper.arxiv_identifier = [
        ident for ident in selection.identifier if "arXiv:" in ident
    ][0].split("arXiv:")[-1]
    paper.bibtex = bibtex
    links = [json.loads(string) for string in selection.links_data]
    print(links)
    paper.save()
    # Link all authors and keywords through the association tables.
    authors = [Author.get_or_create(name=name)[0] for name in selection.author]
    for author in db.batch_commit(authors, 100):
        PaperAuthors.create(author=author, paper=paper)
    keywords = [
        Keyword.get_or_create(keyword=keyword)[0]
        for keyword in selection.keyword
    ]
    for keyword in db.batch_commit(keywords, 100):
        PaperKeywords.create(keyword=keyword, paper=paper)
    print("fetching PDF")
    arxiv_url = "https://arxiv.org/pdf/{id}".format(id=paper.arxiv_identifier)
    r = requests.get(arxiv_url, stream=True)
    print(arxiv_url)
    # NOTE(review): the literal below has no {filename} placeholder, so
    # .format() is a no-op and every download writes to the same file --
    # this looks like a redacted/broken template; restore from VCS history.
    with open('library/(unknown).pdf'.format(filename=paper.id), 'wb') as f:
        chunk_size = 1024  # bytes
        file_size = int(r.headers.get('content-length', 0))
        progress_length = math.ceil(file_size // chunk_size)
        with click.progressbar(r.iter_content(chunk_size=20),
                               length=progress_length) as progress_chunks:
            for chunk in progress_chunks:
                f.write(chunk)
    paper.pdf_downloaded = True
    paper.save()
def create_books():
    """Seed three authors and two books, then run demonstration queries."""
    # Three Author rows, committed together.
    author1 = Author(name='author1', id=1)
    author2 = Author(name='author2', id=2)
    author3 = Author(name='author3', id=3)
    for row in (author1, author2, author3):
        db.session.add(row)
    db.session.commit()
    # Two Book rows.
    book1 = Book(title='book1', id='11', description='desc1', price='$5')
    book2 = Book(title='book2', id='12', description='desc2', price='$12')
    db.session.add(book1)
    db.session.add(book2)
    db.session.commit()
    # Attach writers, committing after each append exactly as before.
    for writer in (author1, author2, author3):
        book1.wrote.append(writer)
        db.session.commit()
    for writer in (author2, author1):
        book2.wrote.append(writer)
        db.session.commit()
    # Querying
    # ---------
    someAuthor = Author.query.filter_by(name='author1').first()
    print("Author data:")
    print(someAuthor)
    # someAuthor is an object; its id/name/books are plain attributes.
    print(someAuthor.id)
    print(someAuthor.name)
    # print(someAuthor.books)
    # print(someAuthor.books[0])
    # print(someAuthor.books[0].id)
    someBook = Book.query.filter_by(title='book1').first()
    print("Book data:")
    print(someBook)
    print(someBook.id)
    print(someBook.title)
    print(someBook.wrote)
    print(someBook.wrote[0])
    print(someBook.wrote[0].id)
    for author in someBook.wrote:
        print(author.name)
def has_get(author_id):
    """Return True iff an Author row with this id exists."""
    with db_session:
        return Author.get(author_id=author_id) is not None
def crawl_author():
    """Crawls Google Scholar in order to retrieve information about an author.

    Reads ``scholar_id`` from the POSTed form, scrapes the profile page,
    the citations histogram, the co-author list, and the (paginated)
    publication list, then upserts everything and returns 'Done.'.
    """
    # The ID of the author in Google Scholar.
    scholar_id = request.form['scholar_id']
    print 'Crawl author ' + scholar_id + '.'
    # Retrieve the author with that ID (if any).
    author = Author.query.filter_by(scholar_id=scholar_id).first()
    if author is None:
        author = Author()
    cookie_jar = CookieJar()
    opener = build_opener(HTTPCookieProcessor(cookie_jar))
    install_opener(opener)
    url = 'https://scholar.google.com/citations'
    params = urlencode({
        'hl': 'en',
        'view_op': 'list_works',
        'sortby': 'pubdate',
        'user': scholar_id,
        'cstart': 0,
        'pagesize': 20
    })
    req = Request(url + '?' + params)
    # NOTE(review): each page is opened twice (open then re-open) -- possibly
    # to establish cookies first; confirm whether the first open is needed.
    opener.open(req)
    res = opener.open(req)
    doc = html.parse(res)
    no_content = doc.xpath(
        './/div[contains(text(), "Sorry, no content found for this URL")]')
    if len(no_content):
        print 'Author ' + scholar_id + ' not found.'
        return 'Done.'
    author.scholar_id = scholar_id
    nname = doc.find('.//div[@id="gsc_prf_in"]')
    if nname is not None:
        # The name of the author.
        author.name = nname.text_content()
    nemaildomain = doc.find('.//div[@id="gsc_prf_ivh"]')
    if nemaildomain is not None:
        # The domain where the author has an email.
        author.email_domain = nemaildomain.text_content().split(
            " - ")[0].split()[-1]
    ncitations = doc.find('.//table[@id="gsc_rsb_st"]')
    if ncitations is not None:
        # The total citations for the author.
        author.total_citations = ncitations.xpath('.//tr[2]/td')[1].text
        # The h-index for the author.
        author.h_index = ncitations.xpath('.//tr[3]/td')[1].text
        # The i10-index for the author.
        author.i10_index = ncitations.xpath('.//tr[4]/td')[1].text
    params = urlencode({
        'hl': 'en',
        'view_op': 'citations_histogram',
        'user': scholar_id
    })
    req = Request(url + '?' + params)
    opener.open(req)
    res = opener.open(req)
    doc = html.parse(res)
    # The citations per year for the author.
    author_citations_per_year = []
    nhistogram = doc.find('.//div[@id="gsc_md_hist_b"]')
    if nhistogram is not None:
        years = [x.text for x in nhistogram.xpath('.//span[@class="gsc_g_t"]')]
        for a in nhistogram.xpath('.//a[@class="gsc_g_a"]'):
            # The bar's z-index encodes its position from the right edge.
            i = int(a.get('style').split('z-index:')[1])
            year = int(years[-i])
            citations_per_year = AuthorCitationsPerYear.query.filter_by(
                author_id=author.id, year=year).first()
            if citations_per_year is None:
                citations_per_year = AuthorCitationsPerYear()
                citations_per_year.year = int(years[-i])
            # NOTE(review): statement nesting reconstructed from flattened
            # source -- citation count update assumed to apply to existing
            # rows as well; confirm against VCS history.
            citations_per_year.citations = int(
                a.xpath('./span[@class="gsc_g_al"]')[0].text)
            author_citations_per_year.append(citations_per_year)
    author.citations_per_year = author_citations_per_year
    params = urlencode({
        'hl': 'en',
        'view_op': 'list_colleagues',
        'user': scholar_id
    })
    req = Request(url + '?' + params)
    opener.open(req)
    res = opener.open(req)
    doc = html.parse(res)
    # The co-authors of the author.
    author_coauthors = []
    for a in doc.xpath('.//h3[@class="gsc_1usr_name"]//a'):
        # NOTE(review): the split() argument below is redacted/garbled
        # ('user='******'&hl' is not valid Python); restore from VCS history.
        co_scholar_id = a.get('href').split('user='******'&hl')[0]
        coauthor = Author.query.filter_by(scholar_id=co_scholar_id).first()
        if coauthor is None:
            coauthor = Author()
            coauthor.scholar_id = co_scholar_id
        author_coauthors.append(coauthor)
    author.coauthors = author_coauthors
    # The publications.
    author_publications = []
    cstart = 0
    pagesize = 100
    while True:
        params = urlencode({
            'hl': 'en',
            'view_op': 'list_works',
            'sortby': 'pubdate',
            'user': scholar_id,
            'cstart': cstart,
            'pagesize': pagesize
        })
        req = Request(url + '?' + params)
        opener.open(req)
        res = opener.open(req)
        doc = html.parse(res)
        for tr in doc.xpath('.//tr[@class="gsc_a_tr"]'):
            a = tr.find('.//td[@class="gsc_a_t"]//a')
            # NOTE: When there are no publications, there is a single tr.
            # <tr class="gsc_a_tr"><td class="gsc_a_e" colspan="3">There are no articles in this profile.</td></tr>
            if a is None:
                continue
            purl = a.get('href')
            # The ID of the publication in Google Scholar.
            pub_scholar_id = purl.split('citation_for_view=')[1]
            # Retrieve the publication with that ID (if any).
            publication = Publication.query.filter_by(
                scholar_id=pub_scholar_id).first()
            if publication is None:
                publication = Publication()
                publication.scholar_id = pub_scholar_id
            # The title of the publication.
            publication.title = a.text_content()
            pub_nyear = tr.find('.//td[@class="gsc_a_y"]//span')
            if pub_nyear is not None:
                year_of_publication = pub_nyear.text_content().strip()
                if year_of_publication:
                    # The year of the publication.
                    publication.year_of_publication = int(year_of_publication)
            pub_ncitations = tr.find('.//a[@class="gsc_a_ac"]')
            if pub_ncitations is not None:
                total_citations = pub_ncitations.text_content().strip()
                if total_citations:
                    # The total citations for the publication.
                    publication.total_citations = int(total_citations)
            author_publications.append(publication)
        # Stop when the "next" button is disabled; otherwise page forward.
        if doc.xpath('.//button[@id="gsc_bpf_next"]')[0].get("disabled"):
            break
        cstart += 100
    author.publications = author_publications
    # When information about the author was retrieved from Google Scholar.
    author.retrieved_at = datetime.datetime.now()
    db.session.add(author)
    db.session.commit()
    print 'Crawled author ' + scholar_id + '.'
    return 'Done.'
def get(self, name):
    """Render the detail page for the author whose name matches *name*.

    The ORM's DoesNotExist propagates if no such author exists.
    """
    matched_author = Author.select().where(Author.name == name).get()
    self.render_template("author.html", {'author': matched_author})
def get(self):
    """Render the listing page with every author in the database."""
    all_authors = Author.select()
    self.render_template("authors.html", {'authors': all_authors})
def setUpClass(cls):
    """Create a fresh schema and seed it with fixture authors, books,
    publishers and series for the unit tests."""
    # Drop leftovers from a previous run so create_all starts clean.
    cls.engine.execute('DROP TABLE IF EXISTS authors;')
    cls.engine.execute('DROP TABLE IF EXISTS books;')
    cls.engine.execute('DROP TABLE IF EXISTS series;')
    cls.engine.execute('DROP TABLE IF EXISTS publishers;')
    cls.engine.execute('DROP TABLE IF EXISTS assoc_author_book;')
    cls.engine.execute('DROP TABLE IF EXISTS assoc_author_series;')
    Base.metadata.create_all(cls.engine)
    # Authors (homepage_URL deliberately unset except for murakami).
    dostoyevsky = Author(name="Fyodor Dostoyevsky",
                         date_of_birth="1821-11-11",
                         nationality="Russian"
                         # homepage_URL = null
                         )
    keats = Author(name="John Keats",
                   date_of_birth="1821-2-23",
                   nationality="British",
                   occupation="Poet"
                   # homepage_URL = null
                   )
    tolstoy = Author(name="Leo Tolstoy",
                     date_of_birth="1828-9-9",
                     nationality="Russian"
                     # homepage_URL = null
                     )
    murakami = Author(name="Haruki Murakami",
                      date_of_birth="1949-1-12",
                      nationality="Japanese",
                      homepage_URL="www.harukimurakami.com")
    # adding publishers
    penguin = Publisher(name="Penguin", founder="Allen Lane",
                        year_founded="1935", country="United Kingdom",
                        status="Active")
    vintage = Publisher(name="Vintage", founder="Alfred A. Knopf, Sr.",
                        year_founded="1954", country="United States",
                        status="Active")
    knopf = Publisher(name="Knopf", founder="Alfred A. Knopf, Sr.",
                      country="United States", status="Active")
    oxford = Publisher(name="Oxford University Press",
                       country="United Kingdom")
    # 1Q84: a series linked to a single omnibus book.
    oneq84 = Series(title="1Q84", genre="Magical Realism", num_books=3,
                    status=1)
    oneq84.written_by = [murakami]
    bis = Book(title="1Q84, #1-3", isbn="0307593312")
    bis.written_by = [murakami]
    bis.publisher = knopf
    bis.in_series = oneq84
    war_and_peace = Book(title="War and Peace", isbn="0192833987")
    notes_from_underground = Book(title="Notes from Underground",
                                  isbn="067973452X")
    crime_and_punishment = Book(title="Crime and Punishment",
                                isbn="0143058142")
    tolstoy.wrotes = [war_and_peace]
    dostoyevsky.wrotes = [notes_from_underground]
    oxford.published_book = [war_and_peace]
    vintage.published_book = [notes_from_underground]
    crime_and_punishment.written_by = [dostoyevsky]
    crime_and_punishment.publisher = penguin
    # books to publishers: many to one
    tolkien = Author(name="J. R. R. Tolkien", date_of_birth="1892-1-3",
                     nationality="British", occupation="Philologist")
    houghton_mifflin_harcourt = Publisher(name="Houghton Mifflin Harcourt",
                                          founder="Henry Oscar Houghton",
                                          country="United States",
                                          status="Active")
    del_rey = Publisher(name="Del Rey", founder="Lester del Rey",
                        status="Active")
    hobbit = Book(title="The Hobbit", isbn="0618260307")
    hobbit.written_by = [tolkien]
    hobbit.publisher = houghton_mifflin_harcourt
    rings = Series(title="The Lord of the Rings", genre="Epic",
                   num_books=3, status=1)
    ring1 = Book(title="The Fellowship of the Ring", isbn="0618346252")
    ring1.written_by = [tolkien]
    ring1.publisher = houghton_mifflin_harcourt
    ring1.in_series = rings
    ring2 = Book(title="The Two Towers", isbn="0618346260")
    ring2.written_by = [tolkien]
    ring2.publisher = houghton_mifflin_harcourt
    ring2.in_series = rings
    ring3 = Book(title="The Return of the King", isbn="0345339738")
    ring3.written_by = [tolkien]
    ring3.publisher = del_rey
    ring3.in_series = rings
    # add The Lord of the Rings related
    cls.session.add(houghton_mifflin_harcourt)
    cls.session.add(del_rey)
    cls.session.add(ring2)
    cls.session.add(ring1)
    cls.session.add(ring3)
    cls.session.add(hobbit)
    # add publishers
    cls.session.add(knopf)
    cls.session.add(penguin)
    cls.session.add(vintage)
    cls.session.add(oxford)
    # add books
    cls.session.add(crime_and_punishment)
    # add authors
    cls.session.add(dostoyevsky)
    cls.session.add(keats)
    cls.session.add(tolstoy)
    cls.session.add(murakami)
    # add series
    cls.session.add(oneq84)
    # NOTE(review): war_and_peace, notes_from_underground, bis, rings and
    # tolkien are never add()ed directly — presumably they are persisted via
    # relationship cascades from the objects above; confirm.
    cls.session.commit()
def add_book():
    """Flask view: render and process the combined add-book / add-magazine
    page.  GET shows the forms (login required); POST dispatches on which
    submit button was pressed (submit1 = book, submit2 = magazine)."""
    book_form = AddNewItemBookForm(radio='book')
    magazine_form = AddNewItemMagazineForm(radio='magazine')
    if request.method == 'GET':
        # Only logged-in users may see the form.
        if 'logged_in' not in session:
            message_body = 'You are not logged in.'
            message_title = 'Error!'
            return render_template('message.html',
                                   message_title=message_title,
                                   message_body=message_body)
        book_form.radio.data = 'book'
        return render_template('add_book.html',
                               book_form=book_form,
                               magazine_form=magazine_form,
                               book_error=book_form.errors,
                               magazine_error=magazine_form.errors)
    else:
        # Book branch: only runs when the book submit button was used and
        # the book form validates.
        if book_form.submit1.data and book_form.validate():
            # Up to three author name pairs come from fixed form fields.
            tmp_authors = [
                [book_form.first_name.data, book_form.surname.data],
                [book_form.first_name_1.data, book_form.surname_1.data],
                [book_form.first_name_2.data, book_form.surname_2.data],
            ]
            new_authors = []
            for first_name, surname in tmp_authors:
                # Skip blank name pairs; reuse an existing Author when the
                # exact first/last name already exists.
                if first_name != '' and surname != '':
                    author = Author.query.filter_by(first_name=first_name,
                                                    last_name=surname).first()
                    if not author:
                        new_author = Author(first_name=first_name,
                                            last_name=surname)
                        new_authors.append(new_author)
                        db.session.add(new_author)
                        db.session.commit()
                    else:
                        new_authors.append(author)
            # Reuse an existing tag of the same name, else create it.
            tmp_tag = Tag.query.filter_by(name=book_form.tag.data).first()
            if not tmp_tag:
                new_tag = Tag(name=book_form.tag.data)
                db.session.add(new_tag)
                db.session.commit()
            else:
                new_tag = tmp_tag
            new_book = Book(title=book_form.title.data,
                            table_of_contents=book_form.table_of_contents.data,
                            language=book_form.language.data,
                            category=book_form.category.data,
                            tags=[new_tag],
                            description=book_form.description.data,
                            isbn=book_form.isbn.data,
                            authors=new_authors,
                            original_title=book_form.original_title.data,
                            publisher=book_form.publisher.data,
                            # Only a year is collected; store it as Jan 1st.
                            pub_date=datetime(year=int(
                                book_form.pub_date.data), month=1, day=1))
            if book_exists(new_book):
                message_body = 'This book already exists.'
                message_title = 'Oops!'
                return render_template('message.html',
                                       message_title=message_title,
                                       message_body=message_body)
            db.session.add(new_book)
            db.session.commit()
            message_body = 'The book has been added.'
            message_title = 'Success!'
            return render_template('message.html',
                                   message_title=message_title,
                                   message_body=message_body)
        # Magazine branch: magazine submit button used and form validates.
        if magazine_form.submit2.data and magazine_form.validate():
            tmp_tag = Tag.query.filter_by(name=magazine_form.tag.data).first()
            if not tmp_tag:
                new_tag = Tag(name=magazine_form.tag.data)
                db.session.add(new_tag)
                db.session.commit()
            else:
                new_tag = tmp_tag
            new_magazine = Magazine(
                title=magazine_form.title_of_magazine.data,
                table_of_contents=magazine_form.table_of_contents.data,
                language=magazine_form.language.data,
                category=magazine_form.category.data,
                tags=[new_tag],
                description=magazine_form.description.data,
                year=datetime(year=int(magazine_form.pub_date.data),
                              month=1,
                              day=1),
                issue=magazine_form.issue.data)
            db.session.add(new_magazine)
            db.session.commit()
            message_body = 'The magazine has been added.'
            message_title = 'Success!'
            return render_template('message.html',
                                   message_title=message_title,
                                   message_body=message_body)
        # Validation failed: re-render with the radio preselecting whichever
        # form the user actually submitted.
        if magazine_form.submit2.data:
            book_form.radio.data = 'magazine'
            return render_template('add_book.html',
                                   book_form=book_form,
                                   magazine_form=magazine_form,
                                   book_error=book_form.errors,
                                   magazine_error=magazine_form.errors)
        else:
            book_form.radio.data = 'book'
            return render_template('add_book.html',
                                   book_form=book_form,
                                   magazine_form=magazine_form,
                                   book_error=book_form.errors,
                                   magazine_error=magazine_form.errors)
def author_add(request):
    """Create an empty Author and hand it to the shared manager view."""
    blank_author = Author()
    return author_manager(request, blank_author)
def new_author(author_dict):
    """Persist a new Author built from *author_dict*.

    Expects a 'name' key (a missing key raises KeyError, as before).

    Returns:
        The newly created Author, so callers can reach its generated
        primary key.  (Previously the object was committed and then
        discarded, returning None; returning it is backward compatible.)
    """
    author = Author(author_dict['name'])
    db.session.add(author)
    db.session.commit()
    return author
def validate_author(self, value):
    """Validator: *value* must be the id of an existing Author."""
    if Author.filter(Author.id == value).exists():
        return
    raise ValidationError("Can't find author")
from datetime import date

from base import Session, engine
from models import Base, Author, Book

# Seed script: create the schema and insert fixture authors and books.
Base.metadata.create_all(engine)
session = Session()

# Authors; primary keys are assumed to autoincrement 1, 2, 3 in insertion
# order, matching the hard-coded author_id values on the books below.
author_1 = Author(name='J.R.R. Tolkien', birth=date(1892, 1, 3))
author_2 = Author(name='J.K. Rowling', birth=date(1965, 7, 31))
author_3 = Author(name='Stephen King', birth=date(1947, 9, 21))

book_1 = Book(title='The Hobbit', published_in=date(1937, 9, 21), author_id=1)
book_2 = Book(title='The Lord of the Rings', published_in=date(1954, 7, 29), author_id=1)
book_3 = Book(title='Harry Potter', published_in=date(1997, 6, 26), author_id=2)
book_4 = Book(title='Carrie', published_in=date(1974, 1, 1), author_id=3)
book_5 = Book(title='Salem Lot', published_in=date(1975, 1, 1), author_id=3)
book_6 = Book(title='The Shining', published_in=date(1977, 1, 1), author_id=3)
book_7 = Book(title='Rage', published_in=date(1977, 1, 1), author_id=3)

session.add_all([author_1, author_2, author_3,
                 book_1, book_2, book_3, book_4, book_5, book_6, book_7])
# BUG FIX: the session was never committed, so the seed rows were discarded
# when the script exited.
session.commit()
def post(self, request):
    """Create or update a Paper from form-encoded request data.

    The form supplies: uuid, title, time ("YYYY-MM"), origin, link,
    content (comment text), score, plus multi-valued authors, tags and
    reference fields.  If no Paper with the given uuid exists it is
    created; otherwise the existing one is updated in place.

    Returns a JsonResponse with a 'next' detail URL on success, or a
    message with 404/500 status on bad input.
    """
    param = QueryDict(request.body)
    uuid = param.get('uuid')
    title = param.get('title')
    time = param.get('time')
    origin = param.get('origin')
    _authors = param.getlist('authors')
    link = param.get('link')
    _tags = param.getlist('tags')
    content = param.get('content')
    refer_to = param.getlist('reference')
    score = param.get('score')
    # Parse "YYYY-MM" into a date pinned to the 1st of that month.
    try:
        year, month = time.split('-')
        year, month = int(year), int(month)
        publish_time = datetime.date(year, month, 1)
    except Exception as e:
        logger.error(traceback.format_exc(e))
        return JsonResponse({'msg': '提供的日期{}有误'.format(time)}, status=500)
    # Every submitted tag id must reference an existing ResearchTag.
    for _tag in _tags:
        try:
            _tag = int(_tag)
            _ = ResearchTag.objects.get(research_tag_id=_tag)
        except Exception as e:
            logger.error(traceback.format_exc(e))
            return JsonResponse({'msg': '错误的标签{}'.format(_tag)}, status=500)
    tags = ResearchTag.objects.filter(
        research_tag_id__in=[int(_t) for _t in _tags])
    # Authors arrive either as numeric ids or as names; unknown names are
    # created on the fly.
    author_ids = []
    for _author in _authors:
        if _author.isdigit():
            author_ids.append(int(_author))
        elif Author.objects.filter(name=_author).exists():
            a = Author.objects.get(name=_author).author_id
            author_ids.append(a)
        else:
            a = Author(name=_author)
            a.save()
            author_ids.append(a.author_id)
    authors = Author.objects.filter(author_id__in=author_ids)
    try:
        score = int(score)
    except Exception as e:
        logger.error(traceback.format_exc(e))
        return JsonResponse({'msg': '错误的评分分数格式'}, status=500)
    if not Paper.objects.filter(paper_uuid=uuid).exists():
        # Creation path.
        try:
            comment = PaperComment(content=content)
            comment.save()
            # NOTE(review): passing the authors/tags querysets to the
            # constructor relies on these fields accepting direct
            # assignment; on Django >= 2.0 a many-to-many must be .set()
            # after save — confirm the project's Django version.
            paper = Paper(paper_uuid=uuid,
                          title=title,
                          publish_origin=origin,
                          publish_time=publish_time,
                          author=authors,
                          link=link,
                          tag=tags,
                          comment=comment,
                          self_score=score)
            paper.save()
            # Bump the cache key so list views notice the new paper.
            redis.set(self.LATEST_KEY, str(uuid_gen.uuid4()))
        except Exception as e:
            logger.error(traceback.format_exc(e))
            return JsonResponse({'msg': '保存失败'}, status=500)
        else:
            return JsonResponse({
                'next': reverse('paperdb.detail',
                                kwargs={'paper_uuid': paper.paper_uuid})
            })
    # Edit path.
    try:
        paper = Paper.objects.get(paper_uuid=uuid)
    except Exception as e:
        logger.error(traceback.format_exc(e))
        return JsonResponse({'msg': '错误的uuid/未找到相关论文记录'}, status=404)
    else:
        paper.title = title
        paper.publish_time = publish_time
        paper.publish_origin = origin
        paper.author = authors
        # BUG FIX: was `paper.link = paper.link`, a self-assignment that
        # silently discarded any edited link.
        paper.link = link
        paper.tag = tags
        paper.self_score = score
        try:
            paper.save()
        except Exception as e:
            logger.error(traceback.format_exc(e))
            return JsonResponse({'msg': '保存失败'}, status=500)
        # Create, update or keep the attached comment.
        if paper.comment is None:
            if content != '':
                comment = PaperComment(content=content)
                comment.save()
                paper.comment = comment
                paper.save()
        elif content != paper.comment.content.replace(
                '\r\n', '\n'):  # normalize CRLF line endings before comparing
            paper.comment.content = content
            paper.comment.save()
        # Record any not-yet-existing references to other papers.
        for refer_to_paper in Paper.objects.filter(paper_uuid__in=refer_to):
            if not Reference.objects.filter(
                    reference_src=paper,
                    reference_trg=refer_to_paper).exists():
                reference = Reference(reference_src=paper,
                                      reference_trg=refer_to_paper)
                reference.save()
        return JsonResponse({
            'next': reverse('paperdb.detail',
                            kwargs={'paper_uuid': paper.paper_uuid})
        })
def get_authors_info(author_url):
    """Fetch a CSDN author's profile info from their home, bbs and bbs/ask
    pages and persist it (translated from: 获取发帖, 回帖作者的个人信息)."""
    # The trailing path segment of the profile URL is the author's uuid.
    uuid = author_url.split('/')[-1]
    author = Author()
    author.author_uuid = uuid
    sel = Selector(
        text=requests.get("https://me.csdn.net/" + uuid,
                          headers=HEADERS).text)
    # The display name may be split across several text nodes.
    name = ""
    for item in sel.xpath("//div[@class='lt_title']/text()").extract():
        name += item.strip()
    author.name = name
    # NOTE(review): the stats below are read by position from the repeated
    # 'me_chanel_det_item access' spans — [0], [1], [2] are assumed to be
    # blog count, rank, etc. in page order; breaks if CSDN reorders them.
    author.original_blog_nums = int(
        sel.xpath("//div[@class='me_chanel_det_item access']//span/text()").
        extract()[0].strip())
    author.desc = sel.xpath(
        "//div[@class='description clearfix']//p/text()").extract()[0].strip()
    author.rank = sel.xpath(
        "//div[@class='me_chanel_det_item access']//span/text()").extract(
        )[1].strip()
    author.follower_nums = sel.xpath(
        "//div[@class='fans']//a//span/text()").extract()[0].strip()
    author.following_nums = sel.xpath(
        "//div[@class='att']//a//span/text()").extract()[0].strip()
    # Forum (bbs) page: topics posted / answered, completion percentage.
    sel = Selector(
        text=requests.get("https://me.csdn.net/bbs/" + uuid,
                          headers=HEADERS).text)
    author.post_topic_nums = sel.xpath(
        "//div[@class='me_chanel_det_item access']//span/text()").extract(
        )[0].strip()
    author.answer_topic_nums = sel.xpath(
        "//div[@class='me_chanel_det_item access']//span/text()").extract(
        )[1].strip()
    author.end_topic_percentage = sel.xpath(
        "//div[@class='me_chanel_det_item access']//span/text()").extract(
        )[2].strip()
    # Q&A (bbs/ask) page: questions posted / answered.
    sel = Selector(text=requests.get("https://me.csdn.net/bbs/ask/" + uuid,
                                     headers=HEADERS).text)
    author.post_question_nums = sel.xpath(
        "//div[@class='me_chanel_det_item access']//span/text()").extract(
        )[0].strip()
    author.answer_question_nums = sel.xpath(
        "//div[@class='me_chanel_det_item access']//span/text()").extract(
        )[1].strip()
    # Update when a row with this uuid already exists, insert otherwise
    # (peewee: save() updates by primary key, force_insert inserts).
    if Author.select().where(Author.author_uuid == author.author_uuid):
        author.save()
    else:
        author.save(force_insert=True)
def create_author():
    """Persist an Author from the submitted form, then return to the list."""
    author = Author(request.form['name'])
    db.session.add(author)
    db.session.commit()
    return redirect(url_for('index_author'))
def load ():
    """Rebuild the database from the module-level authors/publishers/books
    data, caching created rows in the aus/pus/isbn lookup dicts."""
    global aus
    global pus
    global isbn
    engine = create_engine('sqlite:////u/yipuwang/cs373-idb/flaskapp/data/test.db', echo=True)
    # engine = create_engine('postgresql://*****:*****@localhost:5432/testdb', echo=True)
    Session = sessionmaker(bind=engine)
    session = Session()
    # Drop leftovers so create_all starts from a clean schema.
    engine.execute('DROP TABLE IF EXISTS authors;')
    engine.execute('DROP TABLE IF EXISTS books;')
    engine.execute('DROP TABLE IF EXISTS series;')
    engine.execute('DROP TABLE IF EXISTS publishers;')
    engine.execute('DROP TABLE IF EXISTS assoc_author_book;')
    engine.execute('DROP TABLE IF EXISTS assoc_author_series;')
    # Base.metadata.drop_all(engine)
    Base.metadata.create_all(engine)
    # NOTE(review): authors are only cached in `aus`, never session.add()ed —
    # presumably they are persisted via the book.written_by relationship
    # cascade below, so authors with no matching book are dropped; confirm.
    for author in authors:
        # print(author.keys())
        author_db_format = Author(
            name = author['name'],
            date_of_birth = author['date_of_birth'],
            nationality = author['nationality'],
            homepage_URL = author['homepage_url'],
            occupation = author['occupation'],
            image = author['image']
        )
        aus[author_db_format.name] = author_db_format
    for publisher in publishers:
        publisher_db_format = Publisher(
            name = publisher["name"],
            founder = publisher["founder"],
            year_founded = publisher["year_founded"],
            country = publisher["country"],
            status = publisher["status"],
            image = publisher['image']
        )
        pus[publisher_db_format.name] = publisher_db_format
        session.add(publisher_db_format)
    for book in books:
        book_db_format = Book(
            title = book["title"],
            isbn = book["isbn"],
            image_link = book['image_link'],
            description = book['description'],
            amazon_product_url = book['amazon_url']
        )
        # print(book.keys())
        # Skip duplicate ISBNs and books whose author or publisher is unknown.
        if book_db_format.isbn not in isbn:
            if book['written_by'] != '' and book['written_by'] in aus and book['publisher'] in pus:
                book_db_format.written_by = [aus[book['written_by']]]
                isbn[book_db_format.isbn] = book_db_format
                book_db_format.publisher = pus[book['publisher']]
                session.add(book_db_format)
    session.commit()
    session.close()
def create_authors():
    """Load books.json and insert each distinct author into the database.

    Authors get sequential ids starting at 0; an author whose name already
    appears in the author table is skipped.  Optional fields default to the
    string "None" (the original schema convention).
    """
    book = load_json('books.json')
    counter = 0
    for oneBook in book:
        title = oneBook['title']
        publishers_attribute = oneBook['publishers']
        for i in publishers_attribute:
            publisher = i['name']
        authors_attribute = oneBook['authors']
        for j in authors_attribute:
            author = j['name']
            # dict.get replaces the original eight bare try/except blocks
            # that each swallowed a KeyError.
            newAuthor = Author(id=counter,
                               name=author,
                               born=j.get('born', "None"),
                               education=j.get('education', "None"),
                               nationality=j.get('nationality', "None"),
                               description=j.get('description', "None"),
                               alma_mater=j.get('alma_mater', "None"),
                               wikipedia_url=j.get('wikipedia_url', "None"),
                               image_url=j.get('image_url', "None"))
            cursor.execute("SELECT name FROM author")
            rows = cursor.fetchall()
            # Duplicate when any existing row already contains this name
            # (replaces the manual dupe-flag loop with a no-op `continue`).
            dupe = any(author in row for row in rows)
            if not dupe:
                db.session.add(newAuthor)
                db.session.commit()
                counter += 1
                print("we added an author into the DB! it was: " + author)
            else:
                print("there was a dupe!")
def getAuthorAndSubmissionInfo(listOfFiles):
    """Combine author.csv and the submission file into acceptance stats.

    Returns {'infoType': 'authorAndSubmission', 'infoData': ...} where
    infoData holds the top-10 accepted countries and affiliations plus,
    for each top country, its single most common submission keyword.
    """
    # NOTE(review): assumes listOfFiles contains "author.csv" plus exactly
    # one submission file; otherwise authorFile/submissionFile stays
    # unbound — confirm with the upload handler.
    for csvFile in listOfFiles:
        if str(csvFile.name) == "author.csv":
            authorFile = csvFile
        else:
            submissionFile = csvFile
    # Set up individual file objects
    author = Author(authorFile)
    submission = Submission(submissionFile)
    parsedResult = {}
    # Collect every author attached to an accepted submission
    # (column 9 = decision, column 0 = submission id on both files).
    authorList = []
    for submissionInfo in submission.lines:
        for authorInfo in author.lines:
            if str(submissionInfo[9]) == 'accept' and str(
                    authorInfo[0]) == str(submissionInfo[0]):
                authorList.append({
                    'name': authorInfo[1] + " " + authorInfo[2],
                    'country': authorInfo[4],
                    'affiliation': authorInfo[5]
                })
    # Top-10 countries and affiliations among accepted authors.
    countries = [ele['country'] for ele in authorList if ele]
    topCountries = Counter(countries).most_common(10)
    parsedResult['topAcceptedCountries'] = {
        'labels': [ele[0] for ele in topCountries],
        'data': [ele[1] for ele in topCountries]
    }
    affiliations = [ele['affiliation'] for ele in authorList if ele]
    topAffiliations = Counter(affiliations).most_common(10)
    parsedResult['topAcceptedAffiliations'] = {
        'labels': [ele[0] for ele in topAffiliations],
        'data': [ele[1] for ele in topAffiliations]
    }
    # Gather every keyword (column 8, newline-separated) from submissions
    # whose author belongs to a top country.
    topCountriesLabel = [ele[0] for ele in topCountries]
    topCountriesKeywordDict = dict((el, []) for el in topCountriesLabel)
    for submissionInfo in submission.lines:
        for authorInfo in author.lines:
            if str(authorInfo[0]) == str(
                    submissionInfo[0]) and authorInfo[4] in topCountriesLabel:
                allKeywords = str(submissionInfo[8]).lower().replace(
                    "\r", "").split("\n")
                topCountriesKeywordDict[authorInfo[4]].extend(allKeywords)
    # Client expects a header row followed by [country, keyword, count] rows.
    headerArr = ["countries", "keyword", "count"]
    topCountryKeywordArr = [headerArr]
    for country in topCountriesLabel:
        temp = Counter(topCountriesKeywordDict[country]).most_common(1)
        # BUG FIX: guard against a country with no recorded keywords --
        # indexing temp[0] on an empty most_common() raised IndexError.
        if temp:
            topCountryKeywordArr.append([country, temp[0][0], temp[0][1]])
    parsedResult['topCountryKeyword'] = [topCountryKeywordArr]
    # Merge all data into one dict
    # finalResults = merge_two_dicts(authorResult, parsedResult)
    # finalResults = merge_two_dicts(finalResults, submissionResult)
    return {'infoType': 'authorAndSubmission', 'infoData': parsedResult}
class LookupTests(TestCase): #def setUp(self): def setUp(self): # Create a few Authors. self.au1 = Author(name='Author 1') self.au1.save() self.au2 = Author(name='Author 2') self.au2.save() # Create a couple of Articles. self.a1 = Article(headline='Article 1', pub_date=datetime(2005, 7, 26), author=self.au1) self.a1.save() self.a2 = Article(headline='Article 2', pub_date=datetime(2005, 7, 27), author=self.au1) self.a2.save() self.a3 = Article(headline='Article 3', pub_date=datetime(2005, 7, 27), author=self.au1) self.a3.save() self.a4 = Article(headline='Article 4', pub_date=datetime(2005, 7, 28), author=self.au1) self.a4.save() self.a5 = Article(headline='Article 5', pub_date=datetime(2005, 8, 1, 9, 0), author=self.au2) self.a5.save() self.a6 = Article(headline='Article 6', pub_date=datetime(2005, 8, 1, 8, 0), author=self.au2) self.a6.save() self.a7 = Article(headline='Article 7', pub_date=datetime(2005, 7, 27), author=self.au2) self.a7.save() # Create a few Tags. self.t1 = Tag(name='Tag 1') self.t1.save() self.t1.articles.add(self.a1, self.a2, self.a3) self.t2 = Tag(name='Tag 2') self.t2.save() self.t2.articles.add(self.a3, self.a4, self.a5) self.t3 = Tag(name='Tag 3') self.t3.save() self.t3.articles.add(self.a5, self.a6, self.a7) def test_exists(self): # We can use .exists() to check that there are some self.assertTrue(Article.objects.exists()) for a in Article.objects.all(): a.delete() # There should be none now! 
self.assertFalse(Article.objects.exists()) def test_lookup_int_as_str(self): # Integer value can be queried using string self.assertQuerysetEqual(Article.objects.filter(id__iexact=str(self.a1.id)), ['<Article: Article 1>']) @skipUnlessDBFeature('supports_date_lookup_using_string') def test_lookup_date_as_str(self): # A date lookup can be performed using a string search self.assertQuerysetEqual(Article.objects.filter(pub_date__startswith='2005'), [ '<Article: Article 5>', '<Article: Article 6>', '<Article: Article 4>', '<Article: Article 2>', '<Article: Article 3>', '<Article: Article 7>', '<Article: Article 1>', ]) def test_iterator(self): # Each QuerySet gets iterator(), which is a generator that "lazily" # returns results using database-level iteration. self.assertQuerysetEqual(Article.objects.iterator(), [ 'Article 5', 'Article 6', 'Article 4', 'Article 2', 'Article 3', 'Article 7', 'Article 1', ], transform=attrgetter('headline')) # iterator() can be used on any QuerySet. self.assertQuerysetEqual( Article.objects.filter(headline__endswith='4').iterator(), ['Article 4'], transform=attrgetter('headline')) def test_count(self): # count() returns the number of objects matching search criteria. self.assertEqual(Article.objects.count(), 7) self.assertEqual(Article.objects.filter(pub_date__exact=datetime(2005, 7, 27)).count(), 3) self.assertEqual(Article.objects.filter(headline__startswith='Blah blah').count(), 0) # count() should respect sliced query sets. articles = Article.objects.all() self.assertEqual(articles.count(), 7) self.assertEqual(articles[:4].count(), 4) self.assertEqual(articles[1:100].count(), 6) self.assertEqual(articles[10:100].count(), 0) # Date and date/time lookups can also be done with strings. self.assertEqual(Article.objects.filter(pub_date__exact='2005-07-27 00:00:00').count(), 3) def test_in_bulk(self): # in_bulk() takes a list of IDs and returns a dictionary mapping IDs to objects. 
arts = Article.objects.in_bulk([self.a1.id, self.a2.id]) self.assertEqual(arts[self.a1.id], self.a1) self.assertEqual(arts[self.a2.id], self.a2) self.assertEqual(Article.objects.in_bulk([self.a3.id]), {self.a3.id: self.a3}) self.assertEqual(Article.objects.in_bulk(set([self.a3.id])), {self.a3.id: self.a3}) self.assertEqual(Article.objects.in_bulk(frozenset([self.a3.id])), {self.a3.id: self.a3}) self.assertEqual(Article.objects.in_bulk((self.a3.id,)), {self.a3.id: self.a3}) self.assertEqual(Article.objects.in_bulk([1000]), {}) self.assertEqual(Article.objects.in_bulk([]), {}) self.assertEqual(Article.objects.in_bulk(iter([self.a1.id])), {self.a1.id: self.a1}) self.assertEqual(Article.objects.in_bulk(iter([])), {}) self.assertRaises(TypeError, Article.objects.in_bulk) self.assertRaises(TypeError, Article.objects.in_bulk, headline__startswith='Blah') def test_values(self): # values() returns a list of dictionaries instead of object instances -- # and you can specify which fields you want to retrieve. 
identity = lambda x:x self.assertQuerysetEqual(Article.objects.values('headline'), [ {'headline': u'Article 5'}, {'headline': u'Article 6'}, {'headline': u'Article 4'}, {'headline': u'Article 2'}, {'headline': u'Article 3'}, {'headline': u'Article 7'}, {'headline': u'Article 1'}, ], transform=identity) self.assertQuerysetEqual( Article.objects.filter(pub_date__exact=datetime(2005, 7, 27)).values('id'), [{'id': self.a2.id}, {'id': self.a3.id}, {'id': self.a7.id}], transform=identity) self.assertQuerysetEqual(Article.objects.values('id', 'headline'), [ {'id': self.a5.id, 'headline': 'Article 5'}, {'id': self.a6.id, 'headline': 'Article 6'}, {'id': self.a4.id, 'headline': 'Article 4'}, {'id': self.a2.id, 'headline': 'Article 2'}, {'id': self.a3.id, 'headline': 'Article 3'}, {'id': self.a7.id, 'headline': 'Article 7'}, {'id': self.a1.id, 'headline': 'Article 1'}, ], transform=identity) # You can use values() with iterator() for memory savings, # because iterator() uses database-level iteration. self.assertQuerysetEqual(Article.objects.values('id', 'headline').iterator(), [ {'headline': u'Article 5', 'id': self.a5.id}, {'headline': u'Article 6', 'id': self.a6.id}, {'headline': u'Article 4', 'id': self.a4.id}, {'headline': u'Article 2', 'id': self.a2.id}, {'headline': u'Article 3', 'id': self.a3.id}, {'headline': u'Article 7', 'id': self.a7.id}, {'headline': u'Article 1', 'id': self.a1.id}, ], transform=identity) # The values() method works with "extra" fields specified in extra(select). 
self.assertQuerysetEqual( Article.objects.extra(select={'id_plus_one': 'id + 1'}).values('id', 'id_plus_one'), [ {'id': self.a5.id, 'id_plus_one': self.a5.id + 1}, {'id': self.a6.id, 'id_plus_one': self.a6.id + 1}, {'id': self.a4.id, 'id_plus_one': self.a4.id + 1}, {'id': self.a2.id, 'id_plus_one': self.a2.id + 1}, {'id': self.a3.id, 'id_plus_one': self.a3.id + 1}, {'id': self.a7.id, 'id_plus_one': self.a7.id + 1}, {'id': self.a1.id, 'id_plus_one': self.a1.id + 1}, ], transform=identity) data = { 'id_plus_one': 'id+1', 'id_plus_two': 'id+2', 'id_plus_three': 'id+3', 'id_plus_four': 'id+4', 'id_plus_five': 'id+5', 'id_plus_six': 'id+6', 'id_plus_seven': 'id+7', 'id_plus_eight': 'id+8', } self.assertQuerysetEqual( Article.objects.filter(id=self.a1.id).extra(select=data).values(*data.keys()), [{ 'id_plus_one': self.a1.id + 1, 'id_plus_two': self.a1.id + 2, 'id_plus_three': self.a1.id + 3, 'id_plus_four': self.a1.id + 4, 'id_plus_five': self.a1.id + 5, 'id_plus_six': self.a1.id + 6, 'id_plus_seven': self.a1.id + 7, 'id_plus_eight': self.a1.id + 8, }], transform=identity) # You can specify fields from forward and reverse relations, just like filter(). 
self.assertQuerysetEqual( Article.objects.values('headline', 'author__name'), [ {'headline': self.a5.headline, 'author__name': self.au2.name}, {'headline': self.a6.headline, 'author__name': self.au2.name}, {'headline': self.a4.headline, 'author__name': self.au1.name}, {'headline': self.a2.headline, 'author__name': self.au1.name}, {'headline': self.a3.headline, 'author__name': self.au1.name}, {'headline': self.a7.headline, 'author__name': self.au2.name}, {'headline': self.a1.headline, 'author__name': self.au1.name}, ], transform=identity) self.assertQuerysetEqual( Author.objects.values('name', 'article__headline').order_by('name', 'article__headline'), [ {'name': self.au1.name, 'article__headline': self.a1.headline}, {'name': self.au1.name, 'article__headline': self.a2.headline}, {'name': self.au1.name, 'article__headline': self.a3.headline}, {'name': self.au1.name, 'article__headline': self.a4.headline}, {'name': self.au2.name, 'article__headline': self.a5.headline}, {'name': self.au2.name, 'article__headline': self.a6.headline}, {'name': self.au2.name, 'article__headline': self.a7.headline}, ], transform=identity) self.assertQuerysetEqual( Author.objects.values('name', 'article__headline', 'article__tag__name').order_by('name', 'article__headline', 'article__tag__name'), [ {'name': self.au1.name, 'article__headline': self.a1.headline, 'article__tag__name': self.t1.name}, {'name': self.au1.name, 'article__headline': self.a2.headline, 'article__tag__name': self.t1.name}, {'name': self.au1.name, 'article__headline': self.a3.headline, 'article__tag__name': self.t1.name}, {'name': self.au1.name, 'article__headline': self.a3.headline, 'article__tag__name': self.t2.name}, {'name': self.au1.name, 'article__headline': self.a4.headline, 'article__tag__name': self.t2.name}, {'name': self.au2.name, 'article__headline': self.a5.headline, 'article__tag__name': self.t2.name}, {'name': self.au2.name, 'article__headline': self.a5.headline, 'article__tag__name': self.t3.name}, 
{'name': self.au2.name, 'article__headline': self.a6.headline, 'article__tag__name': self.t3.name}, {'name': self.au2.name, 'article__headline': self.a7.headline, 'article__tag__name': self.t3.name}, ], transform=identity) # However, an exception FieldDoesNotExist will be thrown if you specify # a non-existent field name in values() (a field that is neither in the # model nor in extra(select)). self.assertRaises(FieldError, Article.objects.extra(select={'id_plus_one': 'id + 1'}).values, 'id', 'id_plus_two') # If you don't specify field names to values(), all are returned. self.assertQuerysetEqual(Article.objects.filter(id=self.a5.id).values(), [{ 'id': self.a5.id, 'author_id': self.au2.id, 'headline': 'Article 5', 'pub_date': datetime(2005, 8, 1, 9, 0) }], transform=identity) def test_values_list(self): # values_list() is similar to values(), except that the results are # returned as a list of tuples, rather than a list of dictionaries. # Within each tuple, the order of the elements is the same as the order # of fields in the values_list() call. 
# NOTE(review): the statements up to the first `def` below are the tail of a
# values_list() test whose `def` line lies above this chunk. This file is
# Python 2 (`except FieldError, ex` syntax, u'' literals).
identity = lambda x: x
# values_list('headline') yields 1-tuples; default Article ordering applies.
self.assertQuerysetEqual(Article.objects.values_list('headline'), [
    (u'Article 5',), (u'Article 6',), (u'Article 4',), (u'Article 2',),
    (u'Article 3',), (u'Article 7',), (u'Article 1',),
], transform=identity)
self.assertQuerysetEqual(Article.objects.values_list('id').order_by('id'),
    [(self.a1.id,), (self.a2.id,), (self.a3.id,), (self.a4.id,),
     (self.a5.id,), (self.a6.id,), (self.a7.id,)],
    transform=identity)
# flat=True collapses the 1-tuples into bare values.
self.assertQuerysetEqual(
    Article.objects.values_list('id', flat=True).order_by('id'),
    [self.a1.id, self.a2.id, self.a3.id, self.a4.id, self.a5.id,
     self.a6.id, self.a7.id],
    transform=identity)
# An extra() select column is ignored unless named in values_list().
self.assertQuerysetEqual(
    Article.objects.extra(select={'id_plus_one': 'id+1'})
    .order_by('id').values_list('id'),
    [(self.a1.id,), (self.a2.id,), (self.a3.id,), (self.a4.id,),
     (self.a5.id,), (self.a6.id,), (self.a7.id,)],
    transform=identity)
self.assertQuerysetEqual(
    Article.objects.extra(select={'id_plus_one': 'id+1'})
    .order_by('id').values_list('id_plus_one', 'id'),
    [
        (self.a1.id + 1, self.a1.id),
        (self.a2.id + 1, self.a2.id),
        (self.a3.id + 1, self.a3.id),
        (self.a4.id + 1, self.a4.id),
        (self.a5.id + 1, self.a5.id),
        (self.a6.id + 1, self.a6.id),
        (self.a7.id + 1, self.a7.id)
    ],
    transform=identity)
self.assertQuerysetEqual(
    Article.objects.extra(select={'id_plus_one': 'id+1'})
    .order_by('id').values_list('id', 'id_plus_one'),
    [
        (self.a1.id, self.a1.id + 1),
        (self.a2.id, self.a2.id + 1),
        (self.a3.id, self.a3.id + 1),
        (self.a4.id, self.a4.id + 1),
        (self.a5.id, self.a5.id + 1),
        (self.a6.id, self.a6.id + 1),
        (self.a7.id, self.a7.id + 1)
    ],
    transform=identity)
# Traversing relations: one row per (author, article, tag) combination.
self.assertQuerysetEqual(
    Author.objects.values_list('name', 'article__headline',
                               'article__tag__name')
    .order_by('name', 'article__headline', 'article__tag__name'),
    [
        (self.au1.name, self.a1.headline, self.t1.name),
        (self.au1.name, self.a2.headline, self.t1.name),
        (self.au1.name, self.a3.headline, self.t1.name),
        (self.au1.name, self.a3.headline, self.t2.name),
        (self.au1.name, self.a4.headline, self.t2.name),
        (self.au2.name, self.a5.headline, self.t2.name),
        (self.au2.name, self.a5.headline, self.t3.name),
        (self.au2.name, self.a6.headline, self.t3.name),
        (self.au2.name, self.a7.headline, self.t3.name),
    ],
    transform=identity)
# flat=True is only valid with exactly one field.
self.assertRaises(TypeError, Article.objects.values_list, 'id', 'headline',
                  flat=True)

def test_get_next_previous_by(self):
    """get_next_by_FOO()/get_previous_by_FOO() walk pub_date order."""
    # Every DateField and DateTimeField creates get_next_by_FOO() and
    # get_previous_by_FOO() methods. In the case of identical date values,
    # these methods will use the ID as a fallback check. This guarantees
    # that no records are skipped or duplicated.
    self.assertEqual(repr(self.a1.get_next_by_pub_date()),
                     '<Article: Article 2>')
    self.assertEqual(repr(self.a2.get_next_by_pub_date()),
                     '<Article: Article 3>')
    # Extra lookup kwargs filter the candidates for "next".
    self.assertEqual(repr(self.a2.get_next_by_pub_date(headline__endswith='6')),
                     '<Article: Article 6>')
    self.assertEqual(repr(self.a3.get_next_by_pub_date()),
                     '<Article: Article 7>')
    self.assertEqual(repr(self.a4.get_next_by_pub_date()),
                     '<Article: Article 6>')
    # a5 has the latest pub_date, so there is no "next" article.
    self.assertRaises(Article.DoesNotExist, self.a5.get_next_by_pub_date)
    self.assertEqual(repr(self.a6.get_next_by_pub_date()),
                     '<Article: Article 5>')
    self.assertEqual(repr(self.a7.get_next_by_pub_date()),
                     '<Article: Article 4>')
    self.assertEqual(repr(self.a7.get_previous_by_pub_date()),
                     '<Article: Article 3>')
    self.assertEqual(repr(self.a6.get_previous_by_pub_date()),
                     '<Article: Article 4>')
    self.assertEqual(repr(self.a5.get_previous_by_pub_date()),
                     '<Article: Article 6>')
    self.assertEqual(repr(self.a4.get_previous_by_pub_date()),
                     '<Article: Article 7>')
    self.assertEqual(repr(self.a3.get_previous_by_pub_date()),
                     '<Article: Article 2>')
    self.assertEqual(repr(self.a2.get_previous_by_pub_date()),
                     '<Article: Article 1>')

def test_escaping(self):
    """SQL wildcard characters in lookups are quoted automatically."""
    # Underscores, percent signs and backslashes have special meaning in the
    # underlying SQL code, but Django handles the quoting of them automatically.
    a8 = Article(headline='Article_ with underscore',
                 pub_date=datetime(2005, 11, 20))
    a8.save()
    self.assertQuerysetEqual(
        Article.objects.filter(headline__startswith='Article'), [
            '<Article: Article_ with underscore>',
            '<Article: Article 5>',
            '<Article: Article 6>',
            '<Article: Article 4>',
            '<Article: Article 2>',
            '<Article: Article 3>',
            '<Article: Article 7>',
            '<Article: Article 1>',
        ])
    # The literal underscore must not behave as a single-char wildcard.
    self.assertQuerysetEqual(
        Article.objects.filter(headline__startswith='Article_'),
        ['<Article: Article_ with underscore>'])
    a9 = Article(headline='Article% with percent sign',
                 pub_date=datetime(2005, 11, 21))
    a9.save()
    self.assertQuerysetEqual(
        Article.objects.filter(headline__startswith='Article'), [
            '<Article: Article% with percent sign>',
            '<Article: Article_ with underscore>',
            '<Article: Article 5>',
            '<Article: Article 6>',
            '<Article: Article 4>',
            '<Article: Article 2>',
            '<Article: Article 3>',
            '<Article: Article 7>',
            '<Article: Article 1>',
        ])
    # Nor must the percent sign behave as a multi-char wildcard.
    self.assertQuerysetEqual(
        Article.objects.filter(headline__startswith='Article%'),
        ['<Article: Article% with percent sign>'])
    a10 = Article(headline='Article with \\ backslash',
                  pub_date=datetime(2005, 11, 22))
    a10.save()
    self.assertQuerysetEqual(
        Article.objects.filter(headline__contains='\\'),
        ['<Article: Article with \ backslash>'])

def test_exclude(self):
    """exclude() negates the same lookup semantics filter() uses."""
    a8 = Article.objects.create(headline='Article_ with underscore',
                                pub_date=datetime(2005, 11, 20))
    a9 = Article.objects.create(headline='Article% with percent sign',
                                pub_date=datetime(2005, 11, 21))
    a10 = Article.objects.create(headline='Article with \\ backslash',
                                 pub_date=datetime(2005, 11, 22))
    # exclude() is the opposite of filter() when doing lookups:
    self.assertQuerysetEqual(
        Article.objects.filter(headline__contains='Article')
        .exclude(headline__contains='with'), [
            '<Article: Article 5>',
            '<Article: Article 6>',
            '<Article: Article 4>',
            '<Article: Article 2>',
            '<Article: Article 3>',
            '<Article: Article 7>',
            '<Article: Article 1>',
        ])
    self.assertQuerysetEqual(
        Article.objects.exclude(headline__startswith="Article_"), [
            '<Article: Article with \\ backslash>',
            '<Article: Article% with percent sign>',
            '<Article: Article 5>',
            '<Article: Article 6>',
            '<Article: Article 4>',
            '<Article: Article 2>',
            '<Article: Article 3>',
            '<Article: Article 7>',
            '<Article: Article 1>',
        ])
    self.assertQuerysetEqual(
        Article.objects.exclude(headline="Article 7"), [
            '<Article: Article with \\ backslash>',
            '<Article: Article% with percent sign>',
            '<Article: Article_ with underscore>',
            '<Article: Article 5>',
            '<Article: Article 6>',
            '<Article: Article 4>',
            '<Article: Article 2>',
            '<Article: Article 3>',
            '<Article: Article 1>',
        ])

def test_none(self):
    """none() yields an empty queryset that still supports the full API."""
    # none() returns an EmptyQuerySet that behaves like any other QuerySet object
    self.assertQuerysetEqual(Article.objects.none(), [])
    self.assertQuerysetEqual(
        Article.objects.none().filter(headline__startswith='Article'), [])
    self.assertQuerysetEqual(
        Article.objects.filter(headline__startswith='Article').none(), [])
    self.assertEqual(Article.objects.none().count(), 0)
    # update() on an empty queryset touches zero rows.
    self.assertEqual(
        Article.objects.none().update(headline="This should not take effect"),
        0)
    self.assertQuerysetEqual(
        [article for article in Article.objects.none().iterator()], [])

def test_in(self):
    """__in with an empty list matches nothing; its exclude matches all."""
    # using __in with an empty list should return an empty query set
    self.assertQuerysetEqual(Article.objects.filter(id__in=[]), [])
    self.assertQuerysetEqual(Article.objects.exclude(id__in=[]), [
        '<Article: Article 5>',
        '<Article: Article 6>',
        '<Article: Article 4>',
        '<Article: Article 2>',
        '<Article: Article 3>',
        '<Article: Article 7>',
        '<Article: Article 1>',
    ])

def test_error_messages(self):
    """Bad field/lookup names raise FieldError with a helpful message."""
    # Programming errors are pointed out with nice error messages
    try:
        # pub_date_year is missing the double underscore (pub_date__year).
        Article.objects.filter(pub_date_year='2005').count()
        self.fail('FieldError not raised')
    except FieldError, ex:
        self.assertEqual(str(ex), "Cannot resolve keyword 'pub_date_year' "
            "into field. Choices are: author, headline, id, pub_date, tag")
    try:
        # "starts" is not a lookup type ("startswith" is).
        Article.objects.filter(headline__starts='Article')
        self.fail('FieldError not raised')
    except FieldError, ex:
        self.assertEqual(str(ex), "Join on field 'headline' not permitted. "
            "Did you misspell 'starts' for the lookup type?")
from sqlalchemy.orm import sessionmaker from bootstrap import db from models import Author if __name__ == '__main__': engine = create_engine( 'postgresql+psycopg2://postgres:postgres@localhost:5432') conn = engine.connect() try: conn.execute("commit") conn.execute("drop database flask_rest") except Exception as other: print("Can't delete 'flask_rest' because of:", other) try: conn.execute("commit") conn.execute("create database flask_rest") except Exception as other: print("Can't create 'flask_rest' because of:", other) conn.close() from flask_sqlalchemy import SQLAlchemy db.create_all() authors = [Author(firstname='Max', lastname='Ramalho')] db.session.add_all(authors) db.session.commit() db.session.close()
#!/usr/local/bin/python3 from datetime import date from sqlalchemy import create_engine from sqlalchemy.orm import sessionmaker from models import Author, Book # connection with sqlite database engine = create_engine('sqlite:///books_authors.db', echo=True) # get sesion Session = sessionmaker(bind=engine) session = Session() # inserting authors author_1 = Author('Author1') author_2 = Author('Author2') author_list = (author_1, author_2) session.add_all(author_list) session.commit() # inserting books book1 = Book('Book1', date(2019, 1, 1), '123456789') book1.authors.append(author_1) session.add(book1) session.commit() # book query book = session.query(Book).filter(Book.isbn == '123456789').first() print(book)
def populate_authors(n=40): return [ Author(first_name=g.person.name(), last_name=g.person.last_name()) for _ in range(n) ]
def createmy(resquest): print 'meng' a = Author(first_name="ahrca", last_name="bbb") a.save() return HttpResponse("yes")
def logout(): current_user = Author.get_current() if current_user: current_user.logout() return redirect(url_for('post_list'))
def update_book(form, item):
    """Apply an edit form to an existing Book row and persist the changes.

    FIX: the original repeated the same author-sync try/except block three
    times (authors[0]/[1]/[2] against first_name/first_name_1/first_name_2);
    that duplication is factored into ``_sync_author``, and the tag handling
    into ``_sync_tag``. Entry-point signature and observable behavior are
    unchanged.
    """
    # Scalar book fields come straight off the form.
    item.title = form.title.data
    item.table_of_contents = form.table_of_contents.data
    item.language = form.language.data
    item.category = form.category.data
    item.description = form.description.data
    item.isbn = form.isbn.data
    item.original_title = form.original_title.data
    item.publisher = form.publisher.data
    # Only the publication year is collected; pin it to January 1st.
    item.pub_date = datetime(year=int(form.pub_date.data), month=1, day=1)

    _sync_tag(item, form.tag.data)

    # Up to three author slots, matched positionally to the form fields.
    author_slots = [
        (form.first_name.data, form.surname.data),
        (form.first_name_1.data, form.surname_1.data),
        (form.first_name_2.data, form.surname_2.data),
    ]
    for position, (first, last) in enumerate(author_slots):
        _sync_author(item, position, first, last)

    db.session.commit()


def _sync_tag(item, tag_name):
    """Rename the book's current tag, or attach a brand new one.

    NOTE(review): renaming the Tag row found via ``item.tags_string``
    mutates a row that other books may share — worth confirming this is
    intended rather than re-pointing the book at a different tag.
    """
    try:
        tag = Tag.query.filter_by(name=item.tags_string).first()
        # AttributeError (tag is None) means no matching tag exists yet.
        tag.name = tag_name
    except AttributeError:
        new_tag = Tag(name=tag_name)
        item.tags.append(new_tag)
        db.session.add(new_tag)
    db.session.commit()


def _sync_author(item, position, first, last):
    """Update the author at ``position`` on the book, or create one.

    If the book already has an author in that slot, its names are
    overwritten with the form values. Otherwise (IndexError), a new
    Author is attached — but only when both name fields were filled in
    and no identical Author row already exists.
    """
    try:
        existing = item.authors[position]  # raises IndexError when slot empty
        author = Author.query.filter_by(
            first_name=existing.first_name,
            last_name=existing.last_name).first()
        author.first_name = first
        author.last_name = last
    except IndexError:
        if first and last:
            author = Author.query.filter_by(
                first_name=first, last_name=last).first()
            if not author:
                new_author = Author(first_name=first, last_name=last)
                item.authors.append(new_author)
                db.session.add(new_author)
    db.session.commit()