def testVersionedUnique(self):
    """ use versioned_unique to force unique on field value """
    # NOTE(review): this mutates the Author *class* and never removes the
    # attribute again — may leak into other tests; confirm teardown exists.
    self.assertFalse(hasattr(models.Author, 'versioned_unique'))
    setattr(models.Author, 'versioned_unique', ['name'])
    self.assertEqual(models.Author.versioned_unique, ['name'])
    a1 = models.Author.objects.get(vid=1)
    # should fail on second(duplicate) instance
    a2 = models.Author(name=a1.name)
    with self.assertRaises(ValidationError):
        a2.validate_unique()
    # multiple versions are not affected (can co-exists):
    # publishing creates a second version row for the same object_id
    klass = a1.get_version_class()
    self.assertEqual(
        klass.normal.filter(object_id=a1.object_id).count(), 1)
    a1.publish()
    self.assertEqual(
        klass.normal.filter(object_id=a1.object_id).count(), 2)
    # both a DRAFT and a PUBLISHED version now exist for this object
    self.assertTrue(
        klass.normal.filter(object_id=a1.object_id,
                            state=a1.DRAFT).exists())
    self.assertTrue(
        klass.normal.filter(object_id=a1.object_id,
                            state=a1.PUBLISHED).exists())
    # the original instance still validates: its own versions don't
    # count as duplicates
    a1.validate_unique()
def edit_author(id_author):
    """Handle the author-edit form: validate the submitted name/book list
    and replace the stored author row.

    NOTE(review): the author row is deleted and re-created, so the author
    receives a new primary key on every edit — confirm nothing references
    the old id.
    """
    author = db.session.query(
        models.Author).filter(models.Author.id == id_author).first()
    books = set(string_to_list_process(request.form['books'].split(',')))
    name_auth = request.form['name']
    # nothing submitted at all -> bail out without touching the row
    if not name_auth.strip() and not books:
        flash(u'Changes have not been applied', 'error')
        return redirect(url_for('authors'))
    if not name_auth.strip():
        # only the book list changed; keep the existing name
        name_auth = author.name
    else:
        # reject a rename that collides with a *different* author's name
        q = db.session.query(
            models.Author).filter(models.Author.name == name_auth)
        if db.session.query(q.exists()).scalar() and name_auth != author.name:
            # fixed grammar of the user-facing message
            # (was: "This name is already exists")
            flash(u'This name already exists', 'error')
            return redirect(url_for('authors'))
    db.session.delete(author)
    db.session.commit()
    author = models.Author(name=name_auth, books=get_checked_books(books))
    db.session.add(author)
    db.session.commit()
    return redirect(url_for('authors'))
def insertar_imagenes(lang='es'):
    '''Extract image & author rows from MySQL and save them as documents.

    :param lang: language suffix selecting the target collection
        (``AraImage_<lang>``).
    '''
    # NOTE(review): sql_images is defined but never executed here —
    # only the author-joined query below is used.
    sql_images = '''SELECT imagen as image, id_imagen as id_image, fecha_creacion as created, ultima_modificacion as modificated FROM imagenes'''
    sql_images_autor = '''SELECT imagen as image, id_imagen as id_image, fecha_creacion as created, ultima_modificacion as modificated, autores.id_autor as id_author, autor as first_name, email_autor as email FROM autores, imagenes WHERE imagenes.id_autor = autores.id_autor'''
    cursor.execute(sql_images_autor)
    # a different collection is generated per language
    Image = getattr(models, 'AraImage_' + lang)
    for im in cur_a_dict(cursor):
        # pop the author columns so only image fields remain in `im`;
        # names come from the DB as latin-1 bytes (py2 str) — decode them
        autor = models.Author(id_author=im.pop('id_author'),
                              first_name=im.pop('first_name').decode('latin1'),
                              email=im.pop('email'))
        _im = Image(**im)
        _im.author = autor
        _im.save()
def testVersionedUnique(self):
    """Fields listed in ``versioned_unique`` must be unique per object,
    while a new version of the *same* object may reuse the value."""
    # the attribute must not pre-exist; install it for this test
    self.assertFalse(hasattr(models.Author, 'versioned_unique'))
    models.Author.versioned_unique = ['name']
    self.assertEqual(models.Author.versioned_unique, ['name'])
    existing = models.Author.objects.get(vid=1)
    # a brand-new author duplicating the name is rejected
    duplicate = models.Author(name=existing.name)
    with self.assertRaises(ValidationError):
        duplicate.validate_unique()
    # another version of the same object passes validation
    new_version = models.Author(object=existing.object, name=existing.name)
    new_version.validate_unique()
def post(self, relpath):
    """Handle POSTs to the database admin pages.

    ``relpath`` selects the record type: ``'live'`` updates the singleton
    LiveData entry; ``'author'`` creates an Author from the submitted form,
    re-rendering the form on a datastore error.
    """
    if relpath == 'live':
        # Get first (and only) result.
        live_data = models.LiveData.all().get()
        if live_data is None:
            live_data = models.LiveData()
        live_data.gdl_page_url = self.request.get('gdl_page_url') or None
        live_data.put()
        return self.redirect('/database/live')
    elif relpath == 'author':
        try:
            given_name = self.request.get('given_name')
            family_name = self.request.get('family_name')
            author = models.Author(
                # key is the lower-cased concatenated name: one per person
                key_name=''.join([given_name, family_name]).lower(),
                given_name=given_name,
                family_name=family_name,
                org=self.request.get('org'),
                unit=self.request.get('unit'),
                city=self.request.get('city'),
                state=self.request.get('state'),
                country=self.request.get('country'),
                homepage=self.request.get('homepage') or None,
                google_account=self.request.get('google_account') or None,
                twitter_account=self.request.get('twitter_account') or None,
                email=self.request.get('email') or None,
                lanyrd=self.request.get('lanyrd') == 'on')
            lat = self.request.get('lat')
            lon = self.request.get('lon')
            # only set a location when both coordinates were supplied
            if lat and lon:
                author.geo_location = db.GeoPt(float(lat), float(lon))
            author.put()
        except db.Error:
            # was `except db.Error, e`: py2-only syntax, and `e` was unused.
            # TODO: Doesn't repopulate lat/lng or return errors for it.
            form = models.AuthorForm(self.request.POST)
            if not form.is_valid():
                sorted_profiles = models.get_sorted_profiles(
                    update_cache=True)
                template_data = {
                    'sorted_profiles': sorted_profiles,
                    'profile_amount': len(sorted_profiles),
                    'author_form': form
                }
                return self.render(
                    data=template_data,
                    template_path='database/author_new.html',
                    relpath=relpath)
        else:
            # only redirect when the put() succeeded
            self.redirect('/database/author')
def _getInstance(cls):
    """Instantiate the model class named *cls* with placeholder arguments."""
    import models
    target = models.__dict__.get(cls)
    # inspect __init__ (self + args) to choose the right constructor call
    sig = signature(target.__init__)
    if len(sig.parameters) == 2:
        # single-argument constructor: an empty string suffices
        return target("")
    # otherwise assume a (book, author) constructor with placeholder models
    return target(models.Book(""), models.Author(""))
def addauthor():
    """Create a new Author from the posted ``name`` form field.

    Always answers ``{'status': 'success'}`` (preserving the original API
    contract); failures are logged and the session is rolled back so it
    stays usable.
    """
    try:
        name = request.form['name']
        db.session.add(models.Author(name=name))
        db.session.commit()
    except Exception:
        # was a bare `except:` — never swallow SystemExit/KeyboardInterrupt.
        # keep the session usable after a failed commit
        db.session.rollback()
        # sys.exc_info()[2] printed the traceback *object*, not the error;
        # [1] is the exception instance
        print(sys.exc_info()[1])
    finally:
        print('closed')
    return jsonify({'status': 'success'})
def __init__(self, author_id, parent=None):
    """Dialog for editing an existing author or creating a new one.

    :param author_id: database id of the author to edit, or a falsy
        value to start with a blank ``models.Author``.
    :param parent: optional parent widget.
    """
    super(EditAuthorDialog, self).__init__(parent)
    self.ui = Ui_EditAuthorDialog()
    self.ui.setupUi(self)
    if author_id:
        self.id = author_id
        # populates self.author from the database record
        self.fetch_from_db()
    else:
        self.id = None
        self.author = models.Author()
    # persist changes only when the dialog is accepted
    self.accepted.connect(self.save_to_db)
def author_details(author_id, connection):
    """Look up a single author and hydrate their publication data.

    Arguments:
      - author_id: the Rxivist-issued ID of the author being queried.
      - connection: a database Connection object.

    Returns:
      - An Author object carrying the author's publications and
        contact information.
    """
    author = models.Author(author_id)
    author.GetInfo(connection)
    return author
def _AddTestAuthors(self):
    """Load author fixtures from database/profiles.yaml into the datastore."""
    memcache.flush_all()
    # `with open(...)` replaces the py2-only file() builtin and guarantees
    # the handle is closed even if a profile entry fails to load
    with open(os.path.dirname(__file__) + '/database/profiles.yaml', 'r') as f:
        # NOTE(review): yaml.load_all is unsafe on untrusted input;
        # acceptable only because this fixture file ships with the tests
        for profile in yaml.load_all(f):
            author = models.Author(
                key_name=unicode(profile['id']),
                given_name=unicode(profile['name']['given']),
                family_name=unicode(profile['name']['family']),
                org=unicode(profile['org']['name']),
                unit=unicode(profile['org']['unit']),
                city=profile['address']['locality'],
                state=profile['address']['region'],
                country=profile['address']['country'],
                google_account=str(profile.get('google')),
                twitter_account=profile.get('twitter'),
                email=profile['email'],
                lanyrd=profile.get('lanyrd', False),
                homepage=profile['homepage'],
                geo_location=db.GeoPt(profile['address']['lat'],
                                      profile['address']['lon']))
            author.put()
def buildObjects(db):
    """Seed the database with one user, one author, two books and a library."""
    user = models.User(name="Meghana", email="*****@*****.**")
    user.set_password('admin')
    author = models.Author(name='John Williams')
    book_one = models.Book(title="Once upon a time", description="Great book")
    book_two = models.Book(title="Second upon a time",
                           description="Another classic")
    library = models.Library()
    # both books share the same author; the library belongs to the user
    book_one.author = author
    book_two.author = author
    library.user = user
    # attach book_two to the library through the association object
    assoc = models.AssociationBookLibrary(library=library)
    assoc.book = book_two
    for obj in (author, book_one, book_two, user, library):
        db.session.add(obj)
    db.session.commit()
def commit_to_db(self, post_data: dict):
    """Save one scraped post (author, post, tag, comments) to the DB.

    *************************************DICT STRUCTURE*************************************
    url: https://geekbrains.ru/posts/bezopasnost-veb-novyj-fakultativ-ot-hacktory
    post_title: «Безопасность веб» — новый факультатив от Hacktory
    post_publish_date: 2020-10-23T11:48:00+03:00
    image_url: https://d2xzmw6cctk25h.cloudfront.net/geekbrains/public/ckeditor_assets/pictures/9860/retina-f0d622a24fa84ace868a3e5606fb1c09.png
    author: Geek Brains
    author_url: https://geekbrains.ru/users/63
    post_tags: ['web', 'программирование']
    comments: []

    :param post_data: scraped fields as shown above.
    :return: None
    """
    """
    *************************************DB STRUCTURE*************************************
    Post
        id = Column(Integer, autoincrement=True, primary_key=True)
        url = Column(String, unique=True, nullable=False)
        title = Column(String, unique=False, nullable=False)
        publish_date = Column(Datetime, unique=False, nullable=False)
        img_url = Column(String, unique=False, nullable=True)
        author_id = Column(Integer, ForeignKey('author.id'))
        author = relationship("Author", back_populates='posts')
        tag = relationship('Tag', secondary=tag_post, back_populates='posts')
    Writer
        id = Column(Integer, autoincrement=True, primary_key=True)
        name = Column(String, unique=False, nullable=False)
        url = Column(String, unique=True, nullable=False)
        posts = relationship("Post")
    Tag
        id = Column(Integer, autoincrement=True, primary_key=True)
        name = Column(String, unique=False, nullable=False)
        url = Column(String, unique=True, nullable=False)
        posts = relationship('Post', secondary=tag_post)
    """
    # reuse an existing author row (matched by unique url) if present
    author = models.Author(name=post_data['author'],
                           url=post_data['author_url'])
    author_check = self.db.query(
        models.Author).filter(models.Author.url == author.url).first()
    if author_check:
        author = author_check
    self.db.add(author)
    post = models.Post(url=post_data['url'],
                       title=post_data['post_title'],
                       publish_date=post_data['post_publish_date'],
                       img_url=post_data['image_url'],
                       author=author)
    self.db.add(post)
    # NOTE(review): [0][0]/[0][1] implies post_tags is a list of
    # (name, url) pairs, but the example above shows plain strings —
    # confirm the scraper's actual shape; only the first tag is used here.
    tag = models.Tag(name=post_data['post_tags'][0][0],
                     url=post_data['post_tags'][0][1],
                     posts=[post])
    tag_check = self.db.query(
        models.Tag).filter(models.Tag.url == tag.url).first()
    if tag_check:
        tag = tag_check
    self.db.add(tag)
    post.tag.append(tag)
    # presumably each comment is an (author_name, author_url, text)
    # triple — TODO confirm against the scraper
    for itm in post_data['comments']:
        author = models.Author(name=itm[0], url=itm[1])
        author_check = self.db.query(
            models.Author).filter(models.Author.url == author.url).first()
        if author_check:
            author = author_check
        self.db.add(author)
        comment = models.Comment(text=itm[2], author=author, posts=post)
        self.db.add(comment)
    self.db.commit()
def insert_articles_into_db(collection_of_articles):
    """
    Insert a dictionary of publications into separate database
    rows from a BibTex file.

    Fixes vs. the original: the "journal"/"booktitle" exclusion test was
    dead code (neither key is in paper_search_fields), and a record with
    neither key raised KeyError later — the journal now defaults to "".
    """
    paper_search_fields = ["title", "author", "year", "month", "volume",
                           "number", "pages", "abstract", "doi", "keywords"]

    # Collections of model instances, written in bulk at the end.
    paper_models = []
    author_models = []
    journal_models = []
    journal_list = []

    for paper_item in collection_of_articles:
        # Copy the known fields, defaulting absent ones to "".
        paper_record = {field: paper_item.get(field, "")
                        for field in paper_search_fields}

        # "journal" and "booktitle" are ambiguous in BibTex; either one
        # maps onto the journal column (booktitle wins when both exist).
        paper_record["journal"] = paper_item.get("journal", "")
        if "booktitle" in paper_item:
            paper_record["journal"] = paper_item["booktitle"]

        paper_record["authors"] = paper_item["author"]

        # BibTex separates authors with " and ".
        for author_entry in paper_item["author"].split(" and "):
            # The model is created but not written to the database yet.
            author_models.append(
                models.Author(full_name=author_entry.strip()))

        # Deduplicate journals by name.
        if paper_record["journal"] not in journal_list:
            journal_models.append(
                models.Journal(name=paper_record["journal"]))
            journal_list.append(paper_record["journal"])

        paper_models.append(models.Paper(
            paper_title=paper_record["title"],
            paper_year=paper_record["year"],
            paper_volume=paper_record["volume"],
            paper_issue=paper_record["number"],
            paper_pages=paper_record["pages"],
            paper_month=paper_record["month"],
            paper_doi=paper_record["doi"],
            paper_abstract=paper_record["abstract"],
            paper_keywords=paper_record["keywords"],
            paper_authors=paper_record["authors"],
            paper_journal=paper_record["journal"]))

    # The models are written in bulk.
    models.Paper.objects.bulk_create(paper_models)
    models.Journal.objects.bulk_create(journal_models)
    models.Author.objects.bulk_create(author_models)
    return
def insert_articles_from_web(collection_of_articles):
    """
    Insert a dictionary of publications into separate database
    rows from the IEEE gateway.

    Fixes vs. the original: (1) one shared author_record dict was appended
    to all_author_list for every author, so every entry aliased the last
    author's name — each author now gets its own dict; (2) missing
    "arnumber" sources and missing journal fields no longer raise KeyError;
    (3) a leftover debug print per field was removed.
    """
    paper_search_fields = ["title", "authors", "affiliations", "pubtitle",
                           "punumber", "pubtype", "publisher", "volume",
                           "issue", "py", "spage", "epage", "abstract",
                           "issn", "arnumber", "doi", "publicationId",
                           "partnum", "mdurl", "pdf", "term", "month"]
    journal_search_fields = ["pubtitle", "pubtype", "publisher", "issn"]

    all_paper_list = []
    all_author_list = []
    all_journal_list = []
    paper_models = []
    author_models = []
    journal_models = []

    for paper_item in collection_of_articles:
        # These fields need special handling below instead of a 1:1 copy.
        special = ("spage", "epage", "term", "publicationId",
                   "partnum", "arnumber")
        paper_record = {field: paper_item.get(field, "")
                        for field in paper_search_fields
                        if field not in special}

        # Start/end page merge into a single "spage-epage" value.
        paper_record["pages"] = paper_item.get("spage", "") + "-"
        paper_record["pages"] += paper_item.get("epage", "")

        # "term" is a list of keyword strings; join as "a, b, c".
        paper_record["keywords"] = ", ".join(paper_item.get("term", []))

        # The article number comes from the first available source.
        paper_record["arnumber"] = ""
        for arnumber_key in ("publicationId", "partnum", "arnumber"):
            if arnumber_key in paper_item:
                paper_record["arnumber"] = paper_item[arnumber_key]
                break

        # Journal fields are a subset of the paper fields copied above.
        journal_record = {field: paper_record[field]
                          for field in journal_search_fields}

        # Authors are separated by ; but need to be separated by " and ".
        author_names = []
        for author_entry in paper_item["authors"].split(";"):
            author_entry = author_entry.strip()
            # fresh dict per author (the old shared dict aliased them all)
            all_author_list.append({"full_name": author_entry})
            author_models.append(models.Author(full_name=author_entry))
            author_names.append(author_entry)
        paper_record["authors"] = " and ".join(author_names)

        paper_models.append(models.Paper(
            paper_title=paper_record["title"],
            paper_year=paper_record["py"],
            paper_volume=paper_record["volume"],
            paper_issue=paper_record["issue"],
            paper_number=paper_record["punumber"],
            paper_pages=paper_record["pages"],
            paper_month=paper_record["month"],
            paper_doi=paper_record["doi"],
            paper_abstract=paper_record["abstract"],
            paper_keywords=paper_record["keywords"],
            paper_journal=paper_record["pubtitle"],
            paper_authors=paper_record["authors"],
            paper_arnumber=paper_record["arnumber"],
            paper_url=paper_record["mdurl"],
            paper_pdflink=paper_record["pdf"],
            publisher_organization=paper_record["publisher"],
            publisher_issn_number=paper_record["issn"],
            publisher_type=paper_record["pubtype"]))
        all_paper_list.append(paper_record)

        journal_models.append(models.Journal(
            name=paper_record["pubtitle"],
            organization=paper_record["publisher"],
            issn_number=paper_record["issn"],
            pub_type=paper_record["pubtype"]))
        all_journal_list.append(journal_record)

    # The models are written in bulk.
    models.Paper.objects.bulk_create(paper_models)
    models.Journal.objects.bulk_create(journal_models)
    models.Author.objects.bulk_create(author_models)
    return
def post(self, relpath):
    """Handle POSTs to the database admin pages.

    ``relpath`` selects the record type: 'live' (singleton LiveData),
    'author' (new Author) or 'resource' (create/update a Resource).
    """
    if relpath == 'live':
        # Get first (and only) result.
        live_data = models.LiveData.all().get()
        if live_data is None:
            live_data = models.LiveData()
        live_data.gdl_page_url = self.request.get('gdl_page_url') or None
        live_data.put()
        return self.redirect('/database/live')
    elif relpath == 'author':
        try:
            given_name = self.request.get('given_name')
            family_name = self.request.get('family_name')
            author = models.Author(
                # key is the lower-cased concatenated name
                key_name=''.join([given_name, family_name]).lower(),
                given_name=given_name,
                family_name=family_name,
                org=self.request.get('org'),
                unit=self.request.get('unit'),
                city=self.request.get('city'),
                state=self.request.get('state'),
                country=self.request.get('country'),
                homepage=self.request.get('homepage') or None,
                google_account=self.request.get('google_account') or None,
                twitter_account=self.request.get('twitter_account') or None,
                email=self.request.get('email') or None,
                lanyrd=self.request.get('lanyrd') == 'on')
            lat = self.request.get('lat')
            lon = self.request.get('lon')
            # only set a location when both coordinates were supplied
            if lat and lon:
                author.geo_location = db.GeoPt(float(lat), float(lon))
            author.put()
        except db.Error:
            # NOTE(review): datastore errors are silently swallowed here —
            # the user gets no feedback; confirm this is intentional
            pass
        else:
            self.redirect('/database/author')
    elif relpath == 'resource':
        author_key = models.Author.get_by_key_name(
            self.request.get('author'))
        author_key2 = models.Author.get_by_key_name(
            self.request.get('second_author'))
        # drop the second author if it duplicates the first
        if author_key.key() == author_key2.key():
            author_key2 = None
        # comma-separated tags, trimmed, empties removed
        tags = (self.request.get('tags') or '').split(',')
        tags = [x.strip() for x in tags if x.strip()]
        browser_support = [
            x.lower() for x in (self.request.get_all('browser_support') or [])
        ]
        pub = datetime.datetime.strptime(
            self.request.get('publication_date'), '%Y-%m-%d')
        # NOTE(review): update_date is read but never used below
        update_date = self.request.get('update_date') or None
        tutorial = None
        if self.request.get('post_id'):
            tutorial = models.Resource.get_by_id(
                int(self.request.get('post_id')))
        # Updating existing resource.
        if tutorial:
            try:
                #TODO: This is also hacky.
                tutorial.title = self.request.get('title')
                tutorial.description = self.request.get('description')
                tutorial.author = author_key
                tutorial.second_author = author_key2
                tutorial.url = self.request.get('url') or None
                tutorial.browser_support = browser_support
                tutorial.update_date = datetime.date.today()
                tutorial.publication_date = datetime.date(
                    pub.year, pub.month, pub.day)
                tutorial.tags = tags
                tutorial.draft = self.request.get('draft') == 'on'
                tutorial.social_url = unicode(
                    self.request.get('social_url') or '')
            except TypeError:
                pass
        else:
            # Create new resource.
            try:
                tutorial = models.Resource(
                    title=self.request.get('title'),
                    description=self.request.get('description'),
                    author=author_key,
                    second_author=author_key2,
                    url=self.request.get('url') or None,
                    browser_support=browser_support,
                    update_date=datetime.date.today(),
                    publication_date=datetime.date(pub.year, pub.month,
                                                   pub.day),
                    tags=tags,
                    draft=self.request.get('draft') == 'on',
                    social_url=self.request.get('social_url') or None)
            except TypeError:
                pass
        tutorial.put()
        # TODO: Don't use flush_all. Use flush_all_async() or only purge tutorials.
        # Once new entry is saved, flush memcache.
        memcache.flush_all()
        return self.redirect('/database/')
def testNew(self):
    """Saving an Author creates a matching AuthorBase row."""
    author = models.Author(name='straight')
    author.save()
    base_query = models.AuthorBase.objects.filter(pk=author.object_id)
    self.assertTrue(base_query.exists())
def add_author(name, country, birth_year):
    """Persist a new author and return the generated primary key."""
    new_author = models.Author(name, country, birth_year)
    session = models.db.session
    session.add(new_author)
    session.commit()
    return new_author.id
def test():
    """Reset the database and seed it with demo users, authors, books
    and reviews for manual testing."""
    # wipe all tables first so the seed data is deterministic
    models.User.query.delete()
    models.Book.query.delete()
    models.Review.query.delete()
    models.Author.query.delete()
    # Adding users
    user1 = models.User(
        'admin', '*****@*****.**', 'Sweden',
        'Send me an email if a book is missing in the database.',
        str(datetime.date.today()), True)
    models.db.session.add(user1)
    models.db.session.commit()
    # NOTE(review): user1 is rebound here — the admin row above is already
    # committed, so later reviews are attributed to Pelle, not admin.
    user1 = models.User('Pelle Nordfors', '*****@*****.**', 'Sweden',
                        'hi everyone!', str(datetime.date.today()), False)
    models.db.session.add(user1)
    models.db.session.commit()
    user2 = models.User(
        'mrs. Lovett', '*****@*****.**', 'England',
        '''A great fan of fiction. Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. ''',
        str(datetime.date.today()), False)
    models.db.session.add(user2)
    models.db.session.commit()
    user3 = models.User('Elphaba Thropp', '*****@*****.**', 'Oz',
                        'I like books. Especially The Grimmerie!',
                        str(datetime.date.today()), False)
    models.db.session.add(user3)
    models.db.session.commit()
    user4 = models.User('George Banks', '*****@*****.**', 'England',
                        'Hi! Mary Poppins in my favorite book!',
                        str(datetime.date.today()), False)
    models.db.session.add(user4)
    models.db.session.commit()
    user5 = models.User('Magda Keller', '*****@*****.**', 'Hungary',
                        'Umm... I rarely read at all.',
                        str(datetime.date.today()), False)
    models.db.session.add(user5)
    models.db.session.commit()
    # Adding Lolita by Nabokov
    # (books are never added to the session directly — presumably the
    # written_by relationship cascades the insert; verify in models)
    author = models.Author('Vladimir Nabokov', 'Russia', 1899)
    book = models.Book(
        'Lolita', 1955,
        'A man marries his landlady so he can take advantage of her daughter.',
        'English')
    book.written_by.append(author)
    models.db.session.add(author)
    models.db.session.commit()
    review = models.Review('Awesome!', 'blabla', 9, 'Swedish',
                           str(datetime.date.today()), user1, book)
    models.db.session.add(review)
    review = models.Review('This is filth!', 'blablabla', 2, 'English',
                           str(datetime.date.today()), user2, book)
    models.db.session.add(review)
    models.db.session.commit()
    database_helper.update_avg_score(book.id)
    # Adding It by King
    author = models.Author('Stephen King', 'USA', 1947)
    book = models.Book(
        'It', 1986,
        'In 1960, seven outcast kids known as "The Loser Club" fight an evil demon.',
        'English')
    book.written_by.append(author)
    models.db.session.add(author)
    models.db.session.commit()
    database_helper.update_avg_score(book.id)
    # Adding The Shining by King
    book = models.Book(
        'The Shining', 1977,
        'A recovering alcoholic and his family move into a haunted hotel as caretakers.',
        'English')
    book.written_by.append(author)
    models.db.session.add(author)
    models.db.session.commit()
    database_helper.update_avg_score(book.id)
    # Adding Carrie by King
    book = models.Book(
        'Carrie', 1974,
        'A classic horror tale about Carrie White, a shy girl.',
        'English')
    book.written_by.append(author)
    models.db.session.add(author)
    models.db.session.commit()
    database_helper.update_avg_score(book.id)
    # Adding Misery by King
    book = models.Book(
        'Misery', 1987,
        'Paul Sheldon, a successful novelist, is rescued after a snow storm by his "Number One Fan".',
        'English')
    book.written_by.append(author)
    models.db.session.add(author)
    models.db.session.commit()
    database_helper.update_avg_score(book.id)
def testNew(self):
    """A freshly saved author is persisted and starts in DRAFT state."""
    new_author = models.Author(name='MoYan')
    new_author.save()
    fetched = models.Author.objects.get(name='MoYan')
    # a primary key proves the row was written
    self.assertTrue(fetched.pk)
    self.assertEqual(fetched.state, models.Author.DRAFT)
def get_author_from_db(scholar_id, session):
    """Fetch the Author with *scholar_id*, or a blank Author if none exists."""
    existing = (session.query(models.Author)
                .filter_by(scholar_id=scholar_id)
                .first())
    return existing if existing is not None else models.Author()
def get_article_authors(self, url, retry_count=0):
    """ Returns a list of author objects when given a preprint URL.

    NOTE(review): on repeated request failure this returns the tuple
    (None, None) instead of a list — callers must handle both shapes.
    Raises NameError when the page keeps answering non-200 (papers
    still being "processed").
    """
    self.log.record(f'Getting authors for {url}')
    try:
        # author metadata lives on the article-metrics variant of the page
        resp = self.session.get(f"{url}.article-metrics", timeout=10)
    except Exception as e:
        if retry_count < 3:
            self.log.record(
                f"Error requesting article authors. Retrying: {e}", "error")
            return self.get_article_authors(url, retry_count + 1)
        else:
            self.log.record(
                f"Error AGAIN requesting article authors. Bailing: {e}",
                "error")
            return (None, None)
    if resp.status_code != 200:
        self.log.record(f" Got weird status code: {resp.status_code}", 'warn')
        if retry_count < 1:  # only retry once
            time.sleep(5)
            return self.get_article_authors(url, retry_count + 1)
        else:
            # 403s here appear to be mostly caused by papers being "processed"
            raise NameError
    # Figure out the date this was posted
    datestring = helpers.Find_posted_date(resp)
    self.log.record(f'FOUND DATE: {datestring}', 'debug')
    # Then get the authors: each author is a run of citation_author*
    # meta tags starting with "citation_author" itself
    authors = []
    author_tags = resp.html.find('meta[name^="citation_author"]')
    current_name = ""
    current_institution = ""
    current_email = ""
    current_orcid = ""
    for tag in author_tags:
        if tag.attrs["name"] == "citation_author":
            if current_name != "":  # if this isn't the first author
                # flush the previous author before starting the next one
                authors.append(
                    models.Author(current_name, current_institution,
                                  current_email, datestring, current_orcid))
            current_name = tag.attrs["content"]
            current_institution = ""
            current_email = ""
            current_orcid = ""
        elif tag.attrs["name"] == "citation_author_institution":
            current_institution = tag.attrs["content"]
        elif tag.attrs["name"] == "citation_author_email":
            current_email = tag.attrs["content"]
        elif tag.attrs["name"] == "citation_author_orcid":
            current_orcid = tag.attrs["content"]
    # since we record each author once we find the beginning of the
    # next author's entry, the last step has to be to record whichever
    # author we were looking at when the author list ended:
    if current_name != "":  # skip when no author tag was found at all
        authors.append(
            models.Author(current_name, current_institution, current_email,
                          datestring, current_orcid))
    return authors
def add_author_to_db(author, session, org=None):
    """Upsert a scraped *author* (scholar profile) into the database.

    :param author: scraped author object (scholar_id, name, metrics,
        citations_per_year, coauthors, publications).
    :param session: SQLAlchemy session.
    :param org: optional Google scholar organization id used for
        organization assignment.
    """
    # fetch the existing row or a blank Author, then overwrite its fields
    to_add = get_author_from_db(author.scholar_id, session)
    to_add.scholar_id = author.scholar_id
    to_add.name = author.name
    to_add.email_domain = author.email_domain
    to_add.total_citations = author.total_citations
    to_add.h_index = author.h_index
    to_add.i10_index = author.i10_index
    # Try to assign organization by a tag in the html/Google Org_id
    if not to_add.organization and org:
        candidate_org = session.query(models.Organization).filter_by(
            scholar_org_id = org).first()
        if candidate_org:
            to_add.organization = candidate_org
            to_add.auto_org_assignment = True
    # Try to assign organization by email domain: borrow the organization
    # of a colleague who shares the domain, preferring its root ancestor
    if not to_add.organization and author.email_domain:
        clg = session.query(models.Author).filter_by(
            email_domain = author.email_domain).first()
        candidate_org = clg.organization if clg else None
        if candidate_org and candidate_org.parent:
            to_add.organization = candidate_org.ancestors()[-1]
            to_add.auto_org_assignment = True
        elif candidate_org:
            to_add.organization = candidate_org
            to_add.auto_org_assignment = True
    #
    # LIST:
    # citations_per_year
    # NOTE(review): this list is built but never assigned to to_add
    # (unlike coauthors/publications below) — likely a missing
    # `to_add.citations_per_year = author_citations_per_year`; confirm.
    author_citations_per_year = []
    for cpy in author.citations_per_year:
        citations_per_year = session.query(
            models.AuthorCitationsPerYear).filter_by(
                author_id = to_add.id, year = cpy.year).first()
        if citations_per_year is None:
            citations_per_year = models.AuthorCitationsPerYear()
            citations_per_year.year = cpy.year
        citations_per_year.citations = cpy.citations
        author_citations_per_year.append(citations_per_year)
    #
    # LIST:
    # coauthors — reuse existing rows, creating stubs for unknown ids
    author_coauthors = []
    for co in author.coauthors:
        coauthor = session.query(models.Author).filter_by(
            scholar_id = co.scholar_id).first()
        if coauthor is None:
            coauthor = models.Author()
            coauthor.scholar_id = co.scholar_id
        author_coauthors.append(coauthor)
    to_add.coauthors = author_coauthors
    #
    # LIST:
    # publications — upsert each publication by scholar_id
    author_publications = []
    for pub in author.publications:
        publication = session.query(models.Publication).filter_by(
            scholar_id = pub.scholar_id).first()
        if publication is None:
            publication = models.Publication()
            publication.scholar_id = pub.scholar_id
        publication.title = pub.title
        publication.year_of_publication = pub.year_of_publication
        publication.total_citations = pub.total_citations
        author_publications.append(publication)
    to_add.publications = author_publications
    to_add.retrieved_at = datetime.datetime.now()
    session.add(to_add)
    session.commit()