def mutate_and_get_payload(cls, root, info, **kwargs):
    """Create (or update, when ``pk`` is supplied) a Publication from mutation input.

    Validates a shared-secret token first; on mismatch returns the string
    "Invalid token" (kept as-is for backward compatibility with callers).

    :param root: graphene root object (unused)
    :param info: graphene resolve info (unused)
    :param kwargs: mutation input fields
    :return: AddPublication payload wrapping the saved publication, or the
             string "Invalid token" when the token check fails
    """
    # Check the shared-secret token.
    token = kwargs.get('token', None)
    if token != '******************':
        return "Invalid token"
    publication = Publication(
        title=kwargs.get('title'),
        preview_text=kwargs.get('preview_text', ''),
        detail_text=kwargs.get('detail_text', ''),
        image=kwargs.get('image', ''),
        rating=kwargs.get('rating', 0),
        user_id=kwargs.get('user_id', 5227),  # NOTE(review): magic default user id -- confirm
        date_create=kwargs.get('date_create'),
        show_counter=kwargs.get('show_counter'),
    )
    if kwargs.get('pk'):
        # An explicit pk makes save() update an existing row instead of inserting.
        publication.pk = kwargs.get('pk')
    if kwargs.get('age'):
        publication.age = kwargs.get('age', 0)
    publication.save()
    # M2M .set() writes the through table directly, so no extra save() is needed.
    if kwargs.get('author'):
        publication.author.set(kwargs.get('author'))
    if kwargs.get('book'):
        try:
            publication.book.set(kwargs.get('book'))
        except Exception:
            # Best-effort: invalid book ids are silently ignored
            # (original behaviour preserved; consider surfacing the error).
            pass
    return AddPublication(publication=publication)
def test_clean_pass(self):
    """clean() accepts a publication that has both a PDF and a source file."""
    stub = mock.Mock(spec=Publication)
    stub.title = 'Title'
    stub.source_creation = File(BytesIO(), name='lol')
    stub.pdf_creation = File(BytesIO(), name='lol')
    # Must complete without raising.
    Publication.clean(stub)
def post(self, request):
    """Handle submission of the publication form.

    Binds the posted data to a fresh Publication owned by the current user,
    saves it when valid, and re-renders the form page either way.

    :param request: HttpRequest object
    :return: HttpResponse rendering the publication form template
    """
    publication = Publication()
    publication.publisher = request.user
    form = PublicationForm(request.POST, request.FILES, instance=publication)
    if form.is_valid():
        # Persist the publication (return value intentionally unused).
        form.save()
        # Present a fresh, empty form after a successful save.
        form = PublicationForm()
        # Flash a success message (user-facing text kept verbatim).
        messages.success(request, 'Publicación creada correctamente')
    context = {'form': form, 'navbar': "publication-form"}
    return render(request, 'publications/publication_form.html', context)
def test_clean_error(self):
    """clean() rejects a publication whose source file is missing."""
    stub = mock.Mock(spec=Publication)
    stub.title = 'Title'
    stub.pdf_creation = File(BytesIO(), name='lol')
    stub.source_creation = None
    with self.assertRaises(ValidationError):
        Publication.clean(stub)
def create(cls, variantreview=None, pmid=None):
    """Link *variantreview* to the publication for *pmid*, creating the publication if absent."""
    try:
        publication = Publication.pub_lookup(pmid)
    except Publication.DoesNotExist:
        publication = Publication.create(pmid=pmid)
        publication.save()
    review_link = cls(variantreview=variantreview, publication=publication)
    review_link.save()
    return review_link
def vue_publication_save(request):
    """AJAX endpoint: create or update a Publication from a Vue frontend.

    Expects a JSON payload in POST['data'] with keys id/title/text/books/
    authors/series, and an optional uploaded 'image' file.  Returns a
    JsonResponse containing the publication id (and image URL when an
    image was uploaded).  Unauthenticated requests get an empty response.
    """
    to_template = {}
    if request.user.is_authenticated:
        image = request.FILES.get('image')
        data = request.POST.get('data')
        data = json.loads(data)
        id = data.get('id', None)
        title = data.get('title')
        text = data.get('text')
        books = data.get('books', None)
        authors = data.get('authors', None)
        series = data.get('series', None)
        if id:
            # Save changes to an existing publication (scoped to the
            # requesting user so users can only edit their own records).
            publication = Publication.objects.get(pk=id, user=request.user)
            publication.title = title
            # Preview is the first 33 words of the tag-stripped body text.
            publication.preview_text = Truncator(strip_tags(text)).words(33)
            publication.detail_text = text
            publication.user = request.user
            publication.series_id = series
            publication.save()
            publication.book.set(books)
            publication.author.set(authors)
            # NOTE(review): `series` is assigned as an FK (series_id) above
            # but also passed to an M2M-style .set() here -- confirm the
            # model actually exposes both; this looks inconsistent.
            publication.series.set(series)
        else:
            # Create a new publication.
            publication = Publication(title=title, user=request.user,
                                      preview_text=Truncator(
                                          strip_tags(text)).words(33),
                                      detail_text=text)
            publication.save()
            if books:
                publication.book.set(books)
            if authors:
                publication.author.set(authors)
            if series:
                publication.series.set(series)
        if image:
            # Store the uploaded image and report its URL to the client.
            publication.image.save(image.name, image)
            to_template['image'] = publication.image.url
        to_template['id'] = publication.id
    return JsonResponse(to_template)
def _flag_publication_as_needing_refetch(search_publication_element): """ Takes an XML publication element lifted from a Symplectic Modified file (which does not contain as much info about each publication/biblio-record as a proper publication XML file) extracts the minimum info (key-fields: guid) about publication loads the related publication object from the db flags the publication object as needing a re-fetch from Symplectic NOTE: an attempt is made to load an existing publication but if it does not already exist IT IS NOT CREATED because if the db doesn`t know about it already then the publication has not been linked to a reasearcher so who cares about it """ #++++++PUBLICATION LITE++++++ #check publication Element if search_publication_element is None: return #publication guid if search_publication_element is not None: guid = search_publication_element.get('id', '') if guid == '': return # load Publication from db if it exists, otherwise give up - DONT # create it! publication_object = Publication.getPublication(guid) if publication_object is None: return #symplectic has an updated version of this publication publication_object.needs_refetch = True #save this change to the publication object publication_object.save()
def uploadpublication(request, form_class=PublicationUploadForm, template_name="publications/upload.html"):
    """Upload form for publications.

    GET renders an empty upload form.  POST with action="upload" validates
    the form, rejects unsupported file types, and on success saves the
    publication with initial status/page-count of 0 and redirects to the
    uploader's publication list.  User feedback is delivered through the
    (legacy) ``message_set`` API.
    """
    publication = Publication()
    publication.author = request.user
    publication_form = form_class()
    if request.method == "POST":
        if request.POST.get("action") == "upload":
            publication_form = form_class(request.user, request.POST, request.FILES, instance=publication)
            if publication_form.is_valid():
                # Reject anything that is not PDF/CBR/CBZ or an image
                # (user-facing message kept verbatim, in Portuguese).
                if not is_valid_format(request.FILES["file_name"].name, request.FILES["file_name"].content_type):
                    request.user.message_set.create(
                        message=u"Tipo de arquivo inválido (Somente arquivos PDF/CBR/CBZ ou Imagem: JPG/GIF/PNG) ou nome do arquivo muito longo"
                    )
                else:
                    # commit=False so the timestamp/status fields can be
                    # filled in before the single save() below.
                    publication = publication_form.save(commit=False)
                    publication.date_added = datetime.datetime.now()
                    publication.status = 0
                    publication.nr_pages = 0
                    publication.save()
                    request.user.message_set.create(message=_(u"Publicação feita com sucesso '%s'") % publication.title)
                    return HttpResponseRedirect(reverse("publications", args=(publication.author,)))
    # Refresh derived profile data before re-rendering the form
    # (also runs on invalid POST and bad-format uploads).
    calc_age(request.user.get_profile())
    return render_to_response(
        template_name,
        {"form": publication_form, "is_me": True, "other_user": request.user},
        context_instance=RequestContext(request),
    )
def get_publication(unique_name, pmid):
    """Return the Publication for *pmid*, creating it from PubMed if new.

    Fetches the esummary record for the PMID and creates Publication,
    Dbxref, PublicationDbxref and per-author PublicationAuthor rows.
    Uniqueness is enforced via validate_unique(): when the publication
    already exists, the raised ValidationError is caught and the existing
    row is returned instead (EAFP).

    NOTE(review): the *unique_name* parameter is never read -- the unique
    name is always rebuilt as ``pmid + '-' + title``; confirm callers.
    """
    #check if publication exist, if so return, if not create new
    try:
        result = get_esummary_pmid(pmid)
        # sortpubdate comes back as e.g. "2019/05/01 00:00".
        tmp_date = datetime.strptime(result["result"][pmid]["sortpubdate"], '%Y/%m/%d %H:%M')
        publication = Publication(title=result["result"][pmid]["title"],
                                  volume=result["result"][pmid]["volume"],
                                  issue=result["result"][pmid]["issue"],
                                  publication_year=tmp_date.strftime('%Y-%m-%d'),
                                  pages=result["result"][pmid]["pages"],
                                  unique_name=pmid+'-'+result["result"][pmid]["title"],
                                  publisher=result["result"][pmid]["fulljournalname"])
        # validate_unique() raises ValidationError for duplicates,
        # which drops us into the except branch below.
        publication.validate_unique()
        publication.save()
        # Cross-reference the publication to its PubMed accession.
        db = Db.objects.get(name='Pubmed')
        dbxref = Dbxref(accession=pmid, version='1', db=db)
        dbxref.validate_unique()
        dbxref.save()
        publication_dbxref = PublicationDbxref(publication=publication, dbxref=dbxref)
        publication_dbxref.validate_unique()
        publication_dbxref.save()
        # Record authors in their listed order via the rank field.
        rank = 1
        tmp_list = result["result"][pmid]["authors"]
        for tmp in tmp_list:
            author = get_author(tmp["name"])
            publication_author = PublicationAuthor(rank=rank, publication=publication, author=author)
            publication_author.validate_unique()
            publication_author.save()
            rank += 1
    except ValidationError as e:
        # Already in the database: return the existing record.
        publication = Publication.objects.get(unique_name=pmid+'-'+result["result"][pmid]["title"])
    return publication
def add_publications(self, number):
    """Populate the database with *number* randomly generated publications.

    Each publication gets random lowercase content (40-100 chars), a random
    title-cased title (15-30 chars), a random existing user as author and a
    random type.  Status lines go to self.stdout.  Does nothing when there
    are no users or *number* is not positive.

    NOTE(review): uses ``string.lowercase``, which exists only on Python 2.
    """
    if number > 0:
        # Local RNG instance (shadows any module named `random` in scope).
        random = Random()
        users = User.objects.all()
        # Alphabet for body text; the space makes it look like words.
        charts = string.lowercase + " "
        if len(users) > 0:
            for i in range(number):
                content = "".join([random.choice(charts) for a in range(random.randint(40, 100))])
                publication_title = "".join(
                    [random.choice(string.lowercase) for a in range(random.randint(15, 30))]).title()
                author = random.choice(users)
                publication_type = random.choice(publication_types)
                publication = Publication(author=author, title=publication_title,
                                          content=content, publication_type=publication_type)
                publication.save()
            self.stdout.write("%i publications was added to database" % number)
        else:
            self.stdout.write("No user to add publication")
    else:
        self.stdout.write("Invalid number of publications to add was set. Nothing done")
def add_association(acc, article, is_associated=False):
    """
    Adding association between `Publication` and `Experiment` objects in the
    Django models which is used by curators to approve or reject linkage
    between an article and a study.

    :param acc: ArrayExpress accession. e.g. ``E-MTAB-xxxx``
    :type acc: str
    :param article: Json object as collected from Europe BMC.
    :type article: dict
    :param is_associated: Flag indicating whether the publication is already
        associated with the study in the AE database or not
    :type is_associated: bool
    """
    # Pull the study metadata from ArrayExpress; first hit wins.
    experiment = retrieve_study_by_acc(acc)[0]
    # Match an existing Experiment by accession, title or description.
    exp = Experiment.objects.filter(
        Q(accession=acc) |
        Q(title=experiment.title) |
        Q(description=experiment.description)).first()
    # print exp, exp_created
    if not exp:
        exp = Experiment(accession=acc, title=experiment.title,
                         description=experiment.description)
        exp.save()
    # Match an existing Publication by any identifier or by title.
    # The -1 / 'ANY THING ELSE' sentinels keep absent identifiers from
    # matching NULL columns in the OR query.
    pub = Publication.objects.filter(
        Q(pubmed=article.get('pmid', -1)) |
        Q(pmc_id=article.get('pmcid')) |
        Q(doi=article.get('doi', 'ANY THING ELSE')) |
        Q(title=article['title'])).first()
    if not pub:
        pub = Publication(pubmed=article.get('pmid', None),
                          pmc_id=article.get('pmcid'),
                          doi=article.get('doi', None),
                          title=article['title'],
                          whole_article=json.dumps(article))
        pub.save()
    else:
        # Refresh the stored raw article JSON on every call.
        pub.whole_article = json.dumps(article)
        pub.save()
    # Idempotent link; the flag is only set on first creation.
    ass, ass_created = Association.objects.get_or_create(experiment=exp, publication=pub)
    if ass_created:
        ass.is_associated = is_associated
        ass.save()
def import_bibtex(request):
    """Admin view: import publications from pasted BibTex (Python 2 era).

    POST parses request.POST['bibliography']; entries must carry title,
    author and year.  On any validation error the form is re-rendered with
    messages; otherwise all parsed publications are saved and the user is
    redirected to the changelist.  GET renders the empty form.
    """
    if request.method == 'POST':
        # try to parse BibTex
        bib = parse(request.POST['bibliography'])
        # container for error messages
        errors = {}
        # publication types
        types = Type.objects.all()
        # check for errors
        if not bib:
            if not request.POST['bibliography']:
                errors['bibliography'] = 'This field is required.'
        if not errors:
            publications = []
            # try adding publications
            for entry in bib:
                if entry.has_key('title') and \
                   entry.has_key('author') and \
                   entry.has_key('year'):
                    # parse authors: "Last, First and ..." -> "First Last, ..."
                    authors = split(entry['author'], ' and ')
                    for i in range(len(authors)):
                        author = split(authors[i], ',')
                        author = [author[-1]] + author[:-1]
                        authors[i] = join(author, ' ')
                    authors = join(authors, ', ')
                    # add missing keys so the model constructor below is total
                    keys = [
                        'journal', 'booktitle', 'publisher', 'url', 'doi',
                        'keywords', 'note', 'abstract', 'month'
                    ]
                    for key in keys:
                        if not entry.has_key(key):
                            entry[key] = ''
                    # map integer fields to integers
                    entry['month'] = MONTHS.get(entry['month'].lower(), 0)
                    entry['volume'] = entry.get('volume', None)
                    entry['number'] = entry.get('number', None)
                    # determine type from the BibTex entry type
                    type_id = None
                    for t in types:
                        if entry['type'] in t.bibtex_type_list:
                            type_id = t.id
                            break
                    if type_id is None:
                        errors['bibliography'] = 'Type "' + entry[
                            'type'] + '" unknown.'
                        break
                    # add publication (not saved yet -- all-or-nothing below)
                    publications.append(
                        Publication(type_id=type_id,
                                    citekey=entry['key'],
                                    title=entry['title'],
                                    authors=authors,
                                    year=entry['year'],
                                    month=entry['month'],
                                    journal=entry['journal'],
                                    book_title=entry['booktitle'],
                                    publisher=entry['publisher'],
                                    volume=entry['volume'],
                                    number=entry['number'],
                                    note=entry['note'],
                                    url=entry['url'],
                                    doi=entry['doi'],
                                    abstract=entry['abstract'],
                                    keywords=entry['keywords']))
                else:
                    errors[
                        'bibliography'] = 'Make sure that the keys title, author and year are present.'
                    break
        # short-circuit: `publications` is only bound when errors was empty
        if not errors and not publications:
            errors['bibliography'] = 'No valid BibTex entries found.'
        if errors:
            # some error occurred
            return render_to_response(
                'admin/publications/import_bibtex.html', {
                    'errors': errors,
                    'title': 'Import BibTex',
                    'types': Type.objects.all(),
                    'request': request
                }, RequestContext(request))
        else:
            try:
                # save publications
                for publication in publications:
                    publication.save()
            except:
                # NOTE(review): bare except hides the real failure cause
                msg = 'Some error occured during saving of publications.'
            else:
                if len(publications) > 1:
                    msg = 'Successfully added ' + str(
                        len(publications)) + ' publications.'
                else:
                    msg = 'Successfully added ' + str(
                        len(publications)) + ' publication.'
            # show message
            messages.info(request, msg)
            # redirect to publication listing
            return HttpResponseRedirect('../')
    else:
        return render_to_response(
            'admin/publications/import_bibtex.html', {
                'title': 'Import BibTex',
                'types': Type.objects.all(),
                'request': request
            }, RequestContext(request))
def person(request, name):
    """Render all publications by one author, grouped by publication type.

    *name* is a '+'-separated name from the URL.  A liberal SQL LIKE query
    (umlauts/digraphs wildcarded) pre-filters by surname, then results are
    narrowed with Publication.simplify_name.  ?ascii / ?bibtex / ?rss select
    alternative output formats.
    """
    # Pretty display name: capitalise, keep nobiliary particles lowercase.
    author = capwords(name.replace('+', ' '))
    author = author.replace(' Von ', ' von ').replace(' Van ', ' van ')
    author = author.replace(' Der ', ' der ')
    # take care of dashes: capitalise the letter after each '-'
    off = author.find('-')
    while off > 0:
        off += 1
        if off <= len(author):
            author = author[:off] + author[off].upper() + author[off + 1:]
        off = author.find('-', off)
    # split into forename, middlenames and surname
    names = name.replace(' ', '+').split('+')
    # find publications of this author
    publications = []
    types = Type.objects.all()
    types_dict = {}
    for t in types:
        types_dict[t] = []
    # construct a liberal query: wildcard umlauts and their ASCII digraphs
    surname = names[-1]
    surname = surname.replace(u'ä', u'%%')
    surname = surname.replace(u'ae', u'%%')
    surname = surname.replace(u'ö', u'%%')
    surname = surname.replace(u'oe', u'%%')
    surname = surname.replace(u'ü', u'%%')
    surname = surname.replace(u'ue', u'%%')
    surname = surname.replace(u'ß', u'%%')
    surname = surname.replace(u'ss', u'%%')
    query_str = u'SELECT * FROM {table} WHERE lower(authors) LIKE lower(\'%%{surname}%%\') ORDER BY year DESC, month DESC, id DESC'
    query = Publication.objects.raw(
        query_str.format(table=Publication._meta.db_table, surname=surname))
    # further filter results against the simplified author list
    if len(names) > 1:
        # forename initial + surname, e.g. "J. Smith"
        name_simple = Publication.simplify_name(names[0][0] + '. ' + names[-1])
        for publication in query:
            if name_simple in publication.authors_list_simple:
                publications.append(publication)
                types_dict[publication.type].append(publication)
    elif len(names) > 0:
        for publication in query:
            if Publication.simplify_name(
                    names[-1].lower()) in publication.authors_list_simple:
                publications.append(publication)
                types_dict[publication.type].append(publication)
    # remove empty types
    for t in types:
        if not types_dict[t]:
            types = types.exclude(pk=t.pk)
    # attach publications to types
    for t in types:
        t.publications = types_dict[t]
    if 'ascii' in request.GET:
        return render_to_response('publications/publications.txt',
                                  {'publications': publications},
                                  context_instance=RequestContext(request),
                                  content_type='text/plain; charset=UTF-8')
    elif 'bibtex' in request.GET:
        return render_to_response('publications/publications.bib',
                                  {'publications': publications},
                                  context_instance=RequestContext(request),
                                  content_type='text/x-bibtex; charset=UTF-8')
    elif 'rss' in request.GET:
        return render_to_response(
            'publications/publications.rss', {
                'url': 'http://' + request.META['HTTP_HOST'] + request.path,
                'author': author,
                'publications': publications
            },
            context_instance=RequestContext(request),
            content_type='application/rss+xml; charset=UTF-8')
    else:
        # HTML view needs per-publication custom links/files
        for publication in publications:
            publication.links = publication.customlink_set.all()
            publication.files = publication.customfile_set.all()
        return render_to_response('publications/person.html', {
            'publications': publications,
            'types': types,
            'author': author
        }, context_instance=RequestContext(request))
def import_bibtex(request):
    """Admin view: import publications from pasted BibTex (six/py3 variant).

    POST parses request.POST['bibliography']; entries must carry title,
    author and year.  Volume/number strings are coerced to ints and URLs
    stripped of whitespace.  On error the form is re-rendered; on success
    the user is redirected (straight to the change page for a single entry).
    """
    if request.method == 'POST':
        # try to parse BibTex
        bib = parse(request.POST['bibliography'])
        # container for error messages
        errors = {}
        # publication types
        types = Type.objects.all()
        # check for errors
        if not bib:
            if not request.POST['bibliography']:
                errors['bibliography'] = 'This field is required.'
        if not errors:
            publications = []
            # try adding publications
            for entry in bib:
                if 'title' in entry and \
                   'author' in entry and \
                   'year' in entry:
                    # parse authors: "Last, First and ..." -> "First Last, ..."
                    authors = entry['author'].split(' and ')
                    for i in range(len(authors)):
                        author = authors[i].split(',')
                        author = [author[-1]] + author[:-1]
                        authors[i] = ' '.join(author)
                    authors = ', '.join(authors)
                    # add missing keys so the model constructor below is total
                    keys = [
                        'journal', 'booktitle', 'publisher', 'institution',
                        'url', 'doi', 'isbn', 'keywords', 'pages', 'note',
                        'abstract', 'month'
                    ]
                    for key in keys:
                        if not key in entry:
                            entry[key] = ''
                    # map integer fields to integers
                    entry['month'] = MONTHS.get(entry['month'].lower(), 0)
                    entry['volume'] = entry.get('volume', None)
                    entry['number'] = entry.get('number', None)
                    # strip non-digits before the int() conversion
                    if isinstance(entry['volume'], six.text_type):
                        entry['volume'] = int(
                            re.sub('[^0-9]', '', entry['volume']))
                    if isinstance(entry['number'], six.text_type):
                        entry['number'] = int(
                            re.sub('[^0-9]', '', entry['number']))
                    # remove whitespace characters (likely due to line breaks)
                    entry['url'] = re.sub(r'\s', '', entry['url'])
                    # determine type from the BibTex entry type
                    type_id = None
                    for t in types:
                        if entry['type'] in t.bibtex_type_list:
                            type_id = t.id
                            break
                    if type_id is None:
                        errors['bibliography'] = 'Type "' + entry[
                            'type'] + '" unknown.'
                        break
                    # add publication (not saved yet -- all-or-nothing below)
                    publications.append(
                        Publication(type_id=type_id,
                                    citekey=entry['key'],
                                    title=entry['title'],
                                    authors=authors,
                                    year=entry['year'],
                                    month=entry['month'],
                                    journal=entry['journal'],
                                    book_title=entry['booktitle'],
                                    publisher=entry['publisher'],
                                    institution=entry['institution'],
                                    volume=entry['volume'],
                                    number=entry['number'],
                                    pages=entry['pages'],
                                    note=entry['note'],
                                    url=entry['url'],
                                    doi=entry['doi'],
                                    isbn=entry['isbn'],
                                    external=False,
                                    abstract=entry['abstract'],
                                    keywords=entry['keywords']))
                else:
                    errors[
                        'bibliography'] = 'Make sure that the keys title, author and year are present.'
                    break
        # short-circuit: `publications` is only bound when errors was empty
        if not errors and not publications:
            errors['bibliography'] = 'No valid BibTex entries found.'
        if errors:
            # some error occurred
            return render(
                request, 'admin/publications/import_bibtex.html', {
                    'errors': errors,
                    'title': 'Import BibTex',
                    'types': Type.objects.all(),
                    'request': request
                })
        else:
            try:
                # save publications
                for publication in publications:
                    publication.save()
            except:
                # NOTE(review): bare except hides the real failure cause
                msg = 'Some error occured during saving of publications.'
            else:
                if len(publications) > 1:
                    msg = 'Successfully added ' + str(
                        len(publications)) + ' publications.'
                else:
                    msg = 'Successfully added ' + str(
                        len(publications)) + ' publication.'
            # show message
            messages.info(request, msg)
            # redirect to publication listing
            if len(publications) == 1:
                return HttpResponseRedirect('../%s/change/' % publications[0].id)
            else:
                return HttpResponseRedirect('../')
    else:
        return render(
            request, 'admin/publications/import_bibtex.html', {
                'title': 'Import BibTex',
                'types': Type.objects.all(),
                'request': request
            })
def person(request, name):
    """Render all publications by one author, grouped by publication type.

    Same pipeline as the sibling view: pretty-print the name, pre-filter
    with a liberal SQL LIKE on the surname (umlauts/digraphs wildcarded),
    then narrow with Publication.simplify_name.  ?ascii / ?bibtex / ?rss
    select alternative output formats.
    """
    # Pretty display name: capitalise, keep nobiliary particles lowercase.
    author = capwords(name.replace('+', ' '))
    author = author.replace(' Von ', ' von ').replace(' Van ', ' van ')
    author = author.replace(' Der ', ' der ')
    # take care of dashes: capitalise the letter after each '-'
    off = author.find('-')
    while off > 0:
        off += 1
        if off <= len(author):
            author = author[:off] + author[off].upper() + author[off + 1:]
        off = author.find('-', off)
    # split into forename, middlenames and surname
    names = name.replace(' ', '+').split('+')
    # find publications of this author
    publications = []
    types = Type.objects.all()
    types_dict = {}
    for t in types:
        types_dict[t] = []
    # construct a liberal query: wildcard umlauts and their ASCII digraphs
    surname = names[-1]
    surname = surname.replace(u'ä', u'%%')
    surname = surname.replace(u'ae', u'%%')
    surname = surname.replace(u'ö', u'%%')
    surname = surname.replace(u'oe', u'%%')
    surname = surname.replace(u'ü', u'%%')
    surname = surname.replace(u'ue', u'%%')
    surname = surname.replace(u'ß', u'%%')
    surname = surname.replace(u'ss', u'%%')
    query_str = u'SELECT * FROM {table} WHERE lower(authors) LIKE lower(\'%%{surname}%%\') ORDER BY year DESC, month DESC, id DESC'
    query = Publication.objects.raw(
        query_str.format(table=Publication._meta.db_table, surname=surname))
    # further filter results against the simplified author list
    if len(names) > 1:
        # forename initial + surname, e.g. "J. Smith"
        name_simple = Publication.simplify_name(names[0][0] + '. ' + names[-1])
        for publication in query:
            if name_simple in publication.authors_list_simple:
                publications.append(publication)
                types_dict[publication.type].append(publication)
    elif len(names) > 0:
        for publication in query:
            if Publication.simplify_name(names[-1].lower()) in publication.authors_list_simple:
                publications.append(publication)
                types_dict[publication.type].append(publication)
    # remove empty types
    for t in types:
        if not types_dict[t]:
            types = types.exclude(pk=t.pk)
    # attach publications to types
    for t in types:
        t.publications = types_dict[t]
    if 'ascii' in request.GET:
        return render_to_response('publications/publications.txt', {
            'publications': publications
        }, context_instance=RequestContext(request),
           content_type='text/plain; charset=UTF-8')
    elif 'bibtex' in request.GET:
        return render_to_response('publications/publications.bib', {
            'publications': publications
        }, context_instance=RequestContext(request),
           content_type='text/x-bibtex; charset=UTF-8')
    elif 'rss' in request.GET:
        return render_to_response('publications/publications.rss', {
            'url': 'http://' + request.META['HTTP_HOST'] + request.path,
            'author': author,
            'publications': publications
        }, context_instance=RequestContext(request),
           content_type='application/rss+xml; charset=UTF-8')
    else:
        # HTML view needs per-publication custom links/files
        for publication in publications:
            publication.links = publication.customlink_set.all()
            publication.files = publication.customfile_set.all()
        return render_to_response('publications/person.html', {
            'publications': publications,
            'types': types,
            'author': author
        }, context_instance=RequestContext(request))
def import_bibtex(request):
    """Admin view: import publications from an uploaded BibTex file or pasted
    text, optionally linking each saved publication to selected Creators,
    Productions and WorkRecords (Python 2 era -- ``has_key``/``split``/``join``).

    POST: the uploaded file takes precedence over the pasted bibliography.
    On any validation error the form is re-rendered; otherwise every parsed
    publication is saved and linked, and the user is redirected to the
    changelist.  GET renders the form with the link-target choice lists.
    """
    if request.method == 'POST':
        # try to parse BibTex
        the_bibtex_file_content = ''
        # ids of objects to link each imported publication to
        creators = request.POST.getlist('creators')
        productions = request.POST.getlist('productions')
        work_records = request.POST.getlist('work_records')
        the_bibtex_file = request.FILES.get('bibtex_file', '')
        if the_bibtex_file:
            # Large uploads arrive in chunks; join them before parsing.
            if the_bibtex_file.multiple_chunks():
                the_bibtex_file_content = ''.join(
                    chunk for chunk in the_bibtex_file.chunks())
            else:
                the_bibtex_file_content = the_bibtex_file.read()
        bib = parse(the_bibtex_file_content)
        # fall back to the pasted bibliography text
        if not bib:
            bib = parse(request.POST['bibliography'])
        # container for error messages
        errors = {}
        # publication types
        types = Type.objects.all()
        # check for errors
        if not bib:
            if not request.POST['bibliography']:
                errors[
                    'bibliography'] = 'Please populate Bibliography or click browse to upload a Bibtex format file.'
        if not errors:
            publications = []
            # try adding publications
            for entry in bib:
                if (entry.has_key('title') and entry.has_key('author')
                        and entry.has_key('year')):
                    # parse authors: "Last, First and ..." -> "First Last, ..."
                    authors = split(entry['author'], ' and ')
                    for i in range(len(authors)):
                        author = split(authors[i], ',')
                        author = [author[-1]] + author[:-1]
                        authors[i] = join(author, ' ')
                    authors = join(authors, ', ')
                    # add missing keys so the model constructor below is total
                    keys = [
                        'annote', 'booktitle', 'chapter', 'edition', 'section',
                        'editor', 'howpublished', 'institution', 'journal',
                        'key', 'month', 'note', 'number', 'organization',
                        'pages', 'publisher', 'address', 'school', 'series',
                        'volume', 'issue', 'url', 'isbn', 'issn', 'lccn',
                        'abstract', 'keywords', 'price', 'copyright',
                        'language', 'contents', 'doi'
                    ]
                    for key in keys:
                        if not entry.has_key(key):
                            # price is numeric; everything else defaults to ''
                            if key == 'price':
                                entry[key] = 0
                            else:
                                entry[key] = ''
                    # map integer fields to integers
                    entry['month'] = MONTHS.get(entry['month'].lower(), 0)
                    entry['volume'] = entry.get('volume', None)
                    entry['number'] = entry.get('number', None)
                    # determine type from the BibTex entry type
                    type_id = None
                    for t in types:
                        if entry['type'] in t.bibtex_type_list:
                            type_id = t.id
                            break
                    if type_id is None:
                        errors['bibliography'] = 'Type "' + entry[
                            'type'] + '" unknown.'
                        break
                    # add publication (not saved yet -- all-or-nothing below);
                    # note the field renames: school->university, issue->number,
                    # lccn->archive_location, copyright->rights,
                    # contents->table_of_content
                    publications.append(
                        Publication(type_id=type_id,
                                    annote=entry['annote'],
                                    authors=authors,
                                    book_title=entry['booktitle'],
                                    chapter=entry['chapter'],
                                    edition=entry['edition'],
                                    section=entry['section'],
                                    editor=entry['editor'],
                                    how_published=entry['howpublished'],
                                    institution=entry['institution'],
                                    journal=entry['journal'],
                                    citekey=entry['key'],
                                    year=entry['year'],
                                    month=entry['month'],
                                    note=entry['note'],
                                    organization=entry['organization'],
                                    pages=entry['pages'],
                                    publisher=entry['publisher'],
                                    address=entry['address'],
                                    university=entry['school'],
                                    series=entry['series'],
                                    title=entry['title'],
                                    volume=entry['volume'],
                                    number=entry['issue'],
                                    url=entry['url'],
                                    isbn=entry['isbn'],
                                    issn=entry['issn'],
                                    archive_location=entry['lccn'],
                                    abstract=entry['abstract'],
                                    keywords=entry['keywords'],
                                    price=entry['price'],
                                    rights=entry['copyright'],
                                    language=entry['language'],
                                    table_of_content=entry['contents'],
                                    doi=entry['doi']))
                else:
                    errors[
                        'bibliography'] = 'Make sure that the keys title, author and year are present.'
                    break
        # short-circuit: `publications` is only bound when errors was empty
        if not errors and not publications:
            errors['bibliography'] = 'No valid BibTex entries found.'
        if errors:
            # some error occurred
            return render_to_response(
                'admin/publications/import_bibtex.html', {
                    'errors': errors,
                    'title': 'Import BibTex',
                    'types': Type.objects.all(),
                    'request': request
                }, RequestContext(request))
        else:
            try:
                # save publications and link them to the selected objects
                for publication in publications:
                    publication.save()
                    # NOTE(review): each best-effort link step swallows all
                    # exceptions, so a bad id silently skips the rest of
                    # that list.
                    try:
                        for creator_id in creators:
                            creator = Creator.objects.get(pk=creator_id)
                            creator.primary_publications.add(publication)
                    except:
                        pass
                    try:
                        for production_id in productions:
                            production = Production.objects.get(
                                pk=production_id)
                            production.primary_publications.add(publication)
                    except:
                        pass
                    try:
                        for work_record_id in work_records:
                            work_record = WorkRecord.objects.get(
                                pk=work_record_id)
                            work_record.primary_publications.add(publication)
                    except:
                        pass
            except:
                msg = 'Some error occured during saving of publications.'
            else:
                if len(publications) > 1:
                    msg = 'Successfully added ' + str(
                        len(publications)) + ' publications.'
                else:
                    msg = 'Successfully added ' + str(
                        len(publications)) + ' publication.'
            # show message
            messages.info(request, msg)
            # redirect to publication listing
            return HttpResponseRedirect('../')
    else:
        # GET: build the choice lists for the link-target selectors
        Creators_qs = Creator.objects.all()
        Productions_qs = Production.objects.all().order_by('title')
        WorkRecord_qs = WorkRecord.objects.all().order_by('title')
        CREATOR_CHOICES = []  #[("", "-- Select a Creator --")]
        CREATOR_CHOICES += [(e.id, e.creator_name) for e in Creators_qs]
        PRODUCTION_CHOICES = []  #[("", "-- Select a Production --")]
        PRODUCTION_CHOICES += [(e.id, e.title) for e in Productions_qs]
        WORKRECORD_CHOICES = []  #[("", "-- Select a Written Work --")]
        WORKRECORD_CHOICES += [(e.id, e.title) for e in WorkRecord_qs]
        return render_to_response(
            'admin/publications/import_bibtex.html', {
                'title': 'Import BibTex',
                'types': Type.objects.all(),
                'creators': CREATOR_CHOICES,
                'productions': PRODUCTION_CHOICES,
                'work_records': WORKRECORD_CHOICES,
                'request': request
            }, RequestContext(request))
def person(request, name):
    """Render all publications by one author, grouped by publication type.

    Variant using defaultdict grouping by type_id and batched custom
    link/file queries.  ?ascii / ?bibtex / ?mods / ?rss select alternative
    output formats.
    """
    # Pretty display name: capitalise, keep nobiliary particles lowercase.
    author = capwords(name.replace('+', ' '))
    author = author.replace(' Von ', ' von ').replace(' Van ', ' van ')
    author = author.replace(' Der ', ' der ')
    # take care of dashes: capitalise the letter after each '-'
    off = author.find('-')
    while off > 0:
        off += 1
        if off <= len(author):
            author = author[:off] + author[off].upper() + author[off + 1:]
        off = author.find('-', off)
    # split into forename, middlenames and surname
    names = name.replace(' ', '+').split('+')
    # construct a liberal query: wildcard umlauts and their ASCII digraphs
    surname = names[-1]
    surname = surname.replace(u'ä', u'%%')
    surname = surname.replace(u'ae', u'%%')
    surname = surname.replace(u'ö', u'%%')
    surname = surname.replace(u'oe', u'%%')
    surname = surname.replace(u'ü', u'%%')
    surname = surname.replace(u'ue', u'%%')
    surname = surname.replace(u'ß', u'%%')
    surname = surname.replace(u'ss', u'%%')
    query_str = u'SELECT * FROM {table} ' \
        'WHERE lower({table}.authors) LIKE lower(\'%%{surname}%%\') ' \
        'ORDER BY {table}.year DESC, {table}.month DESC, {table}.id DESC'
    query = Publication.objects.raw(
        query_str.format(table=Publication._meta.db_table, surname=surname))
    # find publications of this author
    publications = []
    publications_by_type = defaultdict(lambda: [])
    # further filter results against the simplified author list
    if len(names) > 1:
        # forename initial + surname, e.g. "J. Smith"
        name_simple = Publication.simplify_name(names[0][0] + '. ' + names[-1])
        for publication in query:
            if name_simple in publication.authors_list_simple:
                publications.append(publication)
                publications_by_type[publication.type_id].append(publication)
    elif len(names) > 0:
        for publication in query:
            if Publication.simplify_name(names[-1].lower()) in publication.authors_list_simple:
                publications.append(publication)
                publications_by_type[publication.type_id].append(publication)
    # attach publications to types (only the types that actually occur)
    types = Type.objects.filter(id__in=publications_by_type.keys())
    for t in types:
        t.publications = publications_by_type[t.id]
    if 'ascii' in request.GET:
        return render_to_response('publications/publications.txt', {
            'publications': publications
        }, context_instance=RequestContext(request),
           content_type='text/plain; charset=UTF-8')
    elif 'bibtex' in request.GET:
        return render_to_response('publications/publications.bib', {
            'publications': publications
        }, context_instance=RequestContext(request),
           content_type='text/x-bibtex; charset=UTF-8')
    elif 'mods' in request.GET:
        return render_to_response('publications/publications.mods', {
            'publications': publications
        }, context_instance=RequestContext(request),
           content_type='application/xml; charset=UTF-8')
    elif 'rss' in request.GET:
        return render_to_response('publications/publications.rss', {
            'url': 'http://' + request.META['HTTP_HOST'] + request.path,
            'author': author,
            'publications': publications
        }, context_instance=RequestContext(request),
           content_type='application/rss+xml; charset=UTF-8')
    else:
        # Fetch all custom links/files in two queries and fan them out
        # to their publications (avoids one query pair per publication).
        customlinks = CustomLink.objects.filter(publication__in=publications)
        customfiles = CustomFile.objects.filter(publication__in=publications)
        publications_ = {}
        for publication in publications:
            publication.links = []
            publication.files = []
            publications_[publication.id] = publication
        for link in customlinks:
            publications_[link.publication_id].links.append(link)
        for file in customfiles:
            publications_[file.publication_id].files.append(file)
        return render_to_response('publications/person.html', {
            'publications': publications,
            'types': types,
            'author': author
        }, context_instance=RequestContext(request))
volume = volume[4:] if 'Vol. ' in volume: volume = volume[5:] if '(' in volume: number = int(volume.split('(')[1][:-1]) volume = int(volume.split('(')[0]) if not volume: volume = None year = pub['year'] if not year: year = None # tukšo string aizstājam ar none publication = Publication( type_id=2, title=bibtex, # excelī tajā vietā bija raksta nosaukums authors=pub.get('authors'), year=year, journal=pub['journal'], publisher=pub['publisher'], volume=volume, external=False) publication.save() for author in publication.authors_list: person = findPerson(author.strip()) if person: person.publications.add(publication) # --- beigas tam, ja jāveido publikācija pašam jo nav bibtex if pub['indexed']: publication.indexed = pub['indexed'] publication.save()
def author(request, name):
    """Render all publications by one author, grouped by publication type.

    Modern variant: uses render(), defaultdict grouping by type_id and
    populate() to bulk-attach custom links/files.  ?plain / ?bibtex / ?mods
    / ?ris / ?rss select alternative output formats.
    """
    # Pretty display name: capitalise, keep nobiliary particles lowercase.
    fullname = capwords(name.replace('+', ' '))
    fullname = fullname.replace(' Von ', ' von ').replace(' Van ', ' van ')
    fullname = fullname.replace(' Der ', ' der ')
    # take care of dashes: capitalise the letter after each '-'
    off = fullname.find('-')
    while off > 0:
        off += 1
        if off <= len(fullname):
            fullname = fullname[:off] + fullname[off].upper() + fullname[off + 1:]
        off = fullname.find('-', off)
    # split into forename, middlenames and surname
    names = name.replace(' ', '+').split('+')
    # construct a liberal query: wildcard umlauts and their ASCII digraphs
    surname = names[-1]
    surname = surname.replace(u'ä', u'%%')
    surname = surname.replace(u'ae', u'%%')
    surname = surname.replace(u'ö', u'%%')
    surname = surname.replace(u'oe', u'%%')
    surname = surname.replace(u'ü', u'%%')
    surname = surname.replace(u'ue', u'%%')
    surname = surname.replace(u'ß', u'%%')
    surname = surname.replace(u'ss', u'%%')
    query_str = u'SELECT * FROM {table} ' \
        'WHERE lower({table}.authors) LIKE lower(\'%%{surname}%%\') ' \
        'ORDER BY {table}.year DESC, {table}.month DESC, {table}.id DESC'
    query = Publication.objects.raw(
        query_str.format(table=Publication._meta.db_table, surname=surname))
    # find publications of this author
    publications = []
    publications_by_type = defaultdict(lambda: [])
    # further filter results against the simplified author list
    if len(names) > 1:
        # forename initial + surname, e.g. "J. Smith"
        name_simple = Publication.simplify_name(names[0][0] + '. ' + names[-1])
        for publication in query:
            if name_simple in publication.authors_list_simple:
                publications.append(publication)
                publications_by_type[publication.type_id].append(publication)
    elif len(names) > 0:
        for publication in query:
            if Publication.simplify_name(
                    names[-1].lower()) in publication.authors_list_simple:
                publications.append(publication)
                publications_by_type[publication.type_id].append(publication)
    # attach publications to types (only the types that actually occur)
    types = Type.objects.filter(id__in=publications_by_type.keys())
    for t in types:
        t.publications = publications_by_type[t.id]
    if 'plain' in request.GET:
        return render(request, 'publications/publications.txt',
                      {'publications': publications},
                      content_type='text/plain; charset=UTF-8')
    if 'bibtex' in request.GET:
        return render(request, 'publications/publications.bib',
                      {'publications': publications},
                      content_type='text/x-bibtex; charset=UTF-8')
    if 'mods' in request.GET:
        return render(request, 'publications/publications.mods',
                      {'publications': publications},
                      content_type='application/xml; charset=UTF-8')
    if 'ris' in request.GET:
        return render(
            request, 'publications/publications.ris',
            {'publications': publications},
            content_type='application/x-research-info-systems; charset=UTF-8')
    if 'rss' in request.GET:
        return render(request, 'publications/publications.rss', {
            'url': 'http://' + request.get_host() + request.path,
            'author': fullname,
            'publications': publications
        }, content_type='application/rss+xml; charset=UTF-8')
    # load custom links and files
    populate(publications)
    return render(request, 'publications/author.html', {
        'publications': publications,
        'types': types,
        'author': fullname
    })
def test_str(self):
    """__str__ must return the publication's title unchanged."""
    fake_pub = mock.Mock(spec=Publication)
    fake_pub.title = 'Title'
    result = Publication.__str__(fake_pub)
    self.assertEqual(result, 'Title')
def load_pmids(pmids, force_update=False):
    """
    Loads publications into the database from a list of PubMed IDs passed
    as integers into the database when they do not already exist.

    :param pmids: iterable of PubMed IDs (anything ``int()`` accepts).
    :param force_update: when True, refresh records that already exist in
        the database instead of skipping them.
    :return: None
    """
    # normalize and de-duplicate the incoming IDs
    pmids = list(set([int(x) for x in pmids]))
    logger.debug('Starting to load PMID(S) %s', pmids)

    if not force_update:
        # drop IDs that are already present in the database
        logger.info('Checking %s PMIDS', len(pmids))
        existing_pubs = set(Publication.objects.filter(
            pmid__in=pmids).values_list('pmid', flat=True))
        pmids = set(pmids)
        pmids.difference_update(existing_pubs)
        logger.info('About to fetch %s new PMIDs.', len(pmids),
                    extra={'data': {'pmids': pmids}})

    if not pmids:
        logger.debug('pmids are none')
        return None

    pmids_mia = [str(x) for x in pmids]
    # Efetch maximum: batch 5000 IDs per request
    for i in xrange(len(pmids_mia) // 5000 + 1):
        query_list = pmids_mia[i * 5000:(i + 1) * 5000]
        query_str = ','.join(query_list)
        # FIX: copy before mutating -- the old code wrote 'id' straight into
        # settings.ETOOLS_CONFIG['query_params'], leaking state between calls.
        qdict = dict(settings.ETOOLS_CONFIG['query_params'])
        qdict['id'] = query_str
        # Have to use post if data being sent is > 200
        r = requests.post(settings.ETOOLS_CONFIG['base_url'], data=qdict)
        error_cnt = 0
        while r.status_code != 200 and error_cnt < NUM_PUBMED_RETRIES:
            error_cnt += 1
            time.sleep(0.5)
            r = requests.post(settings.ETOOLS_CONFIG['base_url'], data=qdict)
        if r.status_code != 200:
            logger.warning('Requests to the PubMed server with data %s failed '
                           'after %s attempts.', qdict, NUM_PUBMED_RETRIES + 1)
            # FIX: skip this batch -- the old code fell through and tried to
            # parse the body of the failed response.
            continue

        pub_page = r.text
        if not pub_page:
            logger.warning('There was no page returned from pubmed server!!')
            continue

        logger.debug('Request to pubmed server returned pub_page')
        xmltree = ET.fromstring(pub_page.encode('utf-8'))
        pubs = xmltree.findall('.//DocumentSummary')
        # pub_dicts is a list of per-publication dictionaries (or None when
        # parse_pub could not extract a publication)
        pub_dicts = map(parse_pub, pubs)
        for index, pub in enumerate(pub_dicts):
            logger.debug('Making new pub %s', pub)
            if pub is None:
                bad_pmid = pubs[index].get('uid')
                logger.warning('PMID %s has no publication in pub_page %s',
                               bad_pmid, pub_page)
                continue
            if force_update:
                # reuse the existing row when present, otherwise start fresh
                try:
                    new_pub = Publication.objects.get(pmid=pub['pmid'])
                except Publication.DoesNotExist:
                    new_pub = Publication()
                new_pub.pmid = pub['pmid']
                new_pub.title = pub['title']
                new_pub.authors = pub['authors']
                new_pub.date = pub['date']
                new_pub.journal = pub['journal']
                new_pub.volume = pub['volume']
                new_pub.pages = pub['pages']
                new_pub.issue = pub['issue']
            else:
                new_pub = Publication(**pub)
            # these fields are optional in PubMed records; just note absences
            if not new_pub.issue:
                logger.info('no issue for %s', new_pub.pmid)
            if not new_pub.volume:
                logger.info('no volume for %s', new_pub.pmid)
            if not new_pub.pages:
                logger.info('no pages for %s', new_pub.pmid)
            new_pub.save()
            logger.debug('Finished saving pub %s', new_pub)
def _create_authored(publication_element, researcher_object):
    """
    Takes an XML publication element lifted from a Symplectic User file
    (which does not contain as much info about each publication/biblio-record
    as a proper publication XML file),
    extracts the minimum info (key-field: guid) about the publication and
    links the publication to the researcher,
    extracts the minimum info (key-field: data-source) about the indicated
    favourite biblio-record for that publication and links the biblio-record
    to the researcher, and
    extracts the full preferences (visible, favourite, sort-order) that the
    researcher has for that publication.

    NOTE: an attempt is made to load an existing publication/biblio-record
    based on the key-fields extracted; if that fails then a new one is
    created with only the key-fields populated and is then saved.

    :param publication_element: XML element for one publication, or None.
    :param researcher_object: researcher model instance to link against.
    :return: None
    """
    # ++++++PUBLICATION LITE++++++
    if publication_element is None:
        return

    # publication guid is the lookup key; nothing to do without it
    guid = publication_element.get('id', '')
    if guid == '':
        return

    # load Publication from db or create (flagged as needing refetch from
    # symplectic) if it doesnt exist
    publication_object = Publication.getOrCreatePublication(guid)

    # ++++++BIBLIOGRAPHICRECORD LITE++++++
    # only ONE biblio element per publication will be returned when querying
    # by User_id; this is in contrast to the multiple biblio elements per
    # publication returned when querying by a Publication_guid
    biblio_element = publication_element.find(
        SYMPLECTIC_NAMESPACE + 'bibliographic-record'
    )
    # FIX: the old code assigned biblio_object only inside this branch but
    # used it unconditionally below, raising NameError when the element was
    # missing; bail out instead, since Authored requires a biblio record.
    if biblio_element is None:
        return
    data_source = biblio_element.get('data-source', '')

    # load BibliographicRecord from db or create if it doesnt exist
    # (NB links biblio & publication)
    biblio_object = BibliographicRecord.getOrCreateBibliographicRecord(
        publication_object, data_source
    )

    # ++++++AUTHORED++++++
    # authored preferences -> publication sub-elements (used to read XML)
    preferences_element = publication_element.find(
        SYMPLECTIC_NAMESPACE + 'preferences-for-this-publication'
    )

    # load Authored from db or create if it doesnt exist (NB links authored
    # & publication & researcher & bibliographic-record)
    authored_object = Authored.getOrCreateAuthored(
        publication_object, researcher_object, biblio_object
    )

    # preferences
    if preferences_element is not None:
        # Show this publication
        authored_object.visible = (
            preferences_element.get('visible', 'false') == 'true')
        # Favourite publication
        authored_object.is_a_favourite = (
            preferences_element.get('is-a-favourite', 'false') == 'true')
        # Display order
        authored_object.reverse_sort_cue = preferences_element.get(
            'reverse-sort-cue', ''
        )
        authored_object.save()